1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
# step methods must take (self) and return a boolean (options is a member of the class)

# module-level helper used by the standby_* steps: just sleep
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    # NOTE(review): a dry_run guard appears to be elided here -- confirm against VCS
    time.sleep(60*minutes)

# factory: derive the standby duration from the step name (e.g. standby_5 -> 5 minutes)
def standby_generic (func):
    # NOTE(review): the inner wrapper definition ('def actual(self):') appears elided;
    # 'self' below belongs to that wrapper, not to this factory
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)

# factory: promote a TestNode method into a TestPlc step that loops over
# all sites and nodes declared in the plc spec
def node_mapper (method):
    # NOTE(review): the wrapper definition and the 'overall' initialization appear elided
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# factory: promote a TestSlice method into a TestPlc step that loops over
# all slices declared in the plc spec
def slice_mapper (method):
    # NOTE(review): the wrapper definition and the 'overall' initialization appear elided
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# factory: same as slice_mapper, but for the SFA slice specs / TestSliceSfa methods
def slice_sfa_mapper (method):
    # NOTE(review): the wrapper definition and the 'overall' initialization appear elided
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
96 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
97 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
98 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
99 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
100 # but as the stress test might take a while, we sometimes missed the debug mode..
101 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
102 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
103 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'force_gather_logs', SEP,
110 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
111 'plc_stop', 'vs_start', 'vs_stop', SEP,
112 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
113 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
114 'delete_leases', 'list_leases', SEP,
116 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
117 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
118 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
119 'plc_db_dump' , 'plc_db_restore', SEP,
120 'standby_1 through 20',SEP,
124 def printable_steps (list):
125 single_line=" ".join(list)+" "
126 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
128 def valid_step (step):
129 return step != SEP and step != SEPSFA
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platformb
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        # NOTE(review): the conditional testing retcod appears elided before this block
        # move all steps containing 'sfa' from default_steps to other_steps
        sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
        TestPlc.other_steps += sfa_steps
        for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        # constructor: record the spec/options and build the ssh + API helpers
        # NOTE(review): several lines appear elided in this region (the 'options'
        # assignment, the vserver-vs-chroot test, and the headers of the name()/
        # host_box()/is_local() accessors below) -- confirm against VCS
        self.plc_spec=plc_spec
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        # only reached when the spec is not vserver-based (guard elided)
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
        # body of name(): "<plc name>.<vservername>" (def line elided)
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)
        # body of host_box() (def line elided)
        return self.plc_spec['host_box']
        # body of is_local() (def line elided)
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
172 def actual_command_in_guest (self,command):
173 return self.test_ssh.actual_command(self.host_to_guest(command))
175 def start_guest (self):
176 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
178 def stop_guest (self):
179 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
181 def run_in_guest (self,command):
182 return utils.system(self.actual_command_in_guest(command))
184 def run_in_host (self,command):
185 return self.test_ssh.run_in_buildname(command)
187 #command gets run in the vserver
188 def host_to_guest(self,command):
189 return "vserver %s exec %s"%(self.vservername,command)
191 #start/stop the vserver
192 def start_guest_in_host(self):
193 return "vserver %s start"%(self.vservername)
195 def stop_guest_in_host(self):
196 return "vserver %s stop"%(self.vservername)
199 def run_in_guest_piped (self,local,remote):
200 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        # PLCAPI root-account auth struct, built from the plc spec
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
                # NOTE(review): the closing brace of this dict appears elided
    def locate_site (self,sitename):
        # find a site spec by name or login_base; raises if not found
        # NOTE(review): the 'return site' lines of both matches appear elided
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        # find a node spec by its 'name' field; raises if not found
        # NOTE(review): the return line appears elided
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        # same as locate_node but keyed on node_fields['hostname']
        # NOTE(review): the return line appears elided
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        # find a key spec by name; raises if not found
        # NOTE(review): the return line appears elided
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        # find a slice spec by slice name; raises if not found
        # NOTE(review): the return line appears elided
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename

    def all_sliver_objs (self):
        # build a TestSliver for every (slice,node) pair in the spec
        # NOTE(review): 'result' initialization and the return appear elided
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        # build a TestSliver from a node name and a slice name
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # wrap the raw specs into test objects
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        # TestNode for the first node of the first slice in the spec
        # NOTE(review): the return line appears elided
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
267 def locate_first_sliver (self):
268 slice_spec=self.plc_spec['slices'][0]
269 slicename=slice_spec['slice_fields']['name']
270 nodename=slice_spec['nodenames'][0]
271 return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): 'tuples' initialization appears elided
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        # NOTE(review): 'result' initialization and the new-entry creation appear elided
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)

    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        # NOTE(review): the trailing return appears elided
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        # NOTE(review): the trailing return appears elided
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        # NOTE(review): the trailing return appears elided
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()

    # kill only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the loop body appears elided

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the loop body appears elided

    #################### display config
    # NOTE(review): the method header ('def show(self):') appears elided above this docstring
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)
    # keys always shown even in non-verbose mode
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        # pretty-print the plc spec; the pass/key dispatch lines appear elided
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            # NOTE(review): the branch headers selecting on key/passno appear elided
                self.display_site_spec(site)
                for node in site['nodes']:
                    self.display_node_spec(node)
            elif key=='initscripts':
                for initscript in val:
                    self.display_initscript_spec (initscript)
                self.display_slice_spec (slice)
                self.display_key_spec (key)
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val

    def display_site_spec (self,site):
        # pretty-print one site spec; several branch headers appear elided
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            # NOTE(review): the k=='nodes' / k=='users' branch headers appear elided
                print '+ ','nodes : ',
                    print node['node_fields']['hostname'],'',
                    print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
390 def display_initscript_spec (self,initscript):
391 print '+ ======== initscript',initscript['initscript_fields']['name']
393 def display_key_spec (self,key):
394 print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        # pretty-print one slice spec; several branch headers appear elided
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            # NOTE(review): the nodenames/usernames branches appear elided
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
418 def display_node_spec (self,node):
419 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
420 print "hostname=",node['node_fields']['hostname'],
421 print "ip=",node['interface_fields']['ip']
422 if self.options.verbose:
423 utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        # delegate to the static helpers below
        # NOTE(review): the trailing return appears elided
        TestPlc.display_mapping_plc(self.plc_spec)

    # NOTE(review): a @staticmethod decorator appears elided above
    def display_mapping_plc (plc_spec):
        # show the plc's host box / vserver / IP, then each node's mapping
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    # NOTE(review): a @staticmethod decorator appears elided above
    def display_mapping_node (node_spec):
        # show one node's name, qemu host box and hostname
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']

    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        # NOTE(review): the 'now' assignment (presumably int(time.time())) appears elided
        return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
451 def local_pre (self):
452 "run site-dependant pre-test script as defined in LocalTestResources"
453 from LocalTestResources import local_resources
454 return local_resources.step_pre(self)
456 def local_post (self):
457 "run site-dependant post-test script as defined in LocalTestResources"
458 from LocalTestResources import local_resources
459 return local_resources.step_post(self)
461 def local_list (self):
462 "run site-dependant list script as defined in LocalTestResources"
463 from LocalTestResources import local_resources
464 return local_resources.step_list(self)
466 def local_rel (self):
467 "run site-dependant release script as defined in LocalTestResources"
468 from LocalTestResources import local_resources
469 return local_resources.step_release(self)
471 def local_rel_plc (self):
472 "run site-dependant release script as defined in LocalTestResources"
473 from LocalTestResources import local_resources
474 return local_resources.step_release_plc(self)
476 def local_rel_qemu (self):
477 "run site-dependant release script as defined in LocalTestResources"
478 from LocalTestResources import local_resources
479 return local_resources.step_release_qemu(self)
        "vserver delete the test myplc"
        # NOTE(review): the method header ('def vs_delete(self):') and the trailing
        # return appear elided around this body
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)

    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # NOTE(review): several lines appear elided in this method (is-local guard,
        # remote build-dir naming, test_env_options init, try/except around the
        # reverse lookup below)
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # reverse-lookup the vserver IP to pass --hostname (try/except elided)
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        # only reached when the reverse lookup fails (except clause elided)
        print "Cannot reverse lookup %s"%self.vserverip
        print "This is considered fatal, as this might pollute the test results"
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"
        # NOTE(review): several lines appear elided (pkgs_list initialization,
        # the 'arch' assignments in the personality branches, the slicerepo guard)
        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        # pick the rpm arch according to the configured personality
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
        pkgs_string=" ".join(pkgs_list)
        self.run_in_guest("yum -y install %s"%pkgs_string)
        # success is determined by querying the installed rpms afterwards
        return self.run_in_guest("rpm -q %s"%pkgs_string)==0

    def plc_configure(self):
        # drive plc-config-tty through a generated answers file
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # NOTE(review): the variable list below is partially elided
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        # show the answers file, feed it to plc-config-tty in the guest, then clean up
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)

        # NOTE(review): the headers of plc_start/plc_stop/vs_start/vs_stop appear
        # elided below; only fragments of their bodies/docstrings remain
        self.run_in_guest('service plc start')
        self.run_in_guest('service plc stop')
        "start the PLC vserver"
        "stop the PLC vserver"
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        # NOTE(review): the trailing return appears elided
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()

    def keys_clean(self):
        "removes keys cached in keys/"
        # NOTE(review): the trailing return appears elided
        utils.system("rm -rf ./keys")

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): the 'dir' setup/mkdir, 'overall' initialization and the
        # return appear elided
        if not os.path.isdir(dir):
        vservername=self.vservername
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        "create sites with PLCAPI"
        # NOTE(review): the method header ('def sites(self):') appears elided above
        return self.do_sites()
639 def delete_sites (self):
640 "delete sites with PLCAPI"
641 return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        # create or delete every site (and its users) declared in the spec
        # NOTE(review): the else-branch header and the trailing return appear elided
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()

    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        # NOTE(review): the trailing return appears elided
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)

        "create nodes with PLCAPI"
        # NOTE(review): the method header ('def nodes(self):') appears elided above
        return self.do_nodes()
670 def delete_nodes (self):
671 "delete nodes with PLCAPI"
672 return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        # create or delete every node declared in the spec
        # NOTE(review): the delete-branch header, the else header and the trailing
        # return appear elided
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
691 def nodegroups (self):
692 "create nodegroups with PLCAPI"
693 return self.do_nodegroups("add")
694 def delete_nodegroups (self):
695 "delete nodegroups with PLCAPI"
696 return self.do_nodegroups("delete")
700 def translate_timestamp (start,grain,timestamp):
701 if timestamp < TestPlc.YEAR: return start+timestamp*grain
702 else: return timestamp
705 def timestamp_printable (timestamp):
706 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        # NOTE(review): the method header ('def leases(self):'), the 'now'
        # assignment and several control lines appear elided in this body
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        # (empty-list guard elided)
            utils.header ("No reservable node found - proceeding without leases")
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative of absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
            # (else branch header elided)
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                             (nodes,lease_spec['slice'],
                              lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                              lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        # NOTE(review): the trailing return appears elided
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        # NOTE(review): the 'now' assignment and the loop header over leases appear elided
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        # NOTE(review): 'groups_dict' initialization and several control lines
        # (branch headers, try/except clauses, returns) appear elided in this method
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        # 2nd pass: create/delete the nodegroups themselves
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            # (add-branch header elided)
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            # (found/not-found branch headers elided)
                tag_type_id = tag_types[0]['tag_type_id']
                tag_type_id = self.apiserver.AddTagType(auth,
                                                        {'tagname':nodegroupname,
                                                         'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            # create the nodegroup itself when missing (guard elided)
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                # (try: elided)
                    self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                # (except: the tag may already exist)
                    traceback.print_exc()
                    print 'node',nodename,'seems to already have tag',nodegroupname
                # check anyway (try: elided)
                    expect_yes = self.apiserver.GetNodeTags(auth,
                                                            {'hostname':nodename,
                                                             'tagname':nodegroupname},
                                                            ['value'])[0]['value']
                    if expect_yes != "yes":
                        print 'Mismatch node tag on node',nodename,'got',expect_yes
                # (except branch elided)
                    if not self.options.dry_run:
                        print 'Cannot find tag',nodegroupname,'on node',nodename
            # (delete-branch body, wrapped in try/except)
                print 'cleaning nodegroup',nodegroupname
                self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                traceback.print_exc()

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        # NOTE(review): 'node_infos' initialization and the return appear elided
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
833 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        # hostnames of the nodes whose node_type is 'reservable'
        # NOTE(review): 'res' initialization and the return appear elided
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        # poll PLCAPI until every node reaches <target_boot_state>, or time out
        # NOTE(review): several control lines appear elided (dry-run shortcut body,
        # the polling loop header, success/failure returns, the sleep call)
        if self.options.dry_run:
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        # (polling loop header elided)
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # refresh the per-hostname status from the API answer
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # (else branch header elided)
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # pretend the state was reached so real nodes never block the test
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # shrink the list to the nodes that still miss the target state
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            # (success exit elided)
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                # (failure return elided)
            # otherwise, sleep for a while
        # only useful in empty plcs
889 def nodes_booted(self):
890 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # try to ssh into every node (debug key or root key) until success or timeout
        # NOTE(review): several control lines appear elided ('message' setup, the
        # debug/production branch headers, the polling loop header, success and
        # failure returns, the sleep call)
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        # (debug branch: use the plc's debug key)
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        # (production branch: use the first test key)
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
        # (polling loop header elided)
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                # (success branch header elided)
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    # NOTE(review): removing from node_infos while iterating it can
                    # skip entries -- iterating a copy would be safer; verify upstream
                    node_infos.remove(node_info)
                # (else branch header elided)
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            # (success exit elided)
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                # (failure return elided)
            # otherwise, sleep for a while
        # only useful in empty plcs
937 def ssh_node_debug(self):
938 "Tries to ssh into nodes in debug mode with the debug ssh key"
939 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
941 def ssh_node_boot(self):
942 "Tries to ssh into nodes in production mode with the root ssh key"
943 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
    # NOTE(review): the bodies (and for 'bootcd' the header) of the node-level step
    # methods below appear elided; only headers/docstrings remain visible. They are
    # presumably delegated through the node_mapper machinery -- confirm against VCS
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."

        "all nodes: invoke GetBootMedium and store result locally"

    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"

    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"

    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"

    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"

    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"

    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"

    ### check hooks : invoke scripts from hooks/{node,slice}
979 def check_hooks_node (self):
980 return self.locate_first_node().check_hooks()
981 def check_hooks_sliver (self) :
982 return self.locate_first_sliver().check_hooks()
984 def check_hooks (self):
985 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
986 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        # check the initscript stamp on every (slice,node) sliver of the spec
        # NOTE(review): 'overall' initialization, the 'continue', the overall=False
        # line and the return appear elided
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
1006 def check_initscripts(self):
1007 "check that the initscripts have triggered"
1008 return self.do_check_initscripts()
1010 def initscripts (self):
1011 "create initscripts with PLCAPI"
1012 for initscript in self.plc_spec['initscripts']:
1013 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1014 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1017 def delete_initscripts (self):
1018 "delete initscripts with PLCAPI"
1019 for initscript in self.plc_spec['initscripts']:
1020 initscript_name = initscript['initscript_fields']['name']
1021 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1023 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1024 print initscript_name,'deleted'
1026 print 'deletion went wrong - probably did not exist'
def slices (self):
    # NOTE(review): the def line was missing in this view; reconstructed from
    # the docstring and the delete_slices/do_slices pair below - confirm name
    "create slices with PLCAPI"
    return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same worker as the creation step, with the delete action
    return self.do_slices(action="delete")
def do_slices (self, action="add"):
    """Worker for slices/delete_slices.

    action is either "add" (default: create the slices) or "delete"."""
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        # the branch on 'action' was missing in this view: without it both the
        # delete and the create paths would run for every slice
        if action != "add":
            utils.header("Deleting slices in site %s"%test_site.name())
            test_slice.delete_slice()
        else:
            utils.pprint("Creating slice",slice)
            test_slice.create_slice()
            utils.header('Created Slice %s'%slice['slice_fields']['name'])
    return True
# NOTE(review): step stubs - the mapper decorator lines appear to be missing
# from this view (see node_mapper/slice_mapper in the file header) - confirm
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def timestamp_qemu (self) :
    # original docstring was a copy-paste of qemu_start's; reworded per the
    # method name - confirm against TestNode.timestamp_qemu
    "all nodes: record the qemu startup timestamp"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # NOTE(review): the loop scaffolding (overall/port) was missing in this
    # view and has been restored - confirm against the full file
    specs = self.plc_spec['tcp_test']
    overall = True
    for spec in specs:
        port = spec['port']
        # server side
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
            overall = False
            continue
        # idem for the client side
        # BUG FIX: the original looked up the *server* node/slice here too,
        # so the client never ran in the client slice
        c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
            overall = False
    return overall
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # 'command = location' was missing in this view: without it 'command'
    # is used before assignment
    command = location
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
1102 # populate runs the same utility with slightly different options
1103 # in particular runs with --preserve (dont cleanup) and without --check
1104 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install(self):
    "yum install sfa, sfa-plc and sfa-client"
    # the yum return code is deliberately ignored: success is judged by the
    # rpm -q query below
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    # true when all four packages ended up installed
    return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # BUG FIX: the comparison result was computed and discarded, so the step
    # always returned None (i.e. failure); return it instead
    return self.run_in_guest("sfa-nuke-plc.py")==0
1119 def sfa_plcclean(self):
1120 "cleans the PLC entries that were created as a side effect of running the script"
1122 sfa_spec=self.plc_spec['sfa']
1124 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1125 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
1126 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1127 except: print "Slice %s already absent from PLC db"%slicename
1129 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1130 try: self.apiserver.DeletePerson(self.auth_root(),username)
1131 except: print "User %s already absent from PLC db"%username
1133 print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # retry without the rpm scriptlets in case the plain erase failed
    self.run_in_guest("rpm -e --noscripts sfa-plc")
    # per the docstring all results are ignored; the step always succeeds
    return True
1146 ### run unit tests for SFA
1147 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1148 # Running Transaction
1149 # Transaction couldn't start:
1150 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1151 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1152 # no matter how many Gbs are available on the testplc
1153 # could not figure out what's wrong, so...
1154 # if the yum install phase fails, consider the test is successful
1155 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # per the long comment above: a failed install counts as success;
        # without this return the tests would be attempted anyway
        return True
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
def confdir(self):
    "return (creating it on demand) this plc's local config dir conf.<plcname>"
    # NOTE(review): the def line and the final return were missing in this
    # view; reconstructed from the self.confdir() call sites below - confirm
    dirname="conf.%s"%self.plc_spec['name']
    if not os.path.isdir(dirname):
        utils.system("mkdir -p %s"%dirname)
    if not os.path.isdir(dirname):
        # raising a plain string is illegal since python 2.6 - use a real exception
        raise Exception("Cannot create config dir for plc %s"%self.name())
    return dirname
def conffile(self,filename):
    "return the path of a config file inside this plc's conf directory"
    base = self.confdir()
    return "%s/%s"%(base,filename)
def confsubdir(self,dirname,clean,dry_run=False):
    "return (creating it on demand) a subdir of confdir; wipe it first when clean is set"
    subdirname="%s/%s"%(self.confdir(),dirname)
    # BUG FIX: 'clean' was unused and the rm ran unconditionally in the
    # version seen here; only wipe when the caller asked for it
    if clean:
        utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # raising a plain string is illegal since python 2.6
        raise Exception("Cannot create config subdir %s for plc %s"%(dirname,self.name()))
    # callers (e.g. sfi_configure) use the returned path
    return subdirname
def conffile_clean (self,filename):
    "remove the given file from this plc's conf directory"
    full_path = self.conffile(filename)
    status = utils.system("rm -rf %s"%full_path)
    return status == 0
def sfa_configure(self):
    "run sfa-config-tty"
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    # TODO(review): this variable list was truncated in the version seen
    # here; restore any additional SFA_* settings from the reference file
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_PLC_DB_PASSWORD',
                 ]:
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        # BUG FIX: 'val' was undefined when the flag is false
        val='false'
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # flush/close before feeding the file to sfa-config-tty
    fileconf.close()
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
    return True
def aggregate_xml_line(self):
    "return the aggregates.xml fragment advertising this plc's aggregate"
    sfa = self.plc_spec['sfa']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, sfa['SFA_REGISTRY_ROOT_AUTH'], sfa['neighbours-port'])
def registry_xml_line(self):
    "return the registries.xml fragment advertising this plc's registry"
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure (self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    if not other_plcs:
        return True
    agg_fname=self.conffile("agg.xml")
    # open() instead of the py2-only file() builtin; close before copying
    aggregates=open(agg_fname,"w")
    aggregates.write("<aggregates>%s</aggregates>\n" % \
                     " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    aggregates.close()
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    registries=open(reg_fname,"w")
    registries.write("<registries>%s</registries>\n" % \
                     " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    registries.close()
    utils.header ("(Over)wrote %s"%reg_fname)
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    "runs sfa-import-plc.py inside the plc"
    # NOTE(review): 'auth' is currently unused - it was only needed by the
    # commented-out key-copying command below
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return self.run_in_guest('sfa-import-plc.py')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    "service sfa start"
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # without this return the step would proceed despite 'skipping step'
        return True
    # cannot use sfa_slice_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_config(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
    return True
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # return the command status so the step reports success/failure
    return self.run_in_guest("rm -rf /root/sfi")==0
### sfa step stubs: their bodies are placeholders - the real work is
### dispatched by the sfa mapper (see slice_sfa_mapper in the file header).
# NOTE(review): the decorator lines appear missing from this view, and some
# defs had EMPTY bodies (a syntax error); 'pass' added where needed - confirm
def sfa_add_user(self):
    # docstring missing in this view
    pass
def sfa_update_user(self):
    # docstring missing in this view
    pass
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
    pass
def sfa_discover(self):
    # NOTE(review): 'resouces_in.rspec' is spelled this way in the original -
    # confirm whether the on-disk rspec file carries the same name
    "discover resources into resouces_in.rspec"
    pass
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
    pass
def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
    pass
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
    pass
# NOTE(review): the def line belonging to this docstring is missing in this view
"various registry-related calls"
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
    pass
def sfa_delete_user(self):
    # docstring missing in this view
    pass
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    pass
def sfa_stop(self):
    "service sfa stop"
    # NOTE(review): the def line was missing in this view; reconstructed by
    # symmetry with sfa_start.  Also return the status instead of discarding it
    return self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # 'command = location' was missing in this view: without it 'command'
    # is used before assignment
    command = location
    command += " -- --preserve --short-names"
    local_ok = (self.run_in_guest(command) == 0)
    # second run with --foreign for creating fake foreign entries
    # (renamed the result flags: 'remote' shadowed the path variable above)
    command += ' --foreign'
    foreign_ok = (self.run_in_guest(command) == 0)
    return local_ok and foreign_ok
1360 def gather_logs (self):
1361 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1362 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1363 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1364 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1365 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1366 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1368 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1369 self.gather_var_logs ()
1371 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1372 self.gather_pgsql_logs ()
1374 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1375 for site_spec in self.plc_spec['sites']:
1376 test_site = TestSite (self,site_spec)
1377 for node_spec in site_spec['nodes']:
1378 test_node=TestNode(self,test_site,node_spec)
1379 test_node.gather_qemu_logs()
1381 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1382 self.gather_nodes_var_logs()
1384 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1385 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    "fetch /var/log from each sliver returned by all_sliver_objs() into logs/sliver.var-log.<sliver>/"
    for sliver in self.all_sliver_objs():
        tar_from_sliver = sliver.tar_var_logs()
        target = "logs/sliver.var-log.%s"%sliver.name()
        utils.system("mkdir -p %s"%target)
        utils.system(tar_from_sliver + " | tar -C %s -xf -"%target)
def gather_var_logs (self):
    "fetch the plc's /var/log into logs/myplc.var-log.<plcname>/"
    target = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p %s"%target)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -"%target)
    # make the httpd logs readable by everybody
    utils.system("chmod a+r,a+x %s/httpd"%target)
def gather_pgsql_logs (self):
    "fetch the plc's postgres logs into logs/myplc.pgsql-log.<plcname>/"
    target = "logs/myplc.pgsql-log.%s"%self.name()
    utils.system("mkdir -p %s"%target)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -"%target)
def gather_nodes_var_logs (self):
    "fetch /var/log from every node into logs/node.var-log.<node>/"
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            target = "logs/node.var-log.%s"%node.name()
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            fetch = ssh.actual_command("tar -C /var/log -cf - .") + "| tar -C %s -xf -"%target
            utils.system("mkdir -p %s"%target)
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    """Return /root/<database>-<name>.sql.

    name is options.dbname when that is a string, otherwise today's date."""
    # NOTE(review): the try/except scaffolding was missing in this view and
    # has been restored - confirm against the full file
    # uses options.dbname if it is found
    try:
        name=self.options.dbname
        if not isinstance(name,StringTypes):
            # not a usable name: fall through to the date-based default
            raise Exception
    except:
        t=datetime.datetime.now()
        name=str(t.date())
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # step methods must return a boolean (see header note); was missing here
    return True
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    dump=self.dbfile("planetab5")
    # stop httpd while the database is dropped and re-created
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    # restart httpd
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
    # step methods must return a boolean (see header note); was missing here
    return True
# Numbered placeholders that expose the standby_<n> steps; their bodies are
# irrelevant since standby_generic (see the file header) supplies the real
# behavior from the method name.  NOTE(review): the @standby_generic
# decorator lines appear to be missing between these defs in this view.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass