1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
55 def slice_mapper_options (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
69 def slice_mapper_options_sfa (method):
73 slice_method = TestSliceSfa.__dict__[method.__name__]
74 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
75 site_spec = self.locate_site (slice_spec['sitename'])
76 test_site = TestSite(self,site_spec)
77 test_slice=TestSliceSfa(test_plc,test_site,slice_spec)
78 if not slice_method(test_slice,self.options): overall=False
80 # restore the doc text
81 actual.__doc__=method.__doc__
90 'display', 'resources_pre', SEP,
91 'delete_vs','create_vs','install', 'configure', 'start', SEP,
92 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
93 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
94 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
95 'kill_all_qemus', 'start_node', SEP,
96 # better use of time: do this now that the nodes are taking off
97 'plcsh_stress_test', SEP,
98 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
99 'configure_sfi', 'add_sfa', 'update_sfa', 'view_sfa', SEPSFA,
100 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEPSFA,
101 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEPSFA,
102 'check_tcp', 'check_hooks', SEP,
103 'force_gather_logs', 'force_resources_post', SEP,
106 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
107 'stop', 'vs_start', SEP,
108 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
109 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
110 'clean_leases', 'list_leases', SEP,
112 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
113 'plcclean_sfa', 'dbclean_sfa', 'uninstall_sfa', 'clean_sfi', SEP,
114 'db_dump' , 'db_restore', SEP,
115 'standby_1 through 20',SEP,
119 def printable_steps (list):
120 single_line=" ".join(list)+" "
121 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
123 def valid_step (step):
124 return step != SEP and step != SEPSFA
126 # turn off the sfa-related steps when build has skipped SFA
127 # this is originally for centos5 as recent SFAs won't build on this platform
129 def check_whether_build_has_sfa (rpms_url):
130 # warning, we're now building 'sface' so let's be a bit more picky
131 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
132 # full builds are expected to return with 0 here
134 # move all steps containing 'sfa' from default_steps to other_steps
135 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
136 TestPlc.other_steps += sfa_steps
137 for step in sfa_steps: TestPlc.default_steps.remove(step)
139 def __init__ (self,plc_spec,options):
140 self.plc_spec=plc_spec
142 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
144 self.vserverip=plc_spec['vserverip']
145 self.vservername=plc_spec['vservername']
146 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
149 raise Exception,'chroot-based myplc testing is deprecated'
150 self.apiserver=TestApiserver(self.url,options.dry_run)
153 name=self.plc_spec['name']
154 return "%s.%s"%(name,self.vservername)
157 return self.plc_spec['hostname']
160 return self.test_ssh.is_local()
162 # define the API methods on this object through xmlrpc
163 # would help, but not strictly necessary
167 def actual_command_in_guest (self,command):
168 return self.test_ssh.actual_command(self.host_to_guest(command))
170 def start_guest (self):
171 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
173 def run_in_guest (self,command):
174 return utils.system(self.actual_command_in_guest(command))
    # by contrast with run_in_guest, this one runs on the host box itself
    def run_in_host (self,command):
        "run 'command' on the plc host box through the TestSsh helper; returns its exit status"
        return self.test_ssh.run_in_buildname(command)
179 #command gets run in the vserver
180 def host_to_guest(self,command):
181 return "vserver %s exec %s"%(self.vservername,command)
183 #command gets run in the vserver
184 def start_guest_in_host(self):
185 return "vserver %s start"%(self.vservername)
188 def run_in_guest_piped (self,local,remote):
189 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
191 def auth_root (self):
192 return {'Username':self.plc_spec['PLC_ROOT_USER'],
193 'AuthMethod':'password',
194 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
195 'Role' : self.plc_spec['role']
197 def locate_site (self,sitename):
198 for site in self.plc_spec['sites']:
199 if site['site_fields']['name'] == sitename:
201 if site['site_fields']['login_base'] == sitename:
203 raise Exception,"Cannot locate site %s"%sitename
205 def locate_node (self,nodename):
206 for site in self.plc_spec['sites']:
207 for node in site['nodes']:
208 if node['name'] == nodename:
210 raise Exception,"Cannot locate node %s"%nodename
212 def locate_hostname (self,hostname):
213 for site in self.plc_spec['sites']:
214 for node in site['nodes']:
215 if node['node_fields']['hostname'] == hostname:
217 raise Exception,"Cannot locate hostname %s"%hostname
219 def locate_key (self,keyname):
220 for key in self.plc_spec['keys']:
221 if key['name'] == keyname:
223 raise Exception,"Cannot locate key %s"%keyname
225 def locate_slice (self, slicename):
226 for slice in self.plc_spec['slices']:
227 if slice['slice_fields']['name'] == slicename:
229 raise Exception,"Cannot locate slice %s"%slicename
231 def all_sliver_objs (self):
233 for slice_spec in self.plc_spec['slices']:
234 slicename = slice_spec['slice_fields']['name']
235 for nodename in slice_spec['nodenames']:
236 result.append(self.locate_sliver_obj (nodename,slicename))
239 def locate_sliver_obj (self,nodename,slicename):
240 (site,node) = self.locate_node(nodename)
241 slice = self.locate_slice (slicename)
243 test_site = TestSite (self, site)
244 test_node = TestNode (self, test_site,node)
245 # xxx the slice site is assumed to be the node site - mhh - probably harmless
246 test_slice = TestSlice (self, test_site, slice)
247 return TestSliver (self, test_node, test_slice)
249 def locate_first_node(self):
250 nodename=self.plc_spec['slices'][0]['nodenames'][0]
251 (site,node) = self.locate_node(nodename)
252 test_site = TestSite (self, site)
253 test_node = TestNode (self, test_site,node)
256 def locate_first_sliver (self):
257 slice_spec=self.plc_spec['slices'][0]
258 slicename=slice_spec['slice_fields']['name']
259 nodename=slice_spec['nodenames'][0]
260 return self.locate_sliver_obj(nodename,slicename)
262 # all different hostboxes used in this plc
263 def gather_hostBoxes(self):
264 # maps on sites and nodes, return [ (host_box,test_node) ]
266 for site_spec in self.plc_spec['sites']:
267 test_site = TestSite (self,site_spec)
268 for node_spec in site_spec['nodes']:
269 test_node = TestNode (self, test_site, node_spec)
270 if not test_node.is_real():
271 tuples.append( (test_node.host_box(),test_node) )
272 # transform into a dict { 'host_box' -> [ test_node .. ] }
274 for (box,node) in tuples:
275 if not result.has_key(box):
278 result[box].append(node)
281 # a step for checking this stuff
282 def show_boxes (self):
283 'print summary of nodes location'
284 for (box,nodes) in self.gather_hostBoxes().iteritems():
285 print box,":"," + ".join( [ node.name() for node in nodes ] )
288 # make this a valid step
289 def kill_all_qemus(self):
290 'kill all qemu instances on the qemu boxes involved by this setup'
291 # this is the brute force version, kill all qemus on that host box
292 for (box,nodes) in self.gather_hostBoxes().iteritems():
293 # pass the first nodename, as we don't push template-qemu on testboxes
294 nodedir=nodes[0].nodedir()
295 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
298 # make this a valid step
299 def list_all_qemus(self):
300 'list all qemu instances on the qemu boxes involved by this setup'
301 for (box,nodes) in self.gather_hostBoxes().iteritems():
302 # this is the brute force version, kill all qemus on that host box
303 TestBox(box,self.options.buildname).list_all_qemus()
306 # kill only the right qemus
307 def list_qemus(self):
308 'list qemu instances for our nodes'
309 for (box,nodes) in self.gather_hostBoxes().iteritems():
310 # the fine-grain version
315 # kill only the right qemus
316 def kill_qemus(self):
317 'kill the qemu instances for our nodes'
318 for (box,nodes) in self.gather_hostBoxes().iteritems():
319 # the fine-grain version
324 #################### display config
326 "show test configuration after localization"
327 self.display_pass (1)
328 self.display_pass (2)
332 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
333 def display_pass (self,passno):
334 for (key,val) in self.plc_spec.iteritems():
335 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
339 self.display_site_spec(site)
340 for node in site['nodes']:
341 self.display_node_spec(node)
342 elif key=='initscripts':
343 for initscript in val:
344 self.display_initscript_spec (initscript)
347 self.display_slice_spec (slice)
350 self.display_key_spec (key)
352 if key not in ['sites','initscripts','slices','keys', 'sfa']:
353 print '+ ',key,':',val
355 def display_site_spec (self,site):
356 print '+ ======== site',site['site_fields']['name']
357 for (k,v) in site.iteritems():
358 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
361 print '+ ','nodes : ',
363 print node['node_fields']['hostname'],'',
369 print user['name'],'',
371 elif k == 'site_fields':
372 print '+ login_base',':',v['login_base']
373 elif k == 'address_fields':
    def display_initscript_spec (self,initscript):
        # initscripts only show their name; the script body is not dumped
        print '+ ======== initscript',initscript['initscript_fields']['name']
    def display_key_spec (self,key):
        # keys get a one-line summary: their symbolic name only
        print '+ ======== key',key['name']
385 def display_slice_spec (self,slice):
386 print '+ ======== slice',slice['slice_fields']['name']
387 for (k,v) in slice.iteritems():
400 elif k=='slice_fields':
401 print '+ fields',':',
402 print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        # one-line summary of a node: symbolic name, qemu host box, hostname, ip
        # (the trailing commas keep the first three prints on a single line - python2)
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            # full dump of the node spec when running verbose
            utils.pprint("node details",node,depth=3)
414 # another entry point for just showing the boxes involved
415 def display_mapping (self):
416 TestPlc.display_mapping_plc(self.plc_spec)
    # no 'self' here: works on a bare plc_spec dict
    # NOTE(review): presumably decorated with @staticmethod just above this excerpt - confirm
    def display_mapping_plc (plc_spec):
        # summary of the plc itself: name, vserver location, addresses
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        # then one summary per node, across all sites
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    # no 'self' here either: works on a bare node_spec dict
    # NOTE(review): presumably decorated with @staticmethod just above this excerpt - confirm
    def display_mapping_node (node_spec):
        # three lines per node: symbolic name, the qemu box hosting it, its hostname
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
434 def resources_pre (self):
435 "run site-dependant pre-test script as defined in LocalTestResources"
436 from LocalTestResources import local_resources
437 return local_resources.step_pre(self)
439 def resources_post (self):
440 "run site-dependant post-test script as defined in LocalTestResources"
441 from LocalTestResources import local_resources
442 return local_resources.step_post(self)
444 def resources_list (self):
445 "run site-dependant list script as defined in LocalTestResources"
446 from LocalTestResources import local_resources
447 return local_resources.step_list(self)
449 def resources_release (self):
450 "run site-dependant release script as defined in LocalTestResources"
451 from LocalTestResources import local_resources
452 return local_resources.step_release(self)
454 def resources_release_plc (self):
455 "run site-dependant release script as defined in LocalTestResources"
456 from LocalTestResources import local_resources
457 return local_resources.step_release_plc(self)
459 def resources_release_qemu (self):
460 "run site-dependant release script as defined in LocalTestResources"
461 from LocalTestResources import local_resources
462 return local_resources.step_release_qemu(self)
465 "vserver delete the test myplc"
466 self.run_in_host("vserver --silent %s delete"%self.vservername)
470 # historically the build was being fetched by the tests
471 # now the build pushes itself as a subdir of the tests workdir
472 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
473 def create_vs (self):
474 "vserver creation (no install done)"
475 # push the local build/ dir to the testplc box
477 # a full path for the local calls
478 build_dir=os.path.dirname(sys.argv[0])
479 # sometimes this is empty - set to "." in such a case
480 if not build_dir: build_dir="."
481 build_dir += "/build"
483 # use a standard name - will be relative to remote buildname
485 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
486 self.test_ssh.rmdir(build_dir)
487 self.test_ssh.copy(build_dir,recursive=True)
488 # the repo url is taken from arch-rpms-url
489 # with the last step (i386) removed
490 repo_url = self.options.arch_rpms_url
491 for level in [ 'arch' ]:
492 repo_url = os.path.dirname(repo_url)
493 # pass the vbuild-nightly options to vtest-init-vserver
495 test_env_options += " -p %s"%self.options.personality
496 test_env_options += " -d %s"%self.options.pldistro
497 test_env_options += " -f %s"%self.options.fcdistro
498 script="vtest-init-vserver.sh"
499 vserver_name = self.vservername
500 vserver_options="--netdev eth0 --interface %s"%self.vserverip
502 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
503 vserver_options += " --hostname %s"%vserver_hostname
505 print "Cannot reverse lookup %s"%self.vserverip
506 print "This is considered fatal, as this might pollute the test results"
508 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
509 return self.run_in_host(create_vserver) == 0
513 "yum install myplc, noderepo, and the plain bootstrapfs"
515 # workaround for getting pgsql8.2 on centos5
516 if self.options.fcdistro == "centos5":
517 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
519 if self.options.personality == "linux32":
521 elif self.options.personality == "linux64":
524 raise Exception, "Unsupported personality %r"%self.options.personality
526 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
528 # try to install slicerepo - not fatal yet
529 self.run_in_guest("yum -y install slicerepo-%s"%nodefamily)
532 self.run_in_guest("yum -y install myplc")==0 and \
533 self.run_in_guest("yum -y install noderepo-%s"%nodefamily)==0 and \
534 self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0
539 tmpname='%s.plc-config-tty'%(self.name())
540 fileconf=open(tmpname,'w')
541 for var in [ 'PLC_NAME',
546 'PLC_MAIL_SUPPORT_ADDRESS',
549 # Above line was added for integrating SFA Testing
555 'PLC_RESERVATION_GRANULARITY',
558 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
559 fileconf.write('w\n')
560 fileconf.write('q\n')
562 utils.system('cat %s'%tmpname)
563 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
564 utils.system('rm %s'%tmpname)
569 self.run_in_guest('service plc start')
574 self.run_in_guest('service plc stop')
578 "start the PLC vserver"
582 # stores the keys from the config for further use
583 def store_keys(self):
584 "stores test users ssh keys in keys/"
585 for key_spec in self.plc_spec['keys']:
586 TestKey(self,key_spec).store_key()
589 def clean_keys(self):
590 "removes keys cached in keys/"
591 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
593 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
594 # for later direct access to the nodes
595 def fetch_keys(self):
596 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
598 if not os.path.isdir(dir):
600 vservername=self.vservername
602 prefix = 'debug_ssh_key'
603 for ext in [ 'pub', 'rsa' ] :
604 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
605 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
606 if self.test_ssh.fetch(src,dst) != 0: overall=False
610 "create sites with PLCAPI"
611 return self.do_sites()
613 def clean_sites (self):
614 "delete sites with PLCAPI"
615 return self.do_sites(action="delete")
617 def do_sites (self,action="add"):
618 for site_spec in self.plc_spec['sites']:
619 test_site = TestSite (self,site_spec)
620 if (action != "add"):
621 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
622 test_site.delete_site()
623 # deleted with the site
624 #test_site.delete_users()
627 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
628 test_site.create_site()
629 test_site.create_users()
632 def clean_all_sites (self):
633 "Delete all sites in PLC, and related objects"
634 print 'auth_root',self.auth_root()
635 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
636 for site_id in site_ids:
637 print 'Deleting site_id',site_id
638 self.apiserver.DeleteSite(self.auth_root(),site_id)
641 "create nodes with PLCAPI"
642 return self.do_nodes()
643 def clean_nodes (self):
644 "delete nodes with PLCAPI"
645 return self.do_nodes(action="delete")
647 def do_nodes (self,action="add"):
648 for site_spec in self.plc_spec['sites']:
649 test_site = TestSite (self,site_spec)
651 utils.header("Deleting nodes in site %s"%test_site.name())
652 for node_spec in site_spec['nodes']:
653 test_node=TestNode(self,test_site,node_spec)
654 utils.header("Deleting %s"%test_node.name())
655 test_node.delete_node()
657 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
658 for node_spec in site_spec['nodes']:
659 utils.pprint('Creating node %s'%node_spec,node_spec)
660 test_node = TestNode (self,test_site,node_spec)
661 test_node.create_node ()
664 def nodegroups (self):
665 "create nodegroups with PLCAPI"
666 return self.do_nodegroups("add")
667 def clean_nodegroups (self):
668 "delete nodegroups with PLCAPI"
669 return self.do_nodegroups("delete")
673 def translate_timestamp (start,grain,timestamp):
674 if timestamp < TestPlc.YEAR: return start+timestamp*grain
675 else: return timestamp
678 def timestamp_printable (timestamp):
679 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
682 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
684 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
685 print 'API answered grain=',grain
686 start=(now/grain)*grain
688 # find out all nodes that are reservable
689 nodes=self.all_reservable_nodenames()
691 utils.header ("No reservable node found - proceeding without leases")
694 # attach them to the leases as specified in plc_specs
695 # this is where the 'leases' field gets interpreted as relative or absolute
696 for lease_spec in self.plc_spec['leases']:
697 # skip the ones that come with a null slice id
698 if not lease_spec['slice']: continue
699 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
700 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
701 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
702 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
703 if lease_addition['errors']:
704 utils.header("Cannot create leases, %s"%lease_addition['errors'])
707 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
708 (nodes,lease_spec['slice'],
709 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
710 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
714 def clean_leases (self):
715 "remove all leases in the myplc side"
716 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
717 utils.header("Cleaning leases %r"%lease_ids)
718 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
721 def list_leases (self):
722 "list all leases known to the myplc"
723 leases = self.apiserver.GetLeases(self.auth_root())
726 current=l['t_until']>=now
727 if self.options.verbose or current:
728 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
729 TestPlc.timestamp_printable(l['t_from']),
730 TestPlc.timestamp_printable(l['t_until'])))
733 # create nodegroups if needed, and populate
734 def do_nodegroups (self, action="add"):
735 # 1st pass to scan contents
737 for site_spec in self.plc_spec['sites']:
738 test_site = TestSite (self,site_spec)
739 for node_spec in site_spec['nodes']:
740 test_node=TestNode (self,test_site,node_spec)
741 if node_spec.has_key('nodegroups'):
742 nodegroupnames=node_spec['nodegroups']
743 if isinstance(nodegroupnames,StringTypes):
744 nodegroupnames = [ nodegroupnames ]
745 for nodegroupname in nodegroupnames:
746 if not groups_dict.has_key(nodegroupname):
747 groups_dict[nodegroupname]=[]
748 groups_dict[nodegroupname].append(test_node.name())
749 auth=self.auth_root()
751 for (nodegroupname,group_nodes) in groups_dict.iteritems():
753 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
754 # first, check if the nodetagtype is here
755 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
757 tag_type_id = tag_types[0]['tag_type_id']
759 tag_type_id = self.apiserver.AddTagType(auth,
760 {'tagname':nodegroupname,
761 'description': 'for nodegroup %s'%nodegroupname,
764 print 'located tag (type)',nodegroupname,'as',tag_type_id
766 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
768 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
769 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
770 # set node tag on all nodes, value='yes'
771 for nodename in group_nodes:
773 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
775 traceback.print_exc()
776 print 'node',nodename,'seems to already have tag',nodegroupname
779 expect_yes = self.apiserver.GetNodeTags(auth,
780 {'hostname':nodename,
781 'tagname':nodegroupname},
782 ['value'])[0]['value']
783 if expect_yes != "yes":
784 print 'Mismatch node tag on node',nodename,'got',expect_yes
787 if not self.options.dry_run:
788 print 'Cannot find tag',nodegroupname,'on node',nodename
792 print 'cleaning nodegroup',nodegroupname
793 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
795 traceback.print_exc()
799 # return a list of tuples (nodename,qemuname)
800 def all_node_infos (self) :
802 for site_spec in self.plc_spec['sites']:
803 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
804 for node_spec in site_spec['nodes'] ]
807 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
808 def all_reservable_nodenames (self):
810 for site_spec in self.plc_spec['sites']:
811 for node_spec in site_spec['nodes']:
812 node_fields=node_spec['node_fields']
813 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
814 res.append(node_fields['hostname'])
817 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
818 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
819 if self.options.dry_run:
823 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
824 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
825 # the nodes that haven't checked yet - start with a full list and shrink over time
826 tocheck = self.all_hostnames()
827 utils.header("checking nodes %r"%tocheck)
828 # create a dict hostname -> status
829 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
832 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
834 for array in tocheck_status:
835 hostname=array['hostname']
836 boot_state=array['boot_state']
837 if boot_state == target_boot_state:
838 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
840 # if it's a real node, never mind
841 (site_spec,node_spec)=self.locate_hostname(hostname)
842 if TestNode.is_real_model(node_spec['node_fields']['model']):
843 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
845 boot_state = target_boot_state
846 elif datetime.datetime.now() > graceout:
847 utils.header ("%s still in '%s' state"%(hostname,boot_state))
848 graceout=datetime.datetime.now()+datetime.timedelta(1)
849 status[hostname] = boot_state
851 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
854 if datetime.datetime.now() > timeout:
855 for hostname in tocheck:
856 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
858 # otherwise, sleep for a while
860 # only useful in empty plcs
863 def nodes_booted(self):
864 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
866 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
868 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
869 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
870 vservername=self.vservername
873 local_key = "keys/%(vservername)s-debug.rsa"%locals()
876 local_key = "keys/key1.rsa"
877 node_infos = self.all_node_infos()
878 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
879 for (nodename,qemuname) in node_infos:
880 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
881 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
882 (timeout_minutes,silent_minutes,period))
884 for node_info in node_infos:
885 (hostname,qemuname) = node_info
886 # try to run 'hostname' in the node
887 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
888 # don't spam logs - show the command only after the grace period
889 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
891 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
893 node_infos.remove(node_info)
895 # we will have tried real nodes once, in case they're up - but if not, just skip
896 (site_spec,node_spec)=self.locate_hostname(hostname)
897 if TestNode.is_real_model(node_spec['node_fields']['model']):
898 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
899 node_infos.remove(node_info)
902 if datetime.datetime.now() > timeout:
903 for (hostname,qemuname) in node_infos:
904 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
906 # otherwise, sleep for a while
908 # only useful in empty plcs
911 def nodes_ssh_debug(self):
912 "Tries to ssh into nodes in debug mode with the debug ssh key"
913 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
915 def nodes_ssh_boot(self):
916 "Tries to ssh into nodes in production mode with the root ssh key"
917 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
920 def init_node (self):
921 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
925 "all nodes: invoke GetBootMedium and store result locally"
928 def configure_qemu (self):
929 "all nodes: compute qemu config qemu.conf and store it locally"
932 def reinstall_node (self):
933 "all nodes: mark PLCAPI boot_state as reinstall"
936 def export_qemu (self):
937 "all nodes: push local node-dep directory on the qemu box"
940 ### check hooks : invoke scripts from hooks/{node,slice}
941 def check_hooks_node (self):
942 return self.locate_first_node().check_hooks()
943 def check_hooks_sliver (self) :
944 return self.locate_first_sliver().check_hooks()
946 def check_hooks (self):
947 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
948 return self.check_hooks_node() and self.check_hooks_sliver()
951 def do_check_initscripts(self):
953 for slice_spec in self.plc_spec['slices']:
954 if not slice_spec.has_key('initscriptname'):
956 initscript=slice_spec['initscriptname']
957 for nodename in slice_spec['nodenames']:
958 (site,node) = self.locate_node (nodename)
959 # xxx - passing the wrong site - probably harmless
960 test_site = TestSite (self,site)
961 test_slice = TestSlice (self,test_site,slice_spec)
962 test_node = TestNode (self,test_site,node)
963 test_sliver = TestSliver (self, test_node, test_slice)
964 if not test_sliver.check_initscript(initscript):
    def check_initscripts(self):
        "check that the initscripts have triggered"
        # thin step entry point; the actual logic lives in do_check_initscripts
        return self.do_check_initscripts()
972 def initscripts (self):
973 "create initscripts with PLCAPI"
974 for initscript in self.plc_spec['initscripts']:
975 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
976 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
979 def clean_initscripts (self):
980 "delete initscripts with PLCAPI"
981 for initscript in self.plc_spec['initscripts']:
982 initscript_name = initscript['initscript_fields']['name']
983 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
985 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
986 print initscript_name,'deleted'
988 print 'deletion went wrong - probably did not exist'
993 "create slices with PLCAPI"
994 return self.do_slices()
996 def clean_slices (self):
997 "delete slices with PLCAPI"
998 return self.do_slices("delete")
1000 def do_slices (self, action="add"):
1001 for slice in self.plc_spec['slices']:
1002 site_spec = self.locate_site (slice['sitename'])
1003 test_site = TestSite(self,site_spec)
1004 test_slice=TestSlice(self,test_site,slice)
1006 utils.header("Deleting slices in site %s"%test_site.name())
1007 test_slice.delete_slice()
1009 utils.pprint("Creating slice",slice)
1010 test_slice.create_slice()
1011 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1014 @slice_mapper_options
1015 def check_slice(self):
1016 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1020 def clear_known_hosts (self):
1021 "remove test nodes entries from the local known_hosts file"
1025 def start_node (self) :
1026 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
    overall = True
    for spec in specs:
        port = spec['port']
        # locate the sliver that runs the server side
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
            overall = False
            break
        # idem for the client side
        # BUG FIX: this used to look up the *server* sliver again
        # (spec['server_node'],spec['server_slice']), so the client never
        # ran in the client slice
        c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
            overall = False
    return overall
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # BUG FIX: 'command +=' below had no prior binding (NameError);
    # start from the installed script's path before appending options
    command = location
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
# in particular it runs with --preserve (don't clean up) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
def install_sfa(self):
    "yum install sfa, sfa-plc and sfa-client"
    # a zero exit status from yum is the step's success criterion
    rc = self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    return rc == 0
def dbclean_sfa(self):
    "thoroughly wipes off the SFA database"
    # delegate to the nuke script shipped with sfa; exit code 0 is success
    return 0 == self.run_in_guest("sfa-nuke-plc.py")
def plcclean_sfa(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    sfa_spec=self.plc_spec['sfa']
    # both deletions are best-effort: the objects may already be gone.
    # Bare 'except:' narrowed to Exception so Ctrl-C is not swallowed,
    # and py2-only print statements turned into py2/py3-safe calls.
    slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
    try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
    except Exception: print("Slice %s already absent from PLC db"%slicename)
    username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
    try: self.apiserver.DeletePerson(self.auth_root(),username)
    except Exception: print("User %s already absent from PLC db"%username)
    print("REMEMBER TO RUN import_sfa AGAIN")
    # step methods must return a boolean (see module header)
    return True
def uninstall_sfa(self):
    "uses rpm to uninstall sfa - ignore result"
    # both commands are deliberately best-effort: per the docstring their
    # results are ignored, so the step always reports success
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    # step methods must return a boolean (see module header)
    return True
def configure_sfa(self):
    "run sfa-config-tty"
    # replay an interactive sfa-config-tty session from a temp script:
    # 'e VAR' enters a variable, then its value; 'w' writes, 'R' restarts,
    # 'q' quits
    tmpname='%s.sfa-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    # NOTE(review): part of this variable list was missing from the chunk
    # under review -- the middle entries are reconstructed, TODO confirm
    # against the sfa section of the plc spec
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_SM_HOST',
                 'SFA_PLC_USER',
                 'SFA_PLC_PASSWORD',
                 'SFA_PLC_DB_HOST',
                 'SFA_PLC_DB_USER',
                 'SFA_PLC_DB_PASSWORD' ]:
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # BUG FIX: close before reading the file back -- with the handle still
    # open the tail of the script may sit in the stdio buffer when the
    # cat / run_in_guest_piped commands below read it
    fileconf.close()
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
    utils.system('rm %s'%tmpname)
    # step methods must return a boolean (see module header)
    return True
def aggregate_xml_line(self):
    "one <aggregate .../> element describing this plc, for aggregates.xml"
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="12346"/>' % (self.vserverip, hrn)
def registry_xml_line(self):
    "one <registry .../> element describing this plc, for registries.xml"
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_configure_sfa(self, other_plcs):
    """Install aggregates.xml and registries.xml in the guest's /etc/sfa,
    listing every *other* plc involved in the test.

    Of course with a single plc, other_plcs is an empty list.
    Returns True on success, False as soon as a copy fails."""
    filename="%s-agg.xml"%self.name()
    agg_file=open(filename,"w")
    agg_file.write("<aggregates>%s</aggregates>\n" % \
                   " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    # close before copying so the contents are flushed to disk
    agg_file.close()
    if self.test_ssh.copy_abs(filename,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername) !=0 : return False
    filename="%s-reg.xml"%self.name()
    reg_file=open(filename,"w")
    reg_file.write("<registries>%s</registries>\n" % \
                   " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    reg_file.close()
    # BUG FIX: this second copy used to target aggregates.xml again,
    # clobbering it with registry data and never installing registries.xml
    if self.test_ssh.copy_abs(filename,'/vservers/%s/etc/sfa/registries.xml'%self.vservername) !=0 : return False
    return True
def import_sfa(self):
    "runs sfa-import-plc.py to populate the SFA registry from the PLC db"
    # the former local 'auth' was only referenced by a commented-out
    # key-copy command flagged "not needed anymore"; both the dead code
    # and the now-unused variable have been dropped
    return self.run_in_guest('sfa-import-plc.py')==0
def start_sfa(self):
    "service sfa start"
    # the step succeeds iff the service script exits 0
    rc = self.run_in_guest('service sfa start')
    return rc == 0
def configure_sfi(self):
    # Builds a local sfi client configuration directory (key, sfi_config,
    # person/slice xml, slice rspec) and copies it into the guest.
    sfa_spec=self.plc_spec['sfa']
    # NOTE(review): this bare string is a no-op statement, not a docstring,
    # since it follows the first real statement -- presumably intended as
    # the method's doc text
    "sfi client configuration"
    # NOTE(review): dir_name is read below but its assignment is not
    # visible in this chunk (likely a local scratch directory) -- confirm
    # against the full file
    if os.path.exists(dir_name):
        utils.system('rm -rf %s'%dir_name)
    utils.system('mkdir %s'%dir_name)
    # private key of the PI user, taken from the first key in the spec
    file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
    fileconf=open(file_name,'w')
    fileconf.write (self.plc_spec['keys'][0]['private'])
    # sfi_config: authority, user, registry and slice-manager URLs
    file_name=dir_name + os.sep + 'sfi_config'
    fileconf=open(file_name,'w')
    SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
    fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
    fileconf.write('\n')
    SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
    fileconf.write ("SFI_USER='%s'"%SFI_USER)
    fileconf.write('\n')
    SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
    fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
    fileconf.write('\n')
    SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
    fileconf.write ("SFI_SM='%s'"%SFI_SM)
    fileconf.write('\n')
    # person.xml -- as indented here only the last record survives the
    # loop and gets written; confirm this is the intended behavior
    file_name=dir_name + os.sep + 'person.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_person_xml']:
        person_record=record
    fileconf.write(person_record)
    fileconf.write('\n')
    # slice.xml
    file_name=dir_name + os.sep + 'slice.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_slice_xml']:
        # NOTE(review): the binding of slice_record (presumably
        # 'slice_record=record') is not visible in this chunk; as shown
        # the write below would raise NameError
        #slice_record=sfa_spec['sfa_slice_xml']
        fileconf.write(slice_record)
        fileconf.write('\n')
    # slice.rspec
    file_name=dir_name + os.sep + 'slice.rspec'
    fileconf=open(file_name,'w')
    for (key, value) in sfa_spec['sfa_slice_rspec'].items():
        # NOTE(review): the line(s) building slice_rspec from key/value
        # are not visible in this chunk; as shown this would raise
        # NameError
        fileconf.write(slice_rspec)
        fileconf.write('\n')
    # push the whole directory into the guest
    # NOTE(review): 'location' is read below but its assignment is not
    # visible in this chunk -- confirm against the full file
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs(dir_name, remote, recursive=True)
    #utils.system('cat %s'%tmpname)
    utils.system('rm -rf %s'%dir_name)
def clean_sfi (self):
    "wipe the sfi client configuration in /root/.sfi inside the guest"
    # removal is best-effort, the step always reports success
    self.run_in_guest("rm -rf /root/.sfi")
    # step methods must return a boolean (see module header)
    return True
def add_sfa(self):
    "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
    # the 'def' line and the final return were missing from this chunk;
    # restored per the parallel update_sfa/delete_sfa steps
    test_user_sfa=TestUserSfa(self)
    if not test_user_sfa.add_user(): return False
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
        if not test_slice_sfa.add_slice(): return False
        if not test_slice_sfa.create_slice(): return False
    return True
def update_sfa(self):
    "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
    test_user_sfa=TestUserSfa(self)
    if not test_user_sfa.update_user(): return False
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
        if not test_slice_sfa.update_slice(): return False
    # step methods must return a boolean; without this the method fell off
    # the end and returned None, i.e. step failure
    return True
def view_sfa(self):
    "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
    # the 'def' and 'return' lines were missing from this chunk, leaving
    # the command chain dangling; restored
    sfa_spec=self.plc_spec['sfa']
    auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    return \
        self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
@slice_mapper_options_sfa
def check_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
    # NOTE: the body is intentionally just the doc string -- the
    # slice_mapper_options_sfa decorator (defined near the top of the
    # module) replaces this method with one that runs
    # TestSliceSfa.check_slice_sfa over every sfa slice in the spec
def delete_sfa(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry)"
    test_user_sfa=TestUserSfa(self)
    success1=test_user_sfa.delete_user()
    # success2 starts True so an empty slice list no longer leaves it
    # unbound (NameError), and any failing slice now marks the step as
    # failed instead of only the last one counting
    success2=True
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
        if not test_slice_sfa.delete_slice(): success2=False
    return success1 and success2
def stop_sfa(self):
    "service sfa stop"
    # the 'def' line was missing from this chunk, leaving the return
    # dangling; restored per the parallel start_sfa step
    return self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # BUG FIX: 'command +=' below had no prior binding (NameError);
    # start from the installed script's path before appending options
    command = location
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0)
    # second run with --foreign; result renamed from 'remote', which
    # shadowed the copy destination above
    command += ' --foreign'
    foreign_ok = (self.run_in_guest(command) == 0)
    return ( local and foreign_ok)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # py2-only print statements converted to single-arg calls (same output)
    # (1.a)
    print("-------------------- TestPlc.gather_logs : PLC's /var/log")
    self.gather_var_logs ()
    # (1.b)
    print("-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/")
    self.gather_pgsql_logs ()
    # (2)
    print("-------------------- TestPlc.gather_logs : nodes's QEMU logs")
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print("-------------------- TestPlc.gather_logs : nodes's /var/log")
    self.gather_nodes_var_logs()
    # (4)
    print("-------------------- TestPlc.gather_logs : sample sliver's /var/log")
    self.gather_slivers_var_logs()
    # step methods must return a boolean (see module header)
    return True
def gather_slivers_var_logs(self):
    "fetch /var/log from every known sliver into logs/sliver.var-log.<sliver>/"
    for sliver in self.all_sliver_objs():
        dest = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % dest)
        # the sliver side emits a tar stream; unpack it locally under dest
        utils.system(sliver.tar_var_logs() + " | tar -C %s -xf -" % dest)
def gather_var_logs (self):
    "fetch the plc's /var/log into logs/myplc.var-log.<plcname>/"
    dest = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p " + dest)
    # stream a tarball of /var/log out of the guest and unpack it locally
    tar_cmd = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_cmd + "| tar -C %s -xf -" % dest)
    # open up the httpd logs so anyone can browse them
    utils.system("chmod a+r,a+x %s/httpd" % dest)
def gather_pgsql_logs (self):
    "fetch the plc's postgres logs into logs/myplc.pgsql-log.<plcname>/"
    dest = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p " + dest)
    # stream a tarball of pg_log out of the guest and unpack it locally
    tar_cmd = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_cmd + "| tar -C %s -xf -" % dest)
def gather_nodes_var_logs (self):
    "fetch /var/log from every node into logs/node.var-log.<node>/"
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            # each node is reached over ssh with the shared test key
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            dest = "logs/node.var-log.%s" % node.name()
            utils.system("mkdir -p %s" % dest)
            # remote tar stream, unpacked locally under dest
            utils.system(ssh.actual_command("tar -C /var/log -cf - .") + "| tar -C %s -xf -" % dest)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    """Filename used for the sql dump/restore of *database*.

    Uses options.dbname when present and a string; otherwise falls back
    to today's date so successive dumps do not collide."""
    # NOTE(review): the try/except scaffolding was missing from this
    # chunk and is reconstructed; bare 'except:' narrowed to Exception
    try:
        name=self.options.dbname
        if not isinstance(name,StringTypes):
            raise Exception
    except Exception:
        # no (usable) dbname option: use the current date as discriminator
        t=datetime.datetime.now()
        name=str(t.date())
    return "/root/%s-%s.sql"%(database,name)
def db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # the 'def' line was missing from this chunk; restored.
    # NOTE(review): the dbfile label reads "planetab5" (sic) -- kept as-is
    # because db_restore computes the very same filename
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # step methods must return a boolean (see module header)
    return True
def db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    dump=self.dbfile("planetab5")
    # stop httpd so no client holds db connections while we swap the db
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    # restart httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
    # step methods must return a boolean (see module header); without this
    # the step returned None and was reported as failed even on success
    return True
# Placeholder steps that simply wait N minutes each.
# NOTE(review): each of these is expected to carry a @standby_generic
# decorator (defined near the top of the module, it derives the duration
# from the _N suffix and calls standby()); the decorator lines are not
# visible in this chunk -- confirm against the full file.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass