7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
23 def standby(minutes,dry_run):
24 utils.header('Entering StandBy for %d mn'%minutes)
28 time.sleep(60*minutes)
31 def standby_generic (func):
33 minutes=int(func.__name__.split("_")[1])
34 return standby(minutes,self.options.dry_run)
37 def node_mapper (method):
40 node_method = TestNode.__dict__[method.__name__]
41 for site_spec in self.plc_spec['sites']:
42 test_site = TestSite (self,site_spec)
43 for node_spec in site_spec['nodes']:
44 test_node = TestNode (self,test_site,node_spec)
45 if not node_method(test_node): overall=False
49 def slice_mapper_options (method):
52 slice_method = TestSlice.__dict__[method.__name__]
53 for slice_spec in self.plc_spec['slices']:
54 site_spec = self.locate_site (slice_spec['sitename'])
55 test_site = TestSite(self,site_spec)
56 test_slice=TestSlice(self,test_site,slice_spec)
57 if not slice_method(test_slice,self.options): overall=False
65 default_steps = ['uninstall','install','install_rpm',
66 'configure', 'start', SEP,
67 'store_keys', 'clear_known_hosts', 'initscripts', SEP,
68 'sites', 'nodes', 'slices', 'nodegroups', SEP,
69 'init_node','bootcd', 'configure_qemu', 'export_qemu',
70 'kill_all_qemus', 'reinstall_node','start_node', SEP,
71 'nodes_booted', 'nodes_ssh', 'check_slice',
72 'check_initscripts', 'check_tcp',SEP,
73 'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
74 other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', SEP,
75 'clean_initscripts', 'clean_sites', 'clean_nodes',
76 'clean_slices', 'clean_keys', SEP,
77 'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
78 'db_dump' , 'db_restore', ' cleanup_tracker',
79 'standby_1 through 20'
def printable_steps (list):
    """Render a list of step names as a single string, with a line break at each SEP marker."""
    rendered = " ".join(list)
    return rendered.replace(" "+SEP+" "," \\\n")
86 def valid_step (step):
89 def __init__ (self,plc_spec,options):
90 self.plc_spec=plc_spec
92 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
94 self.vserverip=plc_spec['vserverip']
95 self.vservername=plc_spec['vservername']
96 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
99 raise Exception,'chroot-based myplc testing is deprecated'
100 self.apiserver=TestApiserver(self.url,options.dry_run)
103 name=self.plc_spec['name']
104 return "%s.%s"%(name,self.vservername)
107 return self.plc_spec['hostname']
110 return self.test_ssh.is_local()
112 # define the API methods on this object through xmlrpc
113 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Return the full host-side command line that runs *command* inside the guest vserver."""
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def run_in_guest (self,command):
    """Execute *command* inside the guest vserver; returns the shell exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Execute *command* on the hosting test box (outside the guest vserver)."""
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
#command gets run in the vserver
def host_to_guest(self,command):
    """Return *command* wrapped so that it executes inside this plc's vserver."""
    vserver = self.vservername
    return "vserver %s exec %s"%(vserver,command)
def run_in_guest_piped (self,local,remote):
    """Run *local* on the build host and pipe its stdout into *remote* executed inside the guest."""
    guest_part = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    pipeline = local + " | " + guest_part
    return utils.system(pipeline)
def auth_root (self):
    """Build the root-user authentication struct for PLCAPI calls, from plc_spec."""
    spec = self.plc_spec
    return {'Username' : spec['PLC_ROOT_USER'],
            'AuthMethod' : 'password',
            'AuthString' : spec['PLC_ROOT_PASSWORD'],
            'Role' : spec['role'],
            }
def locate_site (self,sitename):
    """Find a site spec by display name or login_base; raises if not found.

    NOTE(review): this listing is truncated - the 'return site' lines that
    follow each matching test are missing here.
    """
    for site in self.plc_spec['sites']:
        # match on the site's display name first
        if site['site_fields']['name'] == sitename:
        # then on its login_base
        if site['site_fields']['login_base'] == sitename:
    # fell through every site without a match
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    """Find a node spec by its 'name' across all sites; raises if not found.

    NOTE(review): truncated listing - the return inside the match branch is
    missing; callers (e.g. locate_sliver_obj) unpack it as (site, node).
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    """Find a node spec by its node_fields hostname; raises if not found.

    NOTE(review): truncated listing - the return inside the match branch is
    missing; callers unpack the result as (site_spec, node_spec).
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    """Find a key spec by name in plc_spec['keys']; raises if not found.

    NOTE(review): truncated listing - the return inside the match branch is
    missing here.
    """
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    """Find a slice spec by its slice_fields name; raises if not found.

    NOTE(review): truncated listing - the return inside the match branch is
    missing here.
    """
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
174 # all different hostboxes used in this plc
175 def gather_hostBoxes(self):
176 # maps on sites and nodes, return [ (host_box,test_node) ]
178 for site_spec in self.plc_spec['sites']:
179 test_site = TestSite (self,site_spec)
180 for node_spec in site_spec['nodes']:
181 test_node = TestNode (self, test_site, node_spec)
182 if not test_node.is_real():
183 tuples.append( (test_node.host_box(),test_node) )
184 # transform into a dict { 'host_box' -> [ test_node .. ] }
186 for (box,node) in tuples:
187 if not result.has_key(box):
190 result[box].append(node)
193 # a step for checking this stuff
194 def show_boxes (self):
195 for (box,nodes) in self.gather_hostBoxes().iteritems():
196 print box,":"," + ".join( [ node.name() for node in nodes ] )
199 # make this a valid step
200 def kill_all_qemus(self):
201 # this is the brute force version, kill all qemus on that host box
202 for (box,nodes) in self.gather_hostBoxes().iteritems():
203 # pass the first nodename, as we don't push template-qemu on testboxes
204 nodedir=nodes[0].nodedir()
205 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
208 # make this a valid step
209 def list_all_qemus(self):
210 for (box,nodes) in self.gather_hostBoxes().iteritems():
211 # this is the brute force version, kill all qemus on that host box
212 TestBox(box,self.options.buildname).list_all_qemus()
215 # kill only the right qemus
216 def list_qemus(self):
217 for (box,nodes) in self.gather_hostBoxes().iteritems():
218 # the fine-grain version
223 # kill only the right qemus
224 def kill_qemus(self):
225 for (box,nodes) in self.gather_hostBoxes().iteritems():
226 # the fine-grain version
232 ### utility methods for handling the pool of IP addresses allocated to plcs
234 # (*) running plcs are recorded in the file named ~/running-test-plcs
235 # (*) this file contains a line for each running plc, older first
236 # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
237 # (*) the free_tracker method performs a vserver stop on the oldest entry
238 # (*) the record_tracker method adds an entry at the bottom of the file
239 # (*) the cleanup_tracker method stops all known vservers and removes the tracker file
241 TRACKER_FILE="~/running-test-plcs"
243 def record_tracker (self):
244 command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
245 (code,output) = utils.output_of (self.test_ssh.actual_command(command))
247 print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
249 print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
252 def free_tracker (self):
253 command="head -1 %s"%TestPlc.TRACKER_FILE
254 (code,line) = utils.output_of(self.test_ssh.actual_command(command))
256 print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
259 [vserver_to_stop,hostname] = line.split()
261 print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
263 stop_command = "vserver --silent %s stop"%vserver_to_stop
264 utils.system(self.test_ssh.actual_command(stop_command))
265 x=TestPlc.TRACKER_FILE
266 flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
267 utils.system(self.test_ssh.actual_command(flush_command))
270 # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
271 def cleanup_tracker (self):
272 stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
273 utils.system(self.test_ssh.actual_command(stop_all))
274 clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
275 utils.system(self.test_ssh.actual_command(clean_tracker))
278 self.run_in_host("vserver --silent %s delete"%self.vservername)
283 # we need build dir for vtest-init-vserver
285 # a full path for the local calls
286 build_dir=os.path.dirname(sys.argv[0])+"/build"
288 # use a standard name - will be relative to remote buildname
290 # run checkout in any case - would do an update if already exists
291 build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
292 if self.run_in_host(build_checkout) != 0:
294 # the repo url is taken from arch-rpms-url
295 # with the last step (i386.) removed
296 repo_url = self.options.arch_rpms_url
297 for level in [ 'arch' ]:
298 repo_url = os.path.dirname(repo_url)
299 if self.options.arch == "i386":
300 personality_option="-p linux32"
302 personality_option="-p linux64"
303 script="vtest-init-vserver.sh"
304 vserver_name = self.vservername
305 vserver_options="--netdev eth0 --interface %s"%self.vserverip
307 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
308 vserver_options += " --hostname %s"%vserver_hostname
311 create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
312 return self.run_in_host(create_vserver) == 0
def install_rpm(self):
    """Install the native myplc package inside the guest; True on success."""
    status = self.run_in_guest("yum -y install myplc-native")
    return status == 0
320 tmpname='%s.plc-config-tty'%(self.name())
321 fileconf=open(tmpname,'w')
322 for var in [ 'PLC_NAME',
326 'PLC_MAIL_SUPPORT_ADDRESS',
333 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
334 fileconf.write('w\n')
335 fileconf.write('q\n')
337 utils.system('cat %s'%tmpname)
338 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
339 utils.system('rm %s'%tmpname)
343 self.run_in_guest('service plc start')
347 self.run_in_guest('service plc stop')
350 # could use a TestKey class
351 def store_keys(self):
352 for key_spec in self.plc_spec['keys']:
353 TestKey(self,key_spec).store_key()
def clean_keys(self):
    """Step: remove the locally-stored keys/ directory next to this script.

    Bug fix: the original called os.path(sys.argv[0]); os.path is a module
    and is not callable, so this raised TypeError at runtime. The intent -
    consistent with the build_dir computation used for the vserver install
    (os.path.dirname(sys.argv[0])+"/build") - is the directory containing
    the running script.
    """
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
360 return self.do_sites()
362 def clean_sites (self):
363 return self.do_sites(action="delete")
365 def do_sites (self,action="add"):
366 for site_spec in self.plc_spec['sites']:
367 test_site = TestSite (self,site_spec)
368 if (action != "add"):
369 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
370 test_site.delete_site()
371 # deleted with the site
372 #test_site.delete_users()
375 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
376 test_site.create_site()
377 test_site.create_users()
381 return self.do_nodes()
382 def clean_nodes (self):
383 return self.do_nodes(action="delete")
385 def do_nodes (self,action="add"):
386 for site_spec in self.plc_spec['sites']:
387 test_site = TestSite (self,site_spec)
389 utils.header("Deleting nodes in site %s"%test_site.name())
390 for node_spec in site_spec['nodes']:
391 test_node=TestNode(self,test_site,node_spec)
392 utils.header("Deleting %s"%test_node.name())
393 test_node.delete_node()
395 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
396 for node_spec in site_spec['nodes']:
397 utils.pprint('Creating node %s'%node_spec,node_spec)
398 test_node = TestNode (self,test_site,node_spec)
399 test_node.create_node ()
402 # create nodegroups if needed, and populate
403 # no need for a clean_nodegroups if we are careful enough
404 def nodegroups (self):
405 # 1st pass to scan contents
407 for site_spec in self.plc_spec['sites']:
408 test_site = TestSite (self,site_spec)
409 for node_spec in site_spec['nodes']:
410 test_node=TestNode (self,test_site,node_spec)
411 if node_spec.has_key('nodegroups'):
412 nodegroupnames=node_spec['nodegroups']
413 if isinstance(nodegroupnames,StringTypes):
414 nodegroupnames = [ nodegroupnames ]
415 for nodegroupname in nodegroupnames:
416 if not groups_dict.has_key(nodegroupname):
417 groups_dict[nodegroupname]=[]
418 groups_dict[nodegroupname].append(test_node.name())
419 auth=self.auth_root()
420 for (nodegroupname,group_nodes) in groups_dict.iteritems():
421 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
422 # first, check if the nodetagtype is here
423 tag_types = self.apiserver.GetNodeTagTypes(auth,{'tagname':nodegroupname})
425 tag_type_id = tag_types[0]['node_tag_type_id']
426 print 'node-tag-type',nodegroupname,'already exists'
428 tag_type_id = self.apiserver.AddNodeTagType(auth,
429 {'tagname':nodegroupname,
430 'description': 'for nodegroup %s'%nodegroupname,
434 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
436 print 'nodegroup',nodegroupname,'already exists'
438 self.apiserver.AddNodeGroup(auth,
439 {'groupname': nodegroupname,
440 'node_tag_type_id': tag_type_id,
442 # set node tag on all nodes, value='yes'
444 for nodename in group_nodes:
446 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
448 print 'node',nodename,'seems to already have tag',nodegroupname
451 expect_yes = self.apiserver.GetNodeTags(
453 {'hostname':nodename,
454 'tagname':nodegroupname},
455 ['tagvalue'])[0]['tagvalue']
456 if expect_yes != "yes":
457 print 'Mismatch node tag on node',nodename,'got',expect_yes
460 print 'Cannot find tag',nodegroupname,'on node',nodename
464 def all_hostnames (self) :
466 for site_spec in self.plc_spec['sites']:
467 hostnames += [ node_spec['node_fields']['hostname'] \
468 for node_spec in site_spec['nodes'] ]
471 # gracetime : during the first <gracetime> minutes nothing gets printed
472 def do_nodes_booted (self, minutes, gracetime,period=30):
473 if self.options.dry_run:
477 timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
478 graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
479 # the nodes that haven't checked yet - start with a full list and shrink over time
480 tocheck = self.all_hostnames()
481 utils.header("checking nodes %r"%tocheck)
482 # create a dict hostname -> status
483 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
486 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
488 for array in tocheck_status:
489 hostname=array['hostname']
490 boot_state=array['boot_state']
491 if boot_state == 'boot':
492 utils.header ("%s has reached the 'boot' state"%hostname)
494 # if it's a real node, never mind
495 (site_spec,node_spec)=self.locate_hostname(hostname)
496 if TestNode.is_real_model(node_spec['node_fields']['model']):
497 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
500 elif datetime.datetime.now() > graceout:
501 utils.header ("%s still in '%s' state"%(hostname,boot_state))
502 graceout=datetime.datetime.now()+datetime.timedelta(1)
503 status[hostname] = boot_state
505 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
508 if datetime.datetime.now() > timeout:
509 for hostname in tocheck:
510 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
512 # otherwise, sleep for a while
514 # only useful in empty plcs
517 def nodes_booted(self):
518 return self.do_nodes_booted(minutes=20,gracetime=15)
520 def do_nodes_ssh(self,minutes,gracetime,period=30):
522 timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
523 graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
524 tocheck = self.all_hostnames()
525 # self.scan_publicKeys(tocheck)
526 utils.header("checking Connectivity on nodes %r"%tocheck)
528 for hostname in tocheck:
529 # try to ssh in nodes
530 node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
531 success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
533 utils.header('The node %s is sshable -->'%hostname)
535 tocheck.remove(hostname)
537 # we will have tried real nodes once, in case they're up - but if not, just skip
538 (site_spec,node_spec)=self.locate_hostname(hostname)
539 if TestNode.is_real_model(node_spec['node_fields']['model']):
540 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
541 tocheck.remove(hostname)
542 elif datetime.datetime.now() > graceout:
543 utils.header("Could not ssh-enter root context on %s"%hostname)
546 if datetime.datetime.now() > timeout:
547 for hostname in tocheck:
548 utils.header("FAILURE to ssh into %s"%hostname)
550 # otherwise, sleep for a while
552 # only useful in empty plcs
556 return self.do_nodes_ssh(minutes=6,gracetime=4)
559 def init_node (self): pass
561 def bootcd (self): pass
563 def configure_qemu (self): pass
565 def reinstall_node (self): pass
567 def export_qemu (self): pass
569 def do_check_initscripts(self):
571 for slice_spec in self.plc_spec['slices']:
572 if not slice_spec.has_key('initscriptname'):
574 initscript=slice_spec['initscriptname']
575 for nodename in slice_spec['nodenames']:
576 (site,node) = self.locate_node (nodename)
577 # xxx - passing the wrong site - probably harmless
578 test_site = TestSite (self,site)
579 test_slice = TestSlice (self,test_site,slice_spec)
580 test_node = TestNode (self,test_site,node)
581 test_sliver = TestSliver (self, test_node, test_slice)
582 if not test_sliver.check_initscript(initscript):
586 def check_initscripts(self):
587 return self.do_check_initscripts()
589 def initscripts (self):
590 for initscript in self.plc_spec['initscripts']:
591 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
592 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
595 def clean_initscripts (self):
596 for initscript in self.plc_spec['initscripts']:
597 initscript_name = initscript['initscript_fields']['name']
598 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
600 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
601 print initscript_name,'deleted'
603 print 'deletion went wrong - probably did not exist'
607 return self.do_slices()
609 def clean_slices (self):
610 return self.do_slices("delete")
612 def do_slices (self, action="add"):
613 for slice in self.plc_spec['slices']:
614 site_spec = self.locate_site (slice['sitename'])
615 test_site = TestSite(self,site_spec)
616 test_slice=TestSlice(self,test_site,slice)
618 utils.header("Deleting slices in site %s"%test_site.name())
619 test_slice.delete_slice()
621 utils.pprint("Creating slice",slice)
622 test_slice.create_slice()
623 utils.header('Created Slice %s'%slice['slice_fields']['name'])
626 @slice_mapper_options
627 def check_slice(self): pass
630 def clear_known_hosts (self): pass
633 def start_node (self) : pass
635 def all_sliver_objs (self):
637 for slice_spec in self.plc_spec['slices']:
638 slicename = slice_spec['slice_fields']['name']
639 for nodename in slice_spec['nodenames']:
640 result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    """Build and return the TestSliver object for (nodename, slicename)."""
    (site_spec, node_spec) = self.locate_node(nodename)
    slice_spec = self.locate_slice (slicename)
    test_site = TestSite(self, site_spec)
    test_node = TestNode(self, test_site, node_spec)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice(self, test_site, slice_spec)
    return TestSliver(self, test_node, test_slice)
def check_tcp (self):
    """Step: run a tcp server in one sliver and a client in another, per plc_spec['tcp_test'].

    NOTE(review): this listing is truncated - the loop over specs, the
    'port' binding and the success/failure return paths are missing here.
    """
    specs = self.plc_spec['tcp_test']
    # server side: locate the server sliver and start a tcp server in it
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # BUG(review): the client sliver is located with the *server* spec keys
    # ('server_node'/'server_slice'); presumably this should read
    # spec['client_node'] and spec['client_slice'] - confirm against the
    # tcp_test spec format before fixing.
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
671 def gather_logs (self):
672 # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
673 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
674 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
675 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
677 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
678 self.gather_var_logs ()
680 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
681 for site_spec in self.plc_spec['sites']:
682 test_site = TestSite (self,site_spec)
683 for node_spec in site_spec['nodes']:
684 test_node=TestNode(self,test_site,node_spec)
685 test_node.gather_qemu_logs()
687 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
688 self.gather_nodes_var_logs()
690 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
691 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    """Fetch each sliver's /var/log into a local logs/sliver.var-log.<sliver>/ directory."""
    for sliver in self.all_sliver_objs():
        sliver_name = sliver.name()
        remote_tar = sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s"%sliver_name)
        extract = remote_tar + " | tar -C logs/sliver.var-log.%s -xf -"%sliver_name
        utils.system(extract)
def gather_var_logs (self):
    """Fetch the plc guest's /var/log into a local logs/myplc.var-log.<plcname>/ directory."""
    plcname = self.name()
    utils.system("mkdir -p logs/myplc.var-log.%s"%plcname)
    tar_cmd = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    extract = tar_cmd + "| tar -C logs/myplc.var-log.%s -xf -"%plcname
    utils.system(extract)
def gather_nodes_var_logs (self):
    """Fetch every node's /var/log (reached via the plc guest) into logs/node.var-log.<node>/."""
    for site in self.plc_spec['sites']:
        tsite = TestSite(self, site)
        for node in site['nodes']:
            tnode = TestNode(self, tsite, node)
            nodename = tnode.name()
            # ssh from inside the plc guest into the node as root
            node_ssh = TestSsh(nodename,key="/etc/planetlab/root_ssh_key.rsa")
            tar_cmd = self.actual_command_in_guest(node_ssh.actual_command("tar -C /var/log -cf - ."))
            utils.system("mkdir -p logs/node.var-log.%s"%nodename)
            utils.system(tar_cmd + "| tar -C logs/node.var-log.%s -xf -"%nodename)
720 # returns the filename to use for sql dump/restore, using options.dbname if set
721 def dbfile (self, database):
722 # uses options.dbname if it is found
724 name=self.options.dbname
725 if not isinstance(name,StringTypes):
728 t=datetime.datetime.now()
731 return "/root/%s-%s.sql"%(database,name)
734 dump=self.dbfile("planetab4")
735 self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
736 utils.header('Dumped planetlab4 database in %s'%dump)
739 def db_restore(self):
740 dump=self.dbfile("planetab4")
742 self.run_in_guest('service httpd stop')
743 # xxx - need another wrapper
744 self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
745 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
746 self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
747 ##starting httpd service
748 self.run_in_guest('service httpd start')
750 utils.header('Database restored from ' + dump)
# Dummy standby step bodies. In the full source each def below is preceded
# by a @standby_generic decorator (those lines are missing from this
# truncated listing); the decorator derives a sleep duration in minutes
# from the trailing number of the method name and calls standby() with it.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass