7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
# Pause the overall test sequence for <minutes> minutes (backs the
# standby_N steps at the bottom of the file).
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    # NOTE(review): intervening lines are missing from this view (presumably
    # the dry_run short-circuit and a return value) -- confirm against the
    # full file before relying on the exact behavior.
    time.sleep(60*minutes)
# Decorator factory: derives a sleep duration from the decorated function's
# name (standby_<minutes>) and delegates to standby().
def standby_generic (func):
    # NOTE(review): the inner wrapper def that binds `self` is missing from
    # this view; as shown, `self` below is unbound -- confirm.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
# Decorator: lift a TestNode method of the same name into a TestPlc step
# that applies it to every node of every site in plc_spec.
def node_mapper (method):
    # NOTE(review): the inner wrapper def, the `overall` initialization and
    # the final return are missing from this view -- confirm.
    # look the method up by name on the TestNode class
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
# Decorator: lift a TestSlice method of the same name into a TestPlc step
# that applies it (with self.options) to every slice in plc_spec.
def slice_mapper_options (method):
    # NOTE(review): the inner wrapper def, the `overall` initialization and
    # the final return are missing from this view -- confirm.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
# Ordered list of the steps run by default; SEP entries are visual
# separators used when the step list is printed.
default_steps = ['uninstall','install','install_rpm',
                 'configure', 'start', SEP,
                 'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                 'sites', 'nodes', 'slices', 'nodegroups', SEP,
                 'init_node','bootcd', 'configure_qemu', 'export_qemu',
                 'kill_all_qemus', 'reinstall_node','start_node', SEP,
                 'nodes_booted', 'nodes_ssh', 'check_slice',
                 'check_initscripts', 'check_tcp',SEP,
                 'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
# Extra steps that must be requested explicitly.
# NOTE(review): ' cleanup_tracker' carries a stray leading space, which would
# prevent it from ever matching a requested step name -- confirm and fix
# (together with whatever matching logic consumes this list).
# NOTE(review): the closing bracket of this list is not visible in this view.
other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', SEP,
                'clean_initscripts', 'clean_sites', 'clean_nodes',
                'clean_slices', 'clean_keys', SEP,
                'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                'db_dump' , 'db_restore', ' cleanup_tracker',
                'standby_1 through 20'
def printable_steps (list):
    """Render a list of step names as a single space-separated string,
    breaking the line (with a trailing backslash) at each SEP marker."""
    flat = " ".join(list)
    separator = " " + SEP + " "
    return flat.replace(separator, " \\\n")
# Predicate: is <step> an actual runnable step (i.e. not the SEP marker)?
# NOTE(review): the body of this function is missing from this view.
def valid_step (step):
def __init__ (self,plc_spec,options):
    """Bind this test-plc object to its spec dict and command-line options.

    NOTE(review): several constructor lines are missing from this view
    (including the assignment of self.options, which is read below, and the
    chroot/vserver mode dispatch guarding the raise) -- confirm.
    """
    self.plc_spec=plc_spec
    self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    # the myplc XMLRPC endpoint inside the vserver
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    # NOTE(review): presumably inside a missing `if` testing for chroot mode
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)

    # -- fragment of name(): its def line is missing from this view
    name=self.plc_spec['name']
    return "%s.%s"%(name,self.vservername)

    # -- fragment of hostname(): its def line is missing from this view
    return self.plc_spec['hostname']

    # -- fragment of is_local(): its def line is missing from this view
    return self.test_ssh.is_local()
112 # define the API methods on this object through xmlrpc
113 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Wrap <command> so it executes inside the plc vserver, then wrap the
    result again into the ssh command line that reaches the host box."""
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def run_in_guest (self,command):
    """Run <command> inside the plc vserver; returns the shell exit code."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run <command> on the test box itself (outside the vserver)."""
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
def host_to_guest(self,command):
    """Return the host-side command line that runs <command> inside the
    plc's vserver (via `vserver <name> exec`)."""
    wrapper = "vserver %s exec %s"
    return wrapper % (self.vservername, command)
def run_in_guest_piped (self,local,remote):
    """Run <local> on this box and pipe its stdout into <remote> executed
    inside the plc vserver; returns the pipeline's exit code."""
    remote_side = self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True)
    pipeline = local + " | " + remote_side
    return utils.system(pipeline)
def auth_root (self):
    """Return the PLCAPI auth struct for the root account (password method).

    NOTE(review): the closing brace of this dict literal is not visible in
    this view -- confirm against the full file.
    """
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
# The locate_* helpers scan plc_spec for a named entity and raise when it
# cannot be found.
# NOTE(review): the `return` lines inside every match branch below are
# missing from this view -- confirm against the full file.

def locate_site (self,sitename):
    # match either the display name or the login_base
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename

def locate_node (self,nodename):
    # presumably returns (site,node) -- see callers such as locate_sliver_obj
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename

def locate_hostname (self,hostname):
    # presumably returns (site,node) -- see callers in do_nodes_booted
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname

def locate_key (self,keyname):
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname

def locate_slice (self, slicename):
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    """Group the plc's qemu nodes by the host box that runs them.

    NOTE(review): the initializations of `tuples` and `result`, the
    `result[box]=[]` branch and the final return are missing from this
    view -- confirm against the full file.
    """
    # maps on sites and nodes, return [ (host_box,test_node) ]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # real (physical) nodes have no hosting qemu box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
# a step for checking this stuff
def show_boxes (self):
    """Step: print, for each host box, the qemu nodes it hosts."""
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )
# make this a valid step
def kill_all_qemus(self):
    """Step: brute-force kill every qemu instance on every host box."""
    for (hostbox, test_nodes) in self.gather_hostBoxes().iteritems():
        # we don't push template-qemu on testboxes, so hand over the
        # first node's directory instead
        first_nodedir = test_nodes[0].nodedir()
        TestBox(hostbox, self.options.buildname).kill_all_qemus(first_nodedir)
# make this a valid step
def list_all_qemus(self):
    """Step: list every qemu instance on each host box (brute force)."""
    for (hostbox, _nodes) in self.gather_hostBoxes().iteritems():
        test_box = TestBox(hostbox, self.options.buildname)
        test_box.list_all_qemus()
# kill only the right qemus
def list_qemus(self):
    """Step: list only the qemus belonging to this plc's nodes.
    NOTE(review): the loop body is missing from this view."""
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version

# kill only the right qemus
def kill_qemus(self):
    """Kill only the qemus belonging to this plc's nodes.
    NOTE(review): the loop body is missing from this view."""
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
### utility methods for handling the pool of IP addresses allocated to plcs
# (*) running plcs are recorded in the file named ~/running-test-plcs
# (*) this file contains a line for each running plc, older first
# (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
# (*) the free_tracker method performs a vserver stop on the oldest entry
# (*) the record_tracker method adds an entry at the bottom of the file
# (*) the cleanup_tracker method stops all known vservers and removes the tracker file

# class-level constant; referenced below as TestPlc.TRACKER_FILE
TRACKER_FILE="~/running-test-plcs"
def record_tracker (self):
    """Append this plc (vserver name + test box hostname) to the tracker file.

    NOTE(review): the if/else lines testing the command's exit code are
    missing from this view -- as written, both prints would run.
    """
    command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
    (code,output) = utils.output_of (self.test_ssh.actual_command(command))
    print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
    print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
def free_tracker (self):
    """Stop the oldest tracked plc vserver and drop its tracker-file entry.

    NOTE(review): the exit-code test, the try/except around the parse and
    intermediate control-flow lines are missing from this view -- confirm.
    """
    # oldest entry is the first line of the tracker file
    command="head -1 %s"%TestPlc.TRACKER_FILE
    (code,line) = utils.output_of(self.test_ssh.actual_command(command))
    print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
    [vserver_to_stop,hostname] = line.split()
    print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
    stop_command = "vserver --silent %s stop"%vserver_to_stop
    utils.system(self.test_ssh.actual_command(stop_command))
    # drop the first line from the tracker file
    x=TestPlc.TRACKER_FILE
    flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
    utils.system(self.test_ssh.actual_command(flush_command))
# this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
def cleanup_tracker (self):
    """Stop every vserver on the test box, then remove the tracker file."""
    for shell_command in [
        "cd /vservers ; for i in * ; do vserver --silent $i stop ; done",
        "rm -f %s"%TestPlc.TRACKER_FILE,
        ]:
        utils.system(self.test_ssh.actual_command(shell_command))
    # -- fragment of the uninstall step: its def line is missing from this view
    self.run_in_host("vserver --silent %s delete"%self.vservername)

    # -- fragment of the vserver-install step: its def line and several
    # branch lines are missing from this view; indentation is approximate
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # run checkout in any case - would do an update if already exists
    build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
    if self.run_in_host(build_checkout) != 0:
    # the repo url is taken from arch-rpms-url
    # with the last step (i386.) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pick the vserver personality matching the target arch
    if self.options.arch == "i386":
        personality_option="-p linux32"
    # NOTE(review): the `else:` line appears to be missing from this view
    personality_option="-p linux64"
    script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    # NOTE(review): presumably wrapped in a missing try/except, since
    # gethostbyaddr can fail for unregistered addresses
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    # assemble the creation command from the locals gathered above
    create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def install_rpm(self):
    """Step: yum-install the native myplc package inside the vserver.
    Returns True when yum exits cleanly."""
    exit_code = self.run_in_guest("yum -y install myplc-native")
    return exit_code == 0
    # -- fragment of the configure step: its def line, part of the PLC_*
    # variable list and the closing of that list are missing from this view
    # write a plc-config-tty script locally, show it, pipe it in, clean up
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
        # one 'e <var> / <value>' pair per config variable
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    fileconf.write('w\n')
    fileconf.write('q\n')
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)

    # -- fragment of the start step (def line missing from this view)
    self.run_in_guest('service plc start')

    # -- fragment of the stop step (def line missing from this view)
    self.run_in_guest('service plc stop')
def store_keys(self):
    """Step: store every ssh key declared in the plc spec."""
    for spec in self.plc_spec['keys']:
        key = TestKey(self, spec)
        key.store_key()
def clean_keys(self):
    """Step: remove the locally stored keys/ directory.

    Bug fix: the original called os.path(sys.argv[0]) -- os.path is a
    module, not a callable, so this step always raised TypeError.  The
    intent (matching the build_dir handling in the install step) is the
    directory of the running script, i.e. os.path.dirname(sys.argv[0]),
    falling back to "." when that is empty so we never build "rm -rf /keys/".
    """
    directory = os.path.dirname(sys.argv[0]) or "."
    utils.system("rm -rf %s/keys/"%directory)
    # -- fragment of the sites step (its def line is missing from this view)
    return self.do_sites()

def clean_sites (self):
    """Step: delete the sites declared in the spec (users go with them)."""
    return self.do_sites(action="delete")

def do_sites (self,action="add"):
    """Create (default) or delete every site and its users.

    NOTE(review): the `else:` line between the delete and create branches
    and the final return are missing from this view; indentation below is
    approximate -- confirm against the full file.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
        utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
        test_site.create_site()
        test_site.create_users()
    # -- fragment of the nodes step (its def line is missing from this view)
    return self.do_nodes()

def clean_nodes (self):
    """Step: delete every node declared in the spec."""
    return self.do_nodes(action="delete")

def do_nodes (self,action="add"):
    """Create (default) or delete the nodes of every site.

    NOTE(review): the `if action != "add":` / `else:` lines and the final
    return are missing from this view; indentation below is approximate.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        # delete branch
        utils.header("Deleting nodes in site %s"%test_site.name())
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            utils.header("Deleting %s"%test_node.name())
            test_node.delete_node()
        # create branch
        utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
        for node_spec in site_spec['nodes']:
            utils.pprint('Creating node %s'%node_spec,node_spec)
            test_node = TestNode (self,test_site,node_spec)
            test_node.create_node ()
# create nodegroups if needed, and populate
# no need for a clean_nodegroups if we are careful enough
def nodegroups (self):
    """Step: create node tag types and nodegroups from the spec, then tag
    the member nodes with value 'yes' and verify the tags.

    NOTE(review): numerous control-flow lines (the groups_dict init, the
    if/else around existing tag types and nodegroups, the try/except pairs
    around AddNodeTag/GetNodeTags, argument continuation lines and the
    return) are missing from this view; indentation below is approximate.
    """
    # 1st pass to scan contents
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # accept a single name as well as a list of names
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetNodeTagTypes(auth,{'tagname':nodegroupname})
        # (missing `if tag_types:` line)
        tag_type_id = tag_types[0]['node_tag_type_id']
        print 'node-tag-type',nodegroupname,'already exists'
        # (missing `else:` line; the argument dict below is also truncated)
        tag_type_id = self.apiserver.AddNodeTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
        # then check the nodegroup itself
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
        print 'nodegroup',nodegroupname,'already exists'
        # (missing `else:` line; the argument dict below is also truncated)
        self.apiserver.AddNodeGroup(auth,
                                    {'groupname': nodegroupname,
                                     'node_tag_type_id': tag_type_id,
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
            self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
            # (missing except branch header above this print)
            print 'node',nodename,'seems to already have tag',nodegroupname
            # check anyway -- (missing lines, including the auth argument)
            expect_yes = self.apiserver.GetNodeTags(
                {'hostname':nodename,
                 'tagname':nodegroupname},
                ['tagvalue'])[0]['tagvalue']
            if expect_yes != "yes":
                print 'Mismatch node tag on node',nodename,'got',expect_yes
            # (missing except branch header above this print)
            print 'Cannot find tag',nodegroupname,'on node',nodename
def all_hostnames (self) :
    """Collect every node hostname across all sites of the spec.

    NOTE(review): the accumulator initialization and the return statement
    are missing from this view -- confirm against the full file.
    """
    for site_spec in self.plc_spec['sites']:
        hostnames += [ node_spec['node_fields']['hostname'] \
                       for node_spec in site_spec['nodes'] ]
# gracetime : during the first <gracetime> minutes nothing gets printed
def do_nodes_booted (self, minutes, gracetime,period=30):
    """Poll GetNodes until every qemu node reaches the 'boot' state.

    NOTE(review): the dry_run body, the polling-loop header, the sleep on
    <period>, and the success/failure returns are missing from this view;
    indentation below is approximate -- confirm against the full file.
    """
    if self.options.dry_run:
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
    tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
    for array in tocheck_status:
        hostname=array['hostname']
        boot_state=array['boot_state']
        if boot_state == 'boot':
            utils.header ("%s has reached the 'boot' state"%hostname)
        # if it's a real node, never mind
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
        elif datetime.datetime.now() > graceout:
            utils.header ("%s still in '%s' state"%(hostname,boot_state))
            # push graceout far into the future so we print only once per period
            graceout=datetime.datetime.now()+datetime.timedelta(1)
        status[hostname] = boot_state
    # keep only the nodes that have not reached 'boot' yet
    tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
    # otherwise, sleep for a while
    # only useful in empty plcs
def nodes_booted(self):
    """Step: wait up to 20 minutes (15 minutes grace) for all nodes to boot."""
    return self.do_nodes_booted(gracetime=15, minutes=20)
def do_nodes_ssh(self,minutes,gracetime,period=30):
    """Poll ssh reachability of every node until success or timeout.

    NOTE(review): the polling-loop header, the success test around the
    'is sshable' print, the sleep on <period> and the returns are missing
    from this view; indentation below is approximate.
    """
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
    tocheck = self.all_hostnames()
    # self.scan_publicKeys(tocheck)
    utils.header("checking Connectivity on nodes %r"%tocheck)
    for hostname in tocheck:
        # try to ssh in nodes
        node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
        success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
        utils.header('The node %s is sshable -->'%hostname)
        tocheck.remove(hostname)
        # we will have tried real nodes once, in case they're up - but if not, just skip
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
            tocheck.remove(hostname)
        elif datetime.datetime.now() > graceout:
            utils.header("Could not ssh-enter root context on %s"%hostname)
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE to ssh into %s"%hostname)
    # otherwise, sleep for a while
    # only useful in empty plcs

    # -- fragment of nodes_ssh() (its def line is missing from this view)
    return self.do_nodes_ssh(minutes=6,gracetime=4)
# Empty-bodied steps: the real work lives in the TestNode methods of the
# same names, applied to every node.
# NOTE(review): the @node_mapper decorator lines that normally precede each
# stub appear to be missing from this view -- confirm.
def init_node (self): pass
def bootcd (self): pass
def configure_qemu (self): pass
def reinstall_node (self): pass
def export_qemu (self): pass
def do_check_initscripts(self):
    """Verify each slice's declared initscript on every one of its nodes.

    NOTE(review): the overall-flag initialization/updates, the `continue`
    for slices without an initscript, and the return are missing from this
    view -- confirm against the full file.
    """
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptname'):
        initscript=slice_spec['initscriptname']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    """Step: verify initscripts on all slivers (see do_check_initscripts)."""
    result = self.do_check_initscripts()
    return result
def initscripts (self):
    """Step: register every initscript from the spec through the API.
    NOTE(review): the return statement is missing from this view."""
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])

def clean_initscripts (self):
    """Step: delete the spec's initscripts, tolerating missing ones.
    NOTE(review): the try/except lines around the delete call are missing
    from this view -- as written, both prints would run."""
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
    # -- fragment of the slices step (its def line is missing from this view)
    return self.do_slices()

def clean_slices (self):
    """Step: delete every slice declared in the spec."""
    return self.do_slices("delete")

def do_slices (self, action="add"):
    """Create (default) or delete the slices declared in the spec.

    NOTE(review): the if/else lines separating the delete and create
    branches and the final return are missing from this view; indentation
    below is approximate.
    """
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
# delegated to TestSlice.check_slice via the slice mapper
@slice_mapper_options
def check_slice(self): pass

# NOTE(review): the decorator lines (presumably @node_mapper) for the two
# stubs below are missing from this view -- confirm.
def clear_known_hosts (self): pass
def start_node (self) : pass
def all_sliver_objs (self):
    """Return a TestSliver for every (slice,node) pair in the spec.
    NOTE(review): the result-list initialization and the return statement
    are missing from this view."""
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))

def locate_sliver_obj (self,nodename,slicename):
    """Build the TestSliver object for <slicename> running on <nodename>."""
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    # build objects
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)
def check_tcp (self):
    """Step: run the tcp_test specs (server sliver + client sliver pairs).

    NOTE(review): the spec loop header, the `port` assignment, the failure
    returns and the final return are missing from this view.
    """
    specs = self.plc_spec['tcp_test']
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): the client sliver is located from the *server* fields
    # ('server_node'/'server_slice'); this looks like a copy-paste bug --
    # presumably should be 'client_node'/'client_slice'.  Confirm against
    # the full file (the missing lines may handle this) before fixing.
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
def gather_logs (self):
    """Step: collect logs from the plc, the qemu host boxes, the nodes and
    the slivers, into the local logs/ directory."""
    # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    """Fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/."""
    for sliver in self.all_sliver_objs():
        tar_source = sliver.tar_var_logs()
        local_dir = "logs/sliver.var-log.%s"%sliver.name()
        utils.system("mkdir -p " + local_dir)
        utils.system(tar_source + " | tar -C " + local_dir + " -xf -")
def gather_var_logs (self):
    """Fetch the plc's /var/log into logs/myplc.var-log.<plcname>/."""
    local_dir = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p " + local_dir)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C " + local_dir + " -xf -")
def gather_nodes_var_logs (self):
    """Fetch each node's /var/log (reached through the plc) into
    logs/node.var-log.<node>/."""
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            node_ssh = TestSsh (node.name(),key="/etc/planetlab/root_ssh_key.rsa")
            fetch = self.actual_command_in_guest ( node_ssh.actual_command("tar -C /var/log -cf - ."))
            local_dir = "logs/node.var-log.%s"%node.name()
            utils.system("mkdir -p " + local_dir)
            utils.system(fetch + "| tar -C " + local_dir + " -xf -")
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    """Build /root/<database>-<name>.sql; <name> is options.dbname when it
    is a string, otherwise a timestamp.

    NOTE(review): the try/except around the options lookup and the
    timestamp-formatting lines are missing from this view -- confirm.
    """
    # uses options.dbname if it is found
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)

    # -- fragment of db_dump(): its def line is missing from this view
    # NOTE(review): "planetab4" (sic) -- the dump file is named planetab4
    # while the database is planetlab4; db_restore uses the same spelling,
    # so keep the two consistent (fix both together or neither).
    dump=self.dbfile("planetab4")
    self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
    utils.header('Dumped planetlab4 database in %s'%dump)
def db_restore(self):
    """Step: recreate the planetlab4 database from the dump file and
    restart the web service.

    NOTE(review): at least one line after the dump-file lookup is missing
    from this view; "planetab4" (sic) matches db_dump's spelling -- keep
    the two consistent (fix both together or neither).
    """
    dump=self.dbfile("planetab4")
    # stop httpd while the database is being swapped out
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
    self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
# Placeholder steps: the actual sleeping comes from the @standby_generic
# decorator, which derives the duration (in minutes) from the method name.
# NOTE(review): the decorator lines between these stubs are missing from
# this view -- confirm against the full file.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass