7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    # Sleep for <minutes> minutes; used by the generated standby_* steps.
    # NOTE(review): the dry_run handling and return appear elided in this view.
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)

def standby_generic (func):
    # Decorator: derives the sleep duration from the step name
    # (standby_<n> -> n minutes) and delegates to standby().
    # NOTE(review): the inner wrapper 'def' line is elided in this chunk;
    # 'self' below presumably comes from that elided wrapper.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)

def node_mapper (method):
    # Turn a TestNode method into a plc-level step that maps it over every
    # node of every site in plc_spec.
    # NOTE(review): the inner wrapper 'def', the 'overall' initialization
    # and the return are elided from this view.
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False

def slice_mapper_options (method):
    # Same idea for TestSlice methods; also forwards self.options.
    # NOTE(review): wrapper 'def', 'overall' init and return elided here too.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # ordered list of step names run by default; SEP entries are display separators
    default_steps = ['uninstall','install','install_rpm',
                     'configure', 'start', SEP,
                     'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                     'sites', 'nodes', 'slices', 'nodegroups', SEP,
                     'init_node','bootcd', 'configure_qemu', 'export_qemu',
                     'kill_all_qemus', 'reinstall_node','start_node', SEP,
                     'nodes_booted', 'nodes_ssh', 'check_slice',
                     'check_initscripts', 'check_tcp',SEP,
                     'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
    # steps available on demand only
    other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
                    'clean_initscripts', 'clean_sites', 'clean_nodes',
                    'clean_slices', 'clean_keys', SEP,
                    'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                    'db_dump' , 'db_restore', ' cleanup_tracker',   # NOTE(review): stray leading space in ' cleanup_tracker' — likely a bug, would never match a real step name
                    'standby_1 through 20'
    # NOTE(review): the closing bracket of other_steps is elided from this view
83 def printable_steps (list):
84 return " ".join(list).replace(" "+SEP+" "," \\\n")
86 def valid_step (step):
89 def __init__ (self,plc_spec,options):
90 self.plc_spec=plc_spec
92 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
94 self.vserverip=plc_spec['vserverip']
95 self.vservername=plc_spec['vservername']
96 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
99 raise Exception,'chroot-based myplc testing is deprecated'
100 self.apiserver=TestApiserver(self.url,options.dry_run)
103 name=self.plc_spec['name']
104 return "%s.%s"%(name,self.vservername)
107 return self.plc_spec['hostname']
110 return self.test_ssh.is_local()
112 # define the API methods on this object through xmlrpc
113 # would help, but not strictly necessary
117 def actual_command_in_guest (self,command):
118 return self.test_ssh.actual_command(self.host_to_guest(command))
120 def start_guest (self):
121 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
123 def run_in_guest (self,command):
124 return utils.system(self.actual_command_in_guest(command))
126 def run_in_host (self,command):
127 return self.test_ssh.run_in_buildname(command)
129 #command gets run in the vserver
130 def host_to_guest(self,command):
131 return "vserver %s exec %s"%(self.vservername,command)
133 #command gets run in the vserver
134 def start_guest_in_host(self):
135 return "vserver %s start"%(self.vservername)
138 def run_in_guest_piped (self,local,remote):
139 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        # Build the credentials struct expected by the PLCAPI for root access.
        # NOTE(review): the closing brace of this dict is elided from this view.
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']

    def locate_site (self,sitename):
        # Return the site spec matching <sitename> by name or login_base.
        # NOTE(review): the 'return site' lines after each test are elided here.
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        # Return the (site,node) specs for <nodename>; return line elided.
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        # Return the (site,node) specs for <hostname>; return line elided.
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        # Return the key spec named <keyname>; return line elided.
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        # Return the slice spec named <slicename>; return line elided.
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): initialization of 'tuples' and 'result', and the
        # final return, are elided from this view.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                # only qemu (non-real) nodes live on a host box
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)

    # a step for checking this stuff
    def show_boxes (self):
        # Print each host box together with the nodes it carries.
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )

    # make this a valid step
    def kill_all_qemus(self):
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)

    # make this a valid step
    def list_all_qemus(self):
        # List every qemu process on every host box.
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,self.options.buildname).list_all_qemus()

    # kill only the right qemus
    def list_qemus(self):
        # NOTE(review): the per-node loop body is elided from this view.
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version

    # kill only the right qemus
    def kill_qemus(self):
        # NOTE(review): the per-node loop body is elided from this view.
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
239 ### utility methods for handling the pool of IP addresses allocated to plcs
241 # (*) running plcs are recorded in the file named ~/running-test-plcs
242 # (*) this file contains a line for each running plc, older first
243 # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
244 # (*) the free_tracker method performs a vserver stop on the oldest entry
245 # (*) the record_tracker method adds an entry at the bottom of the file
246 # (*) the cleanup_tracker method stops all known vservers and removes the tracker file
    # one line per running plc: "<vservername> <hostname>", oldest first
    TRACKER_FILE="~/running-test-plcs"

    def record_tracker (self):
        # Append this plc (vserver name + host) at the bottom of the tracker file.
        command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
        (code,output) = utils.output_of (self.test_ssh.actual_command(command))
        # NOTE(review): the if/else on 'code' around the two prints below is
        # elided from this view.
        print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
        print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)

    def free_tracker (self):
        # Stop the oldest recorded plc and drop its line from the tracker file.
        command="head -1 %s"%TestPlc.TRACKER_FILE
        (code,line) = utils.output_of(self.test_ssh.actual_command(command))
        # NOTE(review): surrounding if/try lines are elided from this view.
        print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
        [vserver_to_stop,hostname] = line.split()
        print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
        stop_command = "vserver --silent %s stop"%vserver_to_stop
        utils.system(self.test_ssh.actual_command(stop_command))
        x=TestPlc.TRACKER_FILE
        # drop the first line of the tracker file
        flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
        utils.system(self.test_ssh.actual_command(flush_command))

    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
    def cleanup_tracker (self):
        # Brute force: stop every vserver on the box, then remove the tracker file.
        stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
        utils.system(self.test_ssh.actual_command(stop_all))
        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
        utils.system(self.test_ssh.actual_command(clean_tracker))
        # NOTE(review): this line belongs to an uninstall/delete method
        # whose 'def' line is elided from this view.
        self.run_in_host("vserver --silent %s delete"%self.vservername)

        # NOTE(review): the 'def' line of this vserver-creation (install)
        # method is also elided from this view.
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # run checkout in any case - would do an update if already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
        # the repo url is taken from arch-rpms-url
        # with the last step (i386.) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # 32bit guests need a linux32 personality wrapper
        if self.options.arch == "i386":
            personality_option="-p linux32"
            # NOTE(review): the 'else:' line above this is elided
            personality_option="-p linux64"
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # NOTE(review): a try/except presumably surrounds this reverse lookup
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        # assemble and run the vserver-creation script on the host
        create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
324 def install_rpm(self):
325 return self.run_in_guest("yum -y install myplc-native")==0
        # NOTE(review): these lines belong to a configure method whose 'def'
        # line is elided; it writes a plc-config-tty script locally, pipes it
        # into the guest, then removes the temp file.
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
            # NOTE(review): the rest of the variable list is elided here
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)

        # NOTE(review): the two lines below belong to start()/stop() methods
        # whose 'def' lines are elided from this view.
        self.run_in_guest('service plc start')
        self.run_in_guest('service plc stop')

    # could use a TestKey class
    def store_keys(self):
        # Store every key from the spec via TestKey.
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
369 def clean_keys(self):
370 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
        # NOTE(review): 'def sites(self):' line is elided above this return.
        return self.do_sites()

    def clean_sites (self):
        # Step: delete the sites (and their users) from the plc.
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        # Create or delete every site (and its users) from the spec.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            # NOTE(review): the 'else:' line is elided here
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()

        # NOTE(review): 'def nodes(self):' line is elided above this return.
        return self.do_nodes()
    def clean_nodes (self):
        # Step: delete all nodes from the spec.
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        # Create or delete every node of every site.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            # NOTE(review): the 'if action...' / 'else:' lines are elided here
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
    # create nodegroups if needed, and populate
    # no need for a clean_nodegroups if we are careful enough
    def nodegroups (self):
        # 1st pass to scan contents
        # NOTE(review): initialization of groups_dict is elided from this view.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # accept a single name as well as a list of names
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        # 2nd pass: create tag types / nodegroups, then tag the nodes
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetNodeTagTypes(auth,{'tagname':nodegroupname})
            # NOTE(review): the 'if tag_types:' / 'else:' lines are elided here
                tag_type_id = tag_types[0]['node_tag_type_id']
                print 'node-tag-type',nodegroupname,'already exists'
                tag_type_id = self.apiserver.AddNodeTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
            # make sure the nodegroup itself exists
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            # NOTE(review): if/else lines are elided here as well
                print 'nodegroup',nodegroupname,'already exists'
                self.apiserver.AddNodeGroup(auth,
                                            {'groupname': nodegroupname,
                                             'node_tag_type_id': tag_type_id,
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                # NOTE(review): try/except lines around the AddNodeTag call
                # are elided from this view
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                print 'node',nodename,'seems to already have tag',nodegroupname
                # check anyway
                # NOTE(review): an argument line (presumably the auth struct)
                # of this GetNodeTags call is elided from this view
                expect_yes = self.apiserver.GetNodeTags(
                    {'hostname':nodename,
                     'tagname':nodegroupname},
                    ['tagvalue'])[0]['tagvalue']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                print 'Cannot find tag',nodegroupname,'on node',nodename

    def all_hostnames (self) :
        # Collect the hostname of every node in every site.
        # NOTE(review): initialization of 'hostnames' and the return are elided.
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
    # gracetime : during the first <gracetime> minutes nothing gets printed
    def do_nodes_booted (self, minutes, gracetime,period=30):
        # Poll the API until all nodes reach the 'boot' state, or time out.
        # NOTE(review): several lines (dry-run return, the polling loop
        # header, sleeps and returns) are elided from this view.
        if self.options.dry_run:
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        # refresh the per-host status from the API answer
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == 'boot':
                utils.header ("%s has reached the 'boot' state"%hostname)
            # if it's a real node, never mind
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
            elif datetime.datetime.now() > graceout:
                utils.header ("%s still in '%s' state"%(hostname,boot_state))
                graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
        # only useful in empty plcs

    def nodes_booted(self):
        # Step wrapper with the default timeouts.
        return self.do_nodes_booted(minutes=20,gracetime=15)

    def do_nodes_ssh(self,minutes,gracetime,period=30):
        # Poll until every node accepts root ssh, or time out.
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        tocheck = self.all_hostnames()
        # self.scan_publicKeys(tocheck)
        utils.header("checking Connectivity on nodes %r"%tocheck)
        for hostname in tocheck:
            # try to ssh in nodes
            node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
            success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
            # NOTE(review): the 'if success:' / 'else:' lines are elided here
                utils.header('The node %s is sshable -->'%hostname)
                tocheck.remove(hostname)
            # we will have tried real nodes once, in case they're up - but if not, just skip
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                    tocheck.remove(hostname)
                elif datetime.datetime.now() > graceout:
                    utils.header("Could not ssh-enter root context on %s"%hostname)
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE to ssh into %s"%hostname)
        # otherwise, sleep for a while
        # only useful in empty plcs
        # NOTE(review): 'def nodes_ssh(self):' line is elided above this return.
        return self.do_nodes_ssh(minutes=6,gracetime=4)
    # NOTE(review): @node_mapper decorator lines appear elided between these
    # stubs — the mapper presumably supplies the per-node implementation.
    def init_node (self): pass
    def bootcd (self): pass
    def configure_qemu (self): pass
    def reinstall_node (self): pass
    def export_qemu (self): pass
    def do_check_initscripts(self):
        # Check on each sliver that its configured initscript ran.
        # NOTE(review): the 'overall' flag init/return are elided from this view.
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
            # NOTE(review): the 'continue' under this test is elided
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
                # NOTE(review): the failure handling under this test is elided

    def check_initscripts(self):
        # Step wrapper.
        return self.do_check_initscripts()

    def initscripts (self):
        # Push every initscript from the spec into the plc.
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])

    def clean_initscripts (self):
        # Best-effort deletion of the initscripts named in the spec.
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            # NOTE(review): the try/except lines around the deletion are elided
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
        # NOTE(review): 'def slices(self):' line is elided above this return.
        return self.do_slices()

    def clean_slices (self):
        # Step: delete all slices from the spec.
        return self.do_slices("delete")

    def do_slices (self, action="add"):
        # Create or delete every slice from the spec.
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            # NOTE(review): the 'if action != "add":' / 'else:' lines are elided
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])

    @slice_mapper_options
    def check_slice(self): pass

    # NOTE(review): decorator lines for the two stubs below appear elided.
    def clear_known_hosts (self): pass
    def start_node (self) : pass
    def all_sliver_objs (self):
        # Build a TestSliver for every (slice,node) pair in the spec.
        # NOTE(review): initialization of 'result' and the return are elided.
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))

    def locate_sliver_obj (self,nodename,slicename):
        # Assemble a TestSliver from a node name and a slice name.
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build the helper objects
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def check_tcp (self):
        # Run the tcp_test specs: a server sliver, then a client sliver.
        specs = self.plc_spec['tcp_test']
        # NOTE(review): the loop header and port extraction are elided here.
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
            # idem for the client side
            # NOTE(review): the client locates spec['server_node']/['server_slice']
            # rather than client_* keys — looks like a copy/paste slip; confirm
            # against the tcp_test spec schema before changing.
            c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    def gather_logs (self):
        # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
    def gather_slivers_var_logs(self):
        # Pull /var/log out of each sliver into logs/sliver.var-log.<name>/.
        for test_sliver in self.all_sliver_objs():
            # remote side produces a tar stream of the sliver's /var/log
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
715 def gather_var_logs (self):
716 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
717 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
718 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
719 utils.system(command)
    def gather_nodes_var_logs (self):
        # Pull /var/log from each node (tunnelled through the plc guest)
        # into logs/node.var-log.<node>/.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
                to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
                command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): the try/except and an alternate (timestamped) return
        # branch are elided from this view.
        name=self.options.dbname
        if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)

        # NOTE(review): 'def db_dump(self):' line is elided above this line.
        # Also note 'planetab4' (sic) vs the 'planetlab4' database below —
        # presumably a typo kept consistently on both dump and restore paths.
        dump=self.dbfile("planetab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)

    def db_restore(self):
        # Recreate the planetlab4 database from the dump written by db_dump.
        dump=self.dbfile("planetab4")
        # stop the web frontend while the database is rebuilt
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
    # NOTE(review): the @standby_generic decorator lines appear elided
    # between these stubs; the decorator presumably turns standby_<n>
    # into an n-minute sleep step.
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass