import os, os.path
import sys
import time
import datetime
import socket
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBox import TestBox
from TestSsh import TestSsh
from TestApiserver import TestApiserver
# step methods must take (self) and return a boolean (options is a member of the class)
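# a minimal sketch of what such a step looks like (illustrative, not an actual step of this class):
#    def some_step (self):
#        return self.run_in_host("some-command") == 0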
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if not dry_run:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
def node_mapper (method):
    def actual(self):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node): overall=False
        return overall
    return actual
def slice_mapper_options (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    return actual
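# usage sketch (mirroring the check_slice step further down): decorating a no-op method like
#    @node_mapper
#    def init_node (self): pass
# yields a step that runs TestNode.init_node on every node of every site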
# marker used to group steps when displaying the step lists
SEP='<sep>'

class TestPlc:

    default_steps = ['uninstall','install','install_rpm',
                     'configure', 'start', SEP,
                     'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                     'sites', 'nodes', 'slices', 'nodegroups', SEP,
                     'init_node','bootcd', 'configure_qemu', 'export_qemu',
                     'kill_all_qemus', 'reinstall_node','start_node', SEP,
                     'nodes_booted', 'nodes_ssh', 'check_slice',
                     'check_initscripts', 'check_tcp', SEP,
                     'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
    other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
                    'clean_initscripts', 'clean_all_sites',
                    'clean_sites', 'clean_nodes',
                    'clean_slices', 'clean_keys', SEP,
                    'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                    'db_dump' , 'db_restore', 'cleanup_tracker',
                    'standby_1 through 20' ]
    @staticmethod
    def printable_steps (list):
        return " ".join(list).replace(" "+SEP+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP
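    # e.g. printable_steps(['a','b',SEP,'c']) renders as "a b \" + newline + "c";
    # valid_step is what weeds the SEP markers out of an actual step list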
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        try:
            self.vserverip=plc_spec['vserverip']
            self.vservername=plc_spec['vservername']
            self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        except:
            # a plc_spec that does not define the vserver settings is the old chroot flavour
            raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['hostname']

    def is_local (self):
        return self.test_ssh.is_local()
    # defining the API methods on this object through xmlrpc
    # would help, but is not strictly necessary

    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)

    # command gets run in the vserver
    def host_to_guest(self,command):
        return "vserver %s exec %s"%(self.vservername,command)

    # command gets run in the host, and starts the vserver
    def start_guest_in_host(self):
        return "vserver %s start"%(self.vservername)

    # stdout of 'local' gets piped into 'remote', which runs in the vserver
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
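    # summing up the wrapping layers: a guest command 'foo' becomes
    #   vserver <vservername> exec foo          (host_to_guest)
    # and TestSsh.actual_command then presumably wraps that into an ssh call
    # to the plc host, unless we are running locally on it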
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    # all the different host boxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[node]
            else:
                result[box].append(node)
        return result

    # a step for checking this stuff
    def show_boxes (self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True
    # make this a valid step
    def kill_all_qemus(self):
        # this is the brute force version, kill all qemus on the host boxes
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
        return True

    # make this a valid step
    def list_all_qemus(self):
        # the brute force version again, list all qemus on the host boxes
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            TestBox(box,self.options.buildname).list_all_qemus()
        return True
    # list only the qemus of this plc's nodes
    def list_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - per node (assumes a TestNode.list_qemu helper)
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the qemus of this plc's nodes
    def kill_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - per node (assumes a TestNode.kill_qemu helper)
            for node in nodes:
                node.kill_qemu()
        return True
    ### utility methods for handling the pool of IP addresses allocated to plcs
    # (*) running plcs are recorded in the file named ~/running-test-plcs
    # (*) this file contains a line for each running plc, older first
    # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
    # (*) the free_tracker method performs a vserver stop on the oldest entry
    # (*) the record_tracker method adds an entry at the bottom of the file
    # (*) the cleanup_tracker method stops all known vservers and removes the tracker file
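    # so a tracker line simply reads:  <vservername> <testbox-hostname>
    # record_tracker appends one, free_tracker pops (and stops) the oldest one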
    TRACKER_FILE="~/running-test-plcs"

    def record_tracker (self):
        command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
        (code,output) = utils.output_of (self.test_ssh.actual_command(command))
        if code != 0:
            print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
            return False
        print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
        return True

    def free_tracker (self):
        command="head -1 %s"%TestPlc.TRACKER_FILE
        (code,line) = utils.output_of(self.test_ssh.actual_command(command))
        if code != 0:
            print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
            return True
        try:
            [vserver_to_stop,hostname] = line.split()
        except:
            print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
            return True
        stop_command = "vserver --silent %s stop"%vserver_to_stop
        utils.system(self.test_ssh.actual_command(stop_command))
        x=TestPlc.TRACKER_FILE
        flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
        utils.system(self.test_ssh.actual_command(flush_command))
        return True

    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
    def cleanup_tracker (self):
        stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
        utils.system(self.test_ssh.actual_command(stop_all))
        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
        utils.system(self.test_ssh.actual_command(clean_tracker))
        return True

    def uninstall(self):
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        return True
    ### create the vserver where myplc gets installed
    def install(self):
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
        # run checkout in any case - would do an update if already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
            return False
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        if self.options.arch == "i386":
            personality_option="-p linux32"
        else:
            personality_option="-p linux64"
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            # no reverse DNS for the vserver IP - just skip the --hostname option
            pass
        create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def install_rpm(self):
        return self.run_in_guest("yum -y install myplc-native")==0
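    # the configure step drives plc-config-tty non-interactively; the temporary file
    # piped into it essentially reads
    #   e <variable name>
    #   <value taken from plc_spec>
    #   ... (one 'e' block per variable)
    #   w
    #   q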
    def configure(self):
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # additional PLC_* settings from plc_spec can be appended to this list
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True

    def start(self):
        self.run_in_guest('service plc start')
        return True

    def stop(self):
        self.run_in_guest('service plc stop')
        return True
    # could use a TestKey class
    def store_keys(self):
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def clean_keys(self):
        utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
        return True
    def sites (self):
        return self.do_sites()

    def clean_sites (self):
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # users are deleted with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True
    def clean_all_sites (self):
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
    def nodes (self):
        return self.do_nodes()

    def clean_nodes (self):
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True
    # create nodegroups if needed, and populate
    # no need for a clean_nodegroups if we are careful enough
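    # a node_spec may carry a 'nodegroups' entry, either a single name or a list of names
    # (e.g. 'x86_64' or ['x86_64','mygroup'] - names purely illustrative)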
    def nodegroups (self):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        # 2nd pass to create/check node tag types, nodegroups and node tags
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the node tag type is here
            tag_types = self.apiserver.GetNodeTagTypes(auth,{'tagname':nodegroupname})
            if tag_types:
                tag_type_id = tag_types[0]['node_tag_type_id']
                print 'node-tag-type',nodegroupname,'already exists'
            else:
                tag_type_id = self.apiserver.AddNodeTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname})
            # then create the nodegroup itself if needed
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            if nodegroups:
                print 'nodegroup',nodegroupname,'already exists'
            else:
                self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                try:
                    self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                except:
                    print 'node',nodename,'seems to already have tag',nodegroupname
                # check the tag was applied anyway
                try:
                    expect_yes = self.apiserver.GetNodeTags(auth,
                                                            {'hostname':nodename,
                                                             'tagname':nodegroupname},
                                                            ['tagvalue'])[0]['tagvalue']
                    if expect_yes != "yes":
                        print 'Mismatch node tag on node',nodename,'got',expect_yes
                        overall = False
                except:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
                    overall = False
        return overall
    def all_hostnames (self) :
        hostnames = []
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
        return hostnames
    # gracetime : during the first <gracetime> minutes nothing gets printed
    def do_nodes_booted (self, minutes, gracetime, period=30):
        if self.options.dry_run:
            # nothing to poll in dry_run mode
            return True
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == 'boot':
                    utils.header ("%s has reached the 'boot' state"%hostname)
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # count it as booted from now on
                        boot_state = 'boot'
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.do_nodes_booted(minutes=20,gracetime=15)
    def do_nodes_ssh(self,minutes,gracetime,period=30):
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        tocheck = self.all_hostnames()
        # self.scan_publicKeys(tocheck)
        utils.header("checking Connectivity on nodes %r"%tocheck)
        while tocheck:
            # iterate on a copy, as successful nodes get removed from tocheck
            for hostname in tocheck[:]:
                # try to ssh in nodes
                node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
                success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
                if success:
                    utils.header('The node %s is sshable -->'%hostname)
                    tocheck.remove(hostname)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        tocheck.remove(hostname)
                    elif datetime.datetime.now() > graceout:
                        utils.header("Could not ssh-enter root context on %s"%hostname)
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE to ssh into %s"%hostname)
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_ssh(self):
        return self.do_nodes_ssh(minutes=6,gracetime=4)
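    # the next steps are plain per-node actions; the real work lives in TestNode,
    # and - following the same pattern as check_slice below - node_mapper dispatches them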
    @node_mapper
    def init_node (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def configure_qemu (self): pass
    @node_mapper
    def reinstall_node (self): pass
    @node_mapper
    def export_qemu (self): pass
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
                continue
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
                    overall = False
        return overall

    def check_initscripts(self):
        return self.do_check_initscripts()
    def initscripts (self):
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True
    def clean_initscripts (self):
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
    def slices (self):
        return self.do_slices()

    def clean_slices (self):
        return self.do_slices("delete")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action != "add":
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
            else:
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
        return True
    @slice_mapper_options
    def check_slice(self): pass

    @node_mapper
    def clear_known_hosts (self): pass

    @node_mapper
    def start_node (self) : pass
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build the test objects
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
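    # check_tcp relies on plc_spec['tcp_test'] : a list of specs, each expected to carry
    # at least a 'port' plus the server-side (and presumably client-side) node and slice names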
    def check_tcp (self):
        specs = self.plc_spec['tcp_test']
        overall = True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall = False
            # idem for the client side - assuming symmetric 'client_node'/'client_slice' keys
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall = False
        return overall
    def gather_logs (self):
        # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
        # (2) get all the nodes' qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slices' /var/log as logs/sliver.var-log.<sliver>/*
        # (1)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
    def gather_var_logs (self):
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        utils.system(command)
    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
                to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
                command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            # fall back on the current date
            t=datetime.datetime.now()
            name=str(t.date())
        return "/root/%s-%s.sql"%(database,name)
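    # e.g. this yields something like /root/planetlab4-<dbname-or-date>.sql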
    def db_dump(self):
        dump=self.dbfile("planetlab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
        return True
    def db_restore(self):
        dump=self.dbfile("planetlab4")
        ## stopping httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ## starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass