import os, os.path
import sys
import time
import datetime
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBox import TestBox
from TestSsh import TestSsh
from TestApiserver import TestApiserver
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True
def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
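
# a sketch of how standby_generic is meant to be used (see the standby_<n>
# steps at the bottom of this file) - the wait duration is parsed from the name:
#   @standby_generic
#   def standby_5(): pass    # becomes a step that waits for 5 minutes
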
def node_mapper (method):
    def actual(self):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node): overall=False
        return overall
    return actual
def slice_mapper_options (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    return actual
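
# e.g. a step declared in TestPlc as
#   @node_mapper
#   def init_node (self): pass
# gets replaced by a method that runs TestNode.init_node on every node
# of every site, and reports a single overall boolean
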
SEP='<sep>'

class TestPlc:

    default_steps = ['uninstall','install','install_rpm',
                     'configure', 'start', SEP,
                     'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                     'sites', 'nodes', 'slices', 'nodegroups', SEP,
                     'init_node','bootcd', 'configure_qemu', 'export_qemu',
                     'kill_all_qemus', 'reinstall_node','start_node', SEP,
                     'nodes_booted', 'nodes_ssh', 'check_slice',
                     'check_initscripts', 'check_tcp', SEP,
                     'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
    other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', SEP,
                    'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
                    'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                    'db_dump', 'db_restore', 'cleanup_tracker',
                    'standby_1 through 20' ]

    @staticmethod
    def printable_steps (list):
        return " ".join(list).replace(" "+SEP+" "," \\\n")
    @staticmethod
    def valid_step (step):
        return step != SEP
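
    # e.g. printable_steps(['sites','nodes',SEP,'check_slice']) yields
    # "sites nodes \" followed by "check_slice" on the next line - one group
    # of steps per line - while valid_step() filters the SEP markers out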

    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        try:
            self.vserverip=plc_spec['vserverip']
            self.vservername=plc_spec['vservername']
            self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
            self.vserver=True
        except:
            self.vserver=False
            self.url="https://%s:443/PLCAPI/"%plc_spec['hostname']
        # utils.header('Using API url %s'%self.url)
        self.apiserver=TestApiserver(self.url,options.dry_run)

    def name(self):
        name=self.plc_spec['name']
        if self.vserver:
            return "%s.%s"%(name,self.vservername)
        else:
            return "%s.chroot"%name

    def hostname(self):
        return self.plc_spec['hostname']

    def is_local (self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary

    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)

    # command gets run in the chroot/vserver
    def host_to_guest(self,command):
        if self.vserver:
            return "vserver %s exec %s"%(self.vservername,command)
        else:
            return "chroot /plc/root %s"%TestSsh.backslash_shell_specials(command)
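    # e.g. with vservername='vtest0' (a made-up name), host_to_guest('ls /etc')
    # returns "vserver vtest0 exec ls /etc"; in chroot mode it returns
    # "chroot /plc/root ls /etc", modulo whatever escaping
    # TestSsh.backslash_shell_specials applies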

    # copy a file to the myplc root image - pass in_data=True if the file must go in /plc/data
    def copy_in_guest (self, localfile, remotefile, in_data=False):
        if in_data:
            chroot_dest="/plc/data"
        else:
            chroot_dest="/plc/root"
        if self.is_local():
            if not self.vserver:
                utils.system("cp %s %s/%s"%(localfile,chroot_dest,remotefile))
            else:
                utils.system("cp %s /vservers/%s/%s"%(localfile,self.vservername,remotefile))
        else:
            if not self.vserver:
                utils.system("scp %s %s:%s/%s"%(localfile,self.hostname(),chroot_dest,remotefile))
            else:
                utils.system("scp %s %s:/vservers/%s/%s"%(localfile,self.hostname(),self.vservername,remotefile))

    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))

    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
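    # e.g. (made-up values) the PLCAPI password-auth struct looks like
    # {'Username':'root@test.example.org', 'AuthMethod':'password',
    #  'AuthString':'secret', 'Role':'admin'}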

    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception("Cannot locate site %s"%sitename)

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception("Cannot locate node %s"%nodename)

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception("Cannot locate hostname %s"%hostname)

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception("Cannot locate key %s"%keyname)

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception("Cannot locate slice %s"%slicename)

    # all the different host boxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, builds [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[node]
            else:
                result[box].append(node)
        return result
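    # e.g. (made-up names) the result maps each qemu host box to its nodes:
    # { 'testbox1.example.org' : [ <TestNode node1>, <TestNode node2> ] }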

    # a step for checking this stuff
    def show_boxes (self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def kill_all_qemus(self):
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
        return True

    # make this a valid step
    def list_all_qemus(self):
        # the brute force version, list all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            TestBox(box,self.options.buildname).list_all_qemus()
        return True

    # list only the right qemus
    def list_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - one call per node
            # (the exact per-node TestNode method name is assumed here)
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def kill_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - one call per node
            # (the exact per-node TestNode method name is assumed here)
            for node in nodes:
                node.kill_qemu()
        return True

    ### utility methods for handling the pool of IP addresses allocated to plcs
    # (*) running plcs are recorded in the file named ~/running-test-plcs
    # (*) this file contains a line for each running plc, older first
    # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
    # (*) the free_tracker method performs a vserver stop on the oldest entry
    # (*) the record_tracker method adds an entry at the bottom of the file
    # (*) the cleanup_tracker method stops all known vservers and removes the tracker file

    TRACKER_FILE="~/running-test-plcs"
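
    # e.g. a typical tracker file, oldest entry first (names made up):
    #   vtest-32 testbox32.example.org
    #   vtest-64 testbox64.example.org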

    def record_tracker (self):
        if not self.vserver:
            print 'record_tracker active on vserver plcs only - ignored'
            return True
        command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
        (code,output) = utils.output_of (self.test_ssh.actual_command(command))
        if code != 0:
            print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
        else:
            print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
        return True

    def free_tracker (self):
        if not self.vserver:
            print 'free_tracker active on vserver plcs only - ignored'
            return True
        command="head -1 %s"%TestPlc.TRACKER_FILE
        (code,line) = utils.output_of(self.test_ssh.actual_command(command))
        if code != 0:
            print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
            return True
        try:
            [vserver_to_stop,hostname] = line.split()
        except:
            print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
            return True
        stop_command = "vserver --silent %s stop"%vserver_to_stop
        utils.system(self.test_ssh.actual_command(stop_command))
        x=TestPlc.TRACKER_FILE
        flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
        utils.system(self.test_ssh.actual_command(flush_command))
        return True

    def cleanup_tracker (self):
        stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
        utils.system(self.test_ssh.actual_command(stop_all))
        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
        utils.system(self.test_ssh.actual_command(clean_tracker))
        return True

    #################### step methods

    def uninstall_chroot(self):
        self.run_in_host('service plc safestop')
        ##### detect the installed myplc version and remove it
        self.run_in_host('rpm -e myplc')
        ##### clean up the /plc directory
        self.run_in_host('rm -rf /plc/data')
        ##### stop any running vservers
        self.run_in_host('for vserver in $(ls -d /vservers/* | sed -e s,/vservers/,,) ; do case $vserver in vtest*) echo Shutting down vserver $vserver ; vserver $vserver stop ;; esac ; done')
        return True

    def uninstall_vserver(self):
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        return True

    def uninstall(self):
        # if there's a chroot-based myplc running, and then a native-based myplc is being deployed,
        # it sounds safer to have the former uninstalled too
        # note that the vserver method cannot be invoked for chroot instances as vservername is required
        if self.vserver:
            self.uninstall_vserver()
            self.uninstall_chroot()
        else:
            self.uninstall_chroot()
        return True

    def install_chroot(self):
        # nothing to do
        return True

    def install_vserver(self):
        # we need the build dir for vtest-init-vserver
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])+"/build"
        else:
            # use a standard name - will be relative to the remote buildname
            build_dir="build"
        # run checkout in any case - would do an update if the dir already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
            return False
        # the repo url is taken from myplc-url
        # with the last two steps (i386/myplc...) removed
        repo_url = self.options.myplc_url
        for level in [ 'rpmname','arch' ]:
            repo_url = os.path.dirname(repo_url)
        if self.options.arch == "i386":
            personality="-p linux32"
        else:
            personality="-p linux64"
        create_vserver="%s/vtest-init-vserver.sh %s %s %s -- --interface eth0:%s"%\
            (build_dir,personality,self.vservername,repo_url,self.vserverip)
        return self.run_in_host(create_vserver) == 0

    def install(self):
        if self.vserver:
            return self.install_vserver()
        else:
            return self.install_chroot()

    ### install_rpm - make this an optional step
    def cache_rpm(self):
        url = self.options.myplc_url
        rpm = os.path.basename(url)
        cache_fetch="pwd;if [ -f %(rpm)s ] ; then echo Using cached rpm %(rpm)s ; else echo Fetching %(url)s ; curl -O %(url)s; fi"%locals()
        return self.run_in_host(cache_fetch)==0

    def install_rpm_chroot(self):
        url = self.options.myplc_url
        rpm = os.path.basename(url)
        if not self.cache_rpm():
            return False
        utils.header('Installing rpm %s'%rpm)
        return self.run_in_host('rpm -Uvh '+rpm)==0 and self.run_in_host('service plc mount')==0

    def install_rpm_vserver(self):
        return self.run_in_guest("yum -y install myplc-native")==0

    def install_rpm(self):
        if self.vserver:
            return self.install_rpm_vserver()
        else:
            return self.install_rpm_chroot()

    def configure(self):
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     # ... the other PLC_* settings are elided here ...
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
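    # the temp file feeds plc-config-tty's interactive prompt; e.g. with
    # PLC_NAME set to 'TestLab' (a made-up value) it would contain:
    #   e PLC_NAME
    #   TestLab
    #   w
    #   q
    # where 'e' edits a variable, 'w' writes the config and 'q' quits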

    # the chroot install is slightly different in this respect
    def start(self):
        if self.vserver:
            self.run_in_guest('service plc start')
        else:
            self.run_in_host('service plc start')
        return True

    def stop(self):
        if self.vserver:
            self.run_in_guest('service plc stop')
        else:
            self.run_in_host('service plc stop')
        return True

    # could use a TestKey class
    def store_keys(self):
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def clean_keys(self):
        utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
        return True

    def sites (self):
        return self.do_sites()

    def clean_sites (self):
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # users are deleted along with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True

    def nodes (self):
        return self.do_nodes()

    def clean_nodes (self):
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True

    # create nodegroups if needed, and populate them
    # no need for a clean_nodegroups if we are careful enough
    def nodegroups (self):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        # 2nd pass to create missing groups and attach nodes
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            try:
                self.apiserver.GetNodeGroups(auth,{'name':nodegroupname})[0]
            except:
                self.apiserver.AddNodeGroup(auth,{'name':nodegroupname})
            for node in group_nodes:
                self.apiserver.AddNodeToNodeGroup(auth,node,nodegroupname)
        return True
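    # e.g. (made-up names) a node_spec may carry either form:
    #   'nodegroups' : 'x86_64'              - a single group name
    #   'nodegroups' : [ 'x86_64', 'vtest' ] - or a list of group names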

    def all_hostnames (self) :
        hostnames = []
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
        return hostnames

    # gracetime : during the first <gracetime> minutes nothing gets printed
    def do_nodes_booted (self, minutes, gracetime, period=30):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == 'boot':
                    utils.header ("%s has reached the 'boot' state"%hostname)
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # mark it as done so it drops off the list
                        boot_state = 'boot'
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
            if not tocheck:
                break
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.do_nodes_booted(minutes=20,gracetime=15)

    def do_nodes_ssh(self,minutes,gracetime,period=30):
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        tocheck = self.all_hostnames()
        # self.scan_publicKeys(tocheck)
        utils.header("checking Connectivity on nodes %r"%tocheck)
        while tocheck:
            # iterate on a copy, as we remove entries along the way
            for hostname in tocheck[:]:
                # try to ssh in nodes
                node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
                success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
                if success:
                    utils.header('The node %s is sshable -->'%hostname)
                    tocheck.remove(hostname)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        tocheck.remove(hostname)
                    elif datetime.datetime.now() > graceout:
                        utils.header("Could not ssh-enter root context on %s"%hostname)
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE to ssh into %s"%hostname)
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_ssh(self):
        return self.do_nodes_ssh(minutes=6,gracetime=4)

    @node_mapper
    def init_node (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def configure_qemu (self): pass
    @node_mapper
    def reinstall_node (self): pass
    @node_mapper
    def export_qemu (self): pass

    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
                continue
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
                    overall = False
        return overall

    def check_initscripts(self):
        return self.do_check_initscripts()

    def initscripts (self):
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def slices (self):
        return self.do_slices()

    def clean_slices (self):
        return self.do_slices("delete")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action != "add":
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
            else:
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
        return True

    @slice_mapper_options
    def check_slice(self): pass

    @node_mapper
    def clear_known_hosts (self): pass

    @node_mapper
    def start_node (self) : pass

    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def check_tcp (self):
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            # locate the server-side sliver and start a server there
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                continue
            # idem for the client side
            # (client key names assumed symmetric to the server-side ones)
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall
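    # e.g. (made-up values) an entry of plc_spec['tcp_test'] would look like
    # { 'server_node':'node1.example.org', 'server_slice':'ts_sl1',
    #   'client_node':'node2.example.org', 'client_slice':'ts_sl2',
    #   'port':2000 }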

    def gather_logs (self):
        # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
        # (2) get all the nodes' qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slices' /var/log as logs/sliver.var-log.<sliver>/*
        # (1)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes' /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True

    def gather_var_logs (self):
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + " | tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
                to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
                command = to_plc + " | tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
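    # e.g. with no dbname option set, dbfile("planetlab4") returns something
    # like "/root/planetlab4-2008-03-01.sql" (the date part is just an example)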

    def db_dump(self):
        dump=self.dbfile("planetlab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
        return True

    def db_restore(self):
        dump=self.dbfile("planetlab4")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ##restart httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass