7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    """Sleep for <minutes> minutes and return True.

    Step methods are expected to return a boolean; in dry_run mode we
    announce the standby but skip the actual wait (the original accepted
    dry_run and then ignored it).
    """
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        # dry-run: report, but do not actually sleep
        return True
    time.sleep(60*minutes)
    return True
# factory for the standby_<n> steps: the duration is parsed from the
# decorated function's name (e.g. standby_5 -> 5 minutes)
# NOTE(review): this chunk looks truncated -- the inner wrapper def that
# binds 'self', and its return, are not visible here
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)

# lifts a TestNode method into a plc-level step applied to every node
# of every site of the spec
# NOTE(review): truncated -- the wrapper def, the 'overall' initialization
# and the final return are not visible in this chunk
def node_mapper (method):
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False

# same idea for TestSlice methods, additionally passing self.options
# NOTE(review): truncated -- wrapper def / 'overall' init / return missing
def slice_mapper_options (method):
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # steps run by default, in order; SEP entries mark grouping boundaries
    # used for display (see printable_steps)
    default_steps = ['uninstall','install','install_rpm',
                     'configure', 'start', SEP,
                     'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                     'sites', 'nodes', 'slices', 'nodegroups', SEP,
                     'init_node','bootcd', 'configure_qemu', 'export_qemu',
                     'kill_all_qemus', 'reinstall_node','start_node', SEP,
                     'nodes_booted', 'nodes_ssh', 'check_slice',
                     'check_initscripts', 'check_tcp', 'plcsh_stress_test', SEP,
                     'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
74 other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
75 'clean_initscripts', 'clean_all_sites',
76 'clean_sites', 'clean_nodes',
77 'clean_slices', 'clean_keys', SEP,
78 'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
79 'db_dump' , 'db_restore', ' cleanup_tracker',
80 'standby_1 through 20'
84 def printable_steps (list):
85 return " ".join(list).replace(" "+SEP+" "," \\\n")
87 def valid_step (step):
    def __init__ (self,plc_spec,options):
        """Record the plc spec and options, and set up ssh/API helpers.

        NOTE(review): several lines are missing from this chunk (the options
        assignment, and the condition guarding the chroot-deprecation raise
        below), so the control flow here is incomplete.
        """
        self.plc_spec=plc_spec
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        # NOTE(review): looks unconditional here -- its guard (chroot vs
        # vserver discrimination) is not visible in this chunk
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)

        # NOTE(review): orphan body of 'def name(self):' -- the def line is
        # not visible in this chunk
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

        # NOTE(review): orphan body of 'def hostname(self):'
        return self.plc_spec['hostname']

        # NOTE(review): orphan body of 'def is_local(self):'
        return self.test_ssh.is_local()
113 # define the API methods on this object through xmlrpc
114 # would help, but not strictly necessary
118 def actual_command_in_guest (self,command):
119 return self.test_ssh.actual_command(self.host_to_guest(command))
121 def start_guest (self):
122 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
124 def run_in_guest (self,command):
125 return utils.system(self.actual_command_in_guest(command))
127 def run_in_host (self,command):
128 return self.test_ssh.run_in_buildname(command)
130 #command gets run in the vserver
131 def host_to_guest(self,command):
132 return "vserver %s exec %s"%(self.vservername,command)
    # builds the host-side command that starts the vserver guest
135 def start_guest_in_host(self):
136 return "vserver %s start"%(self.vservername)
139 def run_in_guest_piped (self,local,remote):
140 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        """Build the root-user auth struct for API calls, from the plc spec."""
        # NOTE(review): the closing brace of this dict literal is not visible
        # in this chunk
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
    def locate_site (self,sitename):
        # find a site spec by name or by login_base
        # NOTE(review): the 'return site' lines for both matches are not
        # visible in this chunk, which is why the if bodies look empty
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        # find (site,node) specs by node name
        # NOTE(review): the 'return (site,node)' line is missing from this chunk
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        # find (site,node) specs by node hostname
        # NOTE(review): the 'return (site,node)' line is missing from this chunk
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        # find a key spec by name
        # NOTE(review): the 'return key' line is missing from this chunk
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        # find a slice spec by slice name
        # NOTE(review): the 'return slice' line is missing from this chunk
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): the initializations of 'tuples' and 'result', the body
        # of the has_key branch, and the final return are not visible in this
        # chunk
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    # only qemu-hosted (non-real) nodes live on a host box
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)
    # a step for checking this stuff
    def show_boxes (self):
        # debugging helper: print each host box with the nodes it carries
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
207 # make this a valid step
208 def kill_all_qemus(self):
209 # this is the brute force version, kill all qemus on that host box
210 for (box,nodes) in self.gather_hostBoxes().iteritems():
211 # pass the first nodename, as we don't push template-qemu on testboxes
212 nodedir=nodes[0].nodedir()
213 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
216 # make this a valid step
217 def list_all_qemus(self):
218 for (box,nodes) in self.gather_hostBoxes().iteritems():
219 # this is the brute force version, kill all qemus on that host box
220 TestBox(box,self.options.buildname).list_all_qemus()
    # kill only the right qemus
    def list_qemus(self):
        # NOTE(review): the fine-grained per-node body of this loop is not
        # visible in this chunk
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version

    # kill only the right qemus
    def kill_qemus(self):
        # NOTE(review): the fine-grained per-node body of this loop is not
        # visible in this chunk
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
240 ### utility methods for handling the pool of IP addresses allocated to plcs
242 # (*) running plcs are recorded in the file named ~/running-test-plcs
243 # (*) this file contains a line for each running plc, older first
244 # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
245 # (*) the free_tracker method performs a vserver stop on the oldest entry
246 # (*) the record_tracker method adds an entry at the bottom of the file
247 # (*) the cleanup_tracker method stops all known vservers and removes the tracker file
    # path (on the test box) of the file recording running plcs, oldest first
    TRACKER_FILE="~/running-test-plcs"

    def record_tracker (self):
        # append this plc (vserver name + hostname) to the tracker file
        # NOTE(review): the if/else on 'code' that discriminates the two
        # prints below is not visible in this chunk
        command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
        (code,output) = utils.output_of (self.test_ssh.actual_command(command))
        print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
        print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)

    def free_tracker (self):
        # stop the oldest recorded plc and remove its line from the tracker
        # NOTE(review): the status checks, the try/except around the split,
        # and the early returns are not visible in this chunk
        command="head -1 %s"%TestPlc.TRACKER_FILE
        (code,line) = utils.output_of(self.test_ssh.actual_command(command))
        print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
        [vserver_to_stop,hostname] = line.split()
        print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
        stop_command = "vserver --silent %s stop"%vserver_to_stop
        utils.system(self.test_ssh.actual_command(stop_command))
        x=TestPlc.TRACKER_FILE
        # drop the first line of the tracker file in place
        flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
        utils.system(self.test_ssh.actual_command(flush_command))
278 # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
279 def cleanup_tracker (self):
280 stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
281 utils.system(self.test_ssh.actual_command(stop_all))
282 clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
283 utils.system(self.test_ssh.actual_command(clean_tracker))
        # NOTE(review): orphan line -- the enclosing def (an uninstall step,
        # judging by the delete) is not visible in this chunk
        self.run_in_host("vserver --silent %s delete"%self.vservername)

        # NOTE(review): the enclosing def of this vserver-creation step is not
        # visible in this chunk, nor are several of its conditionals
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # run checkout in any case - would do an update if already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        # NOTE(review): the body of this failure branch is missing here
        if self.run_in_host(build_checkout) != 0:
        # the repo url is taken from arch-rpms-url
        # with the last step (i386.) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # NOTE(review): the 'else:' separating these two assignments is
        # missing from this chunk
        if self.options.arch == "i386":
            personality_option="-p linux32"
            personality_option="-p linux64"
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # resolve the guest IP to a fqdn for --hostname
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
325 def install_rpm(self):
326 return self.run_in_guest("yum -y install myplc-native")==0
        # NOTE(review): the enclosing 'def configure' line and part of the
        # PLC_* variable list are not visible in this chunk
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
            # write one 'e <var>\n<value>\n' edit command per variable
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        # show the generated script, feed it to plc-config-tty in the guest,
        # then clean up the temporary file
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)

        # NOTE(review): orphan bodies -- the 'def start' / 'def stop' lines
        # are not visible in this chunk
        self.run_in_guest('service plc start')
        self.run_in_guest('service plc stop')
364 # could use a TestKey class
365 def store_keys(self):
366 for key_spec in self.plc_spec['keys']:
367 TestKey(self,key_spec).store_key()
370 def clean_keys(self):
371 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
374 return self.do_sites()
376 def clean_sites (self):
377 return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        # create (or, when action != "add", delete) every site of the spec,
        # together with its users
        # NOTE(review): the 'else:' separating the delete path from the
        # create path is not visible in this chunk
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
    def clean_all_sites (self):
        # delete every site currently known to the API -- not just the
        # spec's sites (contrast with clean_sites)
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)

        # NOTE(review): orphan line -- the 'def nodes' entry point is not
        # visible in this chunk
        return self.do_nodes()
403 def clean_nodes (self):
404 return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        # create or delete the spec's nodes, site by site
        # NOTE(review): the 'if action...' / 'else' lines discriminating the
        # delete and create paths are not visible in this chunk
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            utils.header("Deleting nodes in site %s"%test_site.name())
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                utils.header("Deleting %s"%test_node.name())
                test_node.delete_node()
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
    # create nodegroups if needed, and populate
    # no need for a clean_nodegroups if we are careful enough
    def nodegroups (self):
        # NOTE(review): heavily truncated in this chunk -- the groups_dict
        # initialization, the reuse-vs-create branches, the try/except around
        # AddNodeTag and the final return are not visible
        # 1st pass to scan contents
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # a single name is allowed as a shorthand for a list
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetNodeTagTypes(auth,{'tagname':nodegroupname})
            # NOTE(review): the if/else choosing between reusing the existing
            # tag type and creating a new one is not visible here; the
            # AddNodeTagType dict literal is also unterminated in this chunk
            tag_type_id = tag_types[0]['node_tag_type_id']
            tag_type_id = self.apiserver.AddNodeTagType(auth,
                                                        {'tagname':nodegroupname,
                                                         'description': 'for nodegroup %s'%nodegroupname,
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                print 'node',nodename,'seems to already have tag',nodegroupname
                # NOTE(review): the auth argument line of this GetNodeTags
                # call appears to be missing from this chunk
                expect_yes = self.apiserver.GetNodeTags(
                    {'hostname':nodename,
                     'tagname':nodegroupname},
                    ['tagvalue'])[0]['tagvalue']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
    def all_hostnames (self) :
        # flat list of every node hostname declared in the spec
        # NOTE(review): the 'hostnames=[]' initialization and the final
        # return are not visible in this chunk
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
    # gracetime : during the first <gracetime> minutes nothing gets printed
    def do_nodes_booted (self, minutes, gracetime,period=15):
        # poll GetNodes until every qemu node reaches 'boot', or timeout
        # NOTE(review): the dry_run body, the polling loop header, the
        # per-iteration sleep and the returns are not visible in this chunk
        if self.options.dry_run:
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == 'boot':
                utils.header ("%s has reached the 'boot' state"%hostname)
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
            elif datetime.datetime.now() > graceout:
                utils.header ("%s still in '%s' state"%(hostname,boot_state))
                # push graceout one day ahead so the message prints only once
                graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
        # only useful in empty plcs
533 def nodes_booted(self):
534 return self.do_nodes_booted(minutes=20,gracetime=15)
    def do_nodes_ssh(self,minutes,gracetime,period=15):
        # poll ssh connectivity into each node until success or timeout
        # NOTE(review): the polling loop header, the success branch around
        # 'success', the per-iteration sleep and the returns are not visible
        # in this chunk
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        tocheck = self.all_hostnames()
#        self.scan_publicKeys(tocheck)
        utils.header("checking Connectivity on nodes %r"%tocheck)
        for hostname in tocheck:
            # try to ssh in nodes
            node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
            success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
            utils.header('The node %s is sshable -->'%hostname)
            tocheck.remove(hostname)
            # we will have tried real nodes once, in case they're up - but if not, just skip
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                tocheck.remove(hostname)
            elif datetime.datetime.now() > graceout:
                utils.header("Could not ssh-enter root context on %s"%hostname)
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE to ssh into %s"%hostname)
        # otherwise, sleep for a while
        # only useful in empty plcs

        # NOTE(review): orphan line -- the 'def nodes_ssh' entry point is not
        # visible in this chunk
        return self.do_nodes_ssh(minutes=6,gracetime=4)
    # step stubs whose real work lives in TestNode; they are meant to carry
    # a mapper decorator (@node_mapper)
    # NOTE(review): the decorator lines are not visible in this chunk
    def init_node (self): pass
    def bootcd (self): pass
    def configure_qemu (self): pass
    def reinstall_node (self): pass
    def export_qemu (self): pass
    def do_check_initscripts(self):
        # check each spec'd initscript on every sliver it applies to
        # NOTE(review): the 'overall' accumulator, the continue for slices
        # without an initscript, and the final return are not visible in
        # this chunk
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
602 def check_initscripts(self):
603 return self.do_check_initscripts()
605 def initscripts (self):
606 for initscript in self.plc_spec['initscripts']:
607 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
608 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    def clean_initscripts (self):
        # delete every spec'd initscript from the plc, tolerating absences
        # NOTE(review): the try/except wrapping the delete is not visible in
        # this chunk, which is why the last print looks unconditioned here
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'

        # NOTE(review): orphan line -- the 'def slices' entry point is not
        # visible in this chunk
        return self.do_slices()
625 def clean_slices (self):
626 return self.do_slices("delete")
    def do_slices (self, action="add"):
        # create or delete the spec's slices
        # NOTE(review): the if/else discriminating the delete and create
        # paths is not visible in this chunk
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            utils.header("Deleting slices in site %s"%test_site.name())
            test_slice.delete_slice()
            utils.pprint("Creating slice",slice)
            test_slice.create_slice()
            utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # step: apply TestSlice's check_slice to every slice of the spec
    @slice_mapper_options
    def check_slice(self): pass

    # NOTE(review): the mapper decorators for these two step stubs are not
    # visible in this chunk
    def clear_known_hosts (self): pass
    def start_node (self) : pass

    def all_sliver_objs (self):
        # build a TestSliver for every (slice,node) pair in the spec
        # NOTE(review): the 'result=[]' initialization and the final return
        # are not visible in this chunk
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
659 def locate_sliver_obj (self,nodename,slicename):
660 (site,node) = self.locate_node(nodename)
661 slice = self.locate_slice (slicename)
663 test_site = TestSite (self, site)
664 test_node = TestNode (self, test_site,node)
665 # xxx the slice site is assumed to be the node site - mhh - probably harmless
666 test_slice = TestSlice (self, test_site, slice)
667 return TestSliver (self, test_node, test_slice)
    def check_tcp (self):
        # run the spec'd client/server tcp tests between slivers
        # NOTE(review): the loop over specs, the port extraction and the
        # overall bookkeeping are not visible in this chunk; note also that
        # the client sliver is located with the *server* node/slice keys --
        # possibly intentional (loopback test) but worth confirming
        specs = self.plc_spec['tcp_test']
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
        # idem for the client side
        c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):

    def plcsh_stress_test (self):
        # install the stress-test in the plc image
        # NOTE(review): the else branch that sets 'command' for full
        # (non small_test) runs is not visible in this chunk
        location = "/usr/share/plc_api/plcsh-stress-test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
        if self.options.small_test:
            command=location + " -- --tiny"
        return ( self.run_in_guest(command) == 0)
    def gather_logs (self):
        """Collect plc, node and sliver logs locally under logs/."""
        # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
720 def gather_slivers_var_logs(self):
721 for test_sliver in self.all_sliver_objs():
722 remote = test_sliver.tar_var_logs()
723 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
724 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
725 utils.system(command)
728 def gather_var_logs (self):
729 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
730 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
731 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
732 utils.system(command)
734 def gather_nodes_var_logs (self):
735 for site_spec in self.plc_spec['sites']:
736 test_site = TestSite (self,site_spec)
737 for node_spec in site_spec['nodes']:
738 test_node=TestNode(self,test_site,node_spec)
739 test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
740 to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
741 command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
742 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
743 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): the guard around options.dbname and the
        # timestamp-based fallback branch are not visible in this chunk
        name=self.options.dbname
        if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)

        # NOTE(review): orphan body -- the 'def db_dump' line is not visible
        # in this chunk. Also note the dbfile key 'planetab4' (sic) vs the
        # actual database name 'planetlab4': it only affects the dump file
        # name, and db_restore uses the same key, so it is kept as-is
        dump=self.dbfile("planetab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
765 def db_restore(self):
766 dump=self.dbfile("planetab4")
768 self.run_in_guest('service httpd stop')
769 # xxx - need another wrapper
770 self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
771 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
772 self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
773 ##starting httpd service
774 self.run_in_guest('service httpd start')
776 utils.header('Database restored from ' + dump)
    # standby_<n> step stubs: each is meant to carry a @standby_generic
    # decorator that turns its name into an n-minute wait
    # NOTE(review): the decorator lines are not visible in this chunk
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass