7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep helper backing the standby_* steps.
# NOTE(review): in the original file the sleep is guarded by `dry_run` and the
# function returns True; those lines are not visible in this chunk.
def standby(minutes,dry_run):
    """Pause the test sequence for <minutes> minutes."""
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    # Decorator factory: derives the sleep duration from the step name,
    # e.g. standby_5 -> 5 minutes.
    # NOTE(review): the inner wrapper def that binds `self` is not visible in
    # this chunk; as shown, `self` is unresolved here.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    # Decorator: lifts a single-node TestNode method into a plc-wide step that
    # applies it to every node of every site in the spec.
    # NOTE(review): the inner wrapper def (binding `self`), the `overall=True`
    # initialisation and the final `return overall` are not visible in this
    # chunk.
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
def slice_mapper_options (method):
    # Decorator: lifts a TestSlice method (that also takes options) into a
    # plc-wide step applied to every slice in the spec.
    # NOTE(review): the inner wrapper def, the `overall` initialisation and
    # the final return are not visible in this chunk.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # Step catalogue: default_steps run in order during a full pass;
    # other_steps are only run on explicit request.  SEP is a display-only
    # separator token defined elsewhere in the file.
    default_steps = ['uninstall','install','install_rpm',
                     'configure', 'start', 'fetch_keys', SEP,
                     'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                     'sites', 'nodes', 'slices', 'nodegroups', SEP,
                     'init_node','bootcd', 'configure_qemu', 'export_qemu',
                     'kill_all_qemus', 'reinstall_node','start_node', SEP,
                     'nodes_booted', 'nodes_ssh', 'check_slice', 'check_initscripts', SEP,
                     'check_sanity', 'check_tcp', 'plcsh_stress_test', SEP,
                     'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
    # NOTE(review): ' cleanup_tracker' below carries a leading space inside
    # the string literal, which would never match a real step name — verify.
    # The closing bracket of other_steps is not visible in this chunk.
    other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
                    'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
                    'clean_sites', 'clean_nodes',
                    'clean_slices', 'clean_keys', SEP,
                    'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                    'db_dump' , 'db_restore', ' cleanup_tracker',
                    'standby_1 through 20'
84 def printable_steps (list):
85 return " ".join(list).replace(" "+SEP+" "," \\\n")
    def valid_step (step):
        # Predicate: is <step> a recognised step name?
        # NOTE(review): the body of this function is not visible in this chunk.
    def __init__ (self,plc_spec,options):
        """Bind a plc spec dict and the command-line options to this object.

        NOTE(review): several assignments (notably self.options=options,
        read below via self.options.buildname) and the condition guarding the
        chroot-deprecation raise are not visible in this chunk.
        """
        self.plc_spec=plc_spec
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        # PLCAPI endpoint exposed by the vserver
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
        # NOTE(review): orphan bodies — the `def` lines for the accessors
        # below (name, hostname, is_local) are not visible in this chunk;
        # only their return statements remain.
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)
        return self.plc_spec['hostname']
        return self.test_ssh.is_local()
113 # define the API methods on this object through xmlrpc
114 # would help, but not strictly necessary
118 def actual_command_in_guest (self,command):
119 return self.test_ssh.actual_command(self.host_to_guest(command))
121 def start_guest (self):
122 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
124 def run_in_guest (self,command):
125 return utils.system(self.actual_command_in_guest(command))
127 def run_in_host (self,command):
128 return self.test_ssh.run_in_buildname(command)
130 #command gets run in the vserver
131 def host_to_guest(self,command):
132 return "vserver %s exec %s"%(self.vservername,command)
134 #command gets run in the vserver
135 def start_guest_in_host(self):
136 return "vserver %s start"%(self.vservername)
139 def run_in_guest_piped (self,local,remote):
140 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        """Build the PLCAPI password-auth struct for the root user, from the
        plc spec.
        NOTE(review): the closing brace of this dict literal is not visible
        in this chunk."""
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
    def locate_site (self,sitename):
        """Find a site spec by display name or login_base; raises if absent.
        NOTE(review): the `return site` lines under both matches are not
        visible in this chunk."""
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        """Find (site_spec,node_spec) for a node by its spec name; raises if absent.
        NOTE(review): the return line under the match is not visible in this
        chunk."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        """Find (site_spec,node_spec) for a node by hostname; raises if absent.
        NOTE(review): the return line under the match is not visible in this
        chunk."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        """Find a key spec by name; raises if absent.
        NOTE(review): the return line under the match is not visible in this
        chunk."""
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        """Find a slice spec by slice name; raises if absent.
        NOTE(review): the return line under the match is not visible in this
        chunk."""
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): the initialisations of `tuples` and `result`, the
        # creation of empty per-box lists, and the final `return result` are
        # not visible in this chunk.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                # only qemu (non-real) nodes live on a host box
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)
    # a step for checking this stuff
    def show_boxes (self):
        """Print each host box together with the nodes it hosts.
        NOTE(review): the final return of this step is not visible in this
        chunk."""
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
    # make this a valid step
    def kill_all_qemus(self):
        """Kill every qemu instance on every host box used by this plc.
        NOTE(review): the final return of this step is not visible in this
        chunk."""
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
    # make this a valid step
    def list_all_qemus(self):
        """List every qemu instance on every host box used by this plc.
        NOTE(review): the final return of this step is not visible in this
        chunk."""
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,self.options.buildname).list_all_qemus()
    # kill only the right qemus
    def list_qemus(self):
        """List only the qemus belonging to this plc's nodes.
        NOTE(review): the per-node body of this loop is not visible in this
        chunk."""
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
    # kill only the right qemus
    def kill_qemus(self):
        """Kill only the qemus belonging to this plc's nodes.
        NOTE(review): the per-node body of this loop is not visible in this
        chunk."""
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
    ### utility methods for handling the pool of IP addresses allocated to plcs
    # (*) running plcs are recorded in the file named ~/running-test-plcs
    # (*) this file contains a line for each running plc, older first
    # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
    # (*) the free_tracker method performs a vserver stop on the oldest entry
    # (*) the record_tracker method adds an entry at the bottom of the file
    # (*) the cleanup_tracker method stops all known vservers and removes the tracker file

    # path of the tracker file on the test box (expanded by the remote shell)
    TRACKER_FILE="~/running-test-plcs"
    def record_tracker (self):
        """Append this plc (vserver name + host) to the tracker file.
        NOTE(review): the `if code != 0:` / else lines selecting between the
        two prints below are not visible in this chunk."""
        command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
        (code,output) = utils.output_of (self.test_ssh.actual_command(command))
        print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
        print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
    def free_tracker (self):
        """Stop the oldest recorded plc (first line of the tracker file) and
        drop its entry.
        NOTE(review): the `if code != 0:` guard, an early return, and the
        try/except around the split below are not visible in this chunk."""
        command="head -1 %s"%TestPlc.TRACKER_FILE
        (code,line) = utils.output_of(self.test_ssh.actual_command(command))
        print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
        [vserver_to_stop,hostname] = line.split()
        print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
        stop_command = "vserver --silent %s stop"%vserver_to_stop
        utils.system(self.test_ssh.actual_command(stop_command))
        # drop the first line of the tracker file
        x=TestPlc.TRACKER_FILE
        flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
        utils.system(self.test_ssh.actual_command(flush_command))
    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
    def cleanup_tracker (self):
        """Stop every vserver on the test box and remove the tracker file.
        NOTE(review): the final return of this step is not visible in this
        chunk."""
        stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
        utils.system(self.test_ssh.actual_command(stop_all))
        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
        utils.system(self.test_ssh.actual_command(clean_tracker))
        # NOTE(review): orphan body line — the def of the enclosing step
        # (vserver deletion / uninstall) is not visible in this chunk.
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        # NOTE(review): orphan body — the def of the vserver-install step is
        # not visible in this chunk; likewise the early `return False` after a
        # failed checkout and the `else:` pairing the two personality_option
        # assignments below.
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # run checkout in any case - would do an update if already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
        # the repo url is taken from arch-rpms-url
        # with the last step (i386.) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        if self.options.arch == "i386":
            personality_option="-p linux32"
            personality_option="-p linux64"
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # resolve the vserver IP to a hostname for the --hostname option
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
325 def install_rpm(self):
326 return self.run_in_guest("yum -y install myplc-native")==0
        # NOTE(review): orphan body — the def of the configure step is not
        # visible in this chunk, nor are most entries of the variable list
        # below or the final return.
        # build a plc-config-tty script locally, push it through the vserver
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
            # 'e <var>' then the value: plc-config-tty edit dialogue
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        # NOTE(review): orphan bodies — the def lines of the start and stop
        # steps are not visible in this chunk.
        self.run_in_guest('service plc start')
        self.run_in_guest('service plc stop')
    # stores the keys from the config for further use
    def store_keys(self):
        """Persist every key declared in the spec via TestKey.store_key.
        NOTE(review): the final return of this step is not visible in this
        chunk."""
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
370 def clean_keys(self):
371 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
373 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
374 # for later direct access to the nodes
375 def fetch_keys(self):
376 prefix = 'root_ssh_key'
377 vservername=self.vservername
378 for ext in [ 'pub', 'rsa' ] :
379 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
380 dst="keys/%(vservername)s.%(ext)s"%locals()
381 self.run_in_guest_piped
382 self.test_ssh.fetch(src,dst)
        # NOTE(review): orphan body — the def line of the `sites` step is not
        # visible in this chunk.
        return self.do_sites()

    def clean_sites (self):
        """Step: delete all sites (and what they contain) listed in the spec."""
        return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        """Create (action='add') or delete (any other value) every site of
        the spec, together with its users.
        NOTE(review): the `else:` pairing the delete branch with the creation
        code below, and the final return, are not visible in this chunk."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
405 def clean_all_sites (self):
406 print 'auth_root',self.auth_root()
407 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
408 for site_id in site_ids:
409 print 'Deleting site_id',site_id
410 self.apiserver.DeleteSite(self.auth_root(),site_id)
        # NOTE(review): orphan body — the def line of the `nodes` step is not
        # visible in this chunk.
        return self.do_nodes()

    def clean_nodes (self):
        """Step: delete every node declared in the spec."""
        return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        """Create (action='add') or delete (any other value) every node of
        every site in the spec.
        NOTE(review): the `if action != 'add':` line, the pairing `else:` and
        the final return are not visible in this chunk."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
434 def nodegroups (self):
435 return self.do_nodegroups("add")
436 def clean_nodegroups (self):
437 return self.do_nodegroups("delete")
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        """Scan node specs for 'nodegroups' entries, then create/populate
        (or delete) the corresponding nodegroups through the API.

        NOTE(review): heavily truncated in this chunk — the groups_dict
        initialisation, the add/delete branch selection on `action`, the
        try/except scaffolding around the API calls, and the overall result
        handling are all missing.
        """
        # 1st pass to scan contents
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # a single name is accepted as shorthand for a list
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            tag_type_id = tag_types[0]['tag_type_id']
            # NOTE(review): the closing arguments/brace of this AddTagType
            # call are not visible in this chunk
            tag_type_id = self.apiserver.AddTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            # then the nodegroup itself
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                # NOTE(review): the except clause these two lines belonged to
                # is not visible in this chunk
                traceback.print_exc()
                print 'node',nodename,'seems to already have tag',nodegroupname
                # check anyway that the tag ended up with value 'yes'
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['tagvalue'])[0]['tagvalue']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
            # delete branch
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
    def all_hostnames (self) :
        """Collect the hostnames of every node of every site in the spec.
        NOTE(review): the `hostnames=[]` initialisation and the final return
        are not visible in this chunk."""
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
    # gracetime : during the first <gracetime> minutes nothing gets printed
    def do_nodes_booted (self, minutes, gracetime,period=15):
        """Poll the API until every spec node reaches boot_state 'boot', with
        an overall <minutes> timeout.

        NOTE(review): heavily truncated in this chunk — the dry_run early
        return body, the enclosing polling loop, the success/failure returns
        and the sleep between rounds are all missing.
        """
        if self.options.dry_run:
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == 'boot':
                utils.header ("%s has reached the 'boot' state"%hostname)
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
            elif datetime.datetime.now() > graceout:
                utils.header ("%s still in '%s' state"%(hostname,boot_state))
                # push the grace period one day out so the message prints once
                graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
        # only useful in empty plcs
558 def nodes_booted(self):
559 return self.do_nodes_booted(minutes=20,gracetime=15)
    def do_nodes_ssh(self,minutes,gracetime,period=15):
        """Poll until every spec node is reachable over ssh as root, with an
        overall <minutes> timeout.

        NOTE(review): heavily truncated in this chunk — the enclosing polling
        loop, the `if success:` / else structure, the success/failure returns
        and the sleep between rounds are missing.
        """
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
        tocheck = self.all_hostnames()
        # self.scan_publicKeys(tocheck)
        utils.header("checking Connectivity on nodes %r"%tocheck)
        for hostname in tocheck:
            # try to ssh in nodes
            node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
            # the ssh to the node is itself launched from inside the plc guest
            success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
            utils.header('The node %s is sshable -->'%hostname)
            tocheck.remove(hostname)
            # we will have tried real nodes once, in case they're up - but if not, just skip
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                tocheck.remove(hostname)
            elif datetime.datetime.now() > graceout:
                utils.header("Could not ssh-enter root context on %s"%hostname)
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE to ssh into %s"%hostname)
        # otherwise, sleep for a while
        # only useful in empty plcs
        # NOTE(review): orphan line below — the def of the `nodes_ssh` step
        # wrapper is not visible in this chunk.
        return self.do_nodes_ssh(minutes=10,gracetime=5)
    # Empty bodies: these steps are delegated to the per-node / per-slice
    # implementation via the node_mapper / slice_mapper_options decorators.
    # NOTE(review): the @node_mapper decorator lines for the stubs below are
    # not visible in this chunk.
    def init_node (self): pass
    def bootcd (self): pass
    def configure_qemu (self): pass
    def reinstall_node (self): pass
    def export_qemu (self): pass

    def check_sanity_node (self): pass
    @slice_mapper_options
    def check_sanity_slice (self) : pass
615 def check_sanity (self):
616 return self.check_sanity_node() and self.check_sanity_slice()
    def do_check_initscripts(self):
        """For every slice spec carrying an 'initscriptname', verify the
        initscript ran on each of its slivers.
        NOTE(review): the `overall` initialisation, the `continue` under the
        has_key guard, the failure branch and the final return are not
        visible in this chunk."""
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
635 def check_initscripts(self):
636 return self.do_check_initscripts()
    def initscripts (self):
        """Step: register every initscript of the spec with the API.
        NOTE(review): the final return of this step is not visible in this
        chunk."""
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    def clean_initscripts (self):
        """Step: delete every initscript of the spec, best-effort.
        NOTE(review): the try/except lines that make the deletion best-effort
        and the final return are not visible in this chunk."""
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
        # NOTE(review): orphan body — the def line of the `slices` step is
        # not visible in this chunk.
        return self.do_slices()

    def clean_slices (self):
        """Step: delete every slice declared in the spec."""
        return self.do_slices("delete")
    def do_slices (self, action="add"):
        """Create (action='add') or delete (any other value) every slice of
        the spec.
        NOTE(review): the `if action != 'add':` / `else:` lines separating the
        delete and create branches, and the final return, are not visible in
        this chunk."""
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
    @slice_mapper_options
    def check_slice(self): pass

    # NOTE(review): the @node_mapper decorator lines for the two stubs below
    # are not visible in this chunk.
    def clear_known_hosts (self): pass

    def start_node (self) : pass
    def all_sliver_objs (self):
        """Build a TestSliver for every (node, slice) pair of the spec.
        NOTE(review): the `result=[]` initialisation and the final return are
        not visible in this chunk."""
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
692 def locate_sliver_obj (self,nodename,slicename):
693 (site,node) = self.locate_node(nodename)
694 slice = self.locate_slice (slicename)
696 test_site = TestSite (self, site)
697 test_node = TestNode (self, test_site,node)
698 # xxx the slice site is assumed to be the node site - mhh - probably harmless
699 test_slice = TestSlice (self, test_site, slice)
700 return TestSliver (self, test_node, test_slice)
    def check_tcp (self):
        """Step: for each tcp_test spec, start a tcp server in the server
        sliver and connect to it from the client sliver.
        NOTE(review): the loop over `specs`, the extraction of `spec`/`port`,
        the failure handling and the final return are not visible in this
        chunk.  Also note the client sliver below is located with the
        *server* node/slice keys — possibly a loopback test, but verify
        against the full file."""
        specs = self.plc_spec['tcp_test']
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
        # idem for the client side
        c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    def plcsh_stress_test (self):
        """Step: push the plcsh stress-test script into the plc image and run
        it; True when it exits 0.
        NOTE(review): the initial `command = ...` assignment and the body of
        the small_test branch are not visible in this chunk."""
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh-stress-test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
        command += " -- --check"
        if self.options.small_test:
        return ( self.run_in_guest(command) == 0)
    def gather_logs (self):
        """Step: collect every log of interest into the local logs/ tree.
        NOTE(review): the final return of this step is not visible in this
        chunk."""
        # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
753 def gather_slivers_var_logs(self):
754 for test_sliver in self.all_sliver_objs():
755 remote = test_sliver.tar_var_logs()
756 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
757 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
758 utils.system(command)
761 def gather_var_logs (self):
762 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
763 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
764 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
765 utils.system(command)
766 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
767 utils.system(command)
769 def gather_nodes_var_logs (self):
770 for site_spec in self.plc_spec['sites']:
771 test_site = TestSite (self,site_spec)
772 for node_spec in site_spec['nodes']:
773 test_node=TestNode(self,test_site,node_spec)
774 test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
775 to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
776 command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
777 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
778 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        """Path (inside the guest) of the sql dump file for <database>.
        NOTE(review): the try/except around reading options.dbname and the
        branch that substitutes a timestamp-based name when dbname is unset
        are not visible in this chunk."""
        # uses options.dbname if it is found
        name=self.options.dbname
        if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)
        # NOTE(review): orphan body — the `def db_dump(self):` line is not
        # visible in this chunk.  Also note the dump label 'planetab4' (sic)
        # differs from the database name planetlab4; db_restore uses the same
        # label, so the pair stays consistent.
        dump=self.dbfile("planetab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
    def db_restore(self):
        """Step: recreate the planetlab4 database from the dump produced by
        db_dump; httpd is stopped for the duration of the restore.
        NOTE(review): 'planetab4' (sic) matches the label used by db_dump."""
        dump=self.dbfile("planetab4")
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
    # Placeholder standby steps: the real behaviour (sleep N minutes) comes
    # from the @standby_generic decorator applied to each of them.
    # NOTE(review): the decorator lines are not visible in this chunk, which
    # is also why these defs take no `self`.
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass