7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
# Module-level helper: pause the whole test sequence for <minutes> minutes.
# dry_run is presumably consulted in the elided lines 25-27 to skip the
# sleep -- TODO confirm, they are not visible in this excerpt.
23 def standby(minutes,dry_run):
24 utils.header('Entering StandBy for %d mn'%minutes)
28 time.sleep(60*minutes)
# Decorator used by the standby_<N> stubs at the bottom of the file: the
# duration is parsed out of the decorated function's own name.  The inner
# wrapper's def line is elided here, which is why 'self' looks unbound.
31 def standby_generic (func):
33 minutes=int(func.__name__.split("_")[1])
34 return standby(minutes,self.options.dry_run)
# Decorator: lift a TestNode method into a TestPlc step by applying it to
# every node of every site in the spec.  The wrapper's def line and the
# 'overall' init/return lines are elided from this excerpt.
37 def node_mapper (method):
40 node_method = TestNode.__dict__[method.__name__]
41 for site_spec in self.plc_spec['sites']:
42 test_site = TestSite (self,site_spec)
43 for node_spec in site_spec['nodes']:
44 test_node = TestNode (self,test_site,node_spec)
45 if not node_method(test_node): overall=False
# Same idea for TestSlice methods, additionally passing self.options through.
49 def slice_mapper_options (method):
52 slice_method = TestSlice.__dict__[method.__name__]
53 for slice_spec in self.plc_spec['slices']:
54 site_spec = self.locate_site (slice_spec['sitename'])
55 test_site = TestSite(self,site_spec)
56 test_slice=TestSlice(self,test_site,slice_spec)
57 if not slice_method(test_slice,self.options): overall=False
# Ordered step names run by default; SEP entries are visual separators
# for printable_steps.  Every name must match a method of this class.
65 default_steps = ['uninstall','install','install_rpm',
66 'configure', 'start', 'fetch_keys', SEP,
67 'store_keys', 'clear_known_hosts', 'initscripts', SEP,
68 'sites', 'nodes', 'slices', 'nodegroups', SEP,
69 'init_node','bootcd', 'configure_qemu', 'export_qemu',
70 'kill_all_qemus', 'reinstall_node','start_node', SEP,
71 'nodes_booted', 'nodes_ssh', 'check_slice', 'check_initscripts', SEP,
72 'check_sanity', 'check_tcp', 'plcsh_stress_test', SEP,
73 'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
# Extra steps available on request only (closing bracket elided below).
74 other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
75 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
76 'clean_sites', 'clean_nodes',
77 'clean_slices', 'clean_keys', SEP,
78 'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
# NOTE(review): ' cleanup_tracker' below carries a stray leading space --
# a name lookup against the cleanup_tracker method would miss it; confirm
# and strip the space.
79 'db_dump' , 'db_restore', ' cleanup_tracker',
80 'standby_1 through 20'
def printable_steps (list):
    "Render the step names on a single string, breaking the line at every SEP marker."
    flat = " ".join(list)
    return flat.replace(" %s "%SEP, " \\\n")
# True for genuine step names, False for separators -- body elided
# (presumably 'return step != SEP'; TODO confirm).
87 def valid_step (step):
# Constructor: binds the plc spec and command-line options; several
# assignment lines (e.g. self.options) are elided from this excerpt.
90 def __init__ (self,plc_spec,options):
91 self.plc_spec=plc_spec
93 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
95 self.vserverip=plc_spec['vserverip']
96 self.vservername=plc_spec['vservername']
97 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
# chroot deployments are explicitly rejected (guarding 'if' is elided).
100 raise Exception,'chroot-based myplc testing is deprecated'
101 self.apiserver=TestApiserver(self.url,options.dry_run)
# Fragments of name()/hostname()/is_local() -- their def lines are elided.
104 name=self.plc_spec['name']
105 return "%s.%s"%(name,self.vservername)
108 return self.plc_spec['hostname']
111 return self.test_ssh.is_local()
113 # define the API methods on this object through xmlrpc
114 # would help, but not strictly necessary
# Full ssh command line that runs <command> inside the vserver guest.
118 def actual_command_in_guest (self,command):
119 return self.test_ssh.actual_command(self.host_to_guest(command))
# Boot the guest vserver from the host side.
121 def start_guest (self):
122 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
# Run <command> inside the guest; returns the shell exit status.
124 def run_in_guest (self,command):
125 return utils.system(self.actual_command_in_guest(command))
# Run <command> on the host/testbox side only.
127 def run_in_host (self,command):
128 return self.test_ssh.run_in_buildname(command)
#command gets run in the vserver
def host_to_guest(self,command):
    "Wrap <command> so it executes inside this plc's vserver guest."
    return "vserver " + self.vservername + " exec " + command

#command gets run in the vserver
def start_guest_in_host(self):
    "The host-side command line that boots this plc's vserver."
    return "vserver " + self.vservername + " start"
# Pipe the stdout of a local command into <remote> executed in the guest.
139 def run_in_guest_piped (self,local,remote):
140 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
# Root credentials dict for the PLCAPI, built from the spec
# (the closing brace, original line 147, is elided).
142 def auth_root (self):
143 return {'Username':self.plc_spec['PLC_ROOT_USER'],
144 'AuthMethod':'password',
145 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
146 'Role' : self.plc_spec['role']
# Find a site spec by name or login_base; the 'return site' lines after
# each match (originals 151 and 153) are elided from this excerpt.
148 def locate_site (self,sitename):
149 for site in self.plc_spec['sites']:
150 if site['site_fields']['name'] == sitename:
152 if site['site_fields']['login_base'] == sitename:
154 raise Exception,"Cannot locate site %s"%sitename
# Find (site,node) by the node's short name -- return line elided.
156 def locate_node (self,nodename):
157 for site in self.plc_spec['sites']:
158 for node in site['nodes']:
159 if node['name'] == nodename:
161 raise Exception,"Cannot locate node %s"%nodename
# Same lookup keyed on the full hostname -- return line elided.
163 def locate_hostname (self,hostname):
164 for site in self.plc_spec['sites']:
165 for node in site['nodes']:
166 if node['node_fields']['hostname'] == hostname:
168 raise Exception,"Cannot locate hostname %s"%hostname
# Find a key spec by name -- return line elided.
170 def locate_key (self,keyname):
171 for key in self.plc_spec['keys']:
172 if key['name'] == keyname:
174 raise Exception,"Cannot locate key %s"%keyname
# Find a slice spec by slice name -- return line elided.
176 def locate_slice (self, slicename):
177 for slice in self.plc_spec['slices']:
178 if slice['slice_fields']['name'] == slicename:
180 raise Exception,"Cannot locate slice %s"%slicename
# Build a TestSliver for every (slice,node) pair in the spec;
# 'result=[]' and the final return are elided.
182 def all_sliver_objs (self):
184 for slice_spec in self.plc_spec['slices']:
185 slicename = slice_spec['slice_fields']['name']
186 for nodename in slice_spec['nodenames']:
187 result.append(self.locate_sliver_obj (nodename,slicename))
# Assemble the TestSliver wrapper for a given (node,slice) pair.
190 def locate_sliver_obj (self,nodename,slicename):
191 (site,node) = self.locate_node(nodename)
192 slice = self.locate_slice (slicename)
194 test_site = TestSite (self, site)
195 test_node = TestNode (self, test_site,node)
196 # xxx the slice site is assumed to be the node site - mhh - probably harmless
197 test_slice = TestSlice (self, test_site, slice)
198 return TestSliver (self, test_node, test_slice)
# First node of the first slice in the spec; the return statement
# (originals 205-206) is elided from this excerpt.
200 def locate_first_node(self):
201 nodename=self.plc_spec['slices'][0]['nodenames'][0]
202 (site,node) = self.locate_node(nodename)
203 test_site = TestSite (self, site)
204 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    "Build a TestSliver for the first nodename of the first declared slice."
    first_slice = self.plc_spec['slices'][0]
    nodename = first_slice['nodenames'][0]
    return self.locate_sliver_obj(nodename, first_slice['slice_fields']['name'])
213 # all different hostboxes used in this plc
# Group the qemu-hosted (non-real) nodes by the box that runs them;
# 'tuples=[]', 'result={}' and the final return are elided here.
214 def gather_hostBoxes(self):
215 # maps on sites and nodes, return [ (host_box,test_node) ]
217 for site_spec in self.plc_spec['sites']:
218 test_site = TestSite (self,site_spec)
219 for node_spec in site_spec['nodes']:
220 test_node = TestNode (self, test_site, node_spec)
# only virtual (qemu) nodes have a host box worth tracking
221 if not test_node.is_real():
222 tuples.append( (test_node.host_box(),test_node) )
223 # transform into a dict { 'host_box' -> [ test_node .. ] }
225 for (box,node) in tuples:
226 if not result.has_key(box):
229 result[box].append(node)
232 # a step for checking this stuff
# Debug step: print each host box with the nodes it carries
# (trailing 'return True' elided).
233 def show_boxes (self):
234 for (box,nodes) in self.gather_hostBoxes().iteritems():
235 print box,":"," + ".join( [ node.name() for node in nodes ] )
238 # make this a valid step
# Step: kill every qemu process on every host box used by this plc.
239 def kill_all_qemus(self):
240 # this is the brute force version, kill all qemus on that host box
241 for (box,nodes) in self.gather_hostBoxes().iteritems():
242 # pass the first nodename, as we don't push template-qemu on testboxes
243 nodedir=nodes[0].nodedir()
244 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
247 # make this a valid step
# Step: list every qemu process per host box.
248 def list_all_qemus(self):
249 for (box,nodes) in self.gather_hostBoxes().iteritems():
250 # this is the brute force version, kill all qemus on that host box
251 TestBox(box,self.options.buildname).list_all_qemus()
254 # kill only the right qemus
# Per-node listing; the inner loop body is elided from this excerpt.
255 def list_qemus(self):
256 for (box,nodes) in self.gather_hostBoxes().iteritems():
257 # the fine-grain version
262 # kill only the right qemus
# Per-node kill; the inner loop body is elided from this excerpt.
263 def kill_qemus(self):
264 for (box,nodes) in self.gather_hostBoxes().iteritems():
265 # the fine-grain version
271 ### utility methods for handling the pool of IP addresses allocated to plcs
273 # (*) running plcs are recorded in the file named ~/running-test-plcs
274 # (*) this file contains a line for each running plc, older first
275 # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
276 # (*) the free_tracker method performs a vserver stop on the oldest entry
277 # (*) the record_tracker method adds an entry at the bottom of the file
278 # (*) the cleanup_tracker method stops all known vservers and removes the tracker file
# class-level constant: path of the tracker file on the testbox
280 TRACKER_FILE="~/running-test-plcs"
# Append this plc to the tracker file; the 'if code!=0:' guard and the
# else branch structure are elided from this excerpt.
282 def record_tracker (self):
283 command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
284 (code,output) = utils.output_of (self.test_ssh.actual_command(command))
286 print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
288 print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
# Stop the oldest recorded plc and drop its tracker line; several guard
# lines (empty-file check, try/except around split) are elided.
291 def free_tracker (self):
292 command="head -1 %s"%TestPlc.TRACKER_FILE
293 (code,line) = utils.output_of(self.test_ssh.actual_command(command))
295 print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
298 [vserver_to_stop,hostname] = line.split()
300 print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
302 stop_command = "vserver --silent %s stop"%vserver_to_stop
303 utils.system(self.test_ssh.actual_command(stop_command))
# drop the first line of the tracker file, keeping the rest in order
304 x=TestPlc.TRACKER_FILE
305 flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
306 utils.system(self.test_ssh.actual_command(flush_command))
# this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
def cleanup_tracker (self):
    "Brute-force reset: stop every vserver on the testbox, then wipe the tracker file."
    for command in [ "cd /vservers ; for i in * ; do vserver --silent $i stop ; done",
                     "rm -f %s"%TestPlc.TRACKER_FILE ]:
        utils.system(self.test_ssh.actual_command(command))
# Fragment of the uninstall step (its def line is elided): drop the vserver.
317 self.run_in_host("vserver --silent %s delete"%self.vservername)
# Below: body of the install step (def line and a few lines elided) --
# checks out the build, then creates the vserver via vtest-init-vserver.sh.
323 # a full path for the local calls
324 build_dir=os.path.dirname(sys.argv[0])
325 # sometimes this is empty - set to "." in such a case
326 if not build_dir: build_dir="."
327 build_dir += "/build"
329 # use a standard name - will be relative to remote buildname
331 # run checkout in any case - would do an update if already exists
332 build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
# the failure return (original line 334) is elided
333 if self.run_in_host(build_checkout) != 0:
335 # the repo url is taken from arch-rpms-url
336 # with the last step (i386.) removed
337 repo_url = self.options.arch_rpms_url
338 for level in [ 'arch' ]:
339 repo_url = os.path.dirname(repo_url)
# pick the vserver personality matching the target architecture
340 if self.options.arch == "i386":
341 personality_option="-p linux32"
343 personality_option="-p linux64"
344 script="vtest-init-vserver.sh"
345 vserver_name = self.vservername
346 vserver_options="--netdev eth0 --interface %s"%self.vserverip
# reverse-resolve the vserver IP to name the guest (try/except elided)
348 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
349 vserver_options += " --hostname %s"%vserver_hostname
352 create_vserver="%(build_dir)s/%(script)s %(personality_option)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
353 return self.run_in_host(create_vserver) == 0
def install_rpm(self):
    "yum-install the native myplc package inside the guest; True iff yum exits 0."
    retcod = self.run_in_guest("yum -y install myplc-native")
    return retcod == 0
# Body of the configure step (def line elided): write a plc-config-tty
# script locally, pipe it into the guest, then clean up.
361 tmpname='%s.plc-config-tty'%(self.name())
362 fileconf=open(tmpname,'w')
# the full list of config variables (originals 364-373) is elided
363 for var in [ 'PLC_NAME',
367 'PLC_MAIL_SUPPORT_ADDRESS',
374 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
# 'w' writes the config, 'q' quits plc-config-tty
375 fileconf.write('w\n')
376 fileconf.write('q\n')
378 utils.system('cat %s'%tmpname)
379 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
380 utils.system('rm %s'%tmpname)
# Fragments of the start/stop steps (their def lines are elided).
384 self.run_in_guest('service plc start')
388 self.run_in_guest('service plc stop')
395 # stores the keys from the config for further use
# Step: persist every key spec locally (trailing return elided).
396 def store_keys(self):
397 for key_spec in self.plc_spec['keys']:
398 TestKey(self,key_spec).store_key()
def clean_keys(self):
    "Remove the locally stored keys/ directory that store_keys/fetch_keys populate."
    # BUG FIX: was os.path(sys.argv[0]) -- os.path is a module, so calling it
    # raises TypeError; dirname() is the intent (cf. build_dir in the install step).
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
404 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
405 # for later direct access to the nodes
# Step: copy the root ssh keypair out of the guest's filesystem;
# 'dir' setup, mkdir and overall init/return lines are elided.
406 def fetch_keys(self):
408 if not os.path.isdir(dir):
410 prefix = 'root_ssh_key'
411 vservername=self.vservername
413 for ext in [ 'pub', 'rsa' ] :
414 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
415 dst="keys/%(vservername)s.%(ext)s"%locals()
416 if self.test_ssh.fetch(src,dst) != 0: overall=False
# Step pair: sites/clean_sites both delegate to do_sites
# (the 'def sites' line is elided above the first return).
420 return self.do_sites()
422 def clean_sites (self):
423 return self.do_sites(action="delete")
# Create or delete every site (and its users) from the spec;
# the else keyword and trailing return are elided.
425 def do_sites (self,action="add"):
426 for site_spec in self.plc_spec['sites']:
427 test_site = TestSite (self,site_spec)
428 if (action != "add"):
429 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
430 test_site.delete_site()
431 # deleted with the site
432 #test_site.delete_users()
435 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
436 test_site.create_site()
437 test_site.create_users()
440 def clean_all_sites (self):
441 print 'auth_root',self.auth_root()
442 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
443 for site_id in site_ids:
444 print 'Deleting site_id',site_id
445 self.apiserver.DeleteSite(self.auth_root(),site_id)
# Step pair: nodes/clean_nodes delegate to do_nodes
# (the 'def nodes' line is elided above the first return).
448 return self.do_nodes()
449 def clean_nodes (self):
450 return self.do_nodes(action="delete")
# Create or delete every node of every site in the spec; the 'if action'
# guard and the else keyword are elided from this excerpt.
452 def do_nodes (self,action="add"):
453 for site_spec in self.plc_spec['sites']:
454 test_site = TestSite (self,site_spec)
456 utils.header("Deleting nodes in site %s"%test_site.name())
457 for node_spec in site_spec['nodes']:
458 test_node=TestNode(self,test_site,node_spec)
459 utils.header("Deleting %s"%test_node.name())
460 test_node.delete_node()
462 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
463 for node_spec in site_spec['nodes']:
464 utils.pprint('Creating node %s'%node_spec,node_spec)
465 test_node = TestNode (self,test_site,node_spec)
466 test_node.create_node ()
def nodegroups (self):
    "Step: create the nodegroups declared in the spec."
    return self.do_nodegroups(action="add")
def clean_nodegroups (self):
    "Step: delete those same nodegroups."
    return self.do_nodegroups(action="delete")
474 # create nodegroups if needed, and populate
# Two-pass step: first collect nodegroup->nodes from the spec, then create
# (or delete) the matching tag types / nodegroups / node tags via the API.
# Many control-flow lines (groups_dict init, if/else keywords, try/except
# headers, overall bookkeeping) are elided from this excerpt.
475 def do_nodegroups (self, action="add"):
476 # 1st pass to scan contents
478 for site_spec in self.plc_spec['sites']:
479 test_site = TestSite (self,site_spec)
480 for node_spec in site_spec['nodes']:
481 test_node=TestNode (self,test_site,node_spec)
482 if node_spec.has_key('nodegroups'):
483 nodegroupnames=node_spec['nodegroups']
# a single name is accepted as shorthand for a one-element list
484 if isinstance(nodegroupnames,StringTypes):
485 nodegroupnames = [ nodegroupnames ]
486 for nodegroupname in nodegroupnames:
487 if not groups_dict.has_key(nodegroupname):
488 groups_dict[nodegroupname]=[]
489 groups_dict[nodegroupname].append(test_node.name())
490 auth=self.auth_root()
492 for (nodegroupname,group_nodes) in groups_dict.iteritems():
# "add" branch (the guarding 'if' is elided)
494 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
495 # first, check if the nodetagtype is here
496 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
# reuse the existing tag type, or create one (if/else elided)
498 tag_type_id = tag_types[0]['tag_type_id']
500 tag_type_id = self.apiserver.AddTagType(auth,
501 {'tagname':nodegroupname,
502 'description': 'for nodegroup %s'%nodegroupname,
505 print 'located tag (type)',nodegroupname,'as',tag_type_id
# create the nodegroup itself unless it already exists (guard elided)
507 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
509 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
510 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
511 # set node tag on all nodes, value='yes'
512 for nodename in group_nodes:
# AddNodeTag fails when the tag already exists; fall through and verify
514 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
516 traceback.print_exc()
517 print 'node',nodename,'seems to already have tag',nodegroupname
# check the tag value actually reads back as "yes"
520 expect_yes = self.apiserver.GetNodeTags(auth,
521 {'hostname':nodename,
522 'tagname':nodegroupname},
523 ['tagvalue'])[0]['tagvalue']
524 if expect_yes != "yes":
525 print 'Mismatch node tag on node',nodename,'got',expect_yes
528 if not self.options.dry_run:
529 print 'Cannot find tag',nodegroupname,'on node',nodename
# "delete" branch (guard elided): drop the nodegroup, best-effort
533 print 'cleaning nodegroup',nodegroupname
534 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
536 traceback.print_exc()
# Flat list of every node hostname in the spec; the 'hostnames=[]' init
# and the final return are elided from this excerpt.
540 def all_hostnames (self) :
542 for site_spec in self.plc_spec['sites']:
543 hostnames += [ node_spec['node_fields']['hostname'] \
544 for node_spec in site_spec['nodes'] ]
547 # gracetime : during the first <gracetime> minutes nothing gets printed
# Poll GetNodes until every node reaches 'boot' or <minutes> elapse;
# real (non-qemu) nodes are tolerated in any state.  The outer while loop,
# several guards and the return statements are elided from this excerpt.
548 def do_nodes_booted (self, minutes, gracetime,period=15):
549 if self.options.dry_run:
553 timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
554 graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
555 # the nodes that haven't checked yet - start with a full list and shrink over time
556 tocheck = self.all_hostnames()
557 utils.header("checking nodes %r"%tocheck)
558 # create a dict hostname -> status
559 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
# polling loop body (the while header is elided)
562 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
564 for array in tocheck_status:
565 hostname=array['hostname']
566 boot_state=array['boot_state']
567 if boot_state == 'boot':
568 utils.header ("%s has reached the 'boot' state"%hostname)
570 # if it's a real node, never mind
571 (site_spec,node_spec)=self.locate_hostname(hostname)
572 if TestNode.is_real_model(node_spec['node_fields']['model']):
573 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
576 elif datetime.datetime.now() > graceout:
577 utils.header ("%s still in '%s' state"%(hostname,boot_state))
# push graceout far into the future so the message prints only once
578 graceout=datetime.datetime.now()+datetime.timedelta(1)
579 status[hostname] = boot_state
581 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
# timeout: report stragglers (the 'return False' is elided)
584 if datetime.datetime.now() > timeout:
585 for hostname in tocheck:
586 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
588 # otherwise, sleep for a while
590 # only useful in empty plcs
def nodes_booted(self):
    "Step: poll until every spec node reaches 'boot' (20 mn timeout, 15 mn grace)."
    return self.do_nodes_booted(gracetime=15,minutes=20)
# Poll until every node is reachable over ssh with the fetched root key;
# the outer while loop, success guard, sleep and returns are elided.
596 def do_nodes_ssh(self,minutes,gracetime,period=15):
598 timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
599 graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
600 tocheck = self.all_hostnames()
601 # self.scan_publicKeys(tocheck)
602 utils.header("checking Connectivity on nodes %r"%tocheck)
604 for hostname in tocheck:
605 # try to ssh in nodes
606 node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
# the ssh probe itself runs from inside the plc guest
607 success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
609 utils.header('The node %s is sshable -->'%hostname)
611 tocheck.remove(hostname)
613 # we will have tried real nodes once, in case they're up - but if not, just skip
614 (site_spec,node_spec)=self.locate_hostname(hostname)
615 if TestNode.is_real_model(node_spec['node_fields']['model']):
616 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
617 tocheck.remove(hostname)
618 elif datetime.datetime.now() > graceout:
619 utils.header("Could not ssh-enter root context on %s"%hostname)
# timeout: report the unreachable nodes ('return False' elided)
622 if datetime.datetime.now() > timeout:
623 for hostname in tocheck:
624 utils.header("FAILURE to ssh into %s"%hostname)
626 # otherwise, sleep for a while
628 # only useful in empty plcs
# Fragment of the nodes_ssh step (its def line is elided).
632 return self.do_nodes_ssh(minutes=10,gracetime=5)
# Empty step bodies: the @node_mapper decorator lines (634,636,638,...)
# are elided here; each stub is actually dispatched to the TestNode
# method of the same name on every node.
635 def init_node (self): pass
637 def bootcd (self): pass
639 def configure_qemu (self): pass
641 def reinstall_node (self): pass
643 def export_qemu (self): pass
645 ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
646 def check_sanity_node (self):
647 return self.locate_first_node().check_sanity()
648 def check_sanity_sliver (self) :
649 return self.locate_first_sliver().check_sanity()
# Step: sanity-check one node and one sliver, short-circuiting on failure.
651 def check_sanity (self):
652 return self.check_sanity_node() and self.check_sanity_sliver()
# Verify that each slice's declared initscript actually ran on each of its
# nodes; 'overall' init/return and the continue line are elided.
655 def do_check_initscripts(self):
657 for slice_spec in self.plc_spec['slices']:
# slices without an initscript are skipped (the continue is elided)
658 if not slice_spec.has_key('initscriptname'):
660 initscript=slice_spec['initscriptname']
661 for nodename in slice_spec['nodenames']:
662 (site,node) = self.locate_node (nodename)
663 # xxx - passing the wrong site - probably harmless
664 test_site = TestSite (self,site)
665 test_slice = TestSlice (self,test_site,slice_spec)
666 test_node = TestNode (self,test_site,node)
667 test_sliver = TestSliver (self, test_node, test_slice)
668 if not test_sliver.check_initscript(initscript):
# Step wrapper around do_check_initscripts.
672 def check_initscripts(self):
673 return self.do_check_initscripts()
# Step: register every initscript from the spec ('return True' elided).
675 def initscripts (self):
676 for initscript in self.plc_spec['initscripts']:
677 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
678 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
# Step: best-effort deletion of the same initscripts; the try/except
# headers around DeleteInitScript are elided from this excerpt.
681 def clean_initscripts (self):
682 for initscript in self.plc_spec['initscripts']:
683 initscript_name = initscript['initscript_fields']['name']
684 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
686 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
687 print initscript_name,'deleted'
689 print 'deletion went wrong - probably did not exist'
# Step pair: slices/clean_slices delegate to do_slices
# (the 'def slices' line is elided above the first return).
694 return self.do_slices()
696 def clean_slices (self):
697 return self.do_slices("delete")
# Create or delete every slice in the spec; the if/else keywords and
# the trailing return are elided from this excerpt.
699 def do_slices (self, action="add"):
700 for slice in self.plc_spec['slices']:
701 site_spec = self.locate_site (slice['sitename'])
702 test_site = TestSite(self,site_spec)
703 test_slice=TestSlice(self,test_site,slice)
705 utils.header("Deleting slices in site %s"%test_site.name())
706 test_slice.delete_slice()
708 utils.pprint("Creating slice",slice)
709 test_slice.create_slice()
710 utils.header('Created Slice %s'%slice['slice_fields']['name'])
# Mapper-dispatched step stubs; the @node_mapper decorators for
# clear_known_hosts/start_node are elided from this excerpt.
713 @slice_mapper_options
714 def check_slice(self): pass
717 def clear_known_hosts (self): pass
720 def start_node (self) : pass
# Step: run the tcp_test specs -- launch a server in one sliver, then a
# client against it.  The spec loop, port extraction and returns are elided.
722 def check_tcp (self):
723 specs = self.plc_spec['tcp_test']
728 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
729 if not s_test_sliver.run_tcp_server(port,timeout=10):
733 # idem for the client side
# NOTE(review): the client sliver is located from spec['server_node'] /
# spec['server_slice'] -- looks like a copy-paste from the server side;
# 'client_node'/'client_slice' would be expected here -- TODO confirm.
734 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
735 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# Step: push the plcsh stress-test script into the guest and run it;
# the 'command=' initialization (original 744) is elided.
739 def plcsh_stress_test (self):
740 # install the stress-test in the plc image
741 location = "/usr/share/plc_api/plcsh-stress-test.py"
742 remote="/vservers/%s/%s"%(self.vservername,location)
743 self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
745 command += " -- --check"
746 if self.options.small_test:
748 return ( self.run_in_guest(command) == 0)
# Step: collect all logs (plc, qemu, node and sliver /var/log trees)
# into the local logs/ directory; the trailing return is elided.
750 def gather_logs (self):
751 # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
752 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
753 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
754 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
756 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
757 self.gather_var_logs ()
759 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
760 for site_spec in self.plc_spec['sites']:
761 test_site = TestSite (self,site_spec)
762 for node_spec in site_spec['nodes']:
763 test_node=TestNode(self,test_site,node_spec)
764 test_node.gather_qemu_logs()
766 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
767 self.gather_nodes_var_logs()
769 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
770 self.gather_slivers_var_logs()
# Untar each sliver's /var/log into logs/sliver.var-log.<sliver>/
# (the trailing 'return True', originals 779-780, is elided).
773 def gather_slivers_var_logs(self):
774 for test_sliver in self.all_sliver_objs():
# tar_var_logs() presumably yields the remote tar-to-stdout command -- TODO confirm
775 remote = test_sliver.tar_var_logs()
776 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
777 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
778 utils.system(command)
def gather_var_logs (self):
    "Copy the plc guest's /var/log tree into logs/myplc.var-log.<plcname>/."
    target="logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p "+target)
    # tar inside the guest, untar on the local side
    tar_from_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_from_plc + "| tar -C "+target+" -xf -")
    # the apache logs are not world-readable by default
    utils.system("chmod a+r,a+x "+target+"/httpd")
# For every node: tar its /var/log through a double ssh hop (plc guest ->
# node with the root key) and untar locally under logs/node.var-log.<node>/.
789 def gather_nodes_var_logs (self):
790 for site_spec in self.plc_spec['sites']:
791 test_site = TestSite (self,site_spec)
792 for node_spec in site_spec['nodes']:
793 test_node=TestNode(self,test_site,node_spec)
794 test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
795 to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
796 command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
# mkdir happens after building the command but before running it
797 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
798 utils.system(command)
801 # returns the filename to use for sql dump/restore, using options.dbname if set
# Several lines are elided: the try/except around reading options.dbname
# and the timestamp-based fallback name built from 't'.
802 def dbfile (self, database):
803 # uses options.dbname if it is found
805 name=self.options.dbname
806 if not isinstance(name,StringTypes):
809 t=datetime.datetime.now()
812 return "/root/%s-%s.sql"%(database,name)
# Body of db_dump (its def line is elided).
# NOTE(review): "planetab4" (missing an 'l') is used consistently by both
# db_dump and db_restore, so the pair still matches -- it only misnames the
# dump file; fixing one call without the other would break the round-trip.
815 dump=self.dbfile("planetab4")
816 self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
817 utils.header('Dumped planetlab4 database in %s'%dump)
# Step: recreate planetlab4 from the dump; httpd is stopped around the
# restore.  Lines 822 and 830 are elided from this excerpt.
820 def db_restore(self):
821 dump=self.dbfile("planetab4")
823 self.run_in_guest('service httpd stop')
824 # xxx - need another wrapper
825 self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
826 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
827 self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
828 ##starting httpd service
829 self.run_in_guest('service httpd start')
831 utils.header('Database restored from ' + dump)
# Empty standby step bodies: the @standby_generic decorator lines
# (833, 835, ...) are elided here; each stub sleeps for the number of
# minutes encoded in its own name (see standby_generic above).
834 def standby_1(): pass
836 def standby_2(): pass
838 def standby_3(): pass
840 def standby_4(): pass
842 def standby_5(): pass
844 def standby_6(): pass
846 def standby_7(): pass
848 def standby_8(): pass
850 def standby_9(): pass
852 def standby_10(): pass
854 def standby_11(): pass
856 def standby_12(): pass
858 def standby_13(): pass
860 def standby_14(): pass
862 def standby_15(): pass
864 def standby_16(): pass
866 def standby_17(): pass
868 def standby_18(): pass
870 def standby_19(): pass
872 def standby_20(): pass