7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep for <minutes> minutes; used by the standby_N pseudo-steps.
# NOTE(review): this listing is elided (original lines 25-27 missing here,
# presumably the dry_run early-exit); code kept verbatim.
23 def standby(minutes,dry_run):
24     utils.header('Entering StandBy for %d mn'%minutes)
28     time.sleep(60*minutes)
# Decorator factory: derives the sleep duration from the decorated function's
# name (standby_<N> -> N minutes) and delegates to standby().
# NOTE(review): the inner wrapper def (original line 32) is elided here --
# `self` below belongs to that missing wrapper; code kept verbatim.
31 def standby_generic (func):
33     minutes=int(func.__name__.split("_")[1])
34     return standby(minutes,self.options.dry_run)
# Decorator: turns a TestNode method into a TestPlc step that runs the
# method on every node of every site in plc_spec, and-ing the results.
# NOTE(review): the wrapper def / `overall` init / return (original lines
# 38-39, 46-47) are elided in this listing; code kept verbatim.
37 def node_mapper (method):
40     node_method = TestNode.__dict__[method.__name__]
41     for site_spec in self.plc_spec['sites']:
42         test_site = TestSite (self,site_spec)
43         for node_spec in site_spec['nodes']:
44             test_node = TestNode (self,test_site,node_spec)
45             if not node_method(test_node): overall=False
# Decorator: turns a TestSlice method into a TestPlc step that runs the
# method (with self.options) on every slice in plc_spec.
# NOTE(review): wrapper def / `overall` init / return are elided in this
# listing (original lines 50-51, 58-59); code kept verbatim.
49 def slice_mapper_options (method):
52     slice_method = TestSlice.__dict__[method.__name__]
53     for slice_spec in self.plc_spec['slices']:
54         site_spec = self.locate_site (slice_spec['sitename'])
55         test_site = TestSite(self,site_spec)
56         test_slice=TestSlice(self,test_site,slice_spec)
57         if not slice_method(test_slice,self.options): overall=False
# Ordered step names run by default; SEP entries only mark where
# printable_steps breaks the display line.
65 default_steps = ['uninstall','install','install_rpm', 
66     'configure', 'start', 'fetch_keys', SEP,
67     'store_keys', 'clear_known_hosts', 'initscripts', SEP,
68     'sites', 'nodes', 'slices', 'nodegroups', SEP,
69     'init_node','bootcd', 'configure_qemu', 'export_qemu',
70     'kill_all_qemus', 'reinstall_node','start_node', SEP,
71     'nodes_booted', 'nodes_ssh', 'check_slice', 'check_initscripts', SEP,
72     'check_sanity', 'check_tcp', 'plcsh_stress_test', SEP,
73     'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
# Extra steps that must be requested explicitly.
# NOTE(review): ' cleanup_tracker' below carries a stray leading space --
# this likely breaks step-name lookup; confirm against the step dispatcher.
# The closing bracket of this list is elided in this listing (original
# lines 81+ missing); code kept verbatim.
74 other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
75     'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
76     'clean_sites', 'clean_nodes', 
77     'clean_slices', 'clean_keys', SEP,
78     'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
79     'db_dump' , 'db_restore', ' cleanup_tracker',
80     'standby_1 through 20'
def printable_steps (list):
    """Render a list of step names as one space-separated string,
    inserting a line break wherever a SEP marker appears."""
    joined = " ".join(list)
    return joined.replace(" " + SEP + " ", " \\\n")
87 def valid_step (step):
# Constructor: binds the plc spec and command-line options, prepares the
# build-box ssh handle, vserver coordinates, API URL and API proxy.
# NOTE(review): this listing is elided (original lines 92, 94, 98-99, ...
# missing, including the self.options assignment that self.options below
# relies on); code kept verbatim.
90 def __init__ (self,plc_spec,options):
91     self.plc_spec=plc_spec
93     self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
95     self.vserverip=plc_spec['vserverip']
96     self.vservername=plc_spec['vservername']
97     self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
# chroot-flavour myplc is no longer supported by this harness.
100     raise Exception,'chroot-based myplc testing is deprecated'
101     self.apiserver=TestApiserver(self.url,options.dry_run)
# NOTE(review): fragments of three small accessors (name / hostname /
# is_local) -- their def lines are elided in this listing; kept verbatim.
# name(): "<plcname>.<vservername>" display label.
104     name=self.plc_spec['name']
105     return "%s.%s"%(name,self.vservername)
# hostname(): host box running this plc.
108     return self.plc_spec['hostname']
# is_local(): whether the test ssh target is the local machine.
111     return self.test_ssh.is_local()
113 # define the API methods on this object through xmlrpc
114 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Return the full ssh command line that runs *command* inside
    this plc's vserver (guest)."""
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    """Start this plc's vserver on the host box; returns the shell status."""
    start_command = self.test_ssh.actual_command(self.start_guest_in_host())
    return utils.system(start_command)
def run_in_guest (self,command):
    """Run *command* inside the vserver; returns the shell exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run *command* on the host box, inside the build directory."""
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
130 #command gets run in the vserver
def host_to_guest(self,command):
    """Wrap *command* so that, run on the host box, it executes inside
    this plc's vserver."""
    return "vserver %s exec %s" % (self.vservername, command)
134 #command gets run in the vserver
def start_guest_in_host(self):
    """Command (to run on the host box) that starts this plc's vserver."""
    return "vserver %s start" % self.vservername
def run_in_guest_piped (self,local,remote):
    """Pipe the output of *local* (run on this box) into *remote*,
    which runs inside the vserver; returns the shell status."""
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),
                                                  keep_stdin=True)
    return utils.system(local + " | " + remote_command)
# Build the root authentication dict for PLCAPI calls, from the spec.
# NOTE(review): the closing brace of the dict (original line 147) is
# elided in this listing; code kept verbatim.
142 def auth_root (self):
143     return {'Username':self.plc_spec['PLC_ROOT_USER'],
144             'AuthMethod':'password',
145             'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
146             'Role' : self.plc_spec['role']
# Find a site spec by display name or login_base; raises if not found.
# NOTE(review): the `return site` lines (original 151, 153) are elided in
# this listing; code kept verbatim.
148 def locate_site (self,sitename):
149     for site in self.plc_spec['sites']:
150         if site['site_fields']['name'] == sitename:
152         if site['site_fields']['login_base'] == sitename:
154     raise Exception,"Cannot locate site %s"%sitename
# Find a node spec by its 'name' key; callers unpack the result as
# (site, node), so the elided return (original line 160) presumably
# returns that pair -- TODO confirm.
156 def locate_node (self,nodename):
157     for site in self.plc_spec['sites']:
158         for node in site['nodes']:
159             if node['name'] == nodename:
161     raise Exception,"Cannot locate node %s"%nodename
# Find a node spec by its node_fields hostname; callers unpack the result
# as (site_spec, node_spec) -- the return (original line 167) is elided.
163 def locate_hostname (self,hostname):
164     for site in self.plc_spec['sites']:
165         for node in site['nodes']:
166             if node['node_fields']['hostname'] == hostname:
168     raise Exception,"Cannot locate hostname %s"%hostname
# Find a key spec by name; the return (original line 173) is elided in
# this listing; code kept verbatim.
170 def locate_key (self,keyname):
171     for key in self.plc_spec['keys']:
172         if key['name'] == keyname:
174     raise Exception,"Cannot locate key %s"%keyname
# Find a slice spec by its slice_fields name; the return (original line
# 179) is elided in this listing; code kept verbatim.
176 def locate_slice (self, slicename):
177     for slice in self.plc_spec['slices']:
178         if slice['slice_fields']['name'] == slicename:
180     raise Exception,"Cannot locate slice %s"%slicename
# Build the list of TestSliver objects for every (slice, node) pair in
# the spec.  NOTE(review): the `result` initialisation and final return
# (original lines 183, 188) are elided in this listing; code kept verbatim.
182 def all_sliver_objs (self):
184     for slice_spec in self.plc_spec['slices']:
185         slicename = slice_spec['slice_fields']['name']
186         for nodename in slice_spec['nodenames']:
187             result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    """Build and return the TestSliver object for (nodename, slicename)."""
    (site, node) = self.locate_node(nodename)
    slice_spec = self.locate_slice(slicename)
    test_site = TestSite(self, site)
    test_node = TestNode(self, test_site, node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice(self, test_site, slice_spec)
    return TestSliver(self, test_node, test_slice)
# Build the TestNode for the first node of the first slice in the spec.
# NOTE(review): the final `return test_node` (original line 205) is
# elided in this listing; code kept verbatim.
200 def locate_first_node(self):
201     nodename=self.plc_spec['slices'][0]['nodenames'][0]
202     (site,node) = self.locate_node(nodename)
203     test_site = TestSite (self, site)
204     test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    """Return the TestSliver for the first node of the first slice."""
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
213 # all different hostboxes used in this plc
# Collect qemu (non-real) nodes grouped by their host box.
# NOTE(review): this listing is elided -- the `tuples`/`result`
# initialisations, the dict-entry creation and the final return (original
# lines 216, 224, 227-228, 230-231) are missing; code kept verbatim.
214 def gather_hostBoxes(self):
215     # maps on sites and nodes, return [ (host_box,test_node) ]
217     for site_spec in self.plc_spec['sites']:
218         test_site = TestSite (self,site_spec)
219         for node_spec in site_spec['nodes']:
220             test_node = TestNode (self, test_site, node_spec)
221             if not test_node.is_real():
222                 tuples.append( (test_node.host_box(),test_node) )
223     # transform into a dict { 'host_box' -> [ test_node .. ] }
225     for (box,node) in tuples:
226         if not result.has_key(box):
229         result[box].append(node)
232 # a step for checking this stuff
# Step: print each host box with the qemu nodes it carries.
# NOTE(review): steps are expected to return a boolean; any trailing
# return is elided in this listing; code kept verbatim.
233 def show_boxes (self):
234     for (box,nodes) in self.gather_hostBoxes().iteritems():
235         print box,":"," + ".join( [ node.name() for node in nodes ] )
238 # make this a valid step
# Step: brute-force kill of all qemus on every host box used by this plc.
# NOTE(review): any trailing return is elided in this listing; verbatim.
239 def kill_all_qemus(self):
240     # this is the brute force version, kill all qemus on that host box
241     for (box,nodes) in self.gather_hostBoxes().iteritems():
242         # pass the first nodename, as we don't push template-qemu on testboxes
243         nodedir=nodes[0].nodedir()
244         TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
247 # make this a valid step
# Step: list all qemus on every host box used by this plc.
248 def list_all_qemus(self):
249     for (box,nodes) in self.gather_hostBoxes().iteritems():
250         # this is the brute force version, kill all qemus on that host box
251         TestBox(box,self.options.buildname).list_all_qemus()
254 # kill only the right qemus
# Step: list only this plc's qemus, box by box.
# NOTE(review): the per-node body (original lines 258-261) is elided in
# this listing; code kept verbatim.
255 def list_qemus(self):
256     for (box,nodes) in self.gather_hostBoxes().iteritems():
257         # the fine-grain version
262 # kill only the right qemus
# Step: kill only this plc's qemus, box by box.
# NOTE(review): the per-node body (original lines 266+) is elided in this
# listing; code kept verbatim.
263 def kill_qemus(self):
264     for (box,nodes) in self.gather_hostBoxes().iteritems():
265         # the fine-grain version
271 ### utility methods for handling the pool of IP addresses allocated to plcs
273 # (*) running plcs are recorded in the file named ~/running-test-plcs
274 # (*) this file contains a line for each running plc, older first
275 # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
276 # (*) the free_tracker method performs a vserver stop on the oldest entry
277 # (*) the record_tracker method adds an entry at the bottom of the file
278 # (*) the cleanup_tracker method stops all known vservers and removes the tracker file
280 TRACKER_FILE="~/running-test-plcs"
# Append this plc (vserver name + host) at the bottom of the tracker file.
# NOTE(review): the success/failure branching around the two prints
# (original lines 285, 287) is elided in this listing; code kept verbatim.
282 def record_tracker (self):
283     command="echo %s %s >> %s"%(self.vservername,self.test_ssh.hostname,TestPlc.TRACKER_FILE)
284     (code,output) = utils.output_of (self.test_ssh.actual_command(command))
286         print "WARNING : COULD NOT record_tracker %s as a running plc on %s"%(self.vservername,self.test_ssh.hostname)
288         print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
# Stop the oldest recorded plc (first line of the tracker file) and drop
# that line from the file.
# NOTE(review): this listing is elided -- the empty-file / parse-error
# branching (original lines 294, 296-297, 299, 301) is missing; verbatim.
291 def free_tracker (self):
292     command="head -1 %s"%TestPlc.TRACKER_FILE
293     (code,line) = utils.output_of(self.test_ssh.actual_command(command))
295         print "No entry found in %s on %s"%(TestPlc.TRACKER_FILE,self.test_ssh.hostname)
# The tracker line format is "<vservername> <hostname>".
298         [vserver_to_stop,hostname] = line.split()
300         print "WARNING: free_tracker: Could not parse %s - skipped"%TestPlc.TRACKER_FILE
302     stop_command = "vserver --silent %s stop"%vserver_to_stop
303     utils.system(self.test_ssh.actual_command(stop_command))
304     x=TestPlc.TRACKER_FILE
# Drop the first line of the tracker file in place.
305     flush_command = "tail --lines=+2 %s > %s.tmp ; mv %s.tmp %s"%(x,x,x,x)
306     utils.system(self.test_ssh.actual_command(flush_command))
309 # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
# Stop every vserver on the test box and remove the tracker file.
310 def cleanup_tracker (self):
# Brute force: stops all vservers, not only the tracked ones (see the
# comment on original line 309).
311     stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
312     utils.system(self.test_ssh.actual_command(stop_all))
313     clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
314     utils.system(self.test_ssh.actual_command(clean_tracker))
317 self.run_in_host("vserver --silent %s delete"%self.vservername)
323 # a full path for the local calls
324 build_dir=os.path.dirname(sys.argv[0])
325 # sometimes this is empty - set to "." in such a case
326 if not build_dir: build_dir="."
327 build_dir += "/build"
329 # use a standard name - will be relative to remote buildname
331 # run checkout in any case - would do an update if already exists
332 build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
333 if self.run_in_host(build_checkout) != 0:
335 # the repo url is taken from arch-rpms-url
336 # with the last step (i386.) removed
337 repo_url = self.options.arch_rpms_url
338 for level in [ 'arch' ]:
339 repo_url = os.path.dirname(repo_url)
340 # pass the vbuild-nightly options to vtest-init-vserver
342 test_env_options += " -p %s"%self.options.personality
343 test_env_options += " -d %s"%self.options.pldistro
344 test_env_options += " -f %s"%self.options.fcdistro
345 script="vtest-init-vserver.sh"
346 vserver_name = self.vservername
347 vserver_options="--netdev eth0 --interface %s"%self.vserverip
349 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
350 vserver_options += " --hostname %s"%vserver_hostname
353 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
354 return self.run_in_host(create_vserver) == 0
def install_rpm(self):
    """Install the myplc-native rpm inside the vserver; True on success."""
    status = self.run_in_guest("yum -y install myplc-native")
    return status == 0
# Body of the 'configure' step: writes a plc-config-tty script locally,
# pipes it into plc-config-tty in the guest, then removes the temp file.
# NOTE(review): the def line and most of the PLC_* variable list (original
# lines 365-374, 378) are elided in this listing; code kept verbatim.
362     tmpname='%s.plc-config-tty'%(self.name())
363     fileconf=open(tmpname,'w')
364     for var in [ 'PLC_NAME',
368                  'PLC_MAIL_SUPPORT_ADDRESS',
# 'e <var>' then the value: plc-config-tty's edit dialect.
375         fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
376     fileconf.write('w\n')
377     fileconf.write('q\n')
379     utils.system('cat %s'%tmpname)
380     self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
381     utils.system('rm %s'%tmpname)
# NOTE(review): fragments of the 'start' and 'stop' steps -- their def
# lines (original 384, 388) are elided in this listing; kept verbatim.
385     self.run_in_guest('service plc start')
389     self.run_in_guest('service plc stop')
396 # stores the keys from the config for further use
# Step: persist every key of the spec locally for later use.
# NOTE(review): any trailing return (original line 400) is elided; verbatim.
397 def store_keys(self):
398     for key_spec in self.plc_spec['keys']:
399         TestKey(self,key_spec).store_key()
def clean_keys(self):
    """Step: remove the locally stored keys/ directory (the one that
    store_keys / fetch_keys populate).

    Bug fix: the original called os.path(sys.argv[0]) -- os.path is a
    module, not a callable, so this step always raised TypeError.  Use
    os.path.dirname() to resolve the directory of the running script.
    """
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
405 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
406 # for later direct access to the nodes
# Fetch the plc's root ssh keys from /etc/planetlab into local keys/.
# NOTE(review): this listing is elided -- the dir setup, overall init and
# final return (original lines 408, 410, 413, 418) are missing; verbatim.
407 def fetch_keys(self):
409     if not os.path.isdir(dir):
411     prefix = 'root_ssh_key'
412     vservername=self.vservername
414     for ext in [ 'pub', 'rsa' ] :
415         src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
416         dst="keys/%(vservername)s.%(ext)s"%locals()
417         if self.test_ssh.fetch(src,dst) != 0: overall=False
421 return self.do_sites()
def clean_sites (self):
    """Step: delete every site described in the spec (mirror of 'sites')."""
    action = "delete"
    return self.do_sites(action=action)
# Create (default) or delete all sites -- users are created/deleted with
# their site.  NOTE(review): the else keyword between the two branches and
# the final return (original lines 434-435, 439) are elided; verbatim.
426 def do_sites (self,action="add"):
427     for site_spec in self.plc_spec['sites']:
428         test_site = TestSite (self,site_spec)
429         if (action != "add"):
430             utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
431             test_site.delete_site()
432             # deleted with the site
433             #test_site.delete_users()
436             utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
437             test_site.create_site()
438             test_site.create_users()
# Step: delete every site known to the API (not only those in the spec).
# NOTE(review): any trailing return (original line 447) is elided; verbatim.
441 def clean_all_sites (self):
442     print 'auth_root',self.auth_root()
443     site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
444     for site_id in site_ids:
445         print 'Deleting site_id',site_id
446         self.apiserver.DeleteSite(self.auth_root(),site_id)
449 return self.do_nodes()
def clean_nodes (self):
    """Step: delete every node described in the spec (mirror of 'nodes')."""
    action = "delete"
    return self.do_nodes(action=action)
# Create (default) or delete all nodes, site by site.
# NOTE(review): the branching keywords between delete/create paths and
# the final return (original lines 456, 462, 468) are elided; verbatim.
453 def do_nodes (self,action="add"):
454     for site_spec in self.plc_spec['sites']:
455         test_site = TestSite (self,site_spec)
457             utils.header("Deleting nodes in site %s"%test_site.name())
458             for node_spec in site_spec['nodes']:
459                 test_node=TestNode(self,test_site,node_spec)
460                 utils.header("Deleting %s"%test_node.name())
461                 test_node.delete_node()
463             utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
464             for node_spec in site_spec['nodes']:
465                 utils.pprint('Creating node %s'%node_spec,node_spec)
466                 test_node = TestNode (self,test_site,node_spec)
467                 test_node.create_node ()
def nodegroups (self):
    """Step: create the nodegroups from the spec and tag their nodes."""
    return self.do_nodegroups("add")
def clean_nodegroups (self):
    """Step: delete the nodegroups described in the spec."""
    return self.do_nodegroups("delete")
475 # create nodegroups if needed, and populate
# Create/check (or delete) nodegroups: scans the spec for per-node
# 'nodegroups' entries, ensures the matching tag type and nodegroup exist,
# tags every member node with value 'yes', and verifies the tag.
# NOTE(review): this listing is heavily elided (groups_dict init, overall
# init, the add/delete branching, several try/except scaffolds and the
# final return are missing); code kept verbatim.
476 def do_nodegroups (self, action="add"):
477     # 1st pass to scan contents
479     for site_spec in self.plc_spec['sites']:
480         test_site = TestSite (self,site_spec)
481         for node_spec in site_spec['nodes']:
482             test_node=TestNode (self,test_site,node_spec)
483             if node_spec.has_key('nodegroups'):
484                 nodegroupnames=node_spec['nodegroups']
# A single name is promoted to a one-element list.
485                 if isinstance(nodegroupnames,StringTypes):
486                     nodegroupnames = [ nodegroupnames ]
487                 for nodegroupname in nodegroupnames:
488                     if not groups_dict.has_key(nodegroupname):
489                         groups_dict[nodegroupname]=[]
490                     groups_dict[nodegroupname].append(test_node.name())
491     auth=self.auth_root()
493     for (nodegroupname,group_nodes) in groups_dict.iteritems():
495         print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
496         # first, check if the nodetagtype is here
497         tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
499             tag_type_id = tag_types[0]['tag_type_id']
# Tag type missing: create one named after the nodegroup.
501             tag_type_id = self.apiserver.AddTagType(auth,
502                                                     {'tagname':nodegroupname,
503                                                      'description': 'for nodegroup %s'%nodegroupname,
506         print 'located tag (type)',nodegroupname,'as',tag_type_id
508         nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
510             self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
511             print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
512         # set node tag on all nodes, value='yes'
513         for nodename in group_nodes:
515                 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
# Presumably inside an except: clause (elided) -- tolerate pre-existing tags.
517                 traceback.print_exc()
518                 print 'node',nodename,'seems to already have tag',nodegroupname
# Verify the tag value really is 'yes'.
521                 expect_yes = self.apiserver.GetNodeTags(auth,
522                                                         {'hostname':nodename,
523                                                          'tagname':nodegroupname},
524                                                         ['tagvalue'])[0]['tagvalue']
525                 if expect_yes != "yes":
526                     print 'Mismatch node tag on node',nodename,'got',expect_yes
529                 if not self.options.dry_run:
530                     print 'Cannot find tag',nodegroupname,'on node',nodename
# Delete branch (action != "add"), also elided around here.
534             print 'cleaning nodegroup',nodegroupname
535             self.apiserver.DeleteNodeGroup(auth,nodegroupname)
537             traceback.print_exc()
# Return the list of all node hostnames in the spec.
# NOTE(review): the `hostnames` initialisation and final return (original
# lines 542, 546) are elided in this listing; code kept verbatim.
541 def all_hostnames (self) :
543     for site_spec in self.plc_spec['sites']:
544         hostnames += [ node_spec['node_fields']['hostname'] \
545                        for node_spec in site_spec['nodes'] ]
548 # gracetime : during the first <gracetime> minutes nothing gets printed
# Poll the API until every (qemu) node reaches the 'boot' state, or the
# <minutes> timeout expires; real nodes are ignored.  Nothing is reported
# during the first <gracetime> minutes.
# NOTE(review): this listing is heavily elided (the outer while loop,
# success return, sleep, and several branch keywords are missing);
# code kept verbatim.
549 def do_nodes_booted (self, minutes, gracetime,period=15):
550     if self.options.dry_run:
554     timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
555     graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
556     # the nodes that haven't checked yet - start with a full list and shrink over time
557     tocheck = self.all_hostnames()
558     utils.header("checking nodes %r"%tocheck)
559     # create a dict hostname -> status
560     status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
563     tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
565     for array in tocheck_status:
566         hostname=array['hostname']
567         boot_state=array['boot_state']
568         if boot_state == 'boot':
569             utils.header ("%s has reached the 'boot' state"%hostname)
571             # if it's a real node, never mind
572             (site_spec,node_spec)=self.locate_hostname(hostname)
573             if TestNode.is_real_model(node_spec['node_fields']['model']):
574                 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
# Force 'boot' so real nodes drop out of the tocheck list below.
577             elif datetime.datetime.now() > graceout:
578                 utils.header ("%s still in '%s' state"%(hostname,boot_state))
# Push graceout one day ahead so the nag is printed only once.
579                 graceout=datetime.datetime.now()+datetime.timedelta(1)
580         status[hostname] = boot_state
582     tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
585     if datetime.datetime.now() > timeout:
586         for hostname in tocheck:
587             utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
589     # otherwise, sleep for a while
591 # only useful in empty plcs
591 # only useful in empty plcs
def nodes_booted(self):
    """Step: wait up to 20 minutes (15 of grace) for all nodes to boot."""
    return self.do_nodes_booted(minutes=20, gracetime=15)
# Poll until every node is reachable over ssh (root key), within
# <minutes>; quiet during the first <gracetime> minutes.  The ssh probe
# is run from inside the plc guest.
# NOTE(review): this listing is elided (outer while loop, success/else
# branches, sleep and returns are missing); code kept verbatim.
597 def do_nodes_ssh(self,minutes,gracetime,period=15):
599     timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
600     graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
601     tocheck = self.all_hostnames()
602     # self.scan_publicKeys(tocheck)
603     utils.header("checking Connectivity on nodes %r"%tocheck)
605     for hostname in tocheck:
606         # try to ssh in nodes
607         node_test_ssh = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa")
608         success=self.run_in_guest(node_test_ssh.actual_command("hostname"))==0
610             utils.header('The node %s is sshable -->'%hostname)
612             tocheck.remove(hostname)
614             # we will have tried real nodes once, in case they're up - but if not, just skip
615             (site_spec,node_spec)=self.locate_hostname(hostname)
616             if TestNode.is_real_model(node_spec['node_fields']['model']):
617                 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
618                 tocheck.remove(hostname)
619             elif datetime.datetime.now() > graceout:
620                 utils.header("Could not ssh-enter root context on %s"%hostname)
623     if datetime.datetime.now() > timeout:
624         for hostname in tocheck:
625             utils.header("FAILURE to ssh into %s"%hostname)
627     # otherwise, sleep for a while
629 # only useful in empty plcs
629 # only useful in empty plcs
633 return self.do_nodes_ssh(minutes=10,gracetime=5)
# Per-node steps: the bodies are empty because the work is done by the
# decorator, which maps the same-named TestNode method over all nodes.
# NOTE(review): the decorator lines (original 635, 637, 639, 641, 643 --
# presumably @node_mapper) are elided in this listing; kept verbatim.
636 def init_node (self): pass
638 def bootcd (self): pass
640 def configure_qemu (self): pass
642 def reinstall_node (self): pass
644 def export_qemu (self): pass
646 ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
def check_sanity_node (self):
    """Run the qaapi sanity checks against the first node."""
    first_node = self.locate_first_node()
    return first_node.check_sanity()
def check_sanity_sliver (self) :
    """Run the qaapi sanity checks against the first sliver."""
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_sanity()
def check_sanity (self):
    """Step: node sanity first, then sliver sanity (short-circuits)."""
    node_ok = self.check_sanity_node()
    return node_ok and self.check_sanity_sliver()
# For every slice that declares an initscript, verify on each of its
# slivers that the initscript has run.
# NOTE(review): the overall init, the loop-skip for slices without an
# initscript, and the final return (original lines 657, 660, 670-671) are
# elided in this listing; code kept verbatim.
656 def do_check_initscripts(self):
658     for slice_spec in self.plc_spec['slices']:
659         if not slice_spec.has_key('initscriptname'):
661         initscript=slice_spec['initscriptname']
662         for nodename in slice_spec['nodenames']:
663             (site,node) = self.locate_node (nodename)
664             # xxx - passing the wrong site - probably harmless
665             test_site = TestSite (self,site)
666             test_slice = TestSlice (self,test_site,slice_spec)
667             test_node = TestNode (self,test_site,node)
668             test_sliver = TestSliver (self, test_node, test_slice)
669             if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    """Step: verify each slice's configured initscript ran on its slivers."""
    return self.do_check_initscripts()
# Step: register every initscript of the spec through the API.
# NOTE(review): any trailing return (original line 680+) is elided; verbatim.
676 def initscripts (self):
677     for initscript in self.plc_spec['initscripts']:
678         utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
679         self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
# Step: delete the initscripts of the spec; deletion failures are treated
# as "probably did not exist".
# NOTE(review): the try/except keywords around DeleteInitScript and any
# trailing return (original lines 686, 689, 691+) are elided; verbatim.
682 def clean_initscripts (self):
683     for initscript in self.plc_spec['initscripts']:
684         initscript_name = initscript['initscript_fields']['name']
685         print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
687             self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
688             print initscript_name,'deleted'
690             print 'deletion went wrong - probably did not exist'
695 return self.do_slices()
def clean_slices (self):
    """Step: delete every slice described in the spec (mirror of 'slices')."""
    return self.do_slices("delete")
# Create (default) or delete all slices of the spec.
# NOTE(review): the add/delete branching keywords and the final return
# (original lines 705, 708, 712) are elided in this listing; verbatim.
700 def do_slices (self, action="add"):
701     for slice in self.plc_spec['slices']:
702         site_spec = self.locate_site (slice['sitename'])
703         test_site = TestSite(self,site_spec)
704         test_slice=TestSlice(self,test_site,slice)
706             utils.header("Deleting slices in site %s"%test_site.name())
707             test_slice.delete_slice()
709             utils.pprint("Creating slice",slice)
710             test_slice.create_slice()
711             utils.header('Created Slice %s'%slice['slice_fields']['name'])
# Step: body is empty because slice_mapper_options maps the same-named
# TestSlice method over every slice of the spec.
714 @slice_mapper_options
715 def check_slice(self): pass
# Two more decorator-mapped per-node steps; their decorator lines
# (original 717, 720 -- presumably @node_mapper) are elided in this
# listing; kept verbatim.
718 def clear_known_hosts (self): pass
721 def start_node (self) : pass
# Step: for each entry of plc_spec['tcp_test'], start a TCP server in the
# server sliver and connect a client to it.
# NOTE(review): the per-spec loop head, the port extraction, the overall
# bookkeeping and the return (original lines 725-728, 731-733, 737+) are
# elided in this listing; code kept verbatim.  Also note both slivers
# below are located with spec['server_node']/spec['server_slice'] -- the
# client side presumably should use client_* keys; TODO confirm.
723 def check_tcp (self):
724     specs = self.plc_spec['tcp_test']
729         s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
730         if not s_test_sliver.run_tcp_server(port,timeout=10):
734         # idem for the client side
735         c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
736         if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# Step: copy plcsh-stress-test.py into the guest and run it with --check.
# NOTE(review): the command initialisation and the small_test branch body
# (original lines 745, 748) are elided in this listing; code kept verbatim.
740 def plcsh_stress_test (self):
741     # install the stress-test in the plc image
742     location = "/usr/share/plc_api/plcsh-stress-test.py"
743     remote="/vservers/%s/%s"%(self.vservername,location)
744     self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
746     command += " -- --check"
747     if self.options.small_test:
749     return ( self.run_in_guest(command) == 0)
# Step: collect all logs -- plc /var/log, per-node qemu logs, per-node
# /var/log, and sample sliver /var/log -- into the local logs/ tree.
# NOTE(review): minor elisions in this listing (original lines 756, 759,
# 766, 769, 772-773, presumably blanks or a return); code kept verbatim.
751 def gather_logs (self):
752     # (1) get the plc's /var/log and store it locally in logs/myplc.var-log.<plcname>/*
753     # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
754     # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
755     # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
757     print "-------------------- TestPlc.gather_logs : PLC's /var/log"
758     self.gather_var_logs ()
760     print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
761     for site_spec in self.plc_spec['sites']:
762         test_site = TestSite (self,site_spec)
763         for node_spec in site_spec['nodes']:
764             test_node=TestNode(self,test_site,node_spec)
765             test_node.gather_qemu_logs()
767     print "-------------------- TestPlc.gather_logs : nodes's /var/log"
768     self.gather_nodes_var_logs()
770     print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
771     self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    """Fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/."""
    for sliver in self.all_sliver_objs():
        fetch_command = sliver.tar_var_logs()
        target = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % target)
        utils.system(fetch_command + " | tar -C %s -xf -" % target)
def gather_var_logs (self):
    """Fetch the plc guest's /var/log into logs/myplc.var-log.<plcname>/."""
    target = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    fetch_command = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(fetch_command + "| tar -C %s -xf -" % target)
    # open up the httpd logs, which are not world-readable by default
    utils.system("chmod a+r,a+x %s/httpd" % target)
def gather_nodes_var_logs (self):
    """Fetch each node's /var/log (reached through the plc guest) into
    logs/node.var-log.<node>/."""
    for site_spec in self.plc_spec['sites']:
        site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self, site, node_spec)
            node_ssh = TestSsh(node.name(), key="/etc/planetlab/root_ssh_key.rsa")
            fetch_command = self.actual_command_in_guest(
                node_ssh.actual_command("tar -C /var/log -cf - ."))
            target = "logs/node.var-log.%s" % node.name()
            utils.system("mkdir -p %s" % target)
            utils.system(fetch_command + "| tar -C %s -xf -" % target)
802 # returns the filename to use for sql dump/restore, using options.dbname if set
803 def dbfile (self, database):
804 # uses options.dbname if it is found
806 name=self.options.dbname
807 if not isinstance(name,StringTypes):
810 t=datetime.datetime.now()
813 return "/root/%s-%s.sql"%(database,name)
# Body of the db_dump step -- its def line (original 815) is elided in
# this listing; kept verbatim.  NOTE(review): "planetab4" looks like a
# typo for "planetlab4"; it only affects the dump filename and db_restore
# uses the same spelling, so dump/restore stay consistent.
816     dump=self.dbfile("planetab4")
817     self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
818     utils.header('Dumped planetlab4 database in %s'%dump)
# Step: restore planetlab4 from the dump file, recreating the database
# from scratch; httpd is stopped during the restore and restarted after.
# NOTE(review): small elisions in this listing (original lines 823, 831);
# code kept verbatim.
821 def db_restore(self):
822     dump=self.dbfile("planetab4")
824     self.run_in_guest('service httpd stop')
825     # xxx - need another wrapper
826     self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
827     self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
828     self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
829     ##starting httpd service
830     self.run_in_guest('service httpd start')
832     utils.header('Database restored from ' + dump)
# standby_1 .. standby_20: placeholder steps whose bodies are empty; the
# decorator derives the sleep duration from the function name and calls
# standby().  NOTE(review): the decorator lines (original 834, 836, ...
# -- presumably @standby_generic) are elided in this listing; verbatim.
835 def standby_1(): pass
837 def standby_2(): pass
839 def standby_3(): pass
841 def standby_4(): pass
843 def standby_5(): pass
845 def standby_6(): pass
847 def standby_7(): pass
849 def standby_8(): pass
851 def standby_9(): pass
853 def standby_10(): pass
855 def standby_11(): pass
857 def standby_12(): pass
859 def standby_13(): pass
861 def standby_14(): pass
863 def standby_15(): pass
865 def standby_16(): pass
867 def standby_17(): pass
869 def standby_18(): pass
871 def standby_19(): pass
873 def standby_20(): pass