7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
21 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    """Sleep for *minutes* minutes (used by the generated standby_* filler steps).

    NOTE(review): the lines between the header and the sleep are missing from
    this capture — presumably a dry_run guard and a 'return True'; confirm
    against the full file.
    """
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    """Decorator factory: derives a standby duration from the decorated
    function's name (standby_<N> -> N minutes).

    NOTE(review): the inner wrapper 'def' is missing from this capture —
    'self' below is only meaningful inside that (unseen) wrapper.
    """
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    """Decorator: lift a TestNode method into a TestPlc step that maps it over
    every node of every site in the spec.

    NOTE(review): the inner wrapper 'def' line and the 'overall' init/return
    bookkeeping are missing from this capture.
    """
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
def slice_mapper_options (method):
    """Decorator: lift a TestSlice method (taking options) into a TestPlc step
    mapped over every slice in the spec.

    NOTE(review): the inner wrapper 'def' line and the 'overall' init/return
    bookkeeping are missing from this capture.
    """
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
# Canonical step lists; the SEP entries only mark display grouping for
# printable_steps.  NOTE(review): the closing bracket of other_steps is
# missing from this capture.
default_steps = ['uninstall','install','install_rpm',
                 'configure', 'start', 'fetch_keys', SEP,
                 'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                 'sites', 'nodes', 'slices', 'nodegroups', SEP,
                 'init_node','bootcd', 'configure_qemu', 'export_qemu',
                 'kill_all_qemus', 'reinstall_node','start_node', SEP,
                 'nodes_booted', 'nodes_ssh', 'check_slice', 'check_initscripts', SEP,
                 'check_sanity', 'check_tcp', 'plcsh_stress_test', SEP,
                 'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
# steps that exist but are not run by default
other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
                'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
                'clean_sites', 'clean_nodes',
                'clean_slices', 'clean_keys', SEP,
                'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                'db_dump' , 'db_restore', 'cleanup_trackers', 'cleanup_all_trackers',
                'standby_1 through 20'
def printable_steps (list):
    """Render the step names as one string, breaking the line at each SEP marker."""
    one_line = " ".join(list)
    return one_line.replace(" "+SEP+" "," \\\n")
def valid_step (step):
    # NOTE(review): body missing from this capture — presumably a membership
    # test of *step* against default_steps/other_steps.
def __init__ (self,plc_spec,options):
    """Bind the plc spec and command-line options; set up the ssh helper, the
    vserver coordinates and the API URL.

    NOTE(review): several lines are missing from this capture — at least the
    'self.options' assignment and the chroot/vserver test that guards the
    'raise' below (as written the raise would be unconditional).
    """
    self.plc_spec=plc_spec
    self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)
# NOTE(review): orphan bodies — the 'def' headers of these small accessors
# (apparently name(), hostname(), is_local()) are missing from this capture.
name=self.plc_spec['name']
return "%s.%s"%(name,self.vservername)
return self.plc_spec['hostname']
return self.test_ssh.is_local()
113 # define the API methods on this object through xmlrpc
114 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Return the full host-side ssh command line that runs *command* inside the guest."""
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    """Start the vserver guest from the host; returns utils.system's status."""
    host_side = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_side))
def run_in_guest (self,command):
    """Run *command* inside the vserver guest; returns the shell exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run *command* on the host test box, within the buildname context."""
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
# command gets run in the vserver
def host_to_guest(self,command):
    """Wrap *command* into the host-side 'vserver ... exec ...' invocation."""
    wrapped = "vserver %s exec %s"%(self.vservername,command)
    return wrapped
# builds the host-side command that *starts* the vserver guest (unlike
# host_to_guest above, nothing is run inside the guest here)
def start_guest_in_host(self):
    return "vserver %s start"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    """Run host-side *local* and pipe its stdout into *remote* inside the guest."""
    guest_side = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+guest_side)
def auth_root (self):
    """Root authentication struct for PLCAPI calls, built from the plc spec.

    NOTE(review): the closing brace of the dict literal is missing from this
    capture.
    """
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
def locate_site (self,sitename):
    """Find a site spec by display name or login_base; raises if not found.

    NOTE(review): the 'return site' lines inside both matches are missing
    from this capture.
    """
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    """Find the (site_spec, node_spec) pair for *nodename*; raises if absent.

    NOTE(review): the 'return (site,node)' line is missing from this capture
    (callers unpack a 2-tuple, e.g. locate_sliver_obj).
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    """Find the (site_spec, node_spec) pair whose node_fields hostname matches.

    NOTE(review): the return line inside the match is missing from this
    capture.
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    """Find a key spec by name; raises if not found.

    NOTE(review): the 'return key' line is missing from this capture.
    """
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    """Find a slice spec by its slice_fields name; raises if not found.

    NOTE(review): the 'return slice' line is missing from this capture.
    """
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    """Build a TestSliver for every (slice, node) pair in the spec.

    NOTE(review): the 'result=[]' initialization and the final 'return result'
    are missing from this capture.
    """
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    """Build and return the TestSliver object for the given node and slice names."""
    site_spec, node_spec = self.locate_node(nodename)
    slice_spec = self.locate_slice(slicename)
    test_site = TestSite(self, site_spec)
    test_node = TestNode(self, test_site, node_spec)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice(self, test_site, slice_spec)
    return TestSliver(self, test_node, test_slice)
def locate_first_node(self):
    """Build a TestNode for the first node of the first slice in the spec.

    NOTE(review): the trailing 'return test_node' is missing from this
    capture.
    """
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    """Return the TestSliver object for the first node of the first slice in the spec."""
    first_slice = self.plc_spec['slices'][0]
    first_node = first_slice['nodenames'][0]
    return self.locate_sliver_obj(first_node, first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    """Map qemu nodes into a dict { host_box : [TestNode, ...] }.

    NOTE(review): the 'tuples'/'result' initializations, the new-key branch
    body and the final return are missing from this capture.
    """
    # maps on sites and nodes, return [ (host_box,test_node) ]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu (non-real) nodes live on a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
# a step for checking this stuff
def show_boxes (self):
    """Step: print each host box together with the node names it hosts.

    NOTE(review): the trailing 'return True' expected of a step is missing
    from this capture.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )
# make this a valid step
def kill_all_qemus(self):
    """Step: brute-force kill every qemu on each host box used by this plc.

    NOTE(review): the trailing 'return True' is missing from this capture.
    """
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
# make this a valid step
def list_all_qemus(self):
    """Step: list all qemus on each host box used by this plc (brute force).

    NOTE(review): the trailing 'return True' is missing from this capture.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBox(box,self.options.buildname).list_all_qemus()
# kill only the right qemus
def list_qemus(self):
    """List only this plc's qemus, box by box.

    NOTE(review): the per-box body and return are missing from this capture.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
# kill only the right qemus
def kill_qemus(self):
    """Kill only this plc's qemus, box by box.

    NOTE(review): the per-box body and return are missing from this capture.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
### utility methods for handling the pool of IP addresses allocated to plcs
# (*) running plcs are recorded in the file named ~/running-test-plcs
# (*) this file contains a line for each running plc, older first
# (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
# (*) the free_tracker method performs a vserver stop on the oldest entry
# (*) the record_tracker method adds an entry at the bottom of the file
# (*) the cleanup_tracker method stops all known vservers and removes the tracker file
# class-level path of the tracker file described above
TRACKER_FILE=os.environ['HOME']+"/running-test-plcs"
def record_tracker (self):
    """Append this plc's '<vservername> <hostname>' line to the tracker file.

    NOTE(review): the try/except around the initial read, the duplicate-entry
    test guarding the 'already included' message, the body of the write loop
    and the final return are missing from this capture.
    """
    lines=file(TestPlc.TRACKER_FILE).readlines()
    this_line="%s %s\n"%(self.vservername,self.test_ssh.hostname)
    print 'this vserver is already included in %s'%TestPlc.TRACKER_FILE
    if self.options.dry_run:
        print 'dry_run: record_tracker - skipping tracker update'
    tracker=file(TestPlc.TRACKER_FILE,"w")
    for line in lines+[this_line]:
    print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
def free_tracker (self, keep_vservers=3):
    """Stop the oldest tracked vservers so at most *keep_vservers* stay running.

    NOTE(review): several wiring lines are missing from this capture — the
    try/except around the read, the early returns (dry_run message, limit not
    reached), the loop header over to_stop, and the final rewrite of the
    tracker file; indentation below is reconstructed.
    """
    lines=file(TestPlc.TRACKER_FILE).readlines()
    print 'dry_run: free_tracker - skipping tracker update'
    how_many = len(lines) - keep_vservers
    # nothing todo until we have more than keep_vservers in the tracker
    print 'free_tracker : limit %d not reached'%keep_vservers
    to_stop = lines[:how_many]
    to_keep = lines[how_many:]
    [vname,hostname]=line.split()
    command=TestSsh(hostname).actual_command("vserver --silent %s stop"%vname)
    utils.system(command)
    if self.options.dry_run:
        print 'dry_run: free_tracker would stop %d vservers'%len(to_stop)
        for line in to_stop: print line,
        print 'dry_run: free_tracker would keep %d vservers'%len(to_keep)
        for line in to_keep: print line,
    print "Storing %d remaining vservers in %s"%(len(to_keep),TestPlc.TRACKER_FILE)
    tracker=open(TestPlc.TRACKER_FILE,"w")
# this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
def cleanup_trackers (self):
    """Stop every vserver recorded in the tracker file, then remove the file.

    NOTE(review): TestPlc.TRACKER_FILE is a string, so the '.readlines()'
    call below would raise AttributeError as written — the line(s) missing
    from this capture presumably opened the file first; confirm against the
    full file before fixing.
    """
    for line in TestPlc.TRACKER_FILE.readlines():
        [vname,hostname]=line.split()
        stop="vserver --silent %s stop"%vname
        command=TestSsh(hostname).actual_command(stop)
        utils.system(command)
    clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
    utils.system(self.test_ssh.actual_command(clean_tracker))
# this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
def cleanup_all_trackers (self):
    """Brute-force: stop every vserver under /vservers on the test box, then
    remove the tracker file.

    NOTE(review): the trailing 'return True' is missing from this capture.
    """
    stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
    utils.system(self.test_ssh.actual_command(stop_all))
    clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
    utils.system(self.test_ssh.actual_command(clean_tracker))
# NOTE(review): orphan line — its enclosing method (apparently the
# 'uninstall' step, deleting this plc's vserver) starts before the visible
# text of this capture.
self.run_in_host("vserver --silent %s delete"%self.vservername)
# NOTE(review): the enclosing method header (the vserver-creation /
# 'install' step) and several lines are missing from this capture —
# among them the body of the failed-checkout branch and the
# 'test_env_options' initialization; indentation is reconstructed.
# a full path for the local calls
build_dir=os.path.dirname(sys.argv[0])
# sometimes this is empty - set to "." in such a case
if not build_dir: build_dir="."
build_dir += "/build"
# use a standard name - will be relative to remote buildname
# run checkout in any case - would do an update if already exists
build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
if self.run_in_host(build_checkout) != 0:
# the repo url is taken from arch-rpms-url
# with the last step (i386.) removed
repo_url = self.options.arch_rpms_url
for level in [ 'arch' ]:
    repo_url = os.path.dirname(repo_url)
# pass the vbuild-nightly options to vtest-init-vserver
test_env_options += " -p %s"%self.options.personality
test_env_options += " -d %s"%self.options.pldistro
test_env_options += " -f %s"%self.options.fcdistro
script="vtest-init-vserver.sh"
vserver_name = self.vservername
vserver_options="--netdev eth0 --interface %s"%self.vserverip
vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
vserver_options += " --hostname %s"%vserver_hostname
create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
return self.run_in_host(create_vserver) == 0
def install_rpm(self):
    """Step: yum-install the native myplc rpm and the matching noderepo inside the guest.

    Returns True only if both yum invocations exit with status 0.

    Fix: the original compared only the first run_in_guest() status to 0 and
    returned the raw second status — so a *successful* second install (exit
    status 0) made the whole expression falsy and the step reported failure.
    """
    return self.run_in_guest("yum -y install myplc-native")==0 \
        and self.run_in_guest("yum -y install noderepo-$(cat /etc/nodefamily)")==0
# NOTE(review): the enclosing 'def configure' header and part of the
# variable list are missing from this capture.  Drives plc-config-tty by
# writing an 'e VAR / value ... w / q' script and piping it into the guest.
tmpname='%s.plc-config-tty'%(self.name())
fileconf=open(tmpname,'w')
for var in [ 'PLC_NAME',
             'PLC_MAIL_SUPPORT_ADDRESS',
    fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
fileconf.write('w\n')
fileconf.write('q\n')
utils.system('cat %s'%tmpname)
self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
utils.system('rm %s'%tmpname)
# NOTE(review): orphan lines — the enclosing 'def start' / 'def stop' step
# headers are missing from this capture.
self.run_in_guest('service plc start')
self.run_in_guest('service plc stop')
# stores the keys from the config for further use
def store_keys(self):
    """Step: store locally each key declared in the plc spec (via TestKey).

    NOTE(review): the trailing 'return True' expected of a step is missing
    from this capture.
    """
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
def clean_keys(self):
    """Step: remove the locally stored keys/ directory, next to this script.

    Fix: the original called os.path(sys.argv[0]) — os.path is a module, not
    a callable, so that raised TypeError at runtime.  os.path.dirname is what
    was meant (matching the build_dir computation elsewhere in this file).
    """
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def fetch_keys(self):
    """Fetch the root ssh key pair out of the vserver into the local keys/ dir.

    NOTE(review): the 'dir' assignment, the mkdir branch body, the 'overall'
    initialization and the final return are missing from this capture.
    """
    if not os.path.isdir(dir):
    prefix = 'root_ssh_key'
    vservername=self.vservername
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
# NOTE(review): orphan 'return' — the enclosing 'def sites' step header is
# missing from this capture.
return self.do_sites()
def clean_sites (self):
    """Step: delete every site (and, with it, its users) listed in the spec."""
    return self.do_sites(action="delete")
def do_sites (self,action="add"):
    """Create (default) or delete the sites and users listed in the spec.

    NOTE(review): the 'else:' separating the delete and create branches and
    the final return are missing from this capture; indentation reconstructed.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
        utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
        test_site.create_site()
        test_site.create_users()
def clean_all_sites (self):
    """Step: delete every site known to the API — not just those in the spec.

    NOTE(review): a trailing 'return True' may be missing from this capture.
    """
    print 'auth_root',self.auth_root()
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
# NOTE(review): orphan 'return' — the enclosing 'def nodes' step header is
# missing from this capture.
return self.do_nodes()
def clean_nodes (self):
    """Step: delete every node listed in the spec."""
    return self.do_nodes(action="delete")
def do_nodes (self,action="add"):
    """Create (default) or delete the nodes of each site in the spec.

    NOTE(review): the if/else wiring selecting between the delete and create
    branches and the final return are missing from this capture; indentation
    reconstructed.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        utils.header("Deleting nodes in site %s"%test_site.name())
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            utils.header("Deleting %s"%test_node.name())
            test_node.delete_node()
        utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
        for node_spec in site_spec['nodes']:
            utils.pprint('Creating node %s'%node_spec,node_spec)
            test_node = TestNode (self,test_site,node_spec)
            test_node.create_node ()
def nodegroups (self):
    """Step: create the nodegroups implied by the node specs."""
    return self.do_nodegroups("add")
def clean_nodegroups (self):
    """Step: delete those same nodegroups."""
    return self.do_nodegroups("delete")
# create nodegroups if needed, and populate
def do_nodegroups (self, action="add"):
    """Scan node specs for 'nodegroups' entries, then create/check (or, on
    delete, remove) the corresponding tag types, nodegroups and node tags.

    NOTE(review): many wiring lines are missing from this capture — the
    groups_dict initialization, the add/delete branch test, the if/else on
    tag_types and nodegroups, several try/except headers and the final
    return; the indentation below is reconstructed and must be checked
    against the full file.
    """
    # 1st pass to scan contents
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # a single name is allowed as shorthand for a one-element list
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
        tag_type_id = tag_types[0]['tag_type_id']
        tag_type_id = self.apiserver.AddTagType(auth,
                                                {'tagname':nodegroupname,
                                                 'description': 'for nodegroup %s'%nodegroupname,
        print 'located tag (type)',nodegroupname,'as',tag_type_id
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
        self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
        print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
            self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
            traceback.print_exc()
            print 'node',nodename,'seems to already have tag',nodegroupname
            # sanity check: re-read the tag value and expect "yes"
            expect_yes = self.apiserver.GetNodeTags(auth,
                                                    {'hostname':nodename,
                                                     'tagname':nodegroupname},
                                                    ['tagvalue'])[0]['tagvalue']
            if expect_yes != "yes":
                print 'Mismatch node tag on node',nodename,'got',expect_yes
            if not self.options.dry_run:
                print 'Cannot find tag',nodegroupname,'on node',nodename
        # delete branch
        print 'cleaning nodegroup',nodegroupname
        self.apiserver.DeleteNodeGroup(auth,nodegroupname)
        traceback.print_exc()
def all_hostnames (self) :
    """Collect the hostnames of every node in the spec.

    NOTE(review): the 'hostnames=[]' initialization and the final return are
    missing from this capture.
    """
    for site_spec in self.plc_spec['sites']:
        hostnames += [ node_spec['node_fields']['hostname'] \
                       for node_spec in site_spec['nodes'] ]
# gracetime : during the first <gracetime> minutes nothing gets printed
def do_nodes_booted (self, minutes, gracetime,period=15):
    """Poll GetNodes until every (qemu) node reaches 'boot', or *minutes* elapse.

    NOTE(review): several lines are missing from this capture — the dry_run
    early return, the surrounding while-loop header, the bookkeeping that
    drops boot/real nodes from 'tocheck', the success/failure returns and the
    periodic sleep; indentation below is reconstructed.
    """
    if self.options.dry_run:
    # absolute deadlines for failure and for starting to print
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
    tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
    for array in tocheck_status:
        hostname=array['hostname']
        boot_state=array['boot_state']
        if boot_state == 'boot':
            utils.header ("%s has reached the 'boot' state"%hostname)
        # if it's a real node, never mind
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
        elif datetime.datetime.now() > graceout:
            utils.header ("%s still in '%s' state"%(hostname,boot_state))
            # push graceout far away so the message is not repeated every period
            graceout=datetime.datetime.now()+datetime.timedelta(1)
        status[hostname] = boot_state
    tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != 'boot' ]
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
    # otherwise, sleep for a while
    # only useful in empty plcs
def nodes_booted(self):
    """Step: wait for every node to reach the 'boot' state (20' timeout, 15' grace)."""
    timeout_minutes, grace_minutes = 20, 15
    return self.do_nodes_booted(minutes=timeout_minutes, gracetime=grace_minutes)
def do_nodes_ssh(self,minutes,gracetime,period=20):
    """Poll root-ssh access to every node (hopping through the plc) until all
    answer, or *minutes* elapse.

    NOTE(review): the surrounding while-loop header, the success test on the
    ssh attempt, the else branch around the os.system fallback, the
    success/failure returns and the periodic sleep are missing from this
    capture; indentation reconstructed.
    """
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=gracetime)
    tocheck = self.all_hostnames()
    # self.scan_publicKeys(tocheck)
    utils.header("checking ssh access to root context on nodes %r"%tocheck)
    for hostname in tocheck:
        # try to ssh in nodes
        # ssh hostname to the node from the plc
        cmd1 = TestSsh (hostname,key="/etc/planetlab/root_ssh_key.rsa").actual_command("hostname")
        # run this in the guest
        cmd2 = self.test_ssh.actual_command(cmd1)
        # don't spam logs - show the command only after the grace period
        if datetime.datetime.now() > graceout:
            success=utils.system(cmd2)
        success=os.system(cmd2)
        utils.header('Successfully entered root@%s'%hostname)
        tocheck.remove(hostname)
        # we will have tried real nodes once, in case they're up - but if not, just skip
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
            tocheck.remove(hostname)
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE to ssh into %s"%hostname)
    # otherwise, sleep for a while
    # only useful in empty plcs
# NOTE(review): orphan 'return' — the enclosing 'def nodes_ssh' step header
# is missing from this capture.
return self.do_nodes_ssh(minutes=30,gracetime=10)
# NOTE(review): these placeholder steps are driven by the node_mapper
# decorator (the '@node_mapper' lines are missing from this capture); the
# real work lives in the same-named TestNode methods.
def init_node (self): pass
def bootcd (self): pass
def configure_qemu (self): pass
def reinstall_node (self): pass
def export_qemu (self): pass
### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
def check_sanity_node (self):
    """Run the node-level sanity check on the first node of the spec."""
    return self.locate_first_node().check_sanity()
def check_sanity_sliver (self) :
    """Run the sliver-level sanity check on the first sliver of the spec."""
    return self.locate_first_sliver().check_sanity()
def check_sanity (self):
    """Step: node-level sanity first, then (only if that passed) sliver-level."""
    node_result = self.check_sanity_node()
    return node_result and self.check_sanity_sliver()
def do_check_initscripts(self):
    """For every slice carrying an 'initscriptname', check the script ran on
    each of its nodes.

    NOTE(review): the 'overall' init/return, the body of the skip branch for
    slices without an initscript, and the failure bookkeeping are missing
    from this capture.
    """
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptname'):
        initscript=slice_spec['initscriptname']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    """Step wrapper around do_check_initscripts."""
    return self.do_check_initscripts()
def initscripts (self):
    """Step: register every initscript from the spec with the API.

    NOTE(review): the trailing 'return True' is missing from this capture.
    """
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
def clean_initscripts (self):
    """Step: best-effort deletion of the spec's initscripts.

    NOTE(review): the try/except wiring around DeleteInitScript and the final
    return are missing from this capture.
    """
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
# NOTE(review): orphan 'return' — the enclosing 'def slices' step header is
# missing from this capture.
return self.do_slices()
def clean_slices (self):
    """Step: delete the slices listed in the spec."""
    return self.do_slices("delete")
def do_slices (self, action="add"):
    """Create (default) or delete each slice in the spec.

    NOTE(review): the if/else wiring between the delete and create branches
    and the final return are missing from this capture; indentation
    reconstructed.
    """
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
# placeholder steps whose real work lives in the mapped TestSlice/TestNode
# methods; NOTE(review): the decorators for the last two stubs are missing
# from this capture.
@slice_mapper_options
def check_slice(self): pass
def clear_known_hosts (self): pass
def start_node (self) : pass
def check_tcp (self):
    """Run the tcp client/server pairs described in plc_spec['tcp_test'].

    NOTE(review): the per-spec loop header, the 'port' extraction and the
    overall/return bookkeeping are missing from this capture.  Also note the
    "client" sliver below is located with spec['server_node']/'server_slice'
    despite the comment — presumably the client-side keys were intended;
    confirm against the full file before relying on this test.
    """
    specs = self.plc_spec['tcp_test']
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
def plcsh_stress_test (self):
    """Copy the plcsh stress test into the guest image and run it there;
    True iff the guest run exits 0.

    NOTE(review): the initialization of 'command' and the body of the
    small_test branch are missing from this capture.
    """
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh-stress-test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
    command += " -- --check"
    if self.options.small_test:
    return ( self.run_in_guest(command) == 0)
def gather_logs (self):
    """Collect logs from the plc, its nodes and sample slivers into logs/.

    NOTE(review): a trailing 'return True' expected of a step appears to be
    missing from this capture.
    """
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    """For each sliver, untar its /var/log into logs/sliver.var-log.<sliver>/.

    NOTE(review): a trailing return may be missing from this capture.
    """
    for test_sliver in self.all_sliver_objs():
        remote = test_sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
        command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
        utils.system(command)
def gather_var_logs (self):
    """Fetch the guest's /var/log tree into logs/myplc.var-log.<plc>/ locally,
    then open up read permissions on the httpd subdirectory."""
    utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
    from_guest = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(from_guest + "| tar -C logs/myplc.var-log.%s -xf -"%self.name())
    utils.system("chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name())
def gather_pgsql_logs (self):
    """Fetch the guest's postgres pg_log directory into logs/myplc.pgsql-log.<plc>/."""
    utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
    to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
    utils.system(command)
def gather_nodes_var_logs (self):
    """Fetch each node's /var/log (root ssh via the plc) into logs/node.var-log.<node>/."""
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
            to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
            command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
            # the mkdir runs before the piped command, so the target exists
            utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
            utils.system(command)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    """Build '/root/<database>-<name>.sql' where <name> is options.dbname if
    it is a string.

    NOTE(review): the try/except around the options.dbname access and the
    lines turning the datetime 't' into a fallback <name> are missing from
    this capture.
    """
    # uses options.dbname if it is found
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
# NOTE(review): orphan body — the 'def db_dump' header is missing from this
# capture.  Also note the dump-file key "planetab4" (sic): it only affects
# the file name, but is inconsistent with the 'planetlab4' database actually
# dumped (db_restore uses the same spelling, so the pair stays coherent).
dump=self.dbfile("planetab4")
self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
utils.header('Dumped planetlab4 database in %s'%dump)
def db_restore(self):
    """Drop and re-create the planetlab4 database from the dump file, cycling
    httpd around the operation.

    NOTE(review): at least one line right after the dbfile() call and the
    final return are missing from this capture; "planetab4" (sic) matches
    db_dump's file naming.
    """
    dump=self.dbfile("planetab4")
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
    self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
# NOTE(review): each of these placeholder steps is normally wrapped by the
# standby_generic decorator (the '@standby_generic' lines are missing from
# this capture); the trailing number is the standby duration in minutes.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass