import os, os.path
import sys
import time
import datetime
import traceback
import socket
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBox import TestBox
from TestSsh import TestSsh
from TestApiserver import TestApiserver
# step methods must take (self) and return a boolean (options is a member of the class)
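# for illustration only - a hypothetical step obeying this convention could read:
#     def my_step (self):
#         return self.run_in_guest("service plc status") == 0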
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
def node_mapper (method):
    def actual(self):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node): overall=False
        return overall
    return actual

def slice_mapper_options (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    return actual
SEP='<sep>'

class TestPlc:

    default_steps = ['display','uninstall','install','install_rpm',
                     'configure', 'start', 'fetch_keys', SEP,
                     'store_keys', 'clear_known_hosts', 'initscripts', SEP,
                     'sites', 'nodes', 'slices', 'nodegroups', SEP,
                     'init_node','bootcd', 'configure_qemu', 'export_qemu',
                     'kill_all_qemus', 'reinstall_node','start_node', SEP,
                     'nodes_debug_ssh', 'nodes_boot_ssh', 'check_slice', 'check_initscripts', SEP,
                     'check_sanity', 'check_tcp', 'plcsh_stress_test', SEP,
                     'force_gather_logs', 'force_kill_qemus', 'force_record_tracker','force_free_tracker' ]
    other_steps = [ 'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
                    'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
                    'clean_sites', 'clean_nodes',
                    'clean_slices', 'clean_keys', SEP,
                    'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
                    'db_dump' , 'db_restore', 'cleanup_trackers', 'cleanup_all_trackers',
                    'standby_1 through 20' ]
    @staticmethod
    def printable_steps (list):
        return " ".join(list).replace(" "+SEP+" "," \\\n")
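    # worked example: printable_steps(['display','install',SEP,'configure']) yields
    # "display install \" followed by a newline and then "configure" - SEP markers
    # thus show up as line breaks when the step list gets printed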
    @staticmethod
    def valid_step (step):
        return step != SEP
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        try:
            self.vserverip=plc_spec['vserverip']
            self.vservername=plc_spec['vservername']
            self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        except:
            raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['hostname']

    def is_local (self):
        return self.test_ssh.is_local()
    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)
    # command gets run in the vserver
    def host_to_guest(self,command):
        return "vserver %s exec %s"%(self.vservername,command)

    # starts the vserver
    def start_guest_in_host(self):
        return "vserver %s start"%(self.vservername)

    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
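    # for illustration (names are hypothetical): with vservername='vplc01' and a remote
    # test box, run_in_guest("service plc start") goes through host_to_guest() and TestSsh
    # and ends up as something like
    #     ssh root@testbox.example.org vserver vplc01 exec service plc start
    # whereas on a local test box the vserver command is run directly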
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
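    # shape of the result (hostnames illustrative): a dict such as
    #   { 'testbox1.example.org' : [ <TestNode for vnode01>, <TestNode for vnode02> ],
    #     'testbox2.example.org' : [ <TestNode for vnode03> ] }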
    # a step for checking this stuff
    def show_boxes (self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def kill_all_qemus(self):
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
        return True

    # make this a valid step
    def list_all_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBox(box,self.options.buildname).list_all_qemus()
        return True
    # list only the right qemus
    def list_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def kill_qemus(self):
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    def display (self):
        utils.show_plc_spec (self.plc_spec)
        return True
    ### utility methods for handling the pool of IP addresses allocated to plcs
    # (*) running plcs are recorded in the file named ~/running-test-plcs
    # (*) this file contains a line for each running plc, older first
    # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
    # (*) the free_tracker method performs a vserver stop on the oldest entry
    # (*) the record_tracker method adds an entry at the bottom of the file
    # (*) the cleanup_trackers method stops all known vservers and removes the tracker file

    TRACKER_FILE=os.environ['HOME']+"/running-test-plcs"
    # how many concurrent plcs are we keeping alive - adjust with the IP pool size
    TRACKER_KEEP_VSERVERS = 12
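    # sample tracker file contents (vserver names and hostnames are made up):
    #   vplc01 testbox1.example.org
    #   vplc02 testbox2.example.org
    # oldest entries come first, which is what free_tracker relies on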
    def record_tracker (self):
        try:
            lines=file(TestPlc.TRACKER_FILE).readlines()
        except:
            lines=[]
        this_line="%s %s\n"%(self.vservername,self.test_ssh.hostname)
        if this_line in lines:
            print 'this vserver is already included in %s'%TestPlc.TRACKER_FILE
            return True
        if self.options.dry_run:
            print 'dry_run: record_tracker - skipping tracker update'
            return True
        tracker=file(TestPlc.TRACKER_FILE,"w")
        for line in lines+[this_line]:
            tracker.write(line)
        tracker.close()
        print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
        return True
    def free_tracker (self, keep_vservers=None):
        if not keep_vservers: keep_vservers=TestPlc.TRACKER_KEEP_VSERVERS
        try:
            lines=file(TestPlc.TRACKER_FILE).readlines()
        except:
            print 'dry_run: free_tracker - skipping tracker update'
            return True
        how_many = len(lines) - keep_vservers
        # nothing to do until we have more than keep_vservers in the tracker
        if how_many <= 0:
            print 'free_tracker : limit %d not reached'%keep_vservers
            return True
        to_stop = lines[:how_many]
        to_keep = lines[how_many:]
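        # worked example (illustrative numbers): with 15 entries in the tracker and the
        # default TRACKER_KEEP_VSERVERS of 12, how_many is 3, so the 3 oldest entries
        # end up in to_stop and the 12 most recent ones in to_keep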
        if not self.options.dry_run:
            for line in to_stop:
                [vname,hostname]=line.split()
                command=TestSsh(hostname).actual_command("vserver --silent %s stop"%vname)
                utils.system(command)
        if self.options.dry_run:
            print 'dry_run: free_tracker would stop %d vservers'%len(to_stop)
            for line in to_stop: print line,
            print 'dry_run: free_tracker would keep %d vservers'%len(to_keep)
            for line in to_keep: print line,
        else:
            print "Storing %d remaining vservers in %s"%(len(to_keep),TestPlc.TRACKER_FILE)
            tracker=open(TestPlc.TRACKER_FILE,"w")
            for line in to_keep:
                tracker.write(line)
            tracker.close()
        return True
    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
    def cleanup_trackers (self):
        for line in file(TestPlc.TRACKER_FILE).readlines():
            [vname,hostname]=line.split()
            stop="vserver --silent %s stop"%vname
            command=TestSsh(hostname).actual_command(stop)
            utils.system(command)
        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
        utils.system(self.test_ssh.actual_command(clean_tracker))
        return True
    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
    def cleanup_all_trackers (self):
        stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
        utils.system(self.test_ssh.actual_command(stop_all))
        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
        utils.system(self.test_ssh.actual_command(clean_tracker))
        return True

    def uninstall(self):
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        return True
    def install(self):
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
        # run checkout in any case - would do an update if already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
            return False
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
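        # for example (URL is illustrative): an arch-rpms-url of
        #   http://build.example.org/2008.06/f8/i386
        # is reduced to http://build.example.org/2008.06/f8 by the dirname step above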
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            # no reverse lookup for the vserver IP - proceed without --hostname
            pass
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def install_rpm(self):
        return self.run_in_guest("yum -y install myplc-native")==0 \
            and self.run_in_guest("yum -y install noderepo-%s-%s"%(self.options.pldistro,self.options.arch))==0
    def configure(self):
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
    def start(self):
        self.run_in_guest('service plc start')
        return True

    def stop(self):
        self.run_in_guest('service plc stop')
        return True
    # stores the keys from the config for further use
    def store_keys(self):
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def clean_keys(self):
        utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
        return True
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def fetch_keys(self):
        dir="keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        overall=True
        prefix = 'root_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
    def sites (self):
        return self.do_sites()

    def clean_sites (self):
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # users are deleted with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True
    def clean_all_sites (self):
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
    def nodes (self):
        return self.do_nodes()

    def clean_nodes (self):
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True
    def nodegroups (self):
        return self.do_nodegroups("add")

    def clean_nodegroups (self):
        return self.do_nodegroups("delete")

    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
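        # at this point groups_dict maps each nodegroup name to its member node names,
        # e.g. (illustrative) { 'mygroup' : ['vnode01.example.org','vnode02.example.org'] }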
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # create the nodegroup itself if needed
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check it anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['tagvalue'])[0]['tagvalue']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall = False
        return overall
    def all_hostnames (self) :
        hostnames = []
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
        return hostnames
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # pretend it reached the target state
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True
    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=20,silent_minutes=15)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=20):
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/%(vservername)s.rsa"%locals()
        tocheck = self.all_hostnames()
        utils.header("checking ssh access (expected in %s mode) to nodes %r"%(message,tocheck))
        utils.header("max timeout is %d minutes, silent for %d minutes"%(timeout_minutes,silent_minutes))
        while tocheck:
            for hostname in tocheck:
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                if datetime.datetime.now() > graceout:
                    success=utils.system(command)
                else:
                    # truly silent, just print out a dot to show we're alive
                    print '.',
                    sys.stdout.flush()
                    command += " 2>/dev/null"
                    if self.options.dry_run:
                        print 'dry_run',command
                        success=0
                    else:
                        success=os.system(command)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    tocheck.remove(hostname)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        tocheck.remove(hostname)
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE to ssh into %s"%hostname)
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True
    def nodes_debug_ssh(self):
        return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10)

    def nodes_boot_ssh(self):
        return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10)
    @node_mapper
    def init_node (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def configure_qemu (self): pass
    @node_mapper
    def reinstall_node (self): pass
    @node_mapper
    def export_qemu (self): pass
    ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
    def check_sanity_node (self):
        return self.locate_first_node().check_sanity()
    def check_sanity_sliver (self) :
        return self.locate_first_sliver().check_sanity()

    def check_sanity (self):
        return self.check_sanity_node() and self.check_sanity_sliver()
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
                continue
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
                    overall = False
        return overall

    def check_initscripts(self):
        return self.do_check_initscripts()
    def initscripts (self):
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def clean_initscripts (self):
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
    def slices (self):
        return self.do_slices()

    def clean_slices (self):
        return self.do_slices("delete")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action != "add":
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
            else:
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
        return True
    @slice_mapper_options
    def check_slice(self): pass

    @node_mapper
    def clear_known_hosts (self): pass

    @node_mapper
    def start_node (self) : pass
    def check_tcp (self):
        specs = self.plc_spec['tcp_test']
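        # each entry is expected to name a server and a client sliver plus a port,
        # e.g. (illustrative) { 'server_node':'vnode01.example.org', 'server_slice':'ts_slice1',
        #                       'client_node':'vnode02.example.org', 'client_slice':'ts_slice2',
        #                       'port':9999 }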
        overall=True
        for spec in specs:
            port = spec['port']
            # the server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall
    def plcsh_stress_test (self):
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh-stress-test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
        command = location
        command += " -- --check"
        if self.options.small_test:
            # run a reduced test in that case
            pass
        return ( self.run_in_guest(command) == 0)
    def gather_logs (self):
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()

        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
        self.gather_pgsql_logs ()

        print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()

        print "-------------------- TestPlc.gather_logs : nodes' /var/log"
        self.gather_nodes_var_logs()

        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)
    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)
    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
                to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
                command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            name=str(t.date())
        return "/root/%s-%s.sql"%(database,name)
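    # for example (illustrative values): with options.dbname unset on 2008-06-23,
    # dbfile("planetlab4") returns "/root/planetlab4-2008-06-23.sql"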
    def db_dump(self):
        dump=self.dbfile("planetlab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
        return True
    def db_restore(self):
        dump=self.dbfile("planetlab4")
        ##stopping httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass