7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
20 from Trackers import TrackerPlc, TrackerQemu
22 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep helper backing the standby_<n> steps at the bottom of the file.
# NOTE(review): dry_run is accepted but never tested in the visible lines —
# presumably a dry-run guard sits on an elided line; confirm before relying on it.
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
# Decorator factory: derives the standby duration from the decorated
# function's name (e.g. standby_5 -> 5 minutes) and sleeps that long.
# NOTE(review): the inner wrapper 'def' that introduces `self` is on an
# elided line; the visible lines are the wrapper body.
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
# Decorator: lifts a TestNode method of the same name into a plc-wide step
# that runs it on every node of every site, and-ing results into `overall`.
# NOTE(review): the inner wrapper definition, the `overall` initialisation
# and the final return are on elided lines.
def node_mapper (method):
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
# Decorator: lifts a TestSlice method of the same name into a plc-wide step
# that runs it (with self.options) on every slice in the spec.
# NOTE(review): the inner wrapper definition, the `overall` initialisation
# and the final return are on elided lines.
def slice_mapper_options (method):
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
# NOTE(review): fragment of the step-name tables (the opening
# 'default_steps = [' style lines are elided in this chunk); SEP entries
# mark line breaks for printable_steps() below.
    'display','trqemu_record','trqemu_free','uninstall','install','install_rpm',
    'configure', 'start', 'fetch_keys', SEP,
    'store_keys', 'clear_known_hosts', 'initscripts', SEP,
    'sites', 'nodes', 'slices', 'nodegroups', SEP,
    'init_node','bootcd', 'configure_qemu', 'export_qemu',
    'kill_all_qemus', 'reinstall_node','start_node', SEP,
    # better use of time: do this now that the nodes are taking off
    'plcsh_stress_test', SEP,
    'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
    'force_gather_logs', 'force_trplc_record','force_trplc_free',
    'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
    'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
    'clean_sites', 'clean_nodes',
    'clean_slices', 'clean_keys', SEP,
    'show_boxes', 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
    'db_dump' , 'db_restore', 'trplc_cleanup','trqemu_cleanup','trackers_cleanup', SEP,
    'standby_1 through 20',
def printable_steps (list):
    """Render a step list as one space-separated string, breaking the line at SEP markers."""
    flat = " ".join(list)
    return flat.replace(" " + SEP + " ", " \\\n")
# Predicate used by the step driver: True when `step` names a known step.
# NOTE(review): the function body is on elided lines.
def valid_step (step):
def __init__ (self,plc_spec,options):
    # full plc description: sites, nodes, slices, keys, initscripts ...
    self.plc_spec=plc_spec
    # NOTE(review): self.options is read below but its assignment
    # (presumably self.options=options) is on an elided line.
    self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    # PLCAPI endpoint of the myplc running inside the vserver
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    # NOTE(review): this raise is guarded by an elided conditional (it only
    # fires for chroot-based specs); it appears unconditional as shown here.
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)
# NOTE(review): the def lines for name() / hostname() / is_local() are
# elided in this chunk; only their one-line bodies are visible.
    # name(): "<spec name>.<vservername>" pretty identifier
    name=self.plc_spec['name']
    return "%s.%s"%(name,self.vservername)
    # hostname(): the plc's host box
    return self.plc_spec['hostname']
    # is_local(): whether the host box is the local machine (delegated to TestSsh)
    return self.test_ssh.is_local()
121 # define the API methods on this object through xmlrpc
122 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Full ssh command line that runs `command` inside the plc's vserver."""
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    """Start the plc's vserver on the host box; returns the shell exit status."""
    start_command = self.start_guest_in_host()
    full_command = self.test_ssh.actual_command(start_command)
    return utils.system(full_command)
def run_in_guest (self,command):
    """Run `command` inside the vserver and return its exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run `command` on the host box (outside the vserver), via TestSsh."""
    status = self.test_ssh.run_in_buildname(command)
    return status
138 #command gets run in the vserver
139 def host_to_guest(self,command):
140 return "vserver %s exec %s"%(self.vservername,command)
142 #command gets run in the vserver
143 def start_guest_in_host(self):
144 return "vserver %s start"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    """Run `local` on this host and pipe its stdout into `remote` inside the vserver."""
    guest_side = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local + " | " + guest_side)
def auth_root (self):
    """Root-account auth struct for PLCAPI calls, built from plc_spec.

    NOTE(review): the 'return {' opening of the dict literal and its
    closing brace are on elided lines; only the entries are visible here.
    """
    'Username':self.plc_spec['PLC_ROOT_USER'],
    'AuthMethod':'password',
    'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
    'Role' : self.plc_spec['role']
def locate_site (self,sitename):
    """Return the site spec matching `sitename` (by name or login_base).

    Raises Exception when no site matches.
    NOTE(review): the 'return site' lines inside the two matches are elided.
    """
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    """Return (site_spec,node_spec) for `nodename`; raises when not found.

    NOTE(review): the return statement inside the match is elided.
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    """Return (site_spec,node_spec) for the node whose hostname matches.

    Raises Exception when no node matches.
    NOTE(review): the return statement inside the match is elided.
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    """Return the key spec named `keyname`; raises when not found.

    NOTE(review): the return statement inside the match is elided.
    """
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    """Return the slice spec named `slicename`; raises when not found.

    NOTE(review): the return statement inside the match is elided.
    """
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    """Build a TestSliver for every (slice,node) pair in the spec.

    NOTE(review): the result-list initialisation and the final return
    are on elided lines.
    """
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    """Build the TestSliver object for slice `slicename` on node `nodename`."""
    site_spec, node_spec = self.locate_node(nodename)
    slice_spec = self.locate_slice(slicename)
    site_obj = TestSite(self, site_spec)
    node_obj = TestNode(self, site_obj, node_spec)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    slice_obj = TestSlice(self, site_obj, slice_spec)
    return TestSliver(self, node_obj, slice_obj)
def locate_first_node(self):
    """TestNode for the first node of the first slice in the spec.

    NOTE(review): the final 'return test_node' is on an elided line.
    """
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    """TestSliver for the first node of the first slice in the spec."""
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): the tuples/result initialisations, the fresh-list branch
    # of the has_key test and the final return are on elided lines.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu (non-real) nodes live on a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
# a step for checking this stuff
def show_boxes (self):
    """Step: print each host box with the qemu nodes it hosts."""
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )
# make this a valid step
def kill_all_qemus(self):
    """Step: brute-force kill of every qemu process on each host box."""
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
# make this a valid step
def list_all_qemus(self):
    """Step: list every qemu process on each host box (brute force)."""
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # brute force: query each box directly, regardless of which plc owns the qemus
        TestBox(box,self.options.buildname).list_all_qemus()
# kill only the right qemus
def list_qemus(self):
    """Step: list only this plc's qemus, box by box.

    NOTE(review): the per-box body is on elided lines.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
# kill only the right qemus
def kill_qemus(self):
    """Step: kill only this plc's qemus, box by box.

    NOTE(review): the per-box body is on elided lines.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
#################### display config
# NOTE(review): the 'def display' step wrapper is elided; it runs two passes.
    self.display_pass (1)
    self.display_pass (2)

def display_pass (self,passno):
    """Print the plc spec; pass 1 shows scalar keys, pass 2 recurses into
    sites/initscripts/slices/keys (dispatch conditionals partly elided)."""
    for (key,val) in self.plc_spec.iteritems():
        # NOTE(review): the passno/key dispatch tests are on elided lines.
            self.display_site_spec(site)
            for node in site['nodes']:
                self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
            self.display_slice_spec (slice)
            self.display_key_spec (key)
        if key not in ['sites','initscripts','slices','keys']:
            print '* ',key,':',val
def display_site_spec (self,site):
    """Print a short summary of one site spec (nodes, users, fields).

    NOTE(review): the per-key dispatch tests and the node/user loops are
    partly elided in this chunk.
    """
    print '* ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
            print '* ','nodes : ',
            print node['node_fields']['hostname'],'',
            print user['name'],'',
        elif k == 'site_fields':
            print '* login_base',':',v['login_base']
        elif k == 'address_fields':
            PrettyPrinter(indent=8,depth=2).pprint(v)
def display_initscript_spec (self,initscript):
    """Print the name of one initscript spec."""
    print '* ======== initscript',initscript['initscript_fields']['name']

def display_key_spec (self,key):
    """Print the name of one key spec."""
    print '* ======== key',key['name']
def display_slice_spec (self,slice):
    """Print a summary of one slice spec.

    NOTE(review): most branches of the per-key dispatch are elided here.
    """
    print '* ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '* fields',':',
            print 'max_nodes=',v['max_nodes'],
def display_node_spec (self,node):
    """Print a one-line summary of a node spec: box, hostname, ip."""
    print "* node",node['name'],"host_box=",node['host_box'],
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
# another entry point for just showing the boxes involved
def display_mapping (self):
    """Step: show which boxes (plc host + qemu boxes) this spec uses."""
    TestPlc.display_mapping_plc(self.plc_spec)

# NOTE(review): presumably a @staticmethod (decorator line elided).
def display_mapping_plc (plc_spec):
    """Print the plc's vserver location and each node mapping."""
    print '* MyPLC',plc_spec['name']
    print '*\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
    print '*\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)

# NOTE(review): presumably a @staticmethod (decorator line elided).
def display_mapping_node (node_spec):
    """Print one node's qemu box and hostname."""
    print '* NODE %s'%(node_spec['name'])
    print '*\tqemu box %s'%node_spec['host_box']
    print '*\thostname=%s'%node_spec['node_fields']['hostname']
def trplc_record (self):
    """Step: record this plc (host, vserver) in the plc tracker."""
    tracker = TrackerPlc(self.options)
    tracker.record(self.test_ssh.hostname,self.vservername)

def trplc_free (self):
    """Step: release this plc from the tracker.

    NOTE(review): the actual free/store calls are on elided lines.
    """
    tracker = TrackerPlc(self.options)

def trplc_cleanup (self):
    """Step: wipe the plc tracker.

    NOTE(review): the actual cleanup call is on an elided line.
    """
    tracker = TrackerPlc(self.options)

def trqemu_record (self):
    """Step: record every qemu node (box, build, hostname) in the qemu tracker."""
    tracker=TrackerQemu(self.options)
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            tracker.record(node_spec['host_box'],self.options.buildname,node_spec['node_fields']['hostname'])

def trqemu_free (self):
    """Step: release every qemu node from the tracker.

    NOTE(review): the per-node free call is on an elided line.
    """
    tracker=TrackerQemu(self.options)
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:

def trqemu_cleanup (self):
    """Step: wipe the qemu tracker.

    NOTE(review): the per-node cleanup call is on an elided line.
    """
    tracker=TrackerQemu(self.options)
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:

def trackers_cleanup (self):
    """Step: clean both trackers.

    NOTE(review): the plc-tracker cleanup call is on an elided line.
    """
    self.trqemu_cleanup()

# NOTE(review): part of an elided uninstall/clean step — deletes the vserver.
    self.run_in_host("vserver --silent %s delete"%self.vservername)
# NOTE(review): body of the (elided) 'install' step — checks out the build,
# derives the yum repo url from arch_rpms_url, and creates the vserver via
# vtest-init-vserver.sh.  Several guard lines (the checkout-failure return,
# the test_env_options initialisation) are elided.
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # run checkout in any case - would do an update if already exists
    build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
    if self.run_in_host(build_checkout) != 0:
    # the repo url is taken from arch-rpms-url
    # with the last step (i386) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pass the vbuild-nightly options to vtest-init-vserver
    test_env_options += " -p %s"%self.options.personality
    test_env_options += " -d %s"%self.options.pldistro
    test_env_options += " -f %s"%self.options.fcdistro
    script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    # reverse-resolve the vserver IP to pass a hostname to the script
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def install_rpm(self):
    """Step: yum-install myplc, noderepo and bootstrapfs inside the vserver.

    NOTE(review): the per-personality `arch` assignments and the leading
    'return' of the chained expression are on elided lines.
    """
    if self.options.personality == "linux32":
    elif self.options.personality == "linux64":
        raise Exception, "Unsupported personality %r"%self.options.personality
    self.run_in_guest("yum -y install myplc")==0 and \
    self.run_in_guest("yum -y install noderepo-%s-%s"%(self.options.pldistro,arch))==0 and \
    self.run_in_guest("yum -y install bootstrapfs-%s-%s-plain"%(self.options.pldistro,arch))==0
# NOTE(review): body of the (elided) 'configure' step — writes an answer
# file and pipes it into plc-config-tty inside the vserver; the full list
# of PLC_* variables is partly elided.
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
        # 'e VAR\nvalue\n' is the plc-config-tty edit protocol
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    fileconf.write('w\n')
    fileconf.write('q\n')
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)

# NOTE(review): bodies of the (elided) 'start' and 'stop' steps.
    self.run_in_guest('service plc start')
    self.run_in_guest('service plc stop')
# stores the keys from the config for further use
def store_keys(self):
    """Step: store every key from the spec locally via TestKey.store_key()."""
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
def clean_keys(self):
    """Step: remove the locally stored keys/ directory next to this script.

    Bug fix: the original called os.path(sys.argv[0]) — os.path is a
    module, not a callable, so this always raised TypeError.  Use
    os.path.dirname() to get the directory of the running script,
    matching how the install step derives its build_dir.
    """
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def fetch_keys(self):
    """Step: copy the plc's root/debug ssh keys from the vserver into keys/.

    NOTE(review): the keys/ dir creation, the `overall` initialisation and
    the final return are on elided lines.
    """
    if not os.path.isdir(dir):
    vservername=self.vservername
    prefix = 'root_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
# NOTE(review): the (elided) 'sites' step header — delegates to do_sites.
    return self.do_sites()

def clean_sites (self):
    """Step: delete all sites from the spec."""
    return self.do_sites(action="delete")

def do_sites (self,action="add"):
    """Create ("add") or delete all sites (and their users) from the spec.

    NOTE(review): the 'else' introducing the create branch is elided.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
def clean_all_sites (self):
    """Step: delete every site known to the API (not just the spec's sites)."""
    print 'auth_root',self.auth_root()
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
# NOTE(review): the (elided) 'nodes' step header — delegates to do_nodes.
    return self.do_nodes()
def clean_nodes (self):
    """Step: delete all nodes from the spec."""
    return self.do_nodes(action="delete")

def do_nodes (self,action="add"):
    """Create ("add") or delete every node of every site in the spec.

    NOTE(review): the branch test on `action` is on an elided line.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
            utils.header("Deleting nodes in site %s"%test_site.name())
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                utils.header("Deleting %s"%test_node.name())
                test_node.delete_node()
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
def nodegroups (self):
    """Step: create the nodegroups listed in the spec and tag their nodes."""
    return self.do_nodegroups(action="add")
def clean_nodegroups (self):
    """Step: delete the nodegroups listed in the spec."""
    return self.do_nodegroups(action="delete")
# create nodegroups if needed, and populate
def do_nodegroups (self, action="add"):
    """Create ("add") or delete nodegroups, implemented as node tags.

    Each nodegroup name from the spec becomes a tag type, a nodegroup
    keyed on that tag with value 'yes', and a node tag on each member.
    NOTE(review): the groups_dict initialisation and several try/except/
    else/branch lines are elided in this chunk.
    """
    # 1st pass to scan contents
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # a node may list one group (string) or several (list)
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
        tag_type_id = tag_types[0]['tag_type_id']
        # NOTE(review): AddTagType is presumably the else-branch when the
        # tag type does not exist yet (branch lines elided).
        tag_type_id = self.apiserver.AddTagType(auth,
                                                {'tagname':nodegroupname,
                                                 'description': 'for nodegroup %s'%nodegroupname,
        print 'located tag (type)',nodegroupname,'as',tag_type_id
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
        self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
        print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
            self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
            # NOTE(review): the except clause wrapping AddNodeTag is elided.
            traceback.print_exc()
            print 'node',nodename,'seems to already have tag',nodegroupname
            # check it was indeed set to 'yes'
            expect_yes = self.apiserver.GetNodeTags(auth,
                                                    {'hostname':nodename,
                                                     'tagname':nodegroupname},
                                                    ['value'])[0]['value']
            if expect_yes != "yes":
                print 'Mismatch node tag on node',nodename,'got',expect_yes
        if not self.options.dry_run:
            print 'Cannot find tag',nodegroupname,'on node',nodename
        # delete branch (guard lines elided)
        print 'cleaning nodegroup',nodegroupname
        self.apiserver.DeleteNodeGroup(auth,nodegroupname)
        traceback.print_exc()
def all_hostnames (self) :
    """All node hostnames across every site in the spec.

    NOTE(review): the accumulator initialisation and return are elided.
    """
    for site_spec in self.plc_spec['sites']:
        hostnames += [ node_spec['node_fields']['hostname'] \
                       for node_spec in site_spec['nodes'] ]
# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    """Poll GetNodes until every node reaches `target_boot_state`.

    Quiet during the first `silent_minutes`; gives up after
    `timeout_minutes`.  Real (non-qemu) nodes are treated as reached.
    NOTE(review): the polling-loop header, the dry-run early return and
    the sleep/return lines are elided in this chunk.
    """
    if self.options.dry_run:
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
    tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
    for array in tocheck_status:
        hostname=array['hostname']
        boot_state=array['boot_state']
        if boot_state == target_boot_state:
            utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
        # if it's a real node, never mind
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
            # mark it reached so it drops out of tocheck below
            boot_state = target_boot_state
        elif datetime.datetime.now() > graceout:
            utils.header ("%s still in '%s' state"%(hostname,boot_state))
            # push graceout far in the future so we only complain once per node
            graceout=datetime.datetime.now()+datetime.timedelta(1)
        status[hostname] = boot_state
    tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
    # otherwise, sleep for a while
    # only useful in empty plcs

def nodes_booted(self):
    """Step: wait for every node to reach 'boot' (20 min max, 15 quiet)."""
    return self.nodes_check_boot_state('boot',timeout_minutes=20,silent_minutes=15)
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=20):
    """Poll ssh access to every node, with the debug or boot key.

    Uses keys/<vserver>-debug.rsa when `debug` is true, keys/<vserver>.rsa
    otherwise; quiet during `silent_minutes`, gives up after
    `timeout_minutes`.
    NOTE(review): the polling-loop header, the `message` assignment, the
    debug/boot branch tests and the sleep/return lines are elided here.
    """
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
    local_key = "keys/%(vservername)s-debug.rsa"%locals()
    local_key = "keys/%(vservername)s.rsa"%locals()
    tocheck = self.all_hostnames()
    utils.header("checking ssh access (expected in %s mode) to nodes %r"%(message,tocheck))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                 (timeout_minutes,silent_minutes,period))
    for hostname in tocheck:
        # try to run 'hostname' in the node
        command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
        # don't spam logs - show the command only after the grace period
        if datetime.datetime.now() > graceout:
            success=utils.system(command)
        # truly silent, just print out a dot to show we're alive
            command += " 2>/dev/null"
            if self.options.dry_run:
                print 'dry_run',command
            success=os.system(command)
        utils.header('Successfully entered root@%s (%s)'%(hostname,message))
        tocheck.remove(hostname)
        # we will have tried real nodes once, in case they're up - but if not, just skip
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
            tocheck.remove(hostname)
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE to ssh into %s"%hostname)
    # otherwise, sleep for a while
    # only useful in empty plcs

def nodes_ssh_debug(self):
    """Step: wait for nodes to answer ssh with the debug key (30 min max)."""
    return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10)

def nodes_ssh_boot(self):
    """Step: wait for nodes to answer ssh with the boot key (30 min max)."""
    return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10)
# Node-mapped steps: each (elided) @node_mapper decorator turns the
# TestNode method of the same name into a plc-wide step; the bodies here
# are deliberately 'pass'.
def init_node (self): pass
def bootcd (self): pass
def configure_qemu (self): pass
def reinstall_node (self): pass
def export_qemu (self): pass
796 ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
def check_sanity_node (self):
    """Run the qaapi sanity checks against the first node of the spec."""
    first_node = self.locate_first_node()
    return first_node.check_sanity()
def check_sanity_sliver (self) :
    """Run the qaapi sanity checks against the first sliver of the spec."""
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_sanity()
def check_sanity (self):
    """Step: node sanity then sliver sanity (short-circuits on node failure)."""
    result = self.check_sanity_node() and self.check_sanity_sliver()
    return result
def do_check_initscripts(self):
    """Check that each slice's initscript ran on each of its nodes.

    NOTE(review): the `overall` flag initialisation/update and the final
    return are on elided lines.
    """
    for slice_spec in self.plc_spec['slices']:
        # slices without an initscript are skipped (continue line elided)
        if not slice_spec.has_key('initscriptname'):
        initscript=slice_spec['initscriptname']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript(initscript):

def check_initscripts(self):
    """Step wrapper around do_check_initscripts()."""
    return self.do_check_initscripts()
def initscripts (self):
    """Step: add every initscript from the spec through the API."""
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])

def clean_initscripts (self):
    """Step: best-effort deletion of every initscript from the spec.

    NOTE(review): the try/except wrapping the delete is on elided lines.
    """
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
# NOTE(review): the (elided) 'slices' step header — delegates to do_slices.
    return self.do_slices()

def clean_slices (self):
    """Step: delete all slices from the spec."""
    return self.do_slices("delete")

def do_slices (self, action="add"):
    """Create ("add") or delete every slice in the spec.

    NOTE(review): the branch test on `action` is on an elided line.
    """
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
            utils.header("Deleting slices in site %s"%test_site.name())
            test_slice.delete_slice()
            utils.pprint("Creating slice",slice)
            test_slice.create_slice()
            utils.header('Created Slice %s'%slice['slice_fields']['name'])
# Mapped steps: the decorators supply the real behaviour, the bodies are
# deliberately 'pass'.  NOTE(review): the decorators for clear_known_hosts
# and start_node are on elided lines.
@slice_mapper_options
def check_slice(self): pass
def clear_known_hosts (self): pass
def start_node (self) : pass
def check_tcp (self):
    """Step: run the tcp_test client/server sliver pairs from the spec.

    NOTE(review): the loop over `specs`, the `port` extraction and the
    failure returns are on elided lines.
    """
    specs = self.plc_spec['tcp_test']
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): this looks up the *server* node/slice again — looks like
    # a copy/paste slip; the client side presumably should use
    # spec['client_node'] / spec['client_slice'].  TODO confirm and fix.
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
def plcsh_stress_test (self):
    """Step: copy plcsh_stress_test.py into the vserver and run it with --check.

    NOTE(review): the base `command` assignment and the size==1 branch
    body are on elided lines.
    """
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    command += " -- --check"
    if self.options.size == 1:
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility without slightly different options
# in particular runs with --preserve (dont cleanup) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
# NOTE(review): the 'def populate' line and the base `command` assignment
# are elided in this chunk.
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    """Step: pull all interesting logs — plc, pgsql, qemu, node and sliver /var/log."""
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    """Fetch /var/log from each sliver into logs/sliver.var-log.<name>/."""
    for sliver in self.all_sliver_objs():
        fetch_cmd = sliver.tar_var_logs()
        target_dir = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % target_dir)
        utils.system(fetch_cmd + " | tar -C %s -xf -" % target_dir)
def gather_var_logs (self):
    """Fetch the plc's /var/log into logs/myplc.var-log.<name>/ and open up httpd/."""
    log_dir = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p " + log_dir)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C " + log_dir + " -xf -")
    # make the httpd logs readable by everyone
    utils.system("chmod a+r,a+x " + log_dir + "/httpd")
def gather_pgsql_logs (self):
    """Fetch the plc's postgres logs into logs/myplc.pgsql-log.<name>/."""
    pg_dir = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p " + pg_dir)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C " + pg_dir + " -xf -")
def gather_nodes_var_logs (self):
    """Fetch /var/log from every node (hopping through the plc) into logs/node.var-log.<node>/."""
    for site_spec in self.plc_spec['sites']:
        site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self, site, node_spec)
            node_ssh = TestSsh(node.name(), key="/etc/planetlab/root_ssh_key.rsa")
            # ssh into the node from inside the guest, streaming a tar of /var/log
            fetch_cmd = self.actual_command_in_guest(node_ssh.actual_command("tar -C /var/log -cf - ."))
            node_dir = "logs/node.var-log.%s" % node.name()
            utils.system("mkdir -p " + node_dir)
            utils.system(fetch_cmd + "| tar -C " + node_dir + " -xf -")
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    """Path (inside the plc) for a sql dump of `database`.

    Uses options.dbname when it is a plain string; otherwise falls back
    to a name derived from the current time (construction elided).
    """
    # uses options.dbname if it is found
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)

# NOTE(review): body of the (elided) db_dump step.
    # NOTE(review): 'planetab4' (sic) — matches db_restore below, so dump
    # and restore stay paired, but it looks like a typo for 'planetlab4';
    # only the dump *filename* is affected, not the database name.
    dump=self.dbfile("planetab4")
    self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
    utils.header('Dumped planetlab4 database in %s'%dump)
def db_restore(self):
    """Step: drop and re-create planetlab4 from the dump written by db_dump."""
    # same (misspelled) basename as db_dump, so dump and restore stay paired
    dump=self.dbfile("planetab4")
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
    self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
# Timed-wait steps: each standby_<n> is decorated with @standby_generic
# (the decorator lines are elided), which sleeps <n> minutes; the bodies
# are deliberately 'pass'.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass