7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
20 from TestSliceSfa import TestSliceSfa
21 from TestUserSfa import TestUserSfa
23 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep for `minutes` minutes - used to pause a test run.
def standby(minutes,dry_run):
    # announce the pause so the log explains why nothing is happening
    utils.header('Entering StandBy for %d mn'%minutes)
    # NOTE(review): the dry_run short-circuit and the return value are not
    # visible in this chunk
    time.sleep(60*minutes)

# Factory for the standby_<n> steps: the duration is parsed out of the
# decorated function's name (e.g. standby_5 -> 5 minutes).
def standby_generic (func):
    # NOTE(review): the inner wrapper 'def' is not visible in this chunk;
    # 'self' presumably comes from it
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
# Decorator-style helper: lift a TestNode method into a step that runs it
# on every node of every site in plc_spec.
def node_mapper (method):
    # NOTE(review): the inner wrapper def, the 'overall' initialization and
    # the return statements are not visible in this chunk
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            # any single failing node makes the whole step fail
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator-style helper: lift a TestSlice method into a step that runs it
# on every slice declared in plc_spec['slices'].
def slice_mapper_options (method):
    # NOTE(review): the inner wrapper def, the 'overall' initialization and
    # the return statements are not visible in this chunk
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        # any single failing slice makes the whole step fail
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator-style helper: lift a TestSliceSfa method into a step that runs
# it on every SFA slice declared in plc_spec['sfa']['slices_sfa'].
def slice_mapper_options_sfa (method):
    # NOTE(review): the inner wrapper def, the 'overall' initialization and
    # the return statements are not visible in this chunk
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['slices_sfa']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        # fix: 'test_plc' was undefined here - the TestPlc instance is 'self',
        # exactly as in slice_mapper_options above
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
    # ordered list of step names run by default; SEP entries are
    # display-only group separators (see printable_steps)
    'display', 'local_pre', SEP,
    'delete_vs','create_vs','install', 'configure', 'start', SEP,
    'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
    'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP,
    'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
    'kill_all_qemus', 'start_node', SEP,
    # better use of time: do this now that the nodes are taking off
    'plcsh_stress_test', SEP,
    'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
    'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEP,
    'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
    # optionally run sfa later; takes longer, but checks more about nm
    # 'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
    # 'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEP,
    'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEP,
    'check_tcp', 'check_hooks', SEP,
    'force_gather_logs', 'force_local_post',
    # NOTE(review): the list terminator and the header of the second list
    # (steps available on demand, presumably) are not visible in this chunk
    'show_boxes', 'local_list','local_cleanup',SEP,
    'stop', 'vs_start', SEP,
    'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
    'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
    'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
    'db_dump' , 'db_restore', SEP,
    'standby_1 through 20',
117 def printable_steps (list):
118 return " ".join(list).replace(" "+SEP+" "," \\\n")
    # NOTE(review): the body of valid_step is not visible in this chunk
    def valid_step (step):

    def __init__ (self,plc_spec,options):
        # keep the whole spec around - most accessors below read from it
        self.plc_spec=plc_spec
        # NOTE(review): self.options is read below but its assignment is not
        # visible in this chunk
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        # NOTE(review): the guard around this raise (presumably the
        # non-vserver branch) is not visible in this chunk
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)

    # NOTE(review): the 'def' lines for the three accessor fragments below
    # (name / hostname / is_local, judging by what they return) are missing
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

        return self.plc_spec['hostname']

        return self.test_ssh.is_local()
146 # define the API methods on this object through xmlrpc
147 # would help, but not strictly necessary
151 def actual_command_in_guest (self,command):
152 return self.test_ssh.actual_command(self.host_to_guest(command))
    def start_guest (self):
        """Start the guest vserver via ssh to the host box; returns the shell status."""
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def run_in_guest (self,command):
        """Run `command` inside the guest vserver; returns the shell status."""
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        """Run `command` on the host box (through TestSsh); returns its status."""
        return self.test_ssh.run_in_buildname(command)
163 #command gets run in the vserver
164 def host_to_guest(self,command):
165 return "vserver %s exec %s"%(self.vservername,command)
167 #command gets run in the vserver
168 def start_guest_in_host(self):
169 return "vserver %s start"%(self.vservername)
172 def run_in_guest_piped (self,local,remote):
173 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
175 def auth_root (self):
176 return {'Username':self.plc_spec['PLC_ROOT_USER'],
177 'AuthMethod':'password',
178 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
179 'Role' : self.plc_spec['role']
181 def locate_site (self,sitename):
182 for site in self.plc_spec['sites']:
183 if site['site_fields']['name'] == sitename:
185 if site['site_fields']['login_base'] == sitename:
187 raise Exception,"Cannot locate site %s"%sitename
189 def locate_node (self,nodename):
190 for site in self.plc_spec['sites']:
191 for node in site['nodes']:
192 if node['name'] == nodename:
194 raise Exception,"Cannot locate node %s"%nodename
196 def locate_hostname (self,hostname):
197 for site in self.plc_spec['sites']:
198 for node in site['nodes']:
199 if node['node_fields']['hostname'] == hostname:
201 raise Exception,"Cannot locate hostname %s"%hostname
203 def locate_key (self,keyname):
204 for key in self.plc_spec['keys']:
205 if key['name'] == keyname:
207 raise Exception,"Cannot locate key %s"%keyname
209 def locate_slice (self, slicename):
210 for slice in self.plc_spec['slices']:
211 if slice['slice_fields']['name'] == slicename:
213 raise Exception,"Cannot locate slice %s"%slicename
215 def all_sliver_objs (self):
217 for slice_spec in self.plc_spec['slices']:
218 slicename = slice_spec['slice_fields']['name']
219 for nodename in slice_spec['nodenames']:
220 result.append(self.locate_sliver_obj (nodename,slicename))
223 def locate_sliver_obj (self,nodename,slicename):
224 (site,node) = self.locate_node(nodename)
225 slice = self.locate_slice (slicename)
227 test_site = TestSite (self, site)
228 test_node = TestNode (self, test_site,node)
229 # xxx the slice site is assumed to be the node site - mhh - probably harmless
230 test_slice = TestSlice (self, test_site, slice)
231 return TestSliver (self, test_node, test_slice)
233 def locate_first_node(self):
234 nodename=self.plc_spec['slices'][0]['nodenames'][0]
235 (site,node) = self.locate_node(nodename)
236 test_site = TestSite (self, site)
237 test_node = TestNode (self, test_site,node)
240 def locate_first_sliver (self):
241 slice_spec=self.plc_spec['slices'][0]
242 slicename=slice_spec['slice_fields']['name']
243 nodename=slice_spec['nodenames'][0]
244 return self.locate_sliver_obj(nodename,slicename)
246 # all different hostboxes used in this plc
247 def gather_hostBoxes(self):
248 # maps on sites and nodes, return [ (host_box,test_node) ]
250 for site_spec in self.plc_spec['sites']:
251 test_site = TestSite (self,site_spec)
252 for node_spec in site_spec['nodes']:
253 test_node = TestNode (self, test_site, node_spec)
254 if not test_node.is_real():
255 tuples.append( (test_node.host_box(),test_node) )
256 # transform into a dict { 'host_box' -> [ test_node .. ] }
258 for (box,node) in tuples:
259 if not result.has_key(box):
262 result[box].append(node)
265 # a step for checking this stuff
266 def show_boxes (self):
267 'print summary of nodes location'
268 for (box,nodes) in self.gather_hostBoxes().iteritems():
269 print box,":"," + ".join( [ node.name() for node in nodes ] )
    # make this a valid step
    def kill_all_qemus(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
        # NOTE(review): the step's boolean return is not visible in this chunk
    # make this a valid step
    def list_all_qemus(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,self.options.buildname).list_all_qemus()
        # NOTE(review): the step's boolean return is not visible in this chunk

    # kill only the right qemus
    def list_qemus(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the loop body is not visible in this chunk

    # kill only the right qemus
    def kill_qemus(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the loop body is not visible in this chunk
    #################### display config
    # NOTE(review): the enclosing 'def' line of this step (which calls
    # display_pass twice) is not visible in this chunk
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)

    # two-pass dump of the plc spec: different keys are shown per pass
    def display_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            # NOTE(review): the per-key / per-pass dispatch tests are only
            # partly visible in this chunk
                self.display_site_spec(site)
                for node in site['nodes']:
                    self.display_node_spec(node)
            elif key=='initscripts':
                for initscript in val:
                    self.display_initscript_spec (initscript)
                self.display_slice_spec (slice)
                self.display_key_spec (key)
            if key not in ['sites','initscripts','slices','keys', 'sfa']:
                print '+ ',key,':',val
    def display_site_spec (self,site):
        "dump one site spec, one line per interesting field"
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            # NOTE(review): several dispatch tests on k are missing from this chunk
                print '+ ','nodes : ',
                    print node['node_fields']['hostname'],'',
                    print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                # pretty-print nested address structures
                PrettyPrinter(indent=8,depth=2).pprint(v)

    def display_initscript_spec (self,initscript):
        "one-line dump of an initscript spec"
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        "one-line dump of a key spec"
        print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        "dump one slice spec"
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            # NOTE(review): dispatch on the other slice keys is missing here
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
388 def display_node_spec (self,node):
389 print "+ node",node['name'],"host_box=",node['host_box'],
390 print "hostname=",node['node_fields']['hostname'],
391 print "ip=",node['interface_fields']['ip']
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        # NOTE(review): the step's return is not visible in this chunk

    # NOTE(review): presumably a @staticmethod (no self) - the decorator line
    # is not visible in this chunk
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    # NOTE(review): presumably a @staticmethod (no self) - the decorator line
    # is not visible in this chunk
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    def local_pre (self):
        "run site-dependant pre-test script as defined in LocalTestResources"
        # import deferred: LocalTestResources only exists on deployment sites
        from LocalTestResources import local_resources
        return local_resources.step_pre(self)

    def local_post (self):
        "run site-dependant post-test script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_post(self)

    def local_list (self):
        "run site-dependant list script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_list(self)

    def local_cleanup (self):
        "run site-dependant cleanup script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_cleanup(self)
    # NOTE(review): the 'def delete_vs' line is not visible in this chunk
        "vserver delete the test myplc"
        self.run_in_host("vserver --silent %s delete"%self.vservername)
    def create_vs (self):
        "vserver creation (no install done)"
        # NOTE(review): several lines are missing from this chunk: the
        # checkout-failure body, the 'test_env_options' initialization, and
        # the try/except around the reverse DNS lookup
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # run checkout in any case - would do an update if already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        print "Cannot reverse lookup %s"%self.vserverip
        print "This is considered fatal, as this might pollute the test results"
        # assemble and run the vserver-creation command on the host box
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    # NOTE(review): the 'def install' line, the per-personality 'arch'
    # assignments and the 'return' of the final chained expression are not
    # visible in this chunk
        "yum install myplc, noderepo, and the plain bootstrapfs"
        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://yum.pgsqlrpms.org/8.2/pgdg-centos-8.2-4.noarch.rpm")
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        # try to install slicerepo - not fatal yet
        self.run_in_guest("yum -y install slicerepo-%s"%nodefamily)
        self.run_in_guest("yum -y install myplc")==0 and \
            self.run_in_guest("yum -y install noderepo-%s"%nodefamily)==0 and \
            self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0
        # NOTE(review): the enclosing 'def configure' line and part of the
        # configuration-variable list are not visible in this chunk
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # drive plc-config-tty: 'e VAR' then the value, for each variable
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        # show the generated input, then feed it to plc-config-tty in the guest
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        # NOTE(review): the surrounding 'def' lines for the plc start/stop and
        # vs_start step fragments below are not visible in this chunk
        self.run_in_guest('service plc start')

        self.run_in_guest('service plc stop')

        "start the PLC vserver"

    # stores the keys from the config for further use
    def store_keys(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        # NOTE(review): the step's boolean return is not visible in this chunk
553 def clean_keys(self):
554 "removes keys cached in keys/"
555 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def fetch_keys(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): the assignments of 'dir' and 'overall', the mkdir
        # body, and the final return are not visible in this chunk
        if not os.path.isdir(dir):
        vservername=self.vservername
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            # any single failed fetch fails the step
            if self.test_ssh.fetch(src,dst) != 0: overall=False
    # NOTE(review): the 'def sites' line is not visible in this chunk
        "create sites with PLCAPI"
        return self.do_sites()

    def clean_sites (self):
        "delete sites with PLCAPI"
        return self.do_sites(action="delete")

    # shared worker for the sites / clean_sites steps
    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            # NOTE(review): the 'else' header and the method's return are not
            # visible in this chunk
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
    def clean_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        # fetch only the ids, then delete one by one through PLCAPI
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        # NOTE(review): the step's boolean return is not visible in this chunk
    # NOTE(review): the 'def nodes' line is not visible in this chunk
        "create nodes with PLCAPI"
        return self.do_nodes()
    def clean_nodes (self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    # shared worker for the nodes / clean_nodes steps
    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            # NOTE(review): the action dispatch (if/else headers) and the
            # method's return are not visible in this chunk
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
    # thin step wrappers around do_nodegroups
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")
    def clean_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        # NOTE(review): the 'groups_dict' initialization is not visible here
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # accept either a single name or a list of names
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        # 2nd pass: create/delete the tag types, nodegroups and node tags
        # NOTE(review): the add/delete dispatch and most of the try/except
        # scaffolding of this pass are not visible in this chunk
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            tag_type_id = tag_types[0]['tag_type_id']
            tag_type_id = self.apiserver.AddTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                traceback.print_exc()
                print 'node',nodename,'seems to already have tag',nodegroupname
                # check the tag was indeed applied
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['value'])[0]['value']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
701 # return a list of tuples (nodename,qemuname)
702 def all_node_infos (self) :
704 for site_spec in self.plc_spec['sites']:
705 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
706 for node_spec in site_spec['nodes'] ]
709 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        """Poll PLCAPI until all nodes reach target_boot_state, or give up
        after timeout_minutes; stay quiet for the first silent_minutes."""
        if self.options.dry_run:
        # NOTE(review): the dry_run body, the polling-loop header, the sleep
        # and the boolean returns are not visible in this chunk
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        # refresh the recorded status from what PLCAPI reports
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                    # pretend it reached the target so it drops off the list
                    boot_state = target_boot_state
                elif datetime.datetime.now() > graceout:
                    utils.header ("%s still in '%s' state"%(hostname,boot_state))
                    # throttle: only report again one day past the grace period
                    graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        # refresh tocheck: keep only the nodes still off-target
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
        # only useful in empty plcs

    def nodes_booted(self):
        # convenience wrapper: wait for the 'boot' state with default timings
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        """Try to ssh into every node (debug key or root key depending on
        `debug`) until all succeed or timeout_minutes elapse."""
        # NOTE(review): the debug/boot dispatch that sets 'message' and picks
        # one of the two local_key assignments, plus the retry/sleep/return
        # scaffolding, are not visible in this chunk
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        local_key = "keys/%(vservername)s-debug.rsa"%locals()
        local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
            utils.header('Successfully entered root@%s (%s)'%(hostname,message))
            # NOTE(review): removing from node_infos while iterating over it
            # skips the element after each removal - confirm against the
            # complete file
            node_infos.remove(node_info)
            # we will have tried real nodes once, in case they're up - but if not, just skip
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                node_infos.remove(node_info)
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
        # otherwise, sleep for a while
        # only useful in empty plcs

    def nodes_ssh_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=5)

    def nodes_ssh_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=15)
    # NOTE(review): the steps below are node-mapped in the complete file;
    # their decorator lines and ('pass'?) bodies are not visible in this chunk
    def init_node (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        "all nodes: invoke GetBootMedium and store result locally"
    def configure_qemu (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
    def reinstall_node (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
    def export_qemu (self):
        "all nodes: push local node-dep directory on the qemu box"

    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        # run the node-context hook tests on the first node
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        # run the slice-context hook tests on the first sliver
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
845 def do_check_initscripts(self):
847 for slice_spec in self.plc_spec['slices']:
848 if not slice_spec.has_key('initscriptname'):
850 initscript=slice_spec['initscriptname']
851 for nodename in slice_spec['nodenames']:
852 (site,node) = self.locate_node (nodename)
853 # xxx - passing the wrong site - probably harmless
854 test_site = TestSite (self,site)
855 test_slice = TestSlice (self,test_site,slice_spec)
856 test_node = TestNode (self,test_site,node)
857 test_sliver = TestSliver (self, test_node, test_slice)
858 if not test_sliver.check_initscript(initscript):
862 def check_initscripts(self):
863 "check that the initscripts have triggered"
864 return self.do_check_initscripts()
866 def initscripts (self):
867 "create initscripts with PLCAPI"
868 for initscript in self.plc_spec['initscripts']:
869 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
870 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
873 def clean_initscripts (self):
874 "delete initscripts with PLCAPI"
875 for initscript in self.plc_spec['initscripts']:
876 initscript_name = initscript['initscript_fields']['name']
877 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
879 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
880 print initscript_name,'deleted'
882 print 'deletion went wrong - probably did not exist'
    # NOTE(review): the 'def slices' line is not visible in this chunk
        "create slices with PLCAPI"
        return self.do_slices()

    def clean_slices (self):
        "delete slices with PLCAPI"
        return self.do_slices("delete")

    # shared worker for the slices / clean_slices steps
    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            # NOTE(review): the action dispatch (if/else headers) and the
            # method's return are not visible in this chunk
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # the decorator maps this over every slice; the method body itself is
    # provided by TestSlice
    @slice_mapper_options
    def check_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
        # NOTE(review): the ('pass'?) body is not visible in this chunk

    # NOTE(review): decorator lines and bodies of the two node-mapped steps
    # below are not visible in this chunk
    def clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"

    def start_node (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
923 def check_tcp (self):
924 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
925 specs = self.plc_spec['tcp_test']
930 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
931 if not s_test_sliver.run_tcp_server(port,timeout=10):
935 # idem for the client side
936 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
937 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): the initialization of 'command' and the body of the
        # size==1 branch are not visible in this chunk
        command += " -- --check"
        if self.options.size == 1:
        return ( self.run_in_guest(command) == 0)
    # populate runs the same utility without slightly different options
    # in particular runs with --preserve (dont cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries

    def install_sfa(self):
        "yum install sfa, sfa-plc and sfa-client"
        # NOTE(review): the per-personality branch bodies are not visible in
        # this chunk
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception, "Unsupported personality %r"%self.options.personality
        return self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")==0
    def configure_sfa(self):
        # NOTE(review): the docstring and part of the configuration-variable
        # list are not visible in this chunk
        tmpname='%s.sfa-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # drive sfa-config-tty: 'e VAR' then the value, for each SFA variable
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_AGGREGATE_HOST',
                     'SFA_PLC_DB_PASSWORD',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        # show the generated input, then feed it to sfa-config-tty in the guest
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        utils.system('rm %s'%tmpname)
    def import_sfa(self):
        # NOTE(review): the docstring is not visible in this chunk; also
        # 'auth' is computed but unused in the visible lines - confirm
        # against the complete file
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import-plc.py')==0
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def start_sfa(self):
        # start the sfa service inside the guest
        return self.run_in_guest('service sfa start')==0
    def setup_sfa(self):
        "sfi client configuration"
        # NOTE(review): the assignment of 'dir_name', the fileconf.close()
        # calls after each file, and the assignment of 'location' are not
        # visible in this chunk
        if os.path.exists(dir_name):
            utils.system('rm -rf %s'%dir_name)
        utils.system('mkdir %s'%dir_name)
        # private key for the fake PI
        file_name=dir_name + os.sep + 'fake-pi1.pkey'
        fileconf=open(file_name,'w')
        fileconf.write (self.plc_spec['keys'][0]['private'])

        # sfi_config: registry/SM endpoints and identities
        file_name=dir_name + os.sep + 'sfi_config'
        fileconf=open(file_name,'w')
        SFI_AUTH=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']+".main"
        fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
        fileconf.write('\n')
        SFI_USER=SFI_AUTH+'.fake-pi1'
        fileconf.write ("SFI_USER='%s'"%SFI_USER)
        fileconf.write('\n')
        SFI_REGISTRY='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12345/'
        fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
        fileconf.write('\n')
        SFI_SM='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12347/'
        fileconf.write ("SFI_SM='%s'"%SFI_SM)
        fileconf.write('\n')

        # person record used by sfi.py add
        file_name=dir_name + os.sep + 'person.xml'
        fileconf=open(file_name,'w')
        for record in self.plc_spec['sfa']['sfa_person_xml']:
            person_record=record
        fileconf.write(person_record)
        fileconf.write('\n')

        # slice record used by sfi.py add
        file_name=dir_name + os.sep + 'slice.xml'
        fileconf=open(file_name,'w')
        for record in self.plc_spec['sfa']['sfa_slice_xml']:
            # NOTE(review): 'slice_record' is written below but its assignment
            # (presumably slice_record=record) is not visible in this chunk
        #slice_record=self.plc_spec['sfa']['sfa_slice_xml']
        fileconf.write(slice_record)
        fileconf.write('\n')

        # rspec used by sfi.py create
        file_name=dir_name + os.sep + 'slice.rspec'
        fileconf=open(file_name,'w')
        for (key, value) in self.plc_spec['sfa']['sfa_slice_rspec'].items():
            # NOTE(review): the assembly of 'slice_rspec' is not visible here
        fileconf.write(slice_rspec)
        fileconf.write('\n')

        # push the whole directory into the guest
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs(dir_name, remote, recursive=True)

        #utils.system('cat %s'%tmpname)
        utils.system('rm -rf %s'%dir_name)
1066 "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
1068 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1069 success=test_user_sfa.add_user()
1071 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1072 site_spec = self.locate_site (slice_spec['sitename'])
1073 test_site = TestSite(self,site_spec)
1074 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1075 success1=test_slice_sfa.add_slice()
1076 success2=test_slice_sfa.create_slice()
1077 return success and success1 and success2
1079 def update_sfa(self):
1080 "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
1082 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1083 success1=test_user_sfa.update_user()
1085 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1086 site_spec = self.locate_site (slice_spec['sitename'])
1087 test_site = TestSite(self,site_spec)
1088 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1089 success2=test_slice_sfa.update_slice()
1090 return success1 and success2
        "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
        # exercise the four basic sfi read operations against the '.main'
        # authority under the registry root; each must exit 0.
        # NOTE(review): the 'return \' introducing this backslash-continued
        # chain sits on a line not visible in this excerpt — confirm in the file.
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        self.run_in_guest("sfi.py -d /root/.sfi/ list %s.main"%auth)==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ show %s.main"%auth)==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
    # the decorator maps this step over every slice in plc_spec['sfa']['slices_sfa'];
    # the actual ssh check is TestSliceSfa.check_slice_sfa — this body is a stub
    # whose docstring becomes the step description.
    @slice_mapper_options_sfa
    def check_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
1106 def delete_sfa(self):
1107 "run sfi.py delete (on SM), sfi.py remove (on Registry)"
1109 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1110 success1=test_user_sfa.delete_user()
1111 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1112 site_spec = self.locate_site (slice_spec['sitename'])
1113 test_site = TestSite(self,site_spec)
1114 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1115 success2=test_slice_sfa.delete_slice()
1117 return success1 and success2
1121 return self.run_in_guest('service sfa stop')==0
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): 'command' is initialized on a line not visible in this
        # excerpt — confirm in the full file.
        command += " -- --preserve --short-names"
        # first run: domestic entries only
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        # note: 'remote' is rebound here from a path string to a boolean result
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)
1137 def gather_logs (self):
1138 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1139 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1140 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1141 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1142 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1143 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1145 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1146 self.gather_var_logs ()
1148 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1149 self.gather_pgsql_logs ()
1151 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1152 for site_spec in self.plc_spec['sites']:
1153 test_site = TestSite (self,site_spec)
1154 for node_spec in site_spec['nodes']:
1155 test_node=TestNode(self,test_site,node_spec)
1156 test_node.gather_qemu_logs()
1158 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1159 self.gather_nodes_var_logs()
1161 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1162 self.gather_slivers_var_logs()
1165 def gather_slivers_var_logs(self):
1166 for test_sliver in self.all_sliver_objs():
1167 remote = test_sliver.tar_var_logs()
1168 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1169 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1170 utils.system(command)
1173 def gather_var_logs (self):
1174 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1175 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1176 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1177 utils.system(command)
1178 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1179 utils.system(command)
1181 def gather_pgsql_logs (self):
1182 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1183 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1184 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1185 utils.system(command)
1187 def gather_nodes_var_logs (self):
1188 for site_spec in self.plc_spec['sites']:
1189 test_site = TestSite (self,site_spec)
1190 for node_spec in site_spec['nodes']:
1191 test_node=TestNode(self,test_site,node_spec)
1192 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1193 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1194 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1195 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1196 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): the surrounding try/except skeleton is on lines not
        # visible in this excerpt; 'name' apparently falls back to a
        # date-based value when options.dbname is unset or not a string —
        # confirm against the full file.
            name=self.options.dbname
            if not isinstance(name,StringTypes):
            t=datetime.datetime.now()
            return "/root/%s-%s.sql"%(database,name)
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): "planetab5" (missing an 'l') looks like a typo, but
        # db_restore uses the same string so dump/restore round-trips; if it is
        # ever corrected, fix both methods together.
        dump=self.dbfile("planetab5")
        # pg_dump runs inside the guest; output lands in /root of the PLC
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
1219 def db_restore(self):
1220 'restore the planetlab5 DB - looks broken, but run -n might help'
1221 dump=self.dbfile("planetab5")
1222 ##stop httpd service
1223 self.run_in_guest('service httpd stop')
1224 # xxx - need another wrapper
1225 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1226 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1227 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1228 ##starting httpd service
1229 self.run_in_guest('service httpd start')
1231 utils.header('Database restored from ' + dump)
    # Twenty placeholder steps standby_<N>, one per minute count 1..20.
    # Each is presumably preceded by a @standby_generic decorator on the
    # alternating lines not visible in this excerpt; the decorator (defined at
    # the top of the file) parses <N> out of the function name and sleeps that
    # many minutes, so these bodies are intentionally empty.
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass