7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
20 from TestSliceSfa import TestSliceSfa
21 from TestUserSfa import TestUserSfa
23 # step methods must take (self) and return a boolean (options is a member of the class)
25 def standby(minutes,dry_run):
26 utils.header('Entering StandBy for %d mn'%minutes)
30 time.sleep(60*minutes)
33 def standby_generic (func):
35 minutes=int(func.__name__.split("_")[1])
36 return standby(minutes,self.options.dry_run)
39 def node_mapper (method):
42 node_method = TestNode.__dict__[method.__name__]
43 for site_spec in self.plc_spec['sites']:
44 test_site = TestSite (self,site_spec)
45 for node_spec in site_spec['nodes']:
46 test_node = TestNode (self,test_site,node_spec)
47 if not node_method(test_node): overall=False
49 # restore the doc text
50 actual.__doc__=method.__doc__
53 def slice_mapper_options (method):
56 slice_method = TestSlice.__dict__[method.__name__]
57 for slice_spec in self.plc_spec['slices']:
58 site_spec = self.locate_site (slice_spec['sitename'])
59 test_site = TestSite(self,site_spec)
60 test_slice=TestSlice(self,test_site,slice_spec)
61 if not slice_method(test_slice,self.options): overall=False
63 # restore the doc text
64 actual.__doc__=method.__doc__
67 def slice_mapper_options_sfa (method):
71 slice_method = TestSliceSfa.__dict__[method.__name__]
72 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
73 site_spec = self.locate_site (slice_spec['sitename'])
74 test_site = TestSite(self,site_spec)
75 test_slice=TestSliceSfa(test_plc,test_site,slice_spec)
76 if not slice_method(test_slice,self.options): overall=False
78 # restore the doc text
79 actual.__doc__=method.__doc__
87 'display', 'local_pre', SEP,
88 'delete','create','install', 'configure', 'start', SEP,
89 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
90 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP,
91 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
92 'kill_all_qemus', 'start_node', SEP,
93 # better use of time: do this now that the nodes are taking off
94 'plcsh_stress_test', SEP,
95 'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
96 'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEP,
97 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
98 # optionally run sfa later; takes longer, but checks more about nm
99 # 'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
100 # 'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEP,
101 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEP,
102 'check_tcp', 'check_hooks', SEP,
103 'force_gather_logs', 'force_local_post',
106 'show_boxes', 'local_list','local_cleanup',SEP,
107 'stop', 'vs_start', SEP,
108 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
109 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
111 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
112 'db_dump' , 'db_restore', SEP,
113 'standby_1 through 20',
117 def printable_steps (list):
118 return " ".join(list).replace(" "+SEP+" "," \\\n")
120 def valid_step (step):
123 def __init__ (self,plc_spec,options):
124 self.plc_spec=plc_spec
126 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
128 self.vserverip=plc_spec['vserverip']
129 self.vservername=plc_spec['vservername']
130 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
133 raise Exception,'chroot-based myplc testing is deprecated'
134 self.apiserver=TestApiserver(self.url,options.dry_run)
137 name=self.plc_spec['name']
138 return "%s.%s"%(name,self.vservername)
141 return self.plc_spec['hostname']
144 return self.test_ssh.is_local()
146 # define the API methods on this object through xmlrpc
147 # would help, but not strictly necessary
151 def actual_command_in_guest (self,command):
152 return self.test_ssh.actual_command(self.host_to_guest(command))
154 def start_guest (self):
155 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
157 def run_in_guest (self,command):
158 return utils.system(self.actual_command_in_guest(command))
160 def run_in_host (self,command):
161 return self.test_ssh.run_in_buildname(command)
163 #command gets run in the vserver
164 def host_to_guest(self,command):
165 return "vserver %s exec %s"%(self.vservername,command)
167 #command gets run in the vserver
168 def start_guest_in_host(self):
169 return "vserver %s start"%(self.vservername)
172 def run_in_guest_piped (self,local,remote):
173 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
175 def auth_root (self):
176 return {'Username':self.plc_spec['PLC_ROOT_USER'],
177 'AuthMethod':'password',
178 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
179 'Role' : self.plc_spec['role']
181 def locate_site (self,sitename):
182 for site in self.plc_spec['sites']:
183 if site['site_fields']['name'] == sitename:
185 if site['site_fields']['login_base'] == sitename:
187 raise Exception,"Cannot locate site %s"%sitename
189 def locate_node (self,nodename):
190 for site in self.plc_spec['sites']:
191 for node in site['nodes']:
192 if node['name'] == nodename:
194 raise Exception,"Cannot locate node %s"%nodename
196 def locate_hostname (self,hostname):
197 for site in self.plc_spec['sites']:
198 for node in site['nodes']:
199 if node['node_fields']['hostname'] == hostname:
201 raise Exception,"Cannot locate hostname %s"%hostname
203 def locate_key (self,keyname):
204 for key in self.plc_spec['keys']:
205 if key['name'] == keyname:
207 raise Exception,"Cannot locate key %s"%keyname
209 def locate_slice (self, slicename):
210 for slice in self.plc_spec['slices']:
211 if slice['slice_fields']['name'] == slicename:
213 raise Exception,"Cannot locate slice %s"%slicename
215 def all_sliver_objs (self):
217 for slice_spec in self.plc_spec['slices']:
218 slicename = slice_spec['slice_fields']['name']
219 for nodename in slice_spec['nodenames']:
220 result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        """Build and return the TestSliver helper for (nodename, slicename).

        Looks both names up in plc_spec, then wraps the specs into the
        TestSite/TestNode/TestSlice helper objects."""
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        # convenience: the TestNode for the first node of the first declared slice
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # NOTE(review): the return statement is not visible in this excerpt --
        # presumably returns test_node; confirm against the full file
240 def locate_first_sliver (self):
241 slice_spec=self.plc_spec['slices'][0]
242 slicename=slice_spec['slice_fields']['name']
243 nodename=slice_spec['nodenames'][0]
244 return self.locate_sliver_obj(nodename,slicename)
246 # all different hostboxes used in this plc
247 def gather_hostBoxes(self):
248 # maps on sites and nodes, return [ (host_box,test_node) ]
250 for site_spec in self.plc_spec['sites']:
251 test_site = TestSite (self,site_spec)
252 for node_spec in site_spec['nodes']:
253 test_node = TestNode (self, test_site, node_spec)
254 if not test_node.is_real():
255 tuples.append( (test_node.host_box(),test_node) )
256 # transform into a dict { 'host_box' -> [ test_node .. ] }
258 for (box,node) in tuples:
259 if not result.has_key(box):
262 result[box].append(node)
265 # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        # one line per host box: "<box> : node1 + node2 + ..."
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
272 # make this a valid step
    def kill_all_qemus(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
        # NOTE(review): any trailing return is not visible in this excerpt
282 # make this a valid step
    def list_all_qemus(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # brute force: list every qemu on that host box, not just ours
            TestBox(box,self.options.buildname).list_all_qemus()
290 # kill only the right qemus
291 def list_qemus(self):
292 'list qemu instances for our nodes'
293 for (box,nodes) in self.gather_hostBoxes().iteritems():
294 # the fine-grain version
299 # kill only the right qemus
300 def kill_qemus(self):
301 'kill the qemu instances for our nodes'
302 for (box,nodes) in self.gather_hostBoxes().iteritems():
303 # the fine-grain version
308 #################### display config
310 "show test configuration after localization"
311 self.display_pass (1)
312 self.display_pass (2)
316 def display_pass (self,passno):
317 for (key,val) in self.plc_spec.iteritems():
321 self.display_site_spec(site)
322 for node in site['nodes']:
323 self.display_node_spec(node)
324 elif key=='initscripts':
325 for initscript in val:
326 self.display_initscript_spec (initscript)
329 self.display_slice_spec (slice)
332 self.display_key_spec (key)
334 if key not in ['sites','initscripts','slices','keys', 'sfa']:
335 print '+ ',key,':',val
337 def display_site_spec (self,site):
338 print '+ ======== site',site['site_fields']['name']
339 for (k,v) in site.iteritems():
342 print '+ ','nodes : ',
344 print node['node_fields']['hostname'],'',
350 print user['name'],'',
352 elif k == 'site_fields':
353 print '+ login_base',':',v['login_base']
354 elif k == 'address_fields':
358 PrettyPrinter(indent=8,depth=2).pprint(v)
360 def display_initscript_spec (self,initscript):
361 print '+ ======== initscript',initscript['initscript_fields']['name']
363 def display_key_spec (self,key):
364 print '+ ======== key',key['name']
366 def display_slice_spec (self,slice):
367 print '+ ======== slice',slice['slice_fields']['name']
368 for (k,v) in slice.iteritems():
381 elif k=='slice_fields':
382 print '+ fields',':',
383 print 'max_nodes=',v['max_nodes'],
388 def display_node_spec (self,node):
389 print "+ node",node['name'],"host_box=",node['host_box'],
390 print "hostname=",node['node_fields']['hostname'],
391 print "ip=",node['interface_fields']['ip']
394 # another entry point for just showing the boxes involved
395 def display_mapping (self):
396 TestPlc.display_mapping_plc(self.plc_spec)
400 def display_mapping_plc (plc_spec):
401 print '+ MyPLC',plc_spec['name']
402 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
403 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
404 for site_spec in plc_spec['sites']:
405 for node_spec in site_spec['nodes']:
406 TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        # NOTE(review): takes a bare node_spec, no self -- presumably decorated
        # @staticmethod in the full file (decorator line not visible here)
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
414 def local_pre (self):
415 "run site-dependant pre-test script as defined in LocalTestResources"
416 from LocalTestResources import local_resources
417 return local_resources.step_pre(self)
419 def local_post (self):
420 "run site-dependant post-test script as defined in LocalTestResources"
421 from LocalTestResources import local_resources
422 return local_resources.step_post(self)
424 def local_list (self):
425 "run site-dependant list script as defined in LocalTestResources"
426 from LocalTestResources import local_resources
427 return local_resources.step_list(self)
429 def local_cleanup (self):
430 "run site-dependant cleanup script as defined in LocalTestResources"
431 from LocalTestResources import local_resources
432 return local_resources.step_cleanup(self)
435 "vserver delete the test myplc"
436 self.run_in_host("vserver --silent %s delete"%self.vservername)
441 "vserver creation (no install done)"
443 # a full path for the local calls
444 build_dir=os.path.dirname(sys.argv[0])
445 # sometimes this is empty - set to "." in such a case
446 if not build_dir: build_dir="."
447 build_dir += "/build"
449 # use a standard name - will be relative to remote buildname
451 # run checkout in any case - would do an update if already exists
452 build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
453 if self.run_in_host(build_checkout) != 0:
455 # the repo url is taken from arch-rpms-url
456 # with the last step (i386) removed
457 repo_url = self.options.arch_rpms_url
458 for level in [ 'arch' ]:
459 repo_url = os.path.dirname(repo_url)
460 # pass the vbuild-nightly options to vtest-init-vserver
462 test_env_options += " -p %s"%self.options.personality
463 test_env_options += " -d %s"%self.options.pldistro
464 test_env_options += " -f %s"%self.options.fcdistro
465 script="vtest-init-vserver.sh"
466 vserver_name = self.vservername
467 vserver_options="--netdev eth0 --interface %s"%self.vserverip
469 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
470 vserver_options += " --hostname %s"%vserver_hostname
473 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
474 return self.run_in_host(create_vserver) == 0
478 "yum install myplc, noderepo, and the plain bootstrapfs"
480 # workaround for getting pgsql8.2 on centos5
481 if self.options.fcdistro == "centos5":
482 self.run_in_guest("rpm -Uvh http://yum.pgsqlrpms.org/8.2/pgdg-centos-8.2-4.noarch.rpm")
483 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
485 if self.options.personality == "linux32":
487 elif self.options.personality == "linux64":
490 raise Exception, "Unsupported personality %r"%self.options.personality
492 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
494 # try to install slicerepo - not fatal yet
495 self.run_in_guest("yum -y install slicerepo-%s"%nodefamily)
498 self.run_in_guest("yum -y install myplc")==0 and \
499 self.run_in_guest("yum -y install noderepo-%s"%nodefamily)==0 and \
500 self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0
505 tmpname='%s.plc-config-tty'%(self.name())
506 fileconf=open(tmpname,'w')
507 for var in [ 'PLC_NAME',
511 'PLC_MAIL_SUPPORT_ADDRESS',
514 # Above line was added for integrating SFA Testing
520 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
521 fileconf.write('w\n')
522 fileconf.write('q\n')
524 utils.system('cat %s'%tmpname)
525 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
526 utils.system('rm %s'%tmpname)
531 self.run_in_guest('service plc start')
536 self.run_in_guest('service plc stop')
540 "start the PLC vserver"
544 # stores the keys from the config for further use
    def store_keys(self):
        "stores test users ssh keys in keys/"
        # one TestKey wrapper per key spec; store_key writes it under keys/
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
551 def clean_keys(self):
552 "removes keys cached in keys/"
553 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
555 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
556 # for later direct access to the nodes
557 def fetch_keys(self):
558 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
560 if not os.path.isdir(dir):
562 vservername=self.vservername
564 prefix = 'debug_ssh_key'
565 for ext in [ 'pub', 'rsa' ] :
566 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
567 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
568 if self.test_ssh.fetch(src,dst) != 0: overall=False
572 "create sites with PLCAPI"
573 return self.do_sites()
575 def clean_sites (self):
576 "delete sites with PLCAPI"
577 return self.do_sites(action="delete")
579 def do_sites (self,action="add"):
580 for site_spec in self.plc_spec['sites']:
581 test_site = TestSite (self,site_spec)
582 if (action != "add"):
583 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
584 test_site.delete_site()
585 # deleted with the site
586 #test_site.delete_users()
589 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
590 test_site.create_site()
591 test_site.create_users()
    def clean_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        # fetch only the site_ids to keep the GetSites call lightweight
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            # per the docstring, related objects presumably go away with the
            # site server-side -- confirm against the PLCAPI DeleteSite docs
            self.apiserver.DeleteSite(self.auth_root(),site_id)
603 "create nodes with PLCAPI"
604 return self.do_nodes()
605 def clean_nodes (self):
606 "delete nodes with PLCAPI"
607 return self.do_nodes(action="delete")
609 def do_nodes (self,action="add"):
610 for site_spec in self.plc_spec['sites']:
611 test_site = TestSite (self,site_spec)
613 utils.header("Deleting nodes in site %s"%test_site.name())
614 for node_spec in site_spec['nodes']:
615 test_node=TestNode(self,test_site,node_spec)
616 utils.header("Deleting %s"%test_node.name())
617 test_node.delete_node()
619 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
620 for node_spec in site_spec['nodes']:
621 utils.pprint('Creating node %s'%node_spec,node_spec)
622 test_node = TestNode (self,test_site,node_spec)
623 test_node.create_node ()
626 def nodegroups (self):
627 "create nodegroups with PLCAPI"
628 return self.do_nodegroups("add")
629 def clean_nodegroups (self):
630 "delete nodegroups with PLCAPI"
631 return self.do_nodegroups("delete")
633 # create nodegroups if needed, and populate
634 def do_nodegroups (self, action="add"):
635 # 1st pass to scan contents
637 for site_spec in self.plc_spec['sites']:
638 test_site = TestSite (self,site_spec)
639 for node_spec in site_spec['nodes']:
640 test_node=TestNode (self,test_site,node_spec)
641 if node_spec.has_key('nodegroups'):
642 nodegroupnames=node_spec['nodegroups']
643 if isinstance(nodegroupnames,StringTypes):
644 nodegroupnames = [ nodegroupnames ]
645 for nodegroupname in nodegroupnames:
646 if not groups_dict.has_key(nodegroupname):
647 groups_dict[nodegroupname]=[]
648 groups_dict[nodegroupname].append(test_node.name())
649 auth=self.auth_root()
651 for (nodegroupname,group_nodes) in groups_dict.iteritems():
653 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
654 # first, check if the nodetagtype is here
655 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
657 tag_type_id = tag_types[0]['tag_type_id']
659 tag_type_id = self.apiserver.AddTagType(auth,
660 {'tagname':nodegroupname,
661 'description': 'for nodegroup %s'%nodegroupname,
664 print 'located tag (type)',nodegroupname,'as',tag_type_id
666 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
668 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
669 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
670 # set node tag on all nodes, value='yes'
671 for nodename in group_nodes:
673 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
675 traceback.print_exc()
676 print 'node',nodename,'seems to already have tag',nodegroupname
679 expect_yes = self.apiserver.GetNodeTags(auth,
680 {'hostname':nodename,
681 'tagname':nodegroupname},
682 ['value'])[0]['value']
683 if expect_yes != "yes":
684 print 'Mismatch node tag on node',nodename,'got',expect_yes
687 if not self.options.dry_run:
688 print 'Cannot find tag',nodegroupname,'on node',nodename
692 print 'cleaning nodegroup',nodegroupname
693 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
695 traceback.print_exc()
699 # return a list of tuples (nodename,qemuname)
700 def all_node_infos (self) :
702 for site_spec in self.plc_spec['sites']:
703 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
704 for node_spec in site_spec['nodes'] ]
707 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
709 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
710 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
711 if self.options.dry_run:
715 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
716 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
717 # the nodes that haven't checked yet - start with a full list and shrink over time
718 tocheck = self.all_hostnames()
719 utils.header("checking nodes %r"%tocheck)
720 # create a dict hostname -> status
721 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
724 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
726 for array in tocheck_status:
727 hostname=array['hostname']
728 boot_state=array['boot_state']
729 if boot_state == target_boot_state:
730 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
732 # if it's a real node, never mind
733 (site_spec,node_spec)=self.locate_hostname(hostname)
734 if TestNode.is_real_model(node_spec['node_fields']['model']):
735 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
737 boot_state = target_boot_state
738 elif datetime.datetime.now() > graceout:
739 utils.header ("%s still in '%s' state"%(hostname,boot_state))
740 graceout=datetime.datetime.now()+datetime.timedelta(1)
741 status[hostname] = boot_state
743 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
746 if datetime.datetime.now() > timeout:
747 for hostname in tocheck:
748 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
750 # otherwise, sleep for a while
752 # only useful in empty plcs
755 def nodes_booted(self):
756 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
758 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
760 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
761 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
762 vservername=self.vservername
765 local_key = "keys/%(vservername)s-debug.rsa"%locals()
768 local_key = "keys/key1.rsa"
769 node_infos = self.all_node_infos()
770 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
771 for (nodename,qemuname) in node_infos:
772 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
773 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
774 (timeout_minutes,silent_minutes,period))
776 for node_info in node_infos:
777 (hostname,qemuname) = node_info
778 # try to run 'hostname' in the node
779 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
780 # don't spam logs - show the command only after the grace period
781 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
783 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
785 node_infos.remove(node_info)
787 # we will have tried real nodes once, in case they're up - but if not, just skip
788 (site_spec,node_spec)=self.locate_hostname(hostname)
789 if TestNode.is_real_model(node_spec['node_fields']['model']):
790 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
791 node_infos.remove(node_info)
794 if datetime.datetime.now() > timeout:
795 for (hostname,qemuname) in node_infos:
796 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
798 # otherwise, sleep for a while
800 # only useful in empty plcs
803 def nodes_ssh_debug(self):
804 "Tries to ssh into nodes in debug mode with the debug ssh key"
805 return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=5)
807 def nodes_ssh_boot(self):
808 "Tries to ssh into nodes in production mode with the root ssh key"
809 return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=15)
812 def init_node (self):
813 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
817 "all nodes: invoke GetBootMedium and store result locally"
820 def configure_qemu (self):
821 "all nodes: compute qemu config qemu.conf and store it locally"
824 def reinstall_node (self):
825 "all nodes: mark PLCAPI boot_state as reinstall"
828 def export_qemu (self):
829 "all nodes: push local node-dep directory on the qemu box"
832 ### check hooks : invoke scripts from hooks/{node,slice}
833 def check_hooks_node (self):
834 return self.locate_first_node().check_hooks()
835 def check_hooks_sliver (self) :
836 return self.locate_first_sliver().check_hooks()
838 def check_hooks (self):
839 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
840 return self.check_hooks_node() and self.check_hooks_sliver()
843 def do_check_initscripts(self):
845 for slice_spec in self.plc_spec['slices']:
846 if not slice_spec.has_key('initscriptname'):
848 initscript=slice_spec['initscriptname']
849 for nodename in slice_spec['nodenames']:
850 (site,node) = self.locate_node (nodename)
851 # xxx - passing the wrong site - probably harmless
852 test_site = TestSite (self,site)
853 test_slice = TestSlice (self,test_site,slice_spec)
854 test_node = TestNode (self,test_site,node)
855 test_sliver = TestSliver (self, test_node, test_slice)
856 if not test_sliver.check_initscript(initscript):
    def check_initscripts(self):
        "check that the initscripts have triggered"
        # thin step wrapper: the real work happens in do_check_initscripts
        return self.do_check_initscripts()
864 def initscripts (self):
865 "create initscripts with PLCAPI"
866 for initscript in self.plc_spec['initscripts']:
867 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
868 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
871 def clean_initscripts (self):
872 "delete initscripts with PLCAPI"
873 for initscript in self.plc_spec['initscripts']:
874 initscript_name = initscript['initscript_fields']['name']
875 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
877 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
878 print initscript_name,'deleted'
880 print 'deletion went wrong - probably did not exist'
885 "create slices with PLCAPI"
886 return self.do_slices()
888 def clean_slices (self):
889 "delete slices with PLCAPI"
890 return self.do_slices("delete")
892 def do_slices (self, action="add"):
893 for slice in self.plc_spec['slices']:
894 site_spec = self.locate_site (slice['sitename'])
895 test_site = TestSite(self,site_spec)
896 test_slice=TestSlice(self,test_site,slice)
898 utils.header("Deleting slices in site %s"%test_site.name())
899 test_slice.delete_slice()
901 utils.pprint("Creating slice",slice)
902 test_slice.create_slice()
903 utils.header('Created Slice %s'%slice['slice_fields']['name'])
906 @slice_mapper_options
907 def check_slice(self):
908 "tries to ssh-enter the slice with the user key, to ensure slice creation"
912 def clear_known_hosts (self):
913 "remove test nodes entries from the local known_hosts file"
917 def start_node (self) :
918 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
921 def check_tcp (self):
922 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
923 specs = self.plc_spec['tcp_test']
928 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
929 if not s_test_sliver.run_tcp_server(port,timeout=10):
933 # idem for the client side
934 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
935 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
939 def plcsh_stress_test (self):
940 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
941 # install the stress-test in the plc image
942 location = "/usr/share/plc_api/plcsh_stress_test.py"
943 remote="/vservers/%s/%s"%(self.vservername,location)
944 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
946 command += " -- --check"
947 if self.options.size == 1:
949 return ( self.run_in_guest(command) == 0)
951 # populate runs the same utility without slightly different options
952 # in particular runs with --preserve (dont cleanup) and without --check
953 # also it gets run twice, once with the --foreign option for creating fake foreign entries
956 def install_sfa(self):
957 "yum install sfa, sfa-plc and sfa-client"
958 if self.options.personality == "linux32":
960 elif self.options.personality == "linux64":
963 raise Exception, "Unsupported personality %r"%self.options.personality
964 return self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")==0
967 def configure_sfa(self):
969 tmpname='%s.sfa-config-tty'%(self.name())
970 fileconf=open(tmpname,'w')
971 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
972 'SFA_REGISTRY_LEVEL1_AUTH',
974 'SFA_AGGREGATE_HOST',
980 'SFA_PLC_DB_PASSWORD',
982 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
983 fileconf.write('w\n')
984 fileconf.write('R\n')
985 fileconf.write('q\n')
987 utils.system('cat %s'%tmpname)
988 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
989 utils.system('rm %s'%tmpname)
992 def import_sfa(self):
994 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
995 return self.run_in_guest('sfa-import-plc.py')==0
997 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1001 return self.run_in_guest('service sfa start')==0
1003 def setup_sfa(self):
1004 "sfi client configuration"
1006 if os.path.exists(dir_name):
1007 utils.system('rm -rf %s'%dir_name)
1008 utils.system('mkdir %s'%dir_name)
1009 file_name=dir_name + os.sep + 'fake-pi1.pkey'
1010 fileconf=open(file_name,'w')
1011 fileconf.write (self.plc_spec['keys'][0]['private'])
1014 file_name=dir_name + os.sep + 'sfi_config'
1015 fileconf=open(file_name,'w')
1016 SFI_AUTH=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']+".main"
1017 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1018 fileconf.write('\n')
1019 SFI_USER=SFI_AUTH+'.fake-pi1'
1020 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1021 fileconf.write('\n')
1022 SFI_REGISTRY='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12345/'
1023 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1024 fileconf.write('\n')
1025 SFI_SM='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12347/'
1026 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1027 fileconf.write('\n')
1030 file_name=dir_name + os.sep + 'person.xml'
1031 fileconf=open(file_name,'w')
1032 for record in self.plc_spec['sfa']['sfa_person_xml']:
1033 person_record=record
1034 fileconf.write(person_record)
1035 fileconf.write('\n')
1038 file_name=dir_name + os.sep + 'slice.xml'
1039 fileconf=open(file_name,'w')
1040 for record in self.plc_spec['sfa']['sfa_slice_xml']:
1042 #slice_record=self.plc_spec['sfa']['sfa_slice_xml']
1043 fileconf.write(slice_record)
1044 fileconf.write('\n')
1047 file_name=dir_name + os.sep + 'slice.rspec'
1048 fileconf=open(file_name,'w')
1050 for (key, value) in self.plc_spec['sfa']['sfa_slice_rspec'].items():
1052 fileconf.write(slice_rspec)
1053 fileconf.write('\n')
1056 remote="/vservers/%s/%s"%(self.vservername,location)
1057 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
1059 #utils.system('cat %s'%tmpname)
1060 utils.system('rm -rf %s'%dir_name)
1064 "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
1066 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1067 success=test_user_sfa.add_user()
1069 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1070 site_spec = self.locate_site (slice_spec['sitename'])
1071 test_site = TestSite(self,site_spec)
1072 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1073 success1=test_slice_sfa.add_slice()
1074 success2=test_slice_sfa.create_slice()
1075 return success and success1 and success2
1077 def update_sfa(self):
1078 "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
1080 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1081 success1=test_user_sfa.update_user()
1083 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1084 site_spec = self.locate_site (slice_spec['sitename'])
1085 test_site = TestSite(self,site_spec)
1086 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1087 success2=test_slice_sfa.update_slice()
1088 return success1 and success2
1091 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1092 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1094 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.main"%auth)==0 and \
1095 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.main"%auth)==0 and \
1096 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1097 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
    # stub step: slice_mapper_options_sfa looks up the same-named method on
    # TestSliceSfa and runs it over every slice in plc_spec['sfa']['slices_sfa']
    @slice_mapper_options_sfa
    def check_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
1104 def delete_sfa(self):
1105 "run sfi.py delete (on SM), sfi.py remove (on Registry)"
1107 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1108 success1=test_user_sfa.delete_user()
1109 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1110 site_spec = self.locate_site (slice_spec['sitename'])
1111 test_site = TestSite(self,site_spec)
1112 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1113 success2=test_slice_sfa.delete_slice()
1115 return success1 and success2
        # stop the sfa service inside the plc guest; True when the command exits 0
        return self.run_in_guest('service sfa stop')==0
1121 def populate (self):
1122 "creates random entries in the PLCAPI"
1123 # install the stress-test in the plc image
1124 location = "/usr/share/plc_api/plcsh_stress_test.py"
1125 remote="/vservers/%s/%s"%(self.vservername,location)
1126 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1128 command += " -- --preserve --short-names"
1129 local = (self.run_in_guest(command) == 0);
1130 # second run with --foreign
1131 command += ' --foreign'
1132 remote = (self.run_in_guest(command) == 0);
1133 return ( local and remote)
1135 def gather_logs (self):
1136 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1137 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1138 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1139 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1140 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1141 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1143 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1144 self.gather_var_logs ()
1146 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1147 self.gather_pgsql_logs ()
1149 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1150 for site_spec in self.plc_spec['sites']:
1151 test_site = TestSite (self,site_spec)
1152 for node_spec in site_spec['nodes']:
1153 test_node=TestNode(self,test_site,node_spec)
1154 test_node.gather_qemu_logs()
1156 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1157 self.gather_nodes_var_logs()
1159 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1160 self.gather_slivers_var_logs()
1163 def gather_slivers_var_logs(self):
1164 for test_sliver in self.all_sliver_objs():
1165 remote = test_sliver.tar_var_logs()
1166 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1167 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1168 utils.system(command)
1171 def gather_var_logs (self):
1172 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1173 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1174 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1175 utils.system(command)
1176 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1177 utils.system(command)
1179 def gather_pgsql_logs (self):
1180 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1181 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1182 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1183 utils.system(command)
1185 def gather_nodes_var_logs (self):
1186 for site_spec in self.plc_spec['sites']:
1187 test_site = TestSite (self,site_spec)
1188 for node_spec in site_spec['nodes']:
1189 test_node=TestNode(self,test_site,node_spec)
1190 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1191 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1192 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1193 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1194 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): this view is elided - the original presumably wraps the
        # lookup in try/except and falls back to a date-derived name (via the
        # datetime below) when options.dbname is missing or not a string;
        # confirm against the full file before editing
        name=self.options.dbname
        if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): "planetab5" (missing an 'l') only mislabels the dump
        # file name, not the database being dumped; db_restore uses the same
        # label so the pair stays consistent - fix both together or neither
        dump=self.dbfile("planetab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
1217 def db_restore(self):
1218 'restore the planetlab5 DB - looks broken, but run -n might help'
1219 dump=self.dbfile("planetab5")
1220 ##stop httpd service
1221 self.run_in_guest('service httpd stop')
1222 # xxx - need another wrapper
1223 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1224 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1225 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1226 ##starting httpd service
1227 self.run_in_guest('service httpd start')
1229 utils.header('Database restored from ' + dump)
    # placeholder steps standby_<n>: standby_generic (defined at the top of
    # the file) derives <n> minutes of sleep from the method name, so the
    # bodies are empty on purpose.
    # NOTE(review): the @standby_generic decorator lines are presumably just
    # above each def but are not visible in this view - confirm in the full file
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass