7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
20 from TestSliceSfa import TestSliceSfa
21 from TestUserSfa import TestUserSfa
23 # step methods must take (self) and return a boolean (options is a member of the class)
25 def standby(minutes,dry_run):
26 utils.header('Entering StandBy for %d mn'%minutes)
30 time.sleep(60*minutes)
# Factory for the standby_<N> steps: the wrapped function's name encodes
# the number of minutes to wait (e.g. standby_5 -> 5 minutes).
# NOTE(review): the inner wrapper definition is elided here; 'self' below
# belongs to that wrapper, not to standby_generic itself.
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
# Decorator: turn a TestNode method into a TestPlc step that applies it to
# every node in the spec, and-ing all results into one boolean.
# NOTE(review): the inner wrapper def and its return are elided from this view.
def node_mapper (method):
    # look the step up by name on TestNode
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator: turn a TestSlice method into a TestPlc step that applies it to
# every slice in the spec (passing self.options along).
# NOTE(review): the inner wrapper def and its return are elided from this view.
def slice_mapper_options (method):
    # look the step up by name on TestSlice
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator: same as slice_mapper_options, but maps a TestSliceSfa method
# over the SFA slices declared in the spec.
# NOTE(review): the inner wrapper def and its return are elided from this view.
def slice_mapper_options_sfa (method):
    # look the step up by name on TestSliceSfa
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['slices_sfa']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        # BUGFIX: was TestSliceSfa(test_plc,...) - 'test_plc' is undefined in
        # this scope; the plc instance is 'self' (cf. slice_mapper_options)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
87 'display', 'local_pre', SEP,
88 'delete','create','install', 'configure', 'start', SEP,
89 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
90 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP,
91 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
92 'kill_all_qemus', 'start_node', SEP,
93 # better use of time: do this now that the nodes are taking off
94 'plcsh_stress_test', SEP,
95 'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
96 'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEP,
97 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
98 # optionally run sfa later; takes longer, but checks more about nm
99 # 'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
100 # 'setup_sfa', 'add_sfa', 'update_sfa', 'view_sfa', SEP,
101 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEP,
102 'check_tcp', 'check_hooks', SEP,
103 'force_gather_logs', 'force_local_post',
106 'show_boxes', 'local_list','local_cleanup',SEP,
107 'stop', 'vs_start', SEP,
108 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
109 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
111 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
112 'db_dump' , 'db_restore', SEP,
113 'standby_1 through 20',
def printable_steps (list):
    # flatten the step list to one string, breaking the line (with a
    # trailing backslash) wherever a SEP marker occurs
    flat = " ".join(list)
    return flat.replace(" "+SEP+" "," \\\n")
120 def valid_step (step):
def __init__ (self,plc_spec,options):
    # full plc description (sites, nodes, slices, keys, sfa ...)
    self.plc_spec=plc_spec
    # ssh helper targeting the plc box, relative to the build directory
    self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
    # the myplc under test runs inside a vserver on the plc box
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    # PLCAPI endpoint of that myplc
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    # NOTE(review): this raise sits in an elided non-vserver branch
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)
137 name=self.plc_spec['name']
138 return "%s.%s"%(name,self.vservername)
141 return self.plc_spec['hostname']
144 return self.test_ssh.is_local()
146 # define the API methods on this object through xmlrpc
147 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # wrap the guest-side command so it can be issued from the test master
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    # boot the vserver, driving the host box over ssh
    start_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(start_command))
def run_in_guest (self,command):
    # run inside the vserver and return the exit status
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
# run on the plc host box itself (not inside the vserver)
def run_in_host (self,command):
    return self.test_ssh.run_in_buildname(command)
# turn a command into its host-side form that executes it inside the vserver
def host_to_guest(self,command):
    return "vserver " + self.vservername + " exec " + command
# command gets run on the host box, and starts the vserver
def start_guest_in_host(self):
    return "vserver " + self.vservername + " start"
def run_in_guest_piped (self,local,remote):
    # pipe a local command's output into a command run inside the guest
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
# PLCAPI auth struct for the root account, built from the plc spec
# NOTE(review): the closing brace of this dict is elided from this view
def auth_root (self):
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
# find a site spec by name or login_base; raises if not found
# NOTE(review): the 'return site' lines under each test are elided
def locate_site (self,sitename):
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename
# find a node spec by its spec name; raises if not found
# NOTE(review): the return (presumably (site,node), cf. callers) is elided
def locate_node (self,nodename):
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
# find a node spec by actual hostname; raises if not found
# NOTE(review): the return (cf. callers: (site_spec,node_spec)) is elided
def locate_hostname (self,hostname):
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
# find a key spec by name; raises if not found (return line elided)
def locate_key (self,keyname):
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
# find a slice spec by slice name; raises if not found (return line elided)
def locate_slice (self, slicename):
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
# build a TestSliver for every (slice,node) pair in the spec
# NOTE(review): the accumulator init and final return are elided
def all_sliver_objs (self):
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    # resolve the raw specs first, then wrap them into Test* objects
    (site_spec,node_spec) = self.locate_node(nodename)
    slice_spec = self.locate_slice (slicename)
    test_site = TestSite (self, site_spec)
    test_node = TestNode (self, test_site, node_spec)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice_spec)
    return TestSliver (self, test_node, test_slice)
# the first node of the first slice - handy for one-shot checks
# NOTE(review): the return (presumably test_node) is elided from this view
def locate_first_node(self):
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    # first (node,slice) pair from the spec, wrapped as a TestSliver
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
# NOTE(review): the init of 'tuples'/'result' and the final return are elided
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu (non-real) nodes live on a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
265 # a step for checking this stuff
266 def show_boxes (self):
267 'print summary of nodes location'
268 for (box,nodes) in self.gather_hostBoxes().iteritems():
269 print box,":"," + ".join( [ node.name() for node in nodes ] )
# make this a valid step
def kill_all_qemus(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # brute force: wipe every qemu running on each involved host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        first_nodedir = nodes[0].nodedir()
        TestBox(box,self.options.buildname).kill_all_qemus(first_nodedir)
# make this a valid step
def list_all_qemus(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # brute force version: list everything running on that host box
        TestBox(box,self.options.buildname).list_all_qemus()
# list only the right qemus (the loop body is elided from this view)
def list_qemus(self):
    'list qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
# kill only the right qemus (the loop body is elided from this view)
def kill_qemus(self):
    'kill the qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
308 #################### display config
310 "show test configuration after localization"
311 self.display_pass (1)
312 self.display_pass (2)
# detailed dump of the spec; called twice (passno 1 and 2) by show()
# NOTE(review): the 'if passno...' and 'if key==...' branch headers are
# elided from this view - indentation below is approximate
def display_pass (self,passno):
    for (key,val) in self.plc_spec.iteritems():
            self.display_site_spec(site)
            for node in site['nodes']:
                self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
                self.display_slice_spec (slice)
                self.display_key_spec (key)
            if key not in ['sites','initscripts','slices','keys', 'sfa']:
                print '+ ',key,':',val
# pretty-print one site spec
# NOTE(review): several 'if k == ...' branch headers are elided from this
# view - indentation below is approximate
def display_site_spec (self,site):
    print '+ ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
            print '+ ','nodes : ',
                print node['node_fields']['hostname'],'',
                print user['name'],'',
        elif k == 'site_fields':
            print '+ login_base',':',v['login_base']
        elif k == 'address_fields':
            PrettyPrinter(indent=8,depth=2).pprint(v)
360 def display_initscript_spec (self,initscript):
361 print '+ ======== initscript',initscript['initscript_fields']['name']
363 def display_key_spec (self,key):
364 print '+ ======== key',key['name']
# pretty-print one slice spec
# NOTE(review): other 'if k==...' branches are elided - indentation approximate
def display_slice_spec (self,slice):
    print '+ ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '+ fields',':',
            print 'max_nodes=',v['max_nodes'],
388 def display_node_spec (self,node):
389 print "+ node",node['name'],"host_box=",node['host_box'],
390 print "hostname=",node['node_fields']['hostname'],
391 print "ip=",node['interface_fields']['ip']
# another entry point for just showing the boxes involved
def display_mapping (self):
    # delegate to the spec-level helper
    TestPlc.display_mapping_plc(self.plc_spec)
# print the plc-level mapping, then each node's
# NOTE(review): takes the spec dict, not self - presumably a staticmethod;
# the decorator line is elided from this view
def display_mapping_plc (plc_spec):
    print '+ MyPLC',plc_spec['name']
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)
409 def display_mapping_node (node_spec):
410 print '+ NODE %s'%(node_spec['name'])
411 print '+\tqemu box %s'%node_spec['host_box']
412 print '+\thostname=%s'%node_spec['node_fields']['hostname']
def local_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    from LocalTestResources import local_resources as resources
    return resources.step_pre(self)
def local_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    from LocalTestResources import local_resources as resources
    return resources.step_post(self)
def local_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    from LocalTestResources import local_resources as resources
    return resources.step_list(self)
def local_cleanup (self):
    "run site-dependant cleanup script as defined in LocalTestResources"
    from LocalTestResources import local_resources as resources
    return resources.step_cleanup(self)
435 "vserver delete the test myplc"
436 self.run_in_host("vserver --silent %s delete"%self.vservername)
441 "vserver creation (no install done)"
443 # a full path for the local calls
444 build_dir=os.path.dirname(sys.argv[0])
445 # sometimes this is empty - set to "." in such a case
446 if not build_dir: build_dir="."
447 build_dir += "/build"
449 # use a standard name - will be relative to remote buildname
451 # run checkout in any case - would do an update if already exists
452 build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
453 if self.run_in_host(build_checkout) != 0:
455 # the repo url is taken from arch-rpms-url
456 # with the last step (i386) removed
457 repo_url = self.options.arch_rpms_url
458 for level in [ 'arch' ]:
459 repo_url = os.path.dirname(repo_url)
460 # pass the vbuild-nightly options to vtest-init-vserver
462 test_env_options += " -p %s"%self.options.personality
463 test_env_options += " -d %s"%self.options.pldistro
464 test_env_options += " -f %s"%self.options.fcdistro
465 script="vtest-init-vserver.sh"
466 vserver_name = self.vservername
467 vserver_options="--netdev eth0 --interface %s"%self.vserverip
469 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
470 vserver_options += " --hostname %s"%vserver_hostname
473 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
474 return self.run_in_host(create_vserver) == 0
478 "yum install myplc, noderepo, and the plain bootstrapfs"
480 # workaround for getting pgsql8.2 on centos5
481 if self.options.fcdistro == "centos5":
482 self.run_in_guest("rpm -Uvh http://yum.pgsqlrpms.org/8.2/pgdg-centos-8.2-4.noarch.rpm")
483 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
485 if self.options.personality == "linux32":
487 elif self.options.personality == "linux64":
490 raise Exception, "Unsupported personality %r"%self.options.personality
492 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
494 self.run_in_guest("yum -y install myplc")==0 and \
495 self.run_in_guest("yum -y install noderepo-%s"%nodefamily)==0 and \
496 self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0
501 tmpname='%s.plc-config-tty'%(self.name())
502 fileconf=open(tmpname,'w')
503 for var in [ 'PLC_NAME',
507 'PLC_MAIL_SUPPORT_ADDRESS',
510 # Above line was added for integrating SFA Testing
516 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
517 fileconf.write('w\n')
518 fileconf.write('q\n')
520 utils.system('cat %s'%tmpname)
521 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
522 utils.system('rm %s'%tmpname)
527 self.run_in_guest('service plc start')
532 self.run_in_guest('service plc stop')
536 "start the PLC vserver"
540 # stores the keys from the config for further use
541 def store_keys(self):
542 "stores test users ssh keys in keys/"
543 for key_spec in self.plc_spec['keys']:
544 TestKey(self,key_spec).store_key()
def clean_keys(self):
    "removes keys cached in keys/"
    # BUGFIX: os.path is a module and is not callable - the intent is the
    # directory of the running script, i.e. os.path.dirname(sys.argv[0])
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
# NOTE(review): the 'dir' computation, mkdir branch and final return are
# elided from this view
def fetch_keys(self):
    "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
    if not os.path.isdir(dir):
    vservername=self.vservername
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        # the vserver's filesystem is visible from the host under /vservers
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
568 "create sites with PLCAPI"
569 return self.do_sites()
def clean_sites (self):
    "delete sites with PLCAPI"
    return self.do_sites("delete")
# shared implementation behind the sites/clean_sites steps
# NOTE(review): the 'else:' branch header and final return are elided -
# the creation lines below belong to that elided branch
def do_sites (self,action="add"):
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
def clean_all_sites (self):
    "Delete all sites in PLC, and related objects"
    print 'auth_root',self.auth_root()
    # fetch only the ids, then delete one by one
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
599 "create nodes with PLCAPI"
600 return self.do_nodes()
def clean_nodes (self):
    "delete nodes with PLCAPI"
    return self.do_nodes("delete")
# shared implementation behind the nodes/clean_nodes steps
# NOTE(review): the action test, the 'else:' header and the final return
# are elided - the two loops below belong to the delete/create branches
def do_nodes (self,action="add"):
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
            utils.header("Deleting nodes in site %s"%test_site.name())
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                utils.header("Deleting %s"%test_node.name())
                test_node.delete_node()
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    return self.do_nodegroups(action="add")
def clean_nodegroups (self):
    "delete nodegroups with PLCAPI"
    return self.do_nodegroups(action="delete")
# create nodegroups if needed, and populate
# NOTE(review): many lines (accumulator init, try/except headers, branch
# headers, return) are elided from this view - indentation is approximate
def do_nodegroups (self, action="add"):
    # 1st pass to scan contents: nodegroup name -> list of node names
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # a single name is accepted as well as a list
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    # 2nd pass: create/delete through PLCAPI
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                tag_type_id = tag_types[0]['tag_type_id']
                # create the tag type on the fly when missing
                tag_type_id = self.apiserver.AddTagType(auth,
                                                        {'tagname':nodegroupname,
                                                         'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            # create the nodegroup itself when missing
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                    self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    traceback.print_exc()
                    print 'node',nodename,'seems to already have tag',nodegroupname
                # check it indeed took
                    expect_yes = self.apiserver.GetNodeTags(auth,
                                                            {'hostname':nodename,
                                                             'tagname':nodegroupname},
                                                            ['value'])[0]['value']
                    if expect_yes != "yes":
                        print 'Mismatch node tag on node',nodename,'got',expect_yes
                    if not self.options.dry_run:
                        print 'Cannot find tag',nodegroupname,'on node',nodename
            # delete action
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
# return a list of tuples (nodename,qemuname)
# NOTE(review): the 'node_infos' init and final return are elided
def all_node_infos (self) :
    for site_spec in self.plc_spec['sites']:
        node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                        for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # hostnames only, dropping the qemu box from each (hostname,box) pair
    return [ hostname for (hostname,qemubox) in self.all_node_infos() ]
# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
# Poll GetNodes until every node reaches target_boot_state or the timeout
# elapses.  NOTE(review): the dry-run early return, the outer while loop,
# some branch headers and the returns are elided - indentation approximate
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    if self.options.dry_run:
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                    # pretend the node reached the state so it gets dropped
                    boot_state = target_boot_state
                elif datetime.datetime.now() > graceout:
                    utils.header ("%s still in '%s' state"%(hostname,boot_state))
                    # only warn once a day past the grace period
                    graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        # shrink the worklist to the not-yet-converged nodes
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
    # only useful in empty plcs
def nodes_booted(self):
    # wait up to 30 minutes for 'boot', staying quiet for the first 20
    return self.nodes_check_boot_state('boot',30,20)
# Poll ssh access into all nodes (debug key or regular root key) until all
# answer or the timeout elapses.  NOTE(review): the 'if debug:'/'else:'
# headers, the outer while loop and the returns are elided from this view
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
    # debug mode uses the per-plc debug key fetched by fetch_keys
    local_key = "keys/%(vservername)s-debug.rsa"%locals()
    # production mode uses the first configured user key
    local_key = "keys/key1.rsa"
    node_infos = self.all_node_infos()
    utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
    for (nodename,qemuname) in node_infos:
        utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                # this is a success - drop the node from the worklist
                node_infos.remove(node_info)
                # we will have tried real nodes once, in case they're up - but if not, just skip
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                    node_infos.remove(node_info)
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
        # otherwise, sleep for a while
    # only useful in empty plcs
def nodes_ssh_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    return self.check_nodes_ssh(True,30,5)
def nodes_ssh_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    return self.check_nodes_ssh(False,30,15)
# NOTE(review): body and @node_mapper decorator elided; mapped over all nodes
def init_node (self):
    "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
813 "all nodes: invoke GetBootMedium and store result locally"
# NOTE(review): body and @node_mapper decorator elided; mapped over all nodes
def configure_qemu (self):
    "all nodes: compute qemu config qemu.conf and store it locally"
# NOTE(review): body and @node_mapper decorator elided; mapped over all nodes
def reinstall_node (self):
    "all nodes: mark PLCAPI boot_state as reinstall"
# NOTE(review): body and @node_mapper decorator elided; mapped over all nodes
def export_qemu (self):
    "all nodes: push local node-dep directory on the qemu box"
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-side hook scripts on the first node of the first slice
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the slice-side hook scripts on the first sliver
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuits: the sliver hooks only run when the node hooks pass
    node_ok = self.check_hooks_node()
    return node_ok and self.check_hooks_sliver()
# check every slice that declares an initscript on all its nodes
# NOTE(review): the 'overall' init/update and final return are elided
def do_check_initscripts(self):
    for slice_spec in self.plc_spec['slices']:
        # slices without an initscript are skipped (continue elided)
        if not slice_spec.has_key('initscriptname'):
        initscript=slice_spec['initscriptname']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript(initscript):
# step wrapper around do_check_initscripts
def check_initscripts(self):
    "check that the initscripts have triggered"
    return self.do_check_initscripts()
# NOTE(review): the final 'return True'-style line is elided from this view
def initscripts (self):
    "create initscripts with PLCAPI"
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
# NOTE(review): the try/except headers around the delete and the final
# return are elided from this view - indentation is approximate
def clean_initscripts (self):
    "delete initscripts with PLCAPI"
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
881 "create slices with PLCAPI"
882 return self.do_slices()
def clean_slices (self):
    "delete slices with PLCAPI"
    return self.do_slices(action="delete")
# shared implementation behind the slices/clean_slices steps
# NOTE(review): the action test, 'else:' header and final return are elided
def do_slices (self, action="add"):
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
            utils.header("Deleting slices in site %s"%test_site.name())
            test_slice.delete_slice()
            utils.pprint("Creating slice",slice)
            test_slice.create_slice()
            utils.header('Created Slice %s'%slice['slice_fields']['name'])
# NOTE(review): body elided; the decorator maps TestSlice.check_slice
# over every slice in the spec
@slice_mapper_options
def check_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
# NOTE(review): body and mapping decorator elided from this view
def clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
# NOTE(review): body and @node_mapper decorator elided; mapped over all nodes
def start_node (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
# NOTE(review): the loop over specs, port extraction and returns are
# elided from this view - indentation is approximate
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
        # set up the server side first
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
        # idem for the client side
        c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# NOTE(review): the command initialization and a size-dependent option
# are elided from this view
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    # the vserver's filesystem is visible from the host under /vservers
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    command += " -- --check"
    if self.options.size == 1:
    return ( self.run_in_guest(command) == 0)
947 # populate runs the same utility without slightly different options
948 # in particular runs with --preserve (dont cleanup) and without --check
949 # also it gets run twice, once with the --foreign option for creating fake foreign entries
# NOTE(review): the per-personality arch assignments are elided - the
# raise below belongs to an elided final 'else:' branch
def install_sfa(self):
    "yum install sfa, sfa-plc and sfa-client"
    if self.options.personality == "linux32":
    elif self.options.personality == "linux64":
        raise Exception, "Unsupported personality %r"%self.options.personality
    return self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")==0
# Drive sfa-config-tty through a generated script file, like the plc
# configuration step does.  NOTE(review): the docstring, part of the
# variable list and the final return are elided from this view
def configure_sfa(self):
    tmpname='%s.sfa-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_AGGREGATE_HOST',
                 'SFA_PLC_DB_PASSWORD',
        # one 'e <var>\n<value>' stanza per setting
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
    utils.system('rm %s'%tmpname)
def import_sfa(self):
    # NOTE(review): 'auth' is only used by the commented-out key copy below
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return self.run_in_guest('sfa-import-plc.py')==0
# self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
997 return self.run_in_guest('service sfa start')==0
1000 "sfi client configuration"
1002 if os.path.exists(dir_name):
1003 utils.system('rm -rf %s'%dir_name)
1004 utils.system('mkdir %s'%dir_name)
1005 file_name=dir_name + os.sep + 'fake-pi1.pkey'
1006 fileconf=open(file_name,'w')
1007 fileconf.write (self.plc_spec['keys'][0]['private'])
1010 file_name=dir_name + os.sep + 'sfi_config'
1011 fileconf=open(file_name,'w')
1012 SFI_AUTH=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']+".main"
1013 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1014 fileconf.write('\n')
1015 SFI_USER=SFI_AUTH+'.fake-pi1'
1016 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1017 fileconf.write('\n')
1018 SFI_REGISTRY='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12345/'
1019 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1020 fileconf.write('\n')
1021 SFI_SM='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12347/'
1022 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1023 fileconf.write('\n')
1026 file_name=dir_name + os.sep + 'person.xml'
1027 fileconf=open(file_name,'w')
1028 for record in self.plc_spec['sfa']['sfa_person_xml']:
1029 person_record=record
1030 fileconf.write(person_record)
1031 fileconf.write('\n')
1034 file_name=dir_name + os.sep + 'slice.xml'
1035 fileconf=open(file_name,'w')
1036 for record in self.plc_spec['sfa']['sfa_slice_xml']:
1038 #slice_record=self.plc_spec['sfa']['sfa_slice_xml']
1039 fileconf.write(slice_record)
1040 fileconf.write('\n')
1043 file_name=dir_name + os.sep + 'slice.rspec'
1044 fileconf=open(file_name,'w')
1046 for (key, value) in self.plc_spec['sfa']['sfa_slice_rspec'].items():
1048 fileconf.write(slice_rspec)
1049 fileconf.write('\n')
1052 remote="/vservers/%s/%s"%(self.vservername,location)
1053 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
1055 #utils.system('cat %s'%tmpname)
1056 utils.system('rm -rf %s'%dir_name)
1060 "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
1062 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1063 success=test_user_sfa.add_user()
1065 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1066 site_spec = self.locate_site (slice_spec['sitename'])
1067 test_site = TestSite(self,site_spec)
1068 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1069 success1=test_slice_sfa.add_slice()
1070 success2=test_slice_sfa.create_slice()
1071 return success and success1 and success2
def update_sfa(self):
    "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
    # BUG FIX: 'test_plc' was not defined in this scope (NameError at runtime);
    # the TestPlc instance is 'self', as everywhere else in this class
    test_user_sfa = TestUserSfa(self, self.plc_spec['sfa'])
    overall = test_user_sfa.update_user()
    # update every sfa slice; accumulate failures instead of keeping only the
    # last iteration's status, and avoid an unset variable when the list is empty
    for slice_spec in self.plc_spec['sfa']['slices_sfa']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self, site_spec)
        test_slice_sfa = TestSliceSfa(self, test_site, slice_spec)
        if not test_slice_sfa.update_slice():
            overall = False
    return overall
1087 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1088 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1090 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.main"%auth)==0 and \
1091 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.main"%auth)==0 and \
1092 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1093 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
# step method: the slice_mapper_options_sfa decorator replaces this body with a
# loop that runs TestSliceSfa.check_slice_sfa over every slice in
# plc_spec['sfa']['slices_sfa']; the docstring below is kept as the step's doc
@slice_mapper_options_sfa
def check_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
def delete_sfa(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry)"
    # BUG FIX: 'test_plc' was not defined in this scope (NameError at runtime);
    # the TestPlc instance is 'self', as everywhere else in this class
    test_user_sfa = TestUserSfa(self, self.plc_spec['sfa'])
    overall = test_user_sfa.delete_user()
    # delete every sfa slice; accumulate failures instead of keeping only the
    # last iteration's status, and avoid an unset variable when the list is empty
    for slice_spec in self.plc_spec['sfa']['slices_sfa']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self, site_spec)
        test_slice_sfa = TestSliceSfa(self, test_site, slice_spec)
        if not test_slice_sfa.delete_slice():
            overall = False
    return overall
1115 return self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test script inside the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote = "/vservers/%s/%s"%(self.vservername, location)
    self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
    # BUG FIX: 'command' was used with += before ever being assigned
    # (NameError); initialize it from the script's in-guest location
    command = location
    command += " -- --preserve --short-names"
    # first run: local objects only
    local_ok = (self.run_in_guest(command) == 0)
    # second run with --foreign
    command += ' --foreign'
    foreign_ok = (self.run_in_guest(command) == 0)
    return local_ok and foreign_ok
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a) plc's /var/log
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b) plc's postgresql logs
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2) one qemu log per node, walking every site in the spec
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3) each node's /var/log
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4) slivers' /var/log, best effort
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    "retrieve each sliver's /var/log into logs/sliver.var-log.<sliver>/"
    for sliver in self.all_sliver_objs():
        # command that tars the sliver's /var/log on the remote side
        tar_source = sliver.tar_var_logs()
        dest = "logs/sliver.var-log.%s"%sliver.name()
        utils.system("mkdir -p %s"%dest)
        # pipe the remote tarball straight into a local untar
        utils.system("%s | tar -C %s -xf -"%(tar_source,dest))
def gather_var_logs (self):
    "copy the plc's /var/log into logs/myplc.var-log.<plcname>/"
    dest = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p %s"%dest)
    # stream a tarball of /var/log out of the guest and untar it locally
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -"%dest)
    # make the httpd subdir readable/traversable by everybody
    utils.system("chmod a+r,a+x %s/httpd"%dest)
def gather_pgsql_logs (self):
    "copy the plc's postgresql logs into logs/myplc.pgsql-log.<plcname>/"
    dest = "logs/myplc.pgsql-log.%s"%self.name()
    utils.system("mkdir -p %s"%dest)
    # tar pg_log inside the guest and untar it locally
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -"%dest)
def gather_nodes_var_logs (self):
    "copy each node's /var/log into logs/node.var-log.<nodename>/"
    for site in self.plc_spec['sites']:
        site_obj = TestSite (self,site)
        for node in site['nodes']:
            node_obj = TestNode (self,site_obj,node)
            dest = "logs/node.var-log.%s"%node_obj.name()
            utils.system("mkdir -p %s"%dest)
            # ssh to the node with the test key and stream /var/log back
            ssh = TestSsh (node_obj.name(),key="keys/key1.rsa")
            pipeline = ssh.actual_command("tar -C /var/log -cf - .") + "| tar -C %s -xf -"%dest
            utils.system(pipeline)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    "return the sql dump/restore filename: /root/<database>-<name>.sql"
    # BUG FIX: as written, the isinstance check had no consequence and the
    # date was computed but never used, so a missing/non-string options.dbname
    # left 'name' unset. Use options.dbname when it is a string, otherwise
    # fall back to today's date as the file-name suffix.
    try:
        name = self.options.dbname
        if not isinstance(name, StringTypes):
            raise Exception
    except Exception:
        name = str(datetime.datetime.now().date())
    return "/root/%s-%s.sql"%(database, name)
1207 'dump the planetlab5 DB in /root in the PLC - filename has time'
1208 dump=self.dbfile("planetab5")
1209 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1210 utils.header('Dumped planetlab5 database in %s'%dump)
def db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # NOTE(review): "planetab5" (sic) appears to be the spelling the dump step
    # uses for the file name as well - both must keep agreeing on it
    dumpfile = self.dbfile("planetab5")
    # httpd must not hold db connections while the database gets dropped
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    # recreate an empty database, then replay the dump into it
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dumpfile)
    # bring httpd back up
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dumpfile)
# placeholder steps: presumably each of these is wrapped by @standby_generic
# (decorator lines not visible in this chunk), which derives the number of
# minutes to sleep from the method name (int of the part after "standby_"),
# so the bodies are intentionally empty
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass