7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
20 from TestSliceSfa import TestSliceSfa
21 from TestUserSfa import TestUserSfa
23 # step methods must take (self) and return a boolean (options is a member of the class)
25 def standby(minutes,dry_run):
26 utils.header('Entering StandBy for %d mn'%minutes)
30 time.sleep(60*minutes)
33 def standby_generic (func):
35 minutes=int(func.__name__.split("_")[1])
36 return standby(minutes,self.options.dry_run)
39 def node_mapper (method):
42 node_method = TestNode.__dict__[method.__name__]
43 for site_spec in self.plc_spec['sites']:
44 test_site = TestSite (self,site_spec)
45 for node_spec in site_spec['nodes']:
46 test_node = TestNode (self,test_site,node_spec)
47 if not node_method(test_node): overall=False
49 # restore the doc text
50 actual.__doc__=method.__doc__
53 def slice_mapper_options (method):
56 slice_method = TestSlice.__dict__[method.__name__]
57 for slice_spec in self.plc_spec['slices']:
58 site_spec = self.locate_site (slice_spec['sitename'])
59 test_site = TestSite(self,site_spec)
60 test_slice=TestSlice(self,test_site,slice_spec)
61 if not slice_method(test_slice,self.options): overall=False
63 # restore the doc text
64 actual.__doc__=method.__doc__
67 def slice_mapper_options_sfa (method):
71 slice_method = TestSliceSfa.__dict__[method.__name__]
72 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
73 site_spec = self.locate_site (slice_spec['sitename'])
74 test_site = TestSite(self,site_spec)
75 test_slice=TestSliceSfa(test_plc,test_site,slice_spec)
76 if not slice_method(test_slice,self.options): overall=False
78 # restore the doc text
79 actual.__doc__=method.__doc__
87 'display', 'local_pre', SEP,
88 'delete','create','install', 'configure', 'start', SEP,
89 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
90 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', SEP,
91 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
92 'kill_all_qemus', 'start_node', SEP,
93 # better use of time: do this now that the nodes are taking off
94 'plcsh_stress_test', SEP,
95 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
96 'install_sfa', 'configure_sfa', 'import_sfa', 'start_sfa', SEP,
97 'setup_sfa', 'add_sfa', 'update_sfa', SEP,
98 'view_sfa', 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEP,
99 'check_tcp', 'check_hooks', SEP,
100 'force_gather_logs', 'force_local_post',
103 'fresh_install', 'stop', 'vs_start', SEP,
104 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
105 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
107 'show_boxes', 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
108 'db_dump' , 'db_restore', SEP,
109 'local_list','local_cleanup',SEP,
110 'standby_1 through 20',
114 def printable_steps (list):
115 return " ".join(list).replace(" "+SEP+" "," \\\n")
117 def valid_step (step):
120 def __init__ (self,plc_spec,options):
121 self.plc_spec=plc_spec
123 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
125 self.vserverip=plc_spec['vserverip']
126 self.vservername=plc_spec['vservername']
127 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
130 raise Exception,'chroot-based myplc testing is deprecated'
131 self.apiserver=TestApiserver(self.url,options.dry_run)
134 name=self.plc_spec['name']
135 return "%s.%s"%(name,self.vservername)
138 return self.plc_spec['hostname']
141 return self.test_ssh.is_local()
143 # define the API methods on this object through xmlrpc
144 # would help, but not strictly necessary
148 def actual_command_in_guest (self,command):
149 return self.test_ssh.actual_command(self.host_to_guest(command))
151 def start_guest (self):
152 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
154 def run_in_guest (self,command):
155 return utils.system(self.actual_command_in_guest(command))
    def run_in_host (self,command):
        # run `command` on the plc's host box (NOT inside the vserver);
        # returns the exit status as reported by TestSsh
        return self.test_ssh.run_in_buildname(command)
160 #command gets run in the vserver
161 def host_to_guest(self,command):
162 return "vserver %s exec %s"%(self.vservername,command)
164 #command gets run in the vserver
165 def start_guest_in_host(self):
166 return "vserver %s start"%(self.vservername)
169 def run_in_guest_piped (self,local,remote):
170 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
172 def auth_root (self):
173 return {'Username':self.plc_spec['PLC_ROOT_USER'],
174 'AuthMethod':'password',
175 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
176 'Role' : self.plc_spec['role']
178 def locate_site (self,sitename):
179 for site in self.plc_spec['sites']:
180 if site['site_fields']['name'] == sitename:
182 if site['site_fields']['login_base'] == sitename:
184 raise Exception,"Cannot locate site %s"%sitename
186 def locate_node (self,nodename):
187 for site in self.plc_spec['sites']:
188 for node in site['nodes']:
189 if node['name'] == nodename:
191 raise Exception,"Cannot locate node %s"%nodename
193 def locate_hostname (self,hostname):
194 for site in self.plc_spec['sites']:
195 for node in site['nodes']:
196 if node['node_fields']['hostname'] == hostname:
198 raise Exception,"Cannot locate hostname %s"%hostname
200 def locate_key (self,keyname):
201 for key in self.plc_spec['keys']:
202 if key['name'] == keyname:
204 raise Exception,"Cannot locate key %s"%keyname
206 def locate_slice (self, slicename):
207 for slice in self.plc_spec['slices']:
208 if slice['slice_fields']['name'] == slicename:
210 raise Exception,"Cannot locate slice %s"%slicename
212 def all_sliver_objs (self):
214 for slice_spec in self.plc_spec['slices']:
215 slicename = slice_spec['slice_fields']['name']
216 for nodename in slice_spec['nodenames']:
217 result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        # build a TestSliver object for (nodename,slicename);
        # propagates the exception from locate_node/locate_slice if either name is unknown
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
230 def locate_first_node(self):
231 nodename=self.plc_spec['slices'][0]['nodenames'][0]
232 (site,node) = self.locate_node(nodename)
233 test_site = TestSite (self, site)
234 test_node = TestNode (self, test_site,node)
237 def locate_first_sliver (self):
238 slice_spec=self.plc_spec['slices'][0]
239 slicename=slice_spec['slice_fields']['name']
240 nodename=slice_spec['nodenames'][0]
241 return self.locate_sliver_obj(nodename,slicename)
243 # all different hostboxes used in this plc
244 def gather_hostBoxes(self):
245 # maps on sites and nodes, return [ (host_box,test_node) ]
247 for site_spec in self.plc_spec['sites']:
248 test_site = TestSite (self,site_spec)
249 for node_spec in site_spec['nodes']:
250 test_node = TestNode (self, test_site, node_spec)
251 if not test_node.is_real():
252 tuples.append( (test_node.host_box(),test_node) )
253 # transform into a dict { 'host_box' -> [ test_node .. ] }
255 for (box,node) in tuples:
256 if not result.has_key(box):
259 result[box].append(node)
262 # a step for checking this stuff
263 def show_boxes (self):
264 for (box,nodes) in self.gather_hostBoxes().iteritems():
265 print box,":"," + ".join( [ node.name() for node in nodes ] )
268 # make this a valid step
269 def kill_all_qemus(self):
270 "all qemu boxes: kill all running qemus (even of former runs)"
271 # this is the brute force version, kill all qemus on that host box
272 for (box,nodes) in self.gather_hostBoxes().iteritems():
273 # pass the first nodename, as we don't push template-qemu on testboxes
274 nodedir=nodes[0].nodedir()
275 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
278 # make this a valid step
279 def list_all_qemus(self):
280 for (box,nodes) in self.gather_hostBoxes().iteritems():
281 # this is the brute force version, kill all qemus on that host box
282 TestBox(box,self.options.buildname).list_all_qemus()
285 # kill only the right qemus
286 def list_qemus(self):
287 for (box,nodes) in self.gather_hostBoxes().iteritems():
288 # the fine-grain version
293 # kill only the right qemus
294 def kill_qemus(self):
295 for (box,nodes) in self.gather_hostBoxes().iteritems():
296 # the fine-grain version
301 #################### display config
303 "show test configuration after localization"
304 self.display_pass (1)
305 self.display_pass (2)
309 def display_pass (self,passno):
310 for (key,val) in self.plc_spec.iteritems():
314 self.display_site_spec(site)
315 for node in site['nodes']:
316 self.display_node_spec(node)
317 elif key=='initscripts':
318 for initscript in val:
319 self.display_initscript_spec (initscript)
322 self.display_slice_spec (slice)
325 self.display_key_spec (key)
327 if key not in ['sites','initscripts','slices','keys']:
328 print '+ ',key,':',val
330 def display_site_spec (self,site):
331 print '+ ======== site',site['site_fields']['name']
332 for (k,v) in site.iteritems():
335 print '+ ','nodes : ',
337 print node['node_fields']['hostname'],'',
343 print user['name'],'',
345 elif k == 'site_fields':
346 print '+ login_base',':',v['login_base']
347 elif k == 'address_fields':
351 PrettyPrinter(indent=8,depth=2).pprint(v)
    def display_initscript_spec (self,initscript):
        # one-line summary of an initscript spec (name only)
        print '+ ======== initscript',initscript['initscript_fields']['name']
    def display_key_spec (self,key):
        # one-line summary of a key spec (name only)
        print '+ ======== key',key['name']
359 def display_slice_spec (self,slice):
360 print '+ ======== slice',slice['slice_fields']['name']
361 for (k,v) in slice.iteritems():
374 elif k=='slice_fields':
375 print '+ fields',':',
376 print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        # one-line summary of a node spec: qemu host box, hostname and IP
        # (trailing commas keep everything on a single output line)
        print "+ node",node['name'],"host_box=",node['host_box'],
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
387 # another entry point for just showing the boxes involved
388 def display_mapping (self):
389 TestPlc.display_mapping_plc(self.plc_spec)
    def display_mapping_plc (plc_spec):
        # helper taking a plain plc_spec (no self - presumably a staticmethod,
        # the decorator is not visible here): show where the plc and its nodes are hosted
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        # helper taking a plain node_spec (no self - presumably a staticmethod,
        # the decorator is not visible here): show the node's qemu box and hostname
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    def local_pre (self):
        "run site-dependant pre-test script as defined in LocalTestResources"
        # import locally so a missing LocalTestResources only breaks the steps that need it
        from LocalTestResources import local_resources
        return local_resources.step_pre(self)
    def local_post (self):
        "run site-dependant post-test script as defined in LocalTestResources"
        # import locally so a missing LocalTestResources only breaks the steps that need it
        from LocalTestResources import local_resources
        return local_resources.step_post(self)
    def local_list (self):
        "run site-dependant list script as defined in LocalTestResources"
        # import locally so a missing LocalTestResources only breaks the steps that need it
        from LocalTestResources import local_resources
        return local_resources.step_list(self)
    def local_cleanup (self):
        "run site-dependant cleanup script as defined in LocalTestResources"
        # import locally so a missing LocalTestResources only breaks the steps that need it
        from LocalTestResources import local_resources
        return local_resources.step_cleanup(self)
428 "vserver delete the test myplc"
429 self.run_in_host("vserver --silent %s delete"%self.vservername)
434 "vserver creation (no install done)"
436 # a full path for the local calls
437 build_dir=os.path.dirname(sys.argv[0])
438 # sometimes this is empty - set to "." in such a case
439 if not build_dir: build_dir="."
440 build_dir += "/build"
442 # use a standard name - will be relative to remote buildname
444 # run checkout in any case - would do an update if already exists
445 build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
446 if self.run_in_host(build_checkout) != 0:
448 # the repo url is taken from arch-rpms-url
449 # with the last step (i386) removed
450 repo_url = self.options.arch_rpms_url
451 for level in [ 'arch' ]:
452 repo_url = os.path.dirname(repo_url)
453 # pass the vbuild-nightly options to vtest-init-vserver
455 test_env_options += " -p %s"%self.options.personality
456 test_env_options += " -d %s"%self.options.pldistro
457 test_env_options += " -f %s"%self.options.fcdistro
458 script="vtest-init-vserver.sh"
459 vserver_name = self.vservername
460 vserver_options="--netdev eth0 --interface %s"%self.vserverip
462 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
463 vserver_options += " --hostname %s"%vserver_hostname
466 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
467 return self.run_in_host(create_vserver) == 0
471 "yum install myplc, noderepo, and the plain bootstrapfs"
473 # workaround for getting pgsql5.2 on centos5
474 if self.options.fcdistro == "centos5":
475 self.run_in_guest("rpm -Uvh http://yum.pgsqlrpms.org/8.2/pgdg-centos-8.2-4.noarch.rpm")
476 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
478 if self.options.personality == "linux32":
480 elif self.options.personality == "linux64":
483 raise Exception, "Unsupported personality %r"%self.options.personality
485 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
487 self.run_in_guest("yum -y install myplc")==0 and \
488 self.run_in_guest("yum -y install noderepo-%s"%nodefamily)==0 and \
489 self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0
494 tmpname='%s.plc-config-tty'%(self.name())
495 fileconf=open(tmpname,'w')
496 for var in [ 'PLC_NAME',
500 'PLC_MAIL_SUPPORT_ADDRESS',
503 # Above line was added for integrating SFA Testing
509 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
510 fileconf.write('w\n')
511 fileconf.write('q\n')
513 utils.system('cat %s'%tmpname)
514 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
515 utils.system('rm %s'%tmpname)
520 self.run_in_guest('service plc start')
525 self.run_in_guest('service plc stop')
    # stores the keys from the config for further use
    def store_keys(self):
        "stores test users ssh keys in keys/"
        # delegate the actual file writing to TestKey, one per declared key
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
539 def clean_keys(self):
540 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
542 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
543 # for later direct access to the nodes
544 def fetch_keys(self):
545 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
547 if not os.path.isdir(dir):
549 vservername=self.vservername
551 prefix = 'debug_ssh_key'
552 for ext in [ 'pub', 'rsa' ] :
553 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
554 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
555 if self.test_ssh.fetch(src,dst) != 0: overall=False
559 "create sites with PLCAPI"
560 return self.do_sites()
    def clean_sites (self):
        "delete sites with PLCAPI"
        # thin wrapper: do_sites does the actual work
        return self.do_sites(action="delete")
566 def do_sites (self,action="add"):
567 for site_spec in self.plc_spec['sites']:
568 test_site = TestSite (self,site_spec)
569 if (action != "add"):
570 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
571 test_site.delete_site()
572 # deleted with the site
573 #test_site.delete_users()
576 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
577 test_site.create_site()
578 test_site.create_users()
    def clean_all_sites (self):
        # brute-force removal: delete every site known to the PLCAPI,
        # not only the ones declared in this plc's spec
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
589 "create nodes with PLCAPI"
590 return self.do_nodes()
    def clean_nodes (self):
        "delete nodes with PLCAPI"
        # thin wrapper: do_nodes does the actual work
        return self.do_nodes(action="delete")
595 def do_nodes (self,action="add"):
596 for site_spec in self.plc_spec['sites']:
597 test_site = TestSite (self,site_spec)
599 utils.header("Deleting nodes in site %s"%test_site.name())
600 for node_spec in site_spec['nodes']:
601 test_node=TestNode(self,test_site,node_spec)
602 utils.header("Deleting %s"%test_node.name())
603 test_node.delete_node()
605 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
606 for node_spec in site_spec['nodes']:
607 utils.pprint('Creating node %s'%node_spec,node_spec)
608 test_node = TestNode (self,test_site,node_spec)
609 test_node.create_node ()
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        # thin wrapper: do_nodegroups does the actual work
        return self.do_nodegroups("add")
    def clean_nodegroups (self):
        "delete nodegroups with PLCAPI"
        # thin wrapper: do_nodegroups does the actual work
        return self.do_nodegroups("delete")
619 # create nodegroups if needed, and populate
620 def do_nodegroups (self, action="add"):
621 # 1st pass to scan contents
623 for site_spec in self.plc_spec['sites']:
624 test_site = TestSite (self,site_spec)
625 for node_spec in site_spec['nodes']:
626 test_node=TestNode (self,test_site,node_spec)
627 if node_spec.has_key('nodegroups'):
628 nodegroupnames=node_spec['nodegroups']
629 if isinstance(nodegroupnames,StringTypes):
630 nodegroupnames = [ nodegroupnames ]
631 for nodegroupname in nodegroupnames:
632 if not groups_dict.has_key(nodegroupname):
633 groups_dict[nodegroupname]=[]
634 groups_dict[nodegroupname].append(test_node.name())
635 auth=self.auth_root()
637 for (nodegroupname,group_nodes) in groups_dict.iteritems():
639 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
640 # first, check if the nodetagtype is here
641 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
643 tag_type_id = tag_types[0]['tag_type_id']
645 tag_type_id = self.apiserver.AddTagType(auth,
646 {'tagname':nodegroupname,
647 'description': 'for nodegroup %s'%nodegroupname,
650 print 'located tag (type)',nodegroupname,'as',tag_type_id
652 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
654 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
655 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
656 # set node tag on all nodes, value='yes'
657 for nodename in group_nodes:
659 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
661 traceback.print_exc()
662 print 'node',nodename,'seems to already have tag',nodegroupname
665 expect_yes = self.apiserver.GetNodeTags(auth,
666 {'hostname':nodename,
667 'tagname':nodegroupname},
668 ['value'])[0]['value']
669 if expect_yes != "yes":
670 print 'Mismatch node tag on node',nodename,'got',expect_yes
673 if not self.options.dry_run:
674 print 'Cannot find tag',nodegroupname,'on node',nodename
678 print 'cleaning nodegroup',nodegroupname
679 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
681 traceback.print_exc()
685 # return a list of tuples (nodename,qemuname)
686 def all_node_infos (self) :
688 for site_spec in self.plc_spec['sites']:
689 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
690 for node_spec in site_spec['nodes'] ]
693 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
695 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
696 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
697 if self.options.dry_run:
701 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
702 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
703 # the nodes that haven't checked yet - start with a full list and shrink over time
704 tocheck = self.all_hostnames()
705 utils.header("checking nodes %r"%tocheck)
706 # create a dict hostname -> status
707 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
710 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
712 for array in tocheck_status:
713 hostname=array['hostname']
714 boot_state=array['boot_state']
715 if boot_state == target_boot_state:
716 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
718 # if it's a real node, never mind
719 (site_spec,node_spec)=self.locate_hostname(hostname)
720 if TestNode.is_real_model(node_spec['node_fields']['model']):
721 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
723 boot_state = target_boot_state
724 elif datetime.datetime.now() > graceout:
725 utils.header ("%s still in '%s' state"%(hostname,boot_state))
726 graceout=datetime.datetime.now()+datetime.timedelta(1)
727 status[hostname] = boot_state
729 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
732 if datetime.datetime.now() > timeout:
733 for hostname in tocheck:
734 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
736 # otherwise, sleep for a while
738 # only useful in empty plcs
    def nodes_booted(self):
        # wait (up to 30 minutes, silently for the first 20) for all nodes
        # to reach the 'boot' state
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
744 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
746 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
747 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
748 vservername=self.vservername
751 local_key = "keys/%(vservername)s-debug.rsa"%locals()
754 local_key = "keys/key1.rsa"
755 node_infos = self.all_node_infos()
756 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
757 for (nodename,qemuname) in node_infos:
758 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
759 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
760 (timeout_minutes,silent_minutes,period))
762 for node_info in node_infos:
763 (hostname,qemuname) = node_info
764 # try to run 'hostname' in the node
765 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
766 # don't spam logs - show the command only after the grace period
767 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
769 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
771 node_infos.remove(node_info)
773 # we will have tried real nodes once, in case they're up - but if not, just skip
774 (site_spec,node_spec)=self.locate_hostname(hostname)
775 if TestNode.is_real_model(node_spec['node_fields']['model']):
776 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
777 node_infos.remove(node_info)
780 if datetime.datetime.now() > timeout:
781 for (hostname,qemuname) in node_infos:
782 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
784 # otherwise, sleep for a while
786 # only useful in empty plcs
    def nodes_ssh_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        # debug mode comes up faster, hence the shorter silent period
        return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=5)
    def nodes_ssh_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        # a full boot takes longer, hence the longer silent period
        return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=15)
798 def init_node (self):
799 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
803 "all nodes: invoke GetBootMedium and store result locally"
806 def configure_qemu (self):
807 "all nodes: compute qemu config qemu.conf and store it locally"
810 def reinstall_node (self):
811 "all nodes: mark PLCAPI boot_state as reinstall"
814 def export_qemu (self):
815 "all nodes: push local node-dep directory on the qemu box"
818 ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        # run the node-context hook scripts on an arbitrary (first) node
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        # run the slice-context hook scripts on an arbitrary (first) sliver
        return self.locate_first_sliver().check_hooks()
824 def check_hooks (self):
825 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
826 return self.check_hooks_node() and self.check_hooks_sliver()
829 def do_check_initscripts(self):
831 for slice_spec in self.plc_spec['slices']:
832 if not slice_spec.has_key('initscriptname'):
834 initscript=slice_spec['initscriptname']
835 for nodename in slice_spec['nodenames']:
836 (site,node) = self.locate_node (nodename)
837 # xxx - passing the wrong site - probably harmless
838 test_site = TestSite (self,site)
839 test_slice = TestSlice (self,test_site,slice_spec)
840 test_node = TestNode (self,test_site,node)
841 test_sliver = TestSliver (self, test_node, test_slice)
842 if not test_sliver.check_initscript(initscript):
    def check_initscripts(self):
        "check that the initscripts have triggered"
        # thin wrapper: do_check_initscripts does the actual work
        return self.do_check_initscripts()
850 def initscripts (self):
851 "create initscripts with PLCAPI"
852 for initscript in self.plc_spec['initscripts']:
853 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
854 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
857 def clean_initscripts (self):
858 "delete initscripts with PLCAPI"
859 for initscript in self.plc_spec['initscripts']:
860 initscript_name = initscript['initscript_fields']['name']
861 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
863 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
864 print initscript_name,'deleted'
866 print 'deletion went wrong - probably did not exist'
871 "create slices with PLCAPI"
872 return self.do_slices()
    def clean_slices (self):
        "delete slices with PLCAPI"
        # thin wrapper: do_slices does the actual work
        return self.do_slices("delete")
878 def do_slices (self, action="add"):
879 for slice in self.plc_spec['slices']:
880 site_spec = self.locate_site (slice['sitename'])
881 test_site = TestSite(self,site_spec)
882 test_slice=TestSlice(self,test_site,slice)
884 utils.header("Deleting slices in site %s"%test_site.name())
885 test_slice.delete_slice()
887 utils.pprint("Creating slice",slice)
888 test_slice.create_slice()
889 utils.header('Created Slice %s'%slice['slice_fields']['name'])
892 @slice_mapper_options
893 def check_slice(self):
894 "tries to ssh-enter the slice with the user key, to ensure slice creation"
898 def clear_known_hosts (self):
899 "remove test nodes entries from the local known_hosts file"
903 def start_node (self) :
904 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
907 def check_tcp (self):
908 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
909 specs = self.plc_spec['tcp_test']
914 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
915 if not s_test_sliver.run_tcp_server(port,timeout=10):
919 # idem for the client side
920 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
921 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
925 def plcsh_stress_test (self):
926 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
927 # install the stress-test in the plc image
928 location = "/usr/share/plc_api/plcsh_stress_test.py"
929 remote="/vservers/%s/%s"%(self.vservername,location)
930 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
932 command += " -- --check"
933 if self.options.size == 1:
935 return ( self.run_in_guest(command) == 0)
937 # populate runs the same utility without slightly different options
938 # in particular runs with --preserve (dont cleanup) and without --check
939 # also it gets run twice, once with the --foreign option for creating fake foreign entries
942 def install_sfa(self):
943 "yum install sfa, sfa-plc and sfa-client"
944 if self.options.personality == "linux32":
946 elif self.options.personality == "linux64":
949 raise Exception, "Unsupported personality %r"%self.options.personality
950 return self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")==0
953 def configure_sfa(self):
955 tmpname='%s.sfa-config-tty'%(self.name())
956 fileconf=open(tmpname,'w')
957 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
958 'SFA_REGISTRY_LEVEL1_AUTH',
960 'SFA_AGGREGATE_HOST',
966 'SFA_PLC_DB_PASSWORD',
968 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
969 fileconf.write('w\n')
970 fileconf.write('R\n')
971 fileconf.write('q\n')
973 utils.system('cat %s'%tmpname)
974 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
975 utils.system('rm %s'%tmpname)
    def import_sfa(self):
        # run the sfa-import-plc.py utility inside the guest to populate the SFA registry
        # NOTE(review): `auth` is only used by the commented-out line below
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import-plc.py')==0
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
987 return self.run_in_guest('service sfa start')==0
990 "sfi client configuration"
992 if os.path.exists(dir_name):
993 utils.system('rm -rf %s'%dir_name)
994 utils.system('mkdir %s'%dir_name)
995 file_name=dir_name + os.sep + 'fake-pi1.pkey'
996 fileconf=open(file_name,'w')
997 fileconf.write (self.plc_spec['keys'][0]['private'])
1000 file_name=dir_name + os.sep + 'sfi_config'
1001 fileconf=open(file_name,'w')
1002 SFI_AUTH=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']+".main"
1003 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1004 fileconf.write('\n')
1005 SFI_USER=SFI_AUTH+'.fake-pi1'
1006 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1007 fileconf.write('\n')
1008 SFI_REGISTRY='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12345/'
1009 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1010 fileconf.write('\n')
1011 SFI_SM='http://' + self.plc_spec['sfa']['SFA_PLC_DB_HOST'] + ':12347/'
1012 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1013 fileconf.write('\n')
1016 file_name=dir_name + os.sep + 'person.xml'
1017 fileconf=open(file_name,'w')
1018 for record in self.plc_spec['sfa']['sfa_person_xml']:
1019 person_record=record
1020 fileconf.write(person_record)
1021 fileconf.write('\n')
1024 file_name=dir_name + os.sep + 'slice.xml'
1025 fileconf=open(file_name,'w')
1026 for record in self.plc_spec['sfa']['sfa_slice_xml']:
1028 #slice_record=self.plc_spec['sfa']['sfa_slice_xml']
1029 fileconf.write(slice_record)
1030 fileconf.write('\n')
1033 file_name=dir_name + os.sep + 'slice.rspec'
1034 fileconf=open(file_name,'w')
1036 for (key, value) in self.plc_spec['sfa']['sfa_slice_rspec'].items():
1038 fileconf.write(slice_rspec)
1039 fileconf.write('\n')
1042 remote="/vservers/%s/%s"%(self.vservername,location)
1043 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
1045 #utils.system('cat %s'%tmpname)
1046 utils.system('rm -rf %s'%dir_name)
1050 "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
1052 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1053 success=test_user_sfa.add_user()
1055 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1056 site_spec = self.locate_site (slice_spec['sitename'])
1057 test_site = TestSite(self,site_spec)
1058 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1059 success1=test_slice_sfa.add_slice()
1060 success2=test_slice_sfa.create_slice()
1061 return success and success1 and success2
1063 def update_sfa(self):
1064 "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
1066 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1067 success1=test_user_sfa.update_user()
1069 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1070 site_spec = self.locate_site (slice_spec['sitename'])
1071 test_site = TestSite(self,site_spec)
1072 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1073 success2=test_slice_sfa.update_slice()
1074 return success1 and success2
        "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        # NOTE(review): as shown here the four chained checks below form a
        # bare expression whose value is discarded -- a 'return \' line
        # appears to be missing just above; confirm against the full source
        self.run_in_guest("sfi.py -d /root/.sfi/ list %s.main"%auth)==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ show %s.main"%auth)==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
    # the body below is intentionally just a docstring: the decorator replaces
    # this method with a mapper that runs TestSliceSfa.check_slice_sfa on every
    # slice in plc_spec['sfa']['slices_sfa'] and restores this doc text
    @slice_mapper_options_sfa
    def check_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
1090 def delete_sfa(self):
1091 "run sfi.py delete (on SM), sfi.py remove (on Registry)"
1093 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1094 success1=test_user_sfa.delete_user()
1095 for slice_spec in self.plc_spec['sfa']['slices_sfa']:
1096 site_spec = self.locate_site (slice_spec['sitename'])
1097 test_site = TestSite(self,site_spec)
1098 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1099 success2=test_slice_sfa.delete_slice()
1101 return success1 and success2
1105 return self.run_in_guest('service sfa stop')==0
1107 def populate (self):
1108 "creates random entries in the PLCAPI"
1109 # install the stress-test in the plc image
1110 location = "/usr/share/plc_api/plcsh_stress_test.py"
1111 remote="/vservers/%s/%s"%(self.vservername,location)
1112 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1114 command += " -- --preserve --short-names"
1115 local = (self.run_in_guest(command) == 0);
1116 # second run with --foreign
1117 command += ' --foreign'
1118 remote = (self.run_in_guest(command) == 0);
1119 return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (2) each node's qemu log is fetched by its TestNode wrapper
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
1149 def gather_slivers_var_logs(self):
1150 for test_sliver in self.all_sliver_objs():
1151 remote = test_sliver.tar_var_logs()
1152 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1153 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1154 utils.system(command)
1157 def gather_var_logs (self):
1158 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1159 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1160 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1161 utils.system(command)
1162 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1163 utils.system(command)
1165 def gather_pgsql_logs (self):
1166 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1167 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1168 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1169 utils.system(command)
1171 def gather_nodes_var_logs (self):
1172 for site_spec in self.plc_spec['sites']:
1173 test_site = TestSite (self,site_spec)
1174 for node_spec in site_spec['nodes']:
1175 test_node=TestNode(self,test_site,node_spec)
1176 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1177 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1178 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1179 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1180 utils.system(command)
1183 # returns the filename to use for sql dump/restore, using options.dbname if set
1184 def dbfile (self, database):
1185 # uses options.dbname if it is found
1187 name=self.options.dbname
1188 if not isinstance(name,StringTypes):
1191 t=datetime.datetime.now()
1194 return "/root/%s-%s.sql"%(database,name)
        # 'planetab4' (sic, missing an 'l') is only the tag used in the dump
        # filename; db_restore uses the identical tag, so the pair stays
        # consistent -- do not "fix" one without the other
        dump=self.dbfile("planetab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
1202 def db_restore(self):
1203 dump=self.dbfile("planetab4")
1204 ##stop httpd service
1205 self.run_in_guest('service httpd stop')
1206 # xxx - need another wrapper
1207 self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
1208 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
1209 self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
1210 ##starting httpd service
1211 self.run_in_guest('service httpd start')
1213 utils.header('Database restored from ' + dump)
    # canned standby_<N> steps, one per duration from 1 to 20 minutes; they
    # rely on standby_generic (defined near the top of the file), which parses
    # the minute count out of the function name and sleeps that long.
    # NOTE(review): the @standby_generic decorators are not visible on these
    # lines in this chunk -- as plain defs these would be no-ops; confirm the
    # decorators are present in the full source.
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass