7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    # Sleep for <minutes> minutes; used by the standby_<N> steps below.
    # NOTE(review): this view is gapped here -- the dry_run short-circuit
    # presumably lives in the missing lines (25-27); confirm against the full file.
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    # Decorator factory: derives the sleep duration from the decorated step's
    # name (standby_<N> -> N minutes) and delegates to standby().
    # NOTE(review): the inner wrapper 'def actual(self):' is on a line missing
    # from this view -- 'self' below belongs to that hidden wrapper.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    # Decorator: turns a TestNode method into a TestPlc step that maps the
    # method over every node of every site in the plc spec.
    # NOTE(review): the inner wrapper def, the 'overall=True' init and the
    # final return are on lines missing from this view.
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
def slice_mapper_options (method):
    # Decorator: turns a TestSlice method (taking the options object) into a
    # TestPlc step that maps it over every slice of the plc spec.
    # NOTE(review): the inner wrapper def, 'overall' init and return are on
    # lines missing from this view.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # Class-level step-name lists fragment (default_steps / other_steps).
    # NOTE(review): the list headers and closing brackets are on lines missing
    # from this view.  SEP entries are line-break markers consumed by
    # printable_steps.
        'display','uninstall','install','install_rpm',
        'configure', 'start', 'fetch_keys', SEP,
        'store_keys', 'clear_known_hosts', 'initscripts', SEP,
        'sites', 'nodes', 'slices', 'nodegroups', SEP,
        'init_node','bootcd', 'configure_qemu', 'export_qemu',
        'kill_all_qemus', 'reinstall_node','start_node', SEP,
        # better use of time: do this now that the nodes are taking off
        'plcsh_stress_test', SEP,
        'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
        'force_gather_logs', 'force_record_tracker','force_free_tracker',
        'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
        'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
        'clean_sites', 'clean_nodes',
        'clean_slices', 'clean_keys', SEP,
        'show_boxes', 'list_all_qemus', 'list_qemus', SEP,
        'db_dump' , 'db_restore', 'cleanup_trackers', 'cleanup_all_trackers',
        'standby_1 through 20',
90 def printable_steps (list):
91 return " ".join(list).replace(" "+SEP+" "," \\\n")
    def valid_step (step):  # NOTE(review): body is on lines missing from this view
    def __init__ (self,plc_spec,options):
        """Bind the plc spec and command-line options; set up the ssh helper,
        vserver coordinates, API URL and API server proxy.

        NOTE(review): several lines are missing from this view, including the
        'self.options=options' assignment that self.options below relies on.
        """
        self.plc_spec=plc_spec
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        # NOTE(review): this raise is most likely guarded by a missing
        # conditional (chroot vs vserver deployment) -- confirm before editing
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
        # name(): "<plcname>.<vservername>" -- the def line is missing from this view
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

        # hostname(): host box name -- def line missing from this view
        return self.plc_spec['hostname']

        # is_local(): whether the plc runs on the local build box -- def line missing
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
124 def actual_command_in_guest (self,command):
125 return self.test_ssh.actual_command(self.host_to_guest(command))
127 def start_guest (self):
128 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
130 def run_in_guest (self,command):
131 return utils.system(self.actual_command_in_guest(command))
133 def run_in_host (self,command):
134 return self.test_ssh.run_in_buildname(command)
136 #command gets run in the vserver
137 def host_to_guest(self,command):
138 return "vserver %s exec %s"%(self.vservername,command)
140 #command gets run in the vserver
141 def start_guest_in_host(self):
142 return "vserver %s start"%(self.vservername)
145 def run_in_guest_piped (self,local,remote):
146 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        """Build the PLCAPI auth struct for the configured root user.
        NOTE(review): the dict's closing brace is on a line missing from
        this view."""
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
    def locate_site (self,sitename):
        """Find a site spec by display name or login_base; raises if absent.
        NOTE(review): the 'return site' lines are missing from this view."""
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        """Find (site_spec, node_spec) for a node by its spec name; raises if absent.
        NOTE(review): the 'return (site,node)' line is missing from this view."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        """Find (site_spec, node_spec) for a node by hostname; raises if absent.
        NOTE(review): the 'return (site,node)' line is missing from this view."""
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        """Find a key spec by name; raises if absent.
        NOTE(review): the 'return key' line is missing from this view."""
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        """Find a slice spec by slice name; raises if absent.
        NOTE(review): the 'return slice' line is missing from this view."""
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        # Build a TestSliver for every (slice, node) pair declared in the spec.
        # NOTE(review): the 'result=[]' init and final return are on lines
        # missing from this view.
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        """Build the TestSliver object for (nodename, slicename)."""
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build objects from the specs
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        """TestNode for the first nodename of the first slice in the spec.
        NOTE(review): the trailing 'return test_node' is on a line missing
        from this view."""
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
213 def locate_first_sliver (self):
214 slice_spec=self.plc_spec['slices'][0]
215 slicename=slice_spec['slice_fields']['name']
216 nodename=slice_spec['nodenames'][0]
217 return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): the 'tuples=[]' / 'result={}' inits, the dict-entry
        # creation and the final return are on lines missing from this view.
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    # only qemu nodes live on a host box
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)
    # a step for checking this stuff
    def show_boxes (self):
        """Step: print each host box together with the qemu nodes it hosts."""
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
    # make this a valid step
    def kill_all_qemus(self):
        """Step: kill every qemu instance on every host box used by this plc.
        NOTE(review): the trailing 'return True' appears to be on a line
        missing from this view."""
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
    # make this a valid step
    def list_all_qemus(self):
        """Step: list every qemu process on every host box used by this plc.
        NOTE(review): the trailing return appears to be on a missing line."""
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBox(box,self.options.buildname).list_all_qemus()
    # kill only the right qemus
    def list_qemus(self):
        # NOTE(review): the per-node loop body is on lines missing from this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
    # kill only the right qemus
    def kill_qemus(self):
        # NOTE(review): the per-node loop body is on lines missing from this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
    #################### display config
    # NOTE(review): the enclosing step def (likely 'def display(self):' or
    # similar) is on a line missing from this view
        self.display_pass (1)
        self.display_pass (2)
    def display_pass (self,passno):
        """Two-pass pretty-printer of the plc spec; which keys get detailed
        depends on passno.  NOTE(review): the pass/key dispatch conditionals
        are partly on lines missing from this view."""
        for (key,val) in self.plc_spec.iteritems():
                self.display_site_spec(site)
                for node in site['nodes']:
                    self.display_node_spec(node)
            elif key=='initscripts':
                for initscript in val:
                    self.display_initscript_spec (initscript)
                    self.display_slice_spec (slice)
                    self.display_key_spec (key)
                if key not in ['sites','initscripts','slices','keys']:
                    print '* ',key,':',val
    def display_site_spec (self,site):
        """Pretty-print one site spec (nodes, users, fields).
        NOTE(review): several branch lines are missing from this view."""
        print '* ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
                print '* ','nodes : ',
                    print node['node_fields']['hostname'],'',
                    print user['name'],'',
            elif k == 'site_fields':
                print '* login_base',':',v['login_base']
            elif k == 'address_fields':
                PrettyPrinter(indent=8,depth=2).pprint(v)
    def display_initscript_spec (self,initscript):
        # one-liner: show the initscript's name
        print '* ======== initscript',initscript['initscript_fields']['name']
    def display_key_spec (self,key):
        # one-liner: show the key's name
        print '* ======== key',key['name']
    def display_slice_spec (self,slice):
        """Pretty-print one slice spec.
        NOTE(review): most branch lines are missing from this view."""
        print '* ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            elif k=='slice_fields':
                print '* fields',':',
                print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        # single line: spec name, host box, hostname and IP of the node
        print "* node",node['name'],"host_box=",node['host_box'],
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        """Step: print the node-to-box mapping for this plc (delegates to
        the static display_mapping_plc helper)."""
        TestPlc.display_mapping_plc(self.plc_spec)
    def display_mapping_plc (plc_spec):
        # Print the plc's vserver coordinates and each node's mapping.
        # NOTE(review): a @staticmethod decorator is expected on a line
        # missing from this view.
        print '* MyPLC',plc_spec['name']
        print '*\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '*\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        # Print one node's box/hostname mapping.
        # NOTE(review): a @staticmethod decorator is expected on a missing line.
        print '* NODE %s'%(node_spec['name'])
        print '*\tqemu box %s'%node_spec['host_box']
        print '*\thostname=%s'%node_spec['node_fields']['hostname']
    ### utility methods for handling the pool of IP addresses allocated to plcs
    # (*) running plcs are recorded in the file named ~/running-test-plcs
    # (*) this file contains a line for each running plc, older first
    # (*) each line contains the vserver name + the hostname of the (vserver) testbox where it sits
    # (*) the free_tracker method performs a vserver stop on the oldest entry
    # (*) the record_tracker method adds an entry at the bottom of the file
    # (*) the cleanup_tracker method stops all known vservers and removes the tracker file

    # absolute path of the tracker file on the build side
    TRACKER_FILE=os.environ['HOME']+"/running-test-plcs"
    # how many concurrent plcs are we keeping alive - adjust with the IP pool size
    TRACKER_KEEP_VSERVERS = 12
    def record_tracker (self):
        """Append this plc's (vservername, hostname) entry to the tracker file,
        unless it is already recorded.
        NOTE(review): the try/except around the read, the duplicate-check
        conditional and the write of each line are partly on lines missing
        from this view."""
        lines=file(TestPlc.TRACKER_FILE).readlines()
        this_line="%s %s\n"%(self.vservername,self.test_ssh.hostname)
            print 'this vserver is already included in %s'%TestPlc.TRACKER_FILE
        if self.options.dry_run:
            print 'dry_run: record_tracker - skipping tracker update'
        tracker=file(TestPlc.TRACKER_FILE,"w")
        for line in lines+[this_line]:
        print "Recorded %s in running plcs on host %s"%(self.vservername,self.test_ssh.hostname)
    def free_tracker (self, keep_vservers=None):
        """Stop the oldest tracked vservers so that at most *keep_vservers*
        remain alive, then rewrite the tracker file.
        NOTE(review): the try/except, early returns and the final write loop
        are partly on lines missing from this view."""
        if not keep_vservers: keep_vservers=TestPlc.TRACKER_KEEP_VSERVERS
        lines=file(TestPlc.TRACKER_FILE).readlines()
            print 'dry_run: free_tracker - skipping tracker update'
        how_many = len(lines) - keep_vservers
        # nothing todo until we have more than keep_vservers in the tracker
            print 'free_tracker : limit %d not reached'%keep_vservers
        to_stop = lines[:how_many]
        to_keep = lines[how_many:]
            # oldest entries get their vserver stopped remotely
            [vname,hostname]=line.split()
            command=TestSsh(hostname).actual_command("vserver --silent %s stop"%vname)
            utils.system(command)
        if self.options.dry_run:
            print 'dry_run: free_tracker would stop %d vservers'%len(to_stop)
            for line in to_stop: print line,
            print 'dry_run: free_tracker would keep %d vservers'%len(to_keep)
            for line in to_keep: print line,
        print "Storing %d remaining vservers in %s"%(len(to_keep),TestPlc.TRACKER_FILE)
        tracker=open(TestPlc.TRACKER_FILE,"w")
    # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
    def cleanup_trackers (self):
        """Stop every vserver listed in the tracker file, then remove it.
        NOTE(review): TestPlc.TRACKER_FILE is a string (a path), which has no
        .readlines() -- this looks like it should be
        file(TestPlc.TRACKER_FILE).readlines(); a 'try:' may also be hidden
        in the gap above this loop.  Confirm against the full file."""
        for line in TestPlc.TRACKER_FILE.readlines():
            [vname,hostname]=line.split()
            stop="vserver --silent %s stop"%vname
            command=TestSsh(hostname).actual_command(stop)
            utils.system(command)
        clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
        utils.system(self.test_ssh.actual_command(clean_tracker))
460 # this should/could stop only the ones in TRACKER_FILE if that turns out to be reliable
461 def cleanup_all_trackers (self):
462 stop_all = "cd /vservers ; for i in * ; do vserver --silent $i stop ; done"
463 utils.system(self.test_ssh.actual_command(stop_all))
464 clean_tracker = "rm -f %s"%TestPlc.TRACKER_FILE
465 utils.system(self.test_ssh.actual_command(clean_tracker))
        # uninstall step fragment -- the enclosing def is on a missing line;
        # deletes this plc's vserver on the host box
        self.run_in_host("vserver --silent %s delete"%self.vservername)
        # install step fragment -- the enclosing def and several lines are
        # missing from this view; checks out the build tree and (re)creates
        # the vserver via vtest-init-vserver.sh
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # run checkout in any case - would do an update if already exists
        build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
        if self.run_in_host(build_checkout) != 0:
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        # NOTE(review): the test_env_options initialization is on a missing line
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # NOTE(review): the reverse-DNS lookup below is likely wrapped in a
        # try/except on missing lines
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def install_rpm(self):
        """Install myplc-native plus the per-arch noderepo and bootstrapfs
        rpms inside the guest.
        NOTE(review): the 'arch = ...' assignments for the linux32/linux64
        branches, the else, and the leading 'return' of the chained yum
        calls are on lines missing from this view."""
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
        raise Exception, "Unsupported personality %r"%self.options.personality
        self.run_in_guest("yum -y install myplc-native")==0 and \
        self.run_in_guest("yum -y install noderepo-%s-%s"%(self.options.pldistro,arch))==0 and \
        self.run_in_guest("yum -y install bootstrapfs-%s-%s-plain"%(self.options.pldistro,arch))==0
        # configure step fragment -- the enclosing def is on a missing line;
        # drives plc-config-tty non-interactively through a temp script
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
            # NOTE(review): more config variable names are on missing lines
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        # start/stop step fragments -- the enclosing defs are on missing lines
        self.run_in_guest('service plc start')
        self.run_in_guest('service plc stop')
557 # stores the keys from the config for further use
558 def store_keys(self):
559 for key_spec in self.plc_spec['keys']:
560 TestKey(self,key_spec).store_key()
563 def clean_keys(self):
564 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def fetch_keys(self):
        """Copy root/debug ssh keypairs out of the vserver's /etc/planetlab
        into the local keys/ directory.
        NOTE(review): the 'dir' / 'overall' initializations, the mkdir and
        the final return are on lines missing from this view."""
        if not os.path.isdir(dir):
        vservername=self.vservername
        prefix = 'root_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        # 'sites' step fragment -- the enclosing def is on a missing line
        return self.do_sites()
589 def clean_sites (self):
590 return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        """Create (action='add') or delete every site + its users from the spec.
        NOTE(review): the else branch header and the final return are on
        lines missing from this view."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
    def clean_all_sites (self):
        """Step: wipe every site registered in the PLCAPI database (not just
        the ones from the spec)."""
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        # 'nodes' step fragment -- the enclosing def is on a missing line
        return self.do_nodes()
    def clean_nodes (self):
        # step: delete every node declared in the spec
        return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        """Create or delete every node of every site from the spec.
        NOTE(review): the action conditional, else header and final return
        are on lines missing from this view."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
636 def nodegroups (self):
637 return self.do_nodegroups("add")
638 def clean_nodegroups (self):
639 return self.do_nodegroups("delete")
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        """Two-phase nodegroup handling: scan the spec for nodegroup names,
        then create (add) or delete the corresponding tag types, nodegroups
        and node tags through the PLCAPI.
        NOTE(review): many structural lines (groups_dict init, if/else and
        try/except headers, returns) are missing from this view -- the
        indentation below is a best-effort reconstruction."""
        # 1st pass to scan contents
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # accept a single name as well as a list of names
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                    tag_type_id = tag_types[0]['tag_type_id']
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # then the nodegroup itself
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check it's there with the expected value
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['tagvalue'])[0]['tagvalue']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                    traceback.print_exc()
    def all_hostnames (self) :
        # Collect every node hostname declared across all sites.
        # NOTE(review): the 'hostnames=[]' init and the return are on lines
        # missing from this view.
        for site_spec in self.plc_spec['sites']:
            hostnames += [ node_spec['node_fields']['hostname'] \
                           for node_spec in site_spec['nodes'] ]
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        """Poll GetNodes until every (qemu) node reaches target_boot_state,
        or timeout_minutes elapse.  Real nodes are ignored with a warning.
        NOTE(review): the dry_run early return, the outer polling loop, the
        sleep(period) and the success/failure returns are on lines missing
        from this view -- indentation below is a best-effort reconstruction."""
        if self.options.dry_run:
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status with the boot state reported by the API
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # mark it as if it had reached the target state
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
            # otherwise, sleep for a while
        # only useful in empty plcs
757 # only useful in empty plcs
760 def nodes_booted(self):
761 return self.nodes_check_boot_state('boot',timeout_minutes=20,silent_minutes=15)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=20):
        """Repeatedly try ssh'ing into every node (debug or boot key,
        depending on *debug*) until all succeed or timeout_minutes elapse.
        NOTE(review): the 'message' assignment, the outer polling loop, the
        sleep(period) and the success/failure returns are on lines missing
        from this view -- indentation is a best-effort reconstruction."""
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
            # debug mode uses the debug key fetched by fetch_keys
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
            local_key = "keys/%(vservername)s.rsa"%locals()
        tocheck = self.all_hostnames()
        utils.header("checking ssh access (expected in %s mode) to nodes %r"%(message,tocheck))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
            for hostname in tocheck:
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                if datetime.datetime.now() > graceout:
                    success=utils.system(command)
                    # truly silent, just print out a dot to show we're alive
                    command += " 2>/dev/null"
                    if self.options.dry_run:
                        print 'dry_run',command
                    success=os.system(command)
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    tocheck.remove(hostname)
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        tocheck.remove(hostname)
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE to ssh into %s"%hostname)
            # otherwise, sleep for a while
        # only useful in empty plcs
813 # only useful in empty plcs
816 def nodes_ssh_debug(self):
817 return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10)
819 def nodes_ssh_boot(self):
820 return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10)
    # node-mapped steps: the real work lives in the same-named TestNode
    # methods; the @node_mapper decorator lines are missing from this view
    def init_node (self): pass
    def bootcd (self): pass
    def configure_qemu (self): pass
    def reinstall_node (self): pass
    def export_qemu (self): pass
833 ### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
834 def check_sanity_node (self):
835 return self.locate_first_node().check_sanity()
836 def check_sanity_sliver (self) :
837 return self.locate_first_sliver().check_sanity()
839 def check_sanity (self):
840 return self.check_sanity_node() and self.check_sanity_sliver()
    def do_check_initscripts(self):
        """For every slice with an 'initscriptname', verify the initscript on
        each of its slivers.
        NOTE(review): the 'overall' init, the continue for slices without
        initscripts, and the return are on lines missing from this view."""
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptname'):
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
860 def check_initscripts(self):
861 return self.do_check_initscripts()
    def initscripts (self):
        """Step: register each initscript from the spec with the PLCAPI.
        NOTE(review): the trailing 'return True' appears to be on a line
        missing from this view."""
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    def clean_initscripts (self):
        """Step: best-effort deletion of each spec'd initscript from the PLCAPI.
        NOTE(review): the try:/except: headers around the delete are on
        lines missing from this view."""
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
        # 'slices' step fragment -- the enclosing def is on a missing line
        return self.do_slices()
884 def clean_slices (self):
885 return self.do_slices("delete")
    def do_slices (self, action="add"):
        """Create or delete every slice from the spec.
        NOTE(review): the action conditional / else header and the final
        return are on lines missing from this view."""
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # slice-mapped step: the real work is TestSlice.check_slice, applied to
    # every slice of the spec by the decorator
    @slice_mapper_options
    def check_slice(self): pass
    # mapped steps; their @node_mapper decorators are on lines missing
    # from this view -- the bodies live in TestNode
    def clear_known_hosts (self): pass
    def start_node (self) : pass
    def check_tcp (self):
        """Step: for each entry of plc_spec['tcp_test'], start a tcp server
        in the server sliver and connect from the client sliver.
        NOTE(review): the per-spec loop, 'port' extraction and returns are
        on lines missing from this view."""
        specs = self.plc_spec['tcp_test']
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
            # idem for the client side
            # NOTE(review): this uses 'server_node'/'server_slice' for the
            # CLIENT sliver -- likely should be 'client_node'/'client_slice';
            # confirm against the full file before fixing
            c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    def plcsh_stress_test (self):
        """Step: copy plcsh-stress-test.py into the plc image and run it in
        the guest; returns True on a zero exit status.
        NOTE(review): the 'command = ...' initialization and the small-size
        branch body are on lines missing from this view."""
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh-stress-test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
        command += " -- --check"
        if self.options.size == 1:
        return ( self.run_in_guest(command) == 0)
    def gather_logs (self):
        """Step: collect all logs (plc, pgsql, qemu, node and sliver) into
        the local logs/ directory."""
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
    def gather_slivers_var_logs(self):
        """Untar each sliver's /var/log into logs/sliver.var-log.<sliver>/."""
        for test_sliver in self.all_sliver_objs():
            # remote tar command producing the sliver's /var/log on stdout
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
    def gather_var_logs (self):
        """Untar the plc guest's /var/log into logs/myplc.var-log.<plc>/."""
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        # make the httpd logs readable by everyone
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)
    def gather_pgsql_logs (self):
        """Untar the plc guest's postgres pg_log into logs/myplc.pgsql-log.<plc>/."""
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)
    def gather_nodes_var_logs (self):
        """Untar each node's /var/log (reached from inside the guest with
        the root ssh key) into logs/node.var-log.<node>/."""
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="/etc/planetlab/root_ssh_key.rsa")
                to_plc = self.actual_command_in_guest ( test_ssh.actual_command("tar -C /var/log -cf - ."))
                command = to_plc + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        """Build '/root/<database>-<name>.sql' where <name> is options.dbname
        when set, otherwise a timestamp.
        NOTE(review): the try/except around options.dbname and the timestamp
        formatting are on lines missing from this view."""
        # uses options.dbname if it is found
        name=self.options.dbname
        if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)
        # db_dump step fragment -- the enclosing def is on a missing line
        # NOTE(review): "planetab4" is a misspelling of planetlab4; harmless
        # as long as db_restore uses the same misspelled filename (it does)
        dump=self.dbfile("planetab4")
        self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
        utils.header('Dumped planetlab4 database in %s'%dump)
    def db_restore(self):
        """Step: drop and recreate the planetlab4 database, then reload it
        from the dump produced by db_dump.
        NOTE(review): "planetab4" is a misspelling of planetlab4, but it
        matches the filename db_dump writes, so the pair stays consistent."""
        dump=self.dbfile("planetab4")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
        self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
    # standby_<N> steps: each sleeps N minutes via the standby_generic
    # decorator; the @standby_generic decorator lines are missing from
    # this view
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass