7 from types import StringTypes
11 from TestSite import TestSite
12 from TestNode import TestNode
13 from TestUser import TestUser
14 from TestKey import TestKey
15 from TestSlice import TestSlice
16 from TestSliver import TestSliver
17 from TestBox import TestBox
18 from TestSsh import TestSsh
19 from TestApiserver import TestApiserver
20 from Trackers import TrackerPlc, TrackerQemu
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    """Log a header and sleep for *minutes* minutes.

    NOTE(review): lines between the header and the sleep are elided from
    this chunk (presumably the dry_run short-circuit and a return value) -
    confirm against the full file.
    """
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)

def standby_generic (func):
    """Decorator factory: derive a sleep duration from the decorated
    function's name (standby_<minutes>) and delegate to standby().

    NOTE(review): the inner wrapper 'def' line is elided from this chunk;
    as shown, 'self' would be unresolved here - confirm against the full file.
    """
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    """Decorator factory: lift a TestNode method into a plc-wide step that
    applies it to every node of every site in the plc spec.

    NOTE(review): the inner wrapper 'def' line and the overall
    initialization/return are elided from this chunk - 'self' and 'overall'
    come from the elided wrapper. Confirm against the full file.
    """
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False

def slice_mapper_options (method):
    """Decorator factory: lift a TestSlice method (taking options) into a
    plc-wide step applied to every slice in the plc spec.

    NOTE(review): wrapper 'def' line and overall init/return elided from
    this chunk, as in node_mapper above.
    """
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # NOTE(review): fragment of the class-level step-name lists
    # (default_steps / other_steps in the full file); the list openers,
    # the boundary between the two lists, and some entries are elided
    # from this chunk.
    'display','trqemu_record','trqemu_free','uninstall','install','install_rpm',
    'configure', 'start', 'fetch_keys', SEP,
    'store_keys', 'clear_known_hosts', 'initscripts', SEP,
    'sites', 'nodes', 'slices', 'nodegroups', SEP,
    'init_node','bootcd', 'configure_qemu', 'export_qemu',
    'kill_all_qemus', 'reinstall_node','start_node', SEP,
    # better use of time: do this now that the nodes are taking off
    'plcsh_stress_test', SEP,
    'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
    'force_gather_logs', 'force_trplc_record','force_trplc_free',
    'stop_all_vservers','fresh_install', 'cache_rpm', 'stop', 'vs_start', SEP,
    'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
    'clean_sites', 'clean_nodes',
    'clean_slices', 'clean_keys', SEP,
    'show_boxes', 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
    'db_dump' , 'db_restore', 'trplc_cleanup','trqemu_cleanup','trackers_cleanup', SEP,
    'standby_1 through 20',
def printable_steps (list):
    """Render a list of step names as one string, turning each SEP marker
    into a backslash-newline so the listing wraps nicely."""
    flat = " ".join(list)
    marker = " " + SEP + " "
    return flat.replace(marker, " \\\n")
def valid_step (step):
    """Predicate: is *step* a known step name?

    NOTE(review): the body of this predicate is elided from this chunk -
    only the 'def' line is visible.
    """

def __init__ (self,plc_spec,options):
    """Bind a plc spec + command-line options; precompute ssh/API helpers."""
    self.plc_spec=plc_spec
    # NOTE(review): a line is elided here (presumably self.options=options,
    # which the rest of the class relies on)
    self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
    # NOTE(review): elided line(s) here (likely a vserver-vs-chroot test
    # guarding the assignments and the raise below)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    # python-2 raise syntax; in the full file this is in the non-vserver branch
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)

    # NOTE(review): enclosing 'def' line elided (presumably def name(self):)
    name=self.plc_spec['name']
    return "%s.%s"%(name,self.vservername)

    # NOTE(review): enclosing 'def' line elided (presumably def hostname(self):)
    return self.plc_spec['hostname']

    # NOTE(review): enclosing 'def' line elided (presumably def is_local(self):)
    return self.test_ssh.is_local()

# define the API methods on this object through xmlrpc
# would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Return the full local command line that executes *command* inside
    this plc's vserver guest (ssh wrapper around 'vserver ... exec')."""
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    """Start this plc's vserver guest on its host box; return the shell
    exit status of the (possibly ssh-wrapped) start command."""
    start_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(start_command))
def run_in_guest (self,command):
    """Run *command* inside the vserver guest; return its exit status."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run *command* on the plc's host box, through the buildname ssh
    wrapper; return its exit status."""
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
# command gets run in the vserver
def host_to_guest(self,command):
    """Wrap *command* so that, run on the host box, it executes inside
    this plc's vserver guest."""
    vname = self.vservername
    return "vserver %s exec %s"%(vname,command)
# command gets run in the vserver
def start_guest_in_host(self):
    """Return the host-side command that boots this plc's vserver guest."""
    vname = self.vservername
    return "vserver %s start"%vname
def run_in_guest_piped (self,local,remote):
    """Run *local* on this machine and pipe its stdout into *remote*
    executed inside the guest (stdin kept open across the ssh hop)."""
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
def auth_root (self):
    """Build the root auth struct for xmlrpc API calls, from the plc spec."""
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
            # NOTE(review): the closing brace of this dict is elided from
            # this chunk

def locate_site (self,sitename):
    """Find a site spec by name or login_base; raise if not found.

    NOTE(review): the 'return site' line of each match is elided from this
    chunk - confirm against the full file.
    """
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename

def locate_node (self,nodename):
    """Find (site_spec,node_spec) by node name; raise if not found.

    NOTE(review): the return line of the match is elided from this chunk.
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename

def locate_hostname (self,hostname):
    """Find (site_spec,node_spec) by hostname; raise if not found.

    NOTE(review): the return line of the match is elided from this chunk.
    """
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname

def locate_key (self,keyname):
    """Find a key spec by name; raise if not found.

    NOTE(review): the return line of the match is elided from this chunk.
    """
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname

def locate_slice (self, slicename):
    """Find a slice spec by slice name; raise if not found.

    NOTE(review): the return line of the match is elided from this chunk.
    """
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    """Build a TestSliver for every (node,slice) pair in the spec.

    NOTE(review): the 'result=[]' initialization and the final
    'return result' are elided from this chunk.
    """
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))

def locate_sliver_obj (self,nodename,slicename):
    """Build the TestSliver for one (nodename,slicename) pair."""
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)

def locate_first_node(self):
    """Build the TestNode for the first node of the first slice.

    NOTE(review): the final 'return test_node' is elided from this chunk.
    """
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)

def locate_first_sliver (self):
    """Build the TestSliver for the first node of the first slice."""
    slice_spec=self.plc_spec['slices'][0]
    slicename=slice_spec['slice_fields']['name']
    nodename=slice_spec['nodenames'][0]
    return self.locate_sliver_obj(nodename,slicename)
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    """Group the plc's qemu nodes by the host box that carries them.

    NOTE(review): the 'tuples=[]' and 'result={}' initializations, the
    'result[box]=[]' branch body and the final 'return result' are elided
    from this chunk.
    """
    # maps on sites and nodes, return [ (host_box,test_node) ]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
            # NOTE(review): 'result[box]=[]' elided here
        result[box].append(node)
# a step for checking this stuff
def show_boxes (self):
    """Print each host box followed by the node names it carries (py2 print)."""
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )

# make this a valid step
def kill_all_qemus(self):
    """Brute-force: kill every qemu on every host box used by this plc.

    NOTE(review): a trailing 'return True' appears to be elided from this
    chunk - confirm against the full file.
    """
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBox(box,self.options.buildname).kill_all_qemus(nodedir)

# make this a valid step
def list_all_qemus(self):
    """List every qemu process on every host box used by this plc."""
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBox(box,self.options.buildname).list_all_qemus()

# kill only the right qemus
def list_qemus(self):
    """Fine-grain listing of this plc's own qemus.

    NOTE(review): the loop body is elided from this chunk.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version

# kill only the right qemus
def kill_qemus(self):
    """Fine-grain kill of this plc's own qemus.

    NOTE(review): the loop body is elided from this chunk.
    """
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
#################### display config
# NOTE(review): the enclosing 'def display (self):' line is elided from
# this chunk; the two calls below are its body
    self.display_pass (1)
    self.display_pass (2)

def display_pass (self,passno):
    """Print the plc spec (two passes in the full file).

    NOTE(review): the pass-selection and several 'if/elif key==...'
    conditionals are elided from this chunk, leaving the branches below
    dangling; indentation is approximate.
    """
    for (key,val) in self.plc_spec.iteritems():
            self.display_site_spec(site)
            for node in site['nodes']:
                self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
            self.display_slice_spec (slice)
            self.display_key_spec (key)
        if key not in ['sites','initscripts','slices','keys']:
            print '* ',key,':',val

def display_site_spec (self,site):
    """Pretty-print one site spec (py2 soft-space prints).

    NOTE(review): several branch/loop lines are elided from this chunk;
    indentation is approximate.
    """
    print '* ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
            print '* ','nodes : ',
                print node['node_fields']['hostname'],'',
                print user['name'],'',
        elif k == 'site_fields':
            print '* login_base',':',v['login_base']
        elif k == 'address_fields':
            # NOTE(review): branch body elided; the PrettyPrinter call below
            # belongs to a later catch-all branch in the full file
            PrettyPrinter(indent=8,depth=2).pprint(v)

def display_initscript_spec (self,initscript):
    """One-line summary of an initscript spec."""
    print '* ======== initscript',initscript['initscript_fields']['name']

def display_key_spec (self,key):
    """One-line summary of a key spec."""
    print '* ======== key',key['name']

def display_slice_spec (self,slice):
    """Pretty-print one slice spec.

    NOTE(review): most branches of the k/v dispatch are elided from this
    chunk; indentation is approximate.
    """
    print '* ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '* fields',':',
            print 'max_nodes=',v['max_nodes'],

def display_node_spec (self,node):
    """One-line summary of a node spec (py2 trailing-comma prints share a line)."""
    print "* node",node['name'],"host_box=",node['host_box'],
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
# another entry point for just showing the boxes involved
def display_mapping (self):
    """Print the plc -> box/node mapping.

    NOTE(review): the remainder of this method is elided from this chunk.
    """
    TestPlc.display_mapping_plc(self.plc_spec)

def display_mapping_plc (plc_spec):
    """Print one plc's vserver location and its nodes' mappings.

    NOTE(review): in the full file this is presumably decorated as a
    staticmethod (decorator line elided) - it takes no self.
    """
    print '* MyPLC',plc_spec['name']
    print '*\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
    print '*\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)

def display_mapping_node (node_spec):
    """Print one node's qemu box and hostname (presumably a staticmethod;
    decorator line elided from this chunk)."""
    print '* NODE %s'%(node_spec['name'])
    print '*\tqemu box %s'%node_spec['host_box']
    print '*\thostname=%s'%node_spec['node_fields']['hostname']
def trplc_record (self):
    """Record this plc (host,vservername) in the plc tracker.

    NOTE(review): the tracker store/return lines are elided from this chunk.
    """
    tracker = TrackerPlc(self.options)
    tracker.record(self.test_ssh.hostname,self.vservername)

def trplc_free (self):
    """Release this plc's tracker entry.

    NOTE(review): the body after the constructor is elided from this chunk.
    """
    tracker = TrackerPlc(self.options)

def trplc_cleanup (self):
    """Clean up the plc tracker.

    NOTE(review): the body after the constructor is elided from this chunk.
    """
    tracker = TrackerPlc(self.options)

def trqemu_record (self):
    """Record every qemu node of this plc in the qemu tracker.

    NOTE(review): the tracker store/return lines are elided from this chunk.
    """
    tracker=TrackerQemu(self.options)
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            tracker.record(node_spec['host_box'],self.options.buildname,node_spec['node_fields']['hostname'])

def trqemu_free (self):
    """Release this plc's qemu tracker entries.

    NOTE(review): the inner loop body is elided from this chunk.
    """
    tracker=TrackerQemu(self.options)
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:

def trqemu_cleanup (self):
    """Clean up the qemu tracker for this plc's nodes.

    NOTE(review): the inner loop body is elided from this chunk.
    """
    tracker=TrackerQemu(self.options)
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:

def trackers_cleanup (self):
    """Clean up both trackers.

    NOTE(review): the trplc_cleanup() call / return appear to be elided
    from this chunk.
    """
    self.trqemu_cleanup()
    # NOTE(review): enclosing 'def' line elided (presumably def uninstall(self):)
    self.run_in_host("vserver --silent %s delete"%self.vservername)

    # NOTE(review): enclosing 'def' line elided (presumably def install(self):);
    # this builds and runs vtest-init-vserver.sh to create the test vserver
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # NOTE(review): a remote build_dir override line is elided here
    # run checkout in any case - would do an update if already exists
    build_checkout = "svn checkout %s %s"%(self.options.build_url,build_dir)
    if self.run_in_host(build_checkout) != 0:
        # NOTE(review): failure return elided from this chunk
    # the repo url is taken from arch-rpms-url
    # with the last step (i386) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pass the vbuild-nightly options to vtest-init-vserver
    # NOTE(review): the test_env_options initialization is elided here
    test_env_options += " -p %s"%self.options.personality
    test_env_options += " -d %s"%self.options.pldistro
    test_env_options += " -f %s"%self.options.fcdistro
    script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    # NOTE(review): a try: opener appears to be elided - gethostbyaddr can fail
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    # NOTE(review): the matching except/fallback is elided from this chunk
    create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def install_rpm(self):
    """Install myplc + noderepo + bootstrapfs rpms in the guest; arch is
    derived from options.personality.

    NOTE(review): the arch assignments for both personalities, the 'else:'
    guarding the raise, and the 'return \\' that chains the three yum runs
    are elided from this chunk.
    """
    if self.options.personality == "linux32":
    elif self.options.personality == "linux64":
    # python-2 raise syntax; unconditional only because guard lines are elided
    raise Exception, "Unsupported personality %r"%self.options.personality
    self.run_in_guest("yum -y install myplc")==0 and \
    self.run_in_guest("yum -y install noderepo-%s-%s"%(self.options.pldistro,arch))==0 and \
    self.run_in_guest("yum -y install bootstrapfs-%s-%s-plain"%(self.options.pldistro,arch))==0

    # NOTE(review): enclosing 'def' line elided (presumably def configure(self):);
    # writes a plc-config-tty script and pipes it into the guest
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    for var in [ 'PLC_NAME',
                 # NOTE(review): several PLC_* entries elided from this chunk
                 'PLC_MAIL_SUPPORT_ADDRESS',
                 # NOTE(review): the list closer and loop-body opener elided
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    fileconf.write('w\n')
    fileconf.write('q\n')
    # NOTE(review): fileconf.close() appears to be elided from this chunk
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)

    # NOTE(review): enclosing 'def' line elided (presumably def start(self):)
    self.run_in_guest('service plc start')

    # NOTE(review): enclosing 'def' line elided (presumably def stop(self):)
    self.run_in_guest('service plc stop')
# stores the keys from the config for further use
def store_keys(self):
    """Write every key of the spec to local disk via TestKey.store_key().

    NOTE(review): a trailing 'return True' appears to be elided from this chunk.
    """
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
def clean_keys(self):
    """Remove the locally stored keys/ directory (populated by store_keys /
    fetch_keys), rooted at the directory containing the test driver script.

    Bug fix: the original called os.path(sys.argv[0]) - os.path is a module
    and is not callable, so this step always died with a TypeError.
    os.path.dirname(sys.argv[0]) is the intended expression (the same idiom
    the install step uses to locate the build directory).
    """
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def fetch_keys(self):
    """Copy the root/debug ssh keypairs out of the guest's /etc/planetlab
    into the local keys/ directory.

    NOTE(review): the 'dir=' assignment, the mkdir call, the overall=True
    initialization and the final 'return overall' are elided from this chunk.
    """
    if not os.path.isdir(dir):
        # NOTE(review): os.mkdir(dir) elided here
    vservername=self.vservername
    prefix = 'root_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
    # NOTE(review): enclosing 'def' line elided (presumably def sites(self):)
    return self.do_sites()

def clean_sites (self):
    """Step: delete all spec'd sites."""
    return self.do_sites(action="delete")

def do_sites (self,action="add"):
    """Create (default) or delete every site+users of the spec.

    NOTE(review): the 'else:' opener of the creation branch and the final
    'return True' are elided from this chunk.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
def clean_all_sites (self):
    """Delete every site known to the API - not just the spec'd ones."""
    print 'auth_root',self.auth_root()
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)

    # NOTE(review): enclosing 'def' line elided (presumably def nodes(self):)
    return self.do_nodes()
def clean_nodes (self):
    """Step: delete all spec'd nodes."""
    return self.do_nodes(action="delete")
def do_nodes (self,action="add"):
    """Create (default) or delete every node of every site in the spec.

    NOTE(review): the 'if action != "add":' / 'else:' openers and the final
    'return True' are elided from this chunk, leaving both branches at the
    same level.
    """
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        utils.header("Deleting nodes in site %s"%test_site.name())
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            utils.header("Deleting %s"%test_node.name())
            test_node.delete_node()
        utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
        for node_spec in site_spec['nodes']:
            utils.pprint('Creating node %s'%node_spec,node_spec)
            test_node = TestNode (self,test_site,node_spec)
            test_node.create_node ()
def nodegroups (self):
    """Step: create/populate all spec'd nodegroups."""
    return self.do_nodegroups("add")
def clean_nodegroups (self):
    """Step: delete all spec'd nodegroups."""
    return self.do_nodegroups("delete")

# create nodegroups if needed, and populate
def do_nodegroups (self, action="add"):
    """Scan the spec for per-node 'nodegroups' entries, then create tag
    types, nodegroups and node tags (action="add") or delete the
    nodegroups (action="delete").

    NOTE(review): many lines are elided from this chunk - the groups_dict
    initialization, the add/delete action dispatch, several if/else/try
    openers, the AddTagType trailing arguments, and the overall
    bookkeeping/return. Indentation below is approximate.
    """
    # 1st pass to scan contents
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # a single name is allowed as shorthand for a one-element list
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        # NOTE(review): 'if action == "add":' elided here
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            tag_type_id = tag_types[0]['tag_type_id']
            tag_type_id = self.apiserver.AddTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
                                                     # NOTE(review): trailing args elided
        print 'located tag (type)',nodegroupname,'as',tag_type_id
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                # NOTE(review): 'except:' opener elided - the traceback below
                # is the duplicate-tag fallback
                traceback.print_exc()
                print 'node',nodename,'seems to already have tag',nodegroupname
            # check anyway
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['value'])[0]['value']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
        # NOTE(review): delete branch opener elided below
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
def all_hostnames (self) :
    """Collect the hostname of every node in the spec.

    NOTE(review): the 'hostnames=[]' initialization and the final
    'return hostnames' are elided from this chunk.
    """
    for site_spec in self.plc_spec['sites']:
        hostnames += [ node_spec['node_fields']['hostname'] \
                       for node_spec in site_spec['nodes'] ]

# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    """Poll GetNodes until every node reaches *target_boot_state* or the
    timeout expires.

    NOTE(review): the dry-run short-circuit, the 'while tocheck:' polling
    loop opener, an 'else:' in the per-node dispatch, the sleep and the
    success/failure returns are elided from this chunk; everything below
    the status dict runs inside that elided loop.
    """
    if self.options.dry_run:
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                    # pretend the node reached the state so it drops off tocheck
                    boot_state = target_boot_state
                elif datetime.datetime.now() > graceout:
                    utils.header ("%s still in '%s' state"%(hostname,boot_state))
                    # only nag once per day
                    graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        # shrink the list to the still-unchecked nodes
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # otherwise, sleep for a while
    # only useful in empty plcs

def nodes_booted(self):
    """Step: wait for every node to reach the 'boot' state."""
    return self.nodes_check_boot_state('boot',timeout_minutes=20,silent_minutes=15)
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=20):
    """Poll ssh access into every node (debug key or boot key, per *debug*)
    until all respond or the timeout expires.

    NOTE(review): the if/else selecting local_key and setting 'message',
    the 'while tocheck:' opener, several else/if openers, the sleep and
    the success/failure returns are elided from this chunk; indentation
    below is approximate.
    """
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
        local_key = "keys/%(vservername)s-debug.rsa"%locals()
        local_key = "keys/%(vservername)s.rsa"%locals()
    tocheck = self.all_hostnames()
    utils.header("checking ssh access (expected in %s mode) to nodes %r"%(message,tocheck))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        for hostname in tocheck:
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            if datetime.datetime.now() > graceout:
                success=utils.system(command)
                # truly silent, just print out a dot to show we're alive
                command += " 2>/dev/null"
                if self.options.dry_run:
                    print 'dry_run',command
                success=os.system(command)
            # NOTE(review): the success==0 test is elided before this header
                utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                tocheck.remove(hostname)
                # we will have tried real nodes once, in case they're up - but if not, just skip
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                    tocheck.remove(hostname)
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE to ssh into %s"%hostname)
        # otherwise, sleep for a while
    # only useful in empty plcs

def nodes_ssh_debug(self):
    """Step: wait for ssh access with the debug key."""
    return self.check_nodes_ssh(debug=True,timeout_minutes=30,silent_minutes=10)

def nodes_ssh_boot(self):
    """Step: wait for ssh access with the boot (root) key."""
    return self.check_nodes_ssh(debug=False,timeout_minutes=30,silent_minutes=10)
# NOTE(review): in the full file each one-line step below is presumably
# preceded by a mapper decorator (@node_mapper) that supplies the actual
# behavior; the decorator lines are elided from this chunk - confirm.
def init_node (self): pass

def bootcd (self): pass

def configure_qemu (self): pass

def reinstall_node (self): pass

def export_qemu (self): pass

### check sanity : invoke scripts from qaapi/qa/tests/{node,slice}
def check_sanity_node (self):
    """Run the sanity checks on the first node."""
    return self.locate_first_node().check_sanity()
def check_sanity_sliver (self) :
    """Run the sanity checks on the first sliver."""
    return self.locate_first_sliver().check_sanity()

def check_sanity (self):
    """Step: node sanity then sliver sanity; both must pass."""
    return self.check_sanity_node() and self.check_sanity_sliver()
def do_check_initscripts(self):
    """Check the spec'd initscript on every sliver that declares one.

    NOTE(review): the overall=True initialization, the 'continue' after
    the has_key guard, the overall=False on failure and the final
    'return overall' are elided from this chunk.
    """
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptname'):
        initscript=slice_spec['initscriptname']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript(initscript):

def check_initscripts(self):
    """Step entry point for do_check_initscripts."""
    return self.do_check_initscripts()

def initscripts (self):
    """Create every spec'd initscript through the API.

    NOTE(review): a trailing 'return True' appears to be elided.
    """
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])

def clean_initscripts (self):
    """Best-effort deletion of every spec'd initscript.

    NOTE(review): the try:/except: wrapping the delete call is elided
    from this chunk - the last print is the failure fallback.
    """
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
    # NOTE(review): enclosing 'def' line elided (presumably def slices(self):)
    return self.do_slices()

def clean_slices (self):
    """Step: delete all spec'd slices."""
    return self.do_slices("delete")

def do_slices (self, action="add"):
    """Create (default) or delete every slice of the spec.

    NOTE(review): the 'if action != "add":' / 'else:' openers and the
    final 'return True' are elided from this chunk, leaving both branches
    at the same level.
    """
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])

@slice_mapper_options
def check_slice(self): pass

# NOTE(review): decorator line elided (presumably @node_mapper)
def clear_known_hosts (self): pass

# NOTE(review): decorator line elided (presumably @node_mapper)
def start_node (self) : pass
def check_tcp (self):
    """Run the spec'd tcp tests: start a server in one sliver, then a
    client in another.

    NOTE(review): the overall init, the loop over specs, the port
    extraction and the failure/return handling are elided from this chunk;
    'spec' and 'port' come from the elided loop.
    """
    specs = self.plc_spec['tcp_test']
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
        # idem for the client side
        # NOTE(review): this looks up spec['server_node']/['server_slice']
        # for the *client* sliver too - looks like a copy-paste bug
        # (expected 'client_node'/'client_slice'); confirm against the
        # full file before changing
        c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):

def plcsh_stress_test (self):
    """Copy the plcsh stress test into the guest and run it with --check.

    NOTE(review): the initial 'command = ...' assignment and the size==1
    tweak are elided from this chunk - as shown, 'command +=' would be
    unresolved.
    """
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh-stress-test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh-stress-test.py",remote)
    command += " -- --check"
    if self.options.size == 1:
    return ( self.run_in_guest(command) == 0)
def gather_logs (self):
    """Gather plc, node and sliver logs under logs/ (best effort).

    NOTE(review): a trailing 'return True' appears to be elided from this
    chunk.
    """
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()

def gather_slivers_var_logs(self):
    """Untar each sliver's /var/log into logs/sliver.var-log.<sliver>/."""
    for test_sliver in self.all_sliver_objs():
        remote = test_sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
        command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
        utils.system(command)
def gather_var_logs (self):
    """Fetch the plc guest's /var/log into logs/myplc.var-log.<name>/ and
    make the httpd subdirectory world-readable."""
    target = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p " + target)
    tar_from_guest = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_from_guest + "| tar -C " + target + " -xf -")
    utils.system("chmod a+r,a+x " + target + "/httpd")
def gather_pgsql_logs (self):
    """Fetch the guest's postgres pg_log directory into
    logs/myplc.pgsql-log.<name>/."""
    target = "logs/myplc.pgsql-log.%s"%self.name()
    utils.system("mkdir -p " + target)
    tar_from_guest = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_from_guest + "| tar -C " + target + " -xf -")
def gather_nodes_var_logs (self):
    """Pull /var/log from every node (ssh through the guest, using the
    guest's root ssh key) into logs/node.var-log.<node>/."""
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            nodename = node.name()
            node_ssh = TestSsh (nodename,key="/etc/planetlab/root_ssh_key.rsa")
            fetch = self.actual_command_in_guest ( node_ssh.actual_command("tar -C /var/log -cf - ."))
            target = "logs/node.var-log.%s"%nodename
            utils.system("mkdir -p " + target)
            utils.system(fetch + "| tar -C " + target + " -xf -")
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    """Compute /root/<database>-<name>.sql; *name* is options.dbname or a
    timestamp fallback.

    NOTE(review): the try:/except: around the options lookup and the
    timestamp name-formatting lines are elided from this chunk.
    """
    # uses options.dbname if it is found
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)

    # NOTE(review): enclosing 'def' line elided (presumably def db_dump(self):)
    # NOTE(review): 'planetab4' looks like a typo for 'planetlab4', but
    # db_restore below uses the same label, so the dump/restore filenames
    # still match - do not fix one without the other
    dump=self.dbfile("planetab4")
    self.run_in_guest('pg_dump -U pgsqluser planetlab4 -f '+ dump)
    utils.header('Dumped planetlab4 database in %s'%dump)

def db_restore(self):
    """Recreate the planetlab4 database from the dump written by db_dump."""
    dump=self.dbfile("planetab4")
    # stop httpd while the database is being swapped out
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab4','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab4')
    self.run_in_guest('psql -U pgsqluser planetlab4 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')

    utils.header('Database restored from ' + dump)
# NOTE(review): in the full file each standby_N below is presumably
# preceded by a @standby_generic decorator line (elided from this chunk)
# that turns the placeholder body into an N-minute sleep step - confirm.
def standby_1(): pass

def standby_2(): pass

def standby_3(): pass

def standby_4(): pass

def standby_5(): pass

def standby_6(): pass

def standby_7(): pass

def standby_8(): pass

def standby_9(): pass

def standby_10(): pass

def standby_11(): pass

def standby_12(): pass

def standby_13(): pass

def standby_14(): pass

def standby_15(): pass

def standby_16(): pass

def standby_17(): pass

def standby_18(): pass

def standby_19(): pass

def standby_20(): pass