1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep for <minutes> minutes - the generated standby_* steps funnel here.
# NOTE(review): dry_run is accepted but its handling is on lines missing from
# this listing - presumably it skips the sleep; confirm against the full file.
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
# Factory for the standby_* steps: the duration is parsed from the generated
# method's name (second '_'-separated token, e.g. standby_5 -> 5 minutes).
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
# Decorator: lift a TestNode method into a TestPlc step that runs the
# same-name method on every node, and-ing the results into one boolean.
40 def node_mapper (method):
41 def actual(self,*args, **kwds):
43 node_method = TestNode.__dict__[method.__name__]
44 for test_node in self.all_nodes():
45 if not node_method(test_node, *args, **kwds): overall=False
47 # restore the doc text
48 actual.__doc__=method.__doc__
# Same lifting for TestSlice methods, iterating over the plc's slice specs.
51 def slice_mapper (method):
54 slice_method = TestSlice.__dict__[method.__name__]
55 for slice_spec in self.plc_spec['slices']:
56 site_spec = self.locate_site (slice_spec['sitename'])
57 test_site = TestSite(self,site_spec)
58 test_slice=TestSlice(self,test_site,slice_spec)
59 if not slice_method(test_slice,self.options): overall=False
61 # restore the doc text
62 actual.__doc__=method.__doc__
# SFA flavour of the above, over the sfa slice specs and TestSliceSfa.
65 def slice_sfa_mapper (method):
68 slice_method = TestSliceSfa.__dict__[method.__name__]
69 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
70 site_spec = self.locate_site (slice_spec['sitename'])
71 test_site = TestSite(self,site_spec)
72 test_slice=TestSliceSfa(self,test_site,slice_spec)
73 if not slice_method(test_slice,self.options): overall=False
75 # restore the doc text
76 actual.__doc__=method.__doc__
# Fragments of the class-level step-name tables; SEP / SEPSFA are separator
# markers used only when pretty-printing the step list (see printable_steps).
# Steps suffixed '@1' presumably apply to the first plc only - TODO confirm.
86 'vs_delete','timestamp_vs','vs_create', SEP,
87 'plc_install', 'plc_configure', 'plc_start', SEP,
88 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
89 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
90 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
91 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
92 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
93 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
94 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
95 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
96 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
97 # but as the stress test might take a while, we sometimes missed the debug mode..
98 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
99 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
100 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
101 'check_tcp', 'check_sys_slice', SEP,
102 'force_gather_logs', SEP,
# below: the non-default ("other") steps, runnable on demand only
105 'export', 'show_boxes', SEP,
106 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
107 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
108 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
109 'delete_leases', 'list_leases', SEP,
111 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
112 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
113 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
114 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
115 'plc_db_dump' , 'plc_db_restore', SEP,
116 'standby_1_through_20',SEP,
def printable_steps (list):
    # Render the step names as a single display string, turning both
    # separator markers into backslash-newline line breaks.
    rendered = " ".join(list) + " "
    for marker in (SEP, SEPSFA):
        rendered = rendered.replace(" " + marker + " ", " \\\n")
    return rendered
def valid_step (step):
    # the separator pseudo-steps are not actual steps
    return step not in (SEP, SEPSFA)
127 # turn off the sfa-related steps when build has skipped SFA
128 # this is originally for centos5 as recent SFAs won't build on this platform
# Probe the build's rpm repository over http; when no sfa- package shows up,
# demote every 'sfa' step from default_steps into other_steps.
130 def check_whether_build_has_sfa (rpms_url):
131 # warning, we're now building 'sface' so let's be a bit more picky
132 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
133 # full builds are expected to return with 0 here
135 # move all steps containing 'sfa' from default_steps to other_steps
136 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
137 TestPlc.other_steps += sfa_steps
138 for step in sfa_steps: TestPlc.default_steps.remove(step)
# Constructor: remember the spec, and set up the helpers for ssh access to
# the host box and for the xmlrpc PLCAPI endpoint.
# NOTE(review): 'self.options=options' (original line 142) is missing from
# this listing but is clearly assigned, since self.options is used below.
140 def __init__ (self,plc_spec,options):
141 self.plc_spec=plc_spec
143 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
144 self.vserverip=plc_spec['vserverip']
145 self.vservername=plc_spec['vservername']
146 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
147 self.apiserver=TestApiserver(self.url,options.dry_run)
# Bodies of small accessors whose def lines are missing from this listing:
# name() -> "<specname>.<vservername>", then the host box name, then locality.
150 name=self.plc_spec['name']
151 return "%s.%s"%(name,self.vservername)
154 return self.plc_spec['host_box']
157 return self.test_ssh.is_local()
159 # define the API methods on this object through xmlrpc
160 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # wrap <command> so it runs inside the guest, then wrap that again
    # into the ssh command targeting the host box
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    # boot the guest VM: run the host-side start command through ssh
    start_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(start_command))
def stop_guest (self):
    # halt the guest VM: run the host-side stop command through ssh
    stop_command = self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(stop_command))
def run_in_guest (self,command):
    # execute <command> inside the guest; returns the exit status
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # execute <command> on the host box itself (not inside the guest)
    retcod = self.test_ssh.run_in_buildname(command)
    return retcod
179 #command gets run in the plc's vm
# Wrap a command so it executes inside the guest: ssh for lxc guests,
# 'vserver ... exec' otherwise.
# NOTE(review): the 'else:' lines between the two returns of each method
# below are missing from this listing.
180 def host_to_guest(self,command):
181 if self.options.plcs_use_lxc:
182 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
184 return "vserver %s exec %s"%(self.vservername,command)
# Filesystem root of the guest as seen from the host box.
186 def vm_root_in_host(self):
187 if self.options.plcs_use_lxc:
188 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
190 return "/vservers/%s"%(self.vservername)
# Host-side path of the guest's creation-timestamp file.
192 def vm_timestamp_path (self):
193 if self.options.plcs_use_lxc:
194 return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
196 return "/vservers/%s.timestamp"%(self.vservername)
198 #start/stop the vserver
# Host-side command lines for booting / halting the guest.
199 def start_guest_in_host(self):
200 if self.options.plcs_use_lxc:
201 return "lxc-start --daemon --name=%s"%(self.vservername)
203 return "vserver %s start"%(self.vservername)
205 def stop_guest_in_host(self):
206 if self.options.plcs_use_lxc:
207 return "lxc-stop --name=%s"%(self.vservername)
209 return "vserver %s stop"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    # run <local> here and pipe its output into <remote> executed in the guest
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local + " | " + remote_command)
215 # does a yum install in the vs, ignore yum retcod, check with rpm
# Install <rpms> (string or list; a list is presumably joined into one string
# on the line missing from this listing) and verify success with 'rpm -q'.
216 def yum_install (self, rpms):
217 if isinstance (rpms, list):
219 self.run_in_guest("yum -y install %s"%rpms)
220 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
221 self.run_in_guest("yum-complete-transaction -y")
222 return self.run_in_guest("rpm -q %s"%rpms)==0
# Credentials dict used for every PLCAPI call made as the root user.
224 def auth_root (self):
225 return {'Username':self.plc_spec['PLC_ROOT_USER'],
226 'AuthMethod':'password',
227 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
228 'Role' : self.plc_spec['role']
# locate_* helpers: scan the plc spec for a matching spec dict, raising
# when nothing matches. The 'return' lines under each successful match
# are missing from this listing.
230 def locate_site (self,sitename):
231 for site in self.plc_spec['sites']:
232 if site['site_fields']['name'] == sitename:
234 if site['site_fields']['login_base'] == sitename:
236 raise Exception,"Cannot locate site %s"%sitename
# node lookup by its spec name
238 def locate_node (self,nodename):
239 for site in self.plc_spec['sites']:
240 for node in site['nodes']:
241 if node['name'] == nodename:
243 raise Exception,"Cannot locate node %s"%nodename
# node lookup by hostname this time
245 def locate_hostname (self,hostname):
246 for site in self.plc_spec['sites']:
247 for node in site['nodes']:
248 if node['node_fields']['hostname'] == hostname:
250 raise Exception,"Cannot locate hostname %s"%hostname
252 def locate_key (self,keyname):
253 for key in self.plc_spec['keys']:
254 if key['name'] == keyname:
256 raise Exception,"Cannot locate key %s"%keyname
258 def locate_slice (self, slicename):
259 for slice in self.plc_spec['slices']:
260 if slice['slice_fields']['name'] == slicename:
262 raise Exception,"Cannot locate slice %s"%slicename
# Build a TestSliver for every (node,slice) pair in the spec.
264 def all_sliver_objs (self):
266 for slice_spec in self.plc_spec['slices']:
267 slicename = slice_spec['slice_fields']['name']
268 for nodename in slice_spec['nodenames']:
269 result.append(self.locate_sliver_obj (nodename,slicename))
# TestSliver wrapper for one (node,slice) pair.
272 def locate_sliver_obj (self,nodename,slicename):
273 (site,node) = self.locate_node(nodename)
274 slice = self.locate_slice (slicename)
276 test_site = TestSite (self, site)
277 test_node = TestNode (self, test_site,node)
278 # xxx the slice site is assumed to be the node site - mhh - probably harmless
279 test_slice = TestSlice (self, test_site, slice)
280 return TestSliver (self, test_node, test_slice)
# TestNode wrapper for the first node of the first slice
# (the trailing return lines are missing from this listing).
282 def locate_first_node(self):
283 nodename=self.plc_spec['slices'][0]['nodenames'][0]
284 (site,node) = self.locate_node(nodename)
285 test_site = TestSite (self, site)
286 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    # convenience: the sliver for the first node of the first slice in the spec
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
295 # all different hostboxes used in this plc
# Group the virtual nodes by the qemu host box they run on; real nodes are
# skipped. Result is a dict host_box -> [ TestNode ] (the accumulator
# initializations and final return are missing from this listing).
296 def gather_hostBoxes(self):
297 # maps on sites and nodes, return [ (host_box,test_node) ]
299 for site_spec in self.plc_spec['sites']:
300 test_site = TestSite (self,site_spec)
301 for node_spec in site_spec['nodes']:
302 test_node = TestNode (self, test_site, node_spec)
303 if not test_node.is_real():
304 tuples.append( (test_node.host_box(),test_node) )
305 # transform into a dict { 'host_box' -> [ test_node .. ] }
307 for (box,node) in tuples:
308 if not result.has_key(box):
311 result[box].append(node)
314 # a step for checking this stuff
315 def show_boxes (self):
316 'print summary of nodes location'
317 for (box,nodes) in self.gather_hostBoxes().iteritems():
318 print box,":"," + ".join( [ node.name() for node in nodes ] )
321 # make this a valid step
322 def qemu_kill_all(self):
323 'kill all qemu instances on the qemu boxes involved by this setup'
324 # this is the brute force version, kill all qemus on that host box
325 for (box,nodes) in self.gather_hostBoxes().iteritems():
326 # pass the first nodename, as we don't push template-qemu on testboxes
327 nodedir=nodes[0].nodedir()
328 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
331 # make this a valid step
332 def qemu_list_all(self):
333 'list all qemu instances on the qemu boxes involved by this setup'
334 for (box,nodes) in self.gather_hostBoxes().iteritems():
335 # this is the brute force version, kill all qemus on that host box
336 TestBoxQemu(box,self.options.buildname).qemu_list_all()
339 # kill only the right qemus
# per-node variants; their loop bodies are missing from this listing
340 def qemu_list_mine(self):
341 'list qemu instances for our nodes'
342 for (box,nodes) in self.gather_hostBoxes().iteritems():
343 # the fine-grain version
348 # kill only the right qemus
349 def qemu_kill_mine(self):
350 'kill the qemu instances for our nodes'
351 for (box,nodes) in self.gather_hostBoxes().iteritems():
352 # the fine-grain version
357 #################### display config
# body of the 'show' step (its def line is missing from this listing)
359 "show test configuration after localization"
360 self.display_pass (1)
361 self.display_pass (2)
# body of the 'export' step: print shell export lines for manual reuse
365 "print cut'n paste-able stuff to export env variables to your shell"
366 # guess local domain from hostname
367 domain=socket.gethostname().split('.',1)[1]
368 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
369 print "export BUILD=%s"%self.options.buildname
370 if self.options.plcs_use_lxc:
371 print "export PLCHOSTLXC=%s"%fqdn
373 print "export PLCHOSTVS=%s"%fqdn
374 print "export GUESTNAME=%s"%self.plc_spec['vservername']
375 vplcname=self.plc_spec['vservername'].split('-')[-1]
376 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
377 # find hostname of first node
378 (hostname,qemubox) = self.all_node_infos()[0]
379 print "export KVMHOST=%s.%s"%(qemubox,domain)
380 print "export NODE=%s"%(hostname)
# keys always shown even without --verbose
384 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
# One display pass over the whole spec; dispatches per key kind.
385 def display_pass (self,passno):
386 for (key,val) in self.plc_spec.iteritems():
387 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
391 self.display_site_spec(site)
392 for node in site['nodes']:
393 self.display_node_spec(node)
394 elif key=='initscripts':
395 for initscript in val:
396 self.display_initscript_spec (initscript)
399 self.display_slice_spec (slice)
402 self.display_key_spec (key)
404 if key not in ['sites','initscripts','slices','keys', 'sfa']:
405 print '+ ',key,':',val
407 def display_site_spec (self,site):
408 print '+ ======== site',site['site_fields']['name']
409 for (k,v) in site.iteritems():
410 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
413 print '+ ','nodes : ',
415 print node['node_fields']['hostname'],'',
421 print user['name'],'',
423 elif k == 'site_fields':
424 print '+ login_base',':',v['login_base']
425 elif k == 'address_fields':
431 def display_initscript_spec (self,initscript):
432 print '+ ======== initscript',initscript['initscript_fields']['name']
434 def display_key_spec (self,key):
435 print '+ ======== key',key['name']
437 def display_slice_spec (self,slice):
438 print '+ ======== slice',slice['slice_fields']['name']
439 for (k,v) in slice.iteritems():
452 elif k=='slice_fields':
453 print '+ fields',':',
454 print 'max_nodes=',v['max_nodes'],
459 def display_node_spec (self,node):
460 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
461 print "hostname=",node['node_fields']['hostname'],
462 print "ip=",node['interface_fields']['ip']
463 if self.options.verbose:
464 utils.pprint("node details",node,depth=3)
466 # another entry point for just showing the boxes involved
467 def display_mapping (self):
468 TestPlc.display_mapping_plc(self.plc_spec)
# static helper (its @staticmethod decorator line is missing from this listing)
472 def display_mapping_plc (plc_spec):
473 print '+ MyPLC',plc_spec['name']
474 # WARNING this would not be right for lxc-based PLC's - should be harmless though
475 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
476 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
477 for site_spec in plc_spec['sites']:
478 for node_spec in site_spec['nodes']:
479 TestPlc.display_mapping_node(node_spec)
482 def display_mapping_node (node_spec):
483 print '+ NODE %s'%(node_spec['name'])
484 print '+\tqemu box %s'%node_spec['host_box']
485 print '+\thostname=%s'%node_spec['node_fields']['hostname']
487 # write a timestamp in /vservers/<>.timestamp
488 # cannot be inside the vserver, that causes vserver .. build to cough
# NOTE(review): 'now' is presumably set (int(time.time())?) on the line
# missing from this listing.
489 def timestamp_vs (self):
491 # TODO-lxc check this one
492 # a first approx. is to store the timestamp close to the VM root like vs does
493 stamp_path=self.vm_timestamp_path ()
494 stamp_dir = os.path.dirname (stamp_path)
495 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
496 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
498 # this is called unconditionally at the beginning of the test sequence
499 # just in case this is a rerun, so if the vm is not running it's fine
# body of vs_delete (its def line is missing): wipe the stamp, then tear the
# guest down - lxc branch visible, the vserver branch is the final line.
501 "vserver delete the test myplc"
502 stamp_path=self.vm_timestamp_path()
503 self.run_in_host("rm -f %s"%stamp_path)
504 if self.options.plcs_use_lxc:
505 self.run_in_host("lxc-stop --name %s"%self.vservername)
506 self.run_in_host("lxc-destroy --name %s"%self.vservername)
509 self.run_in_host("vserver --silent %s delete"%self.vservername)
513 # historically the build was being fetched by the tests
514 # now the build pushes itself as a subdir of the tests workdir
515 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
516 def vs_create (self):
517 "vserver creation (no install done)"
518 # push the local build/ dir to the testplc box
520 # a full path for the local calls
521 build_dir=os.path.dirname(sys.argv[0])
522 # sometimes this is empty - set to "." in such a case
523 if not build_dir: build_dir="."
524 build_dir += "/build"
526 # use a standard name - will be relative to remote buildname
528 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
529 self.test_ssh.rmdir(build_dir)
530 self.test_ssh.copy(build_dir,recursive=True)
531 # the repo url is taken from arch-rpms-url
532 # with the last step (i386) removed
533 repo_url = self.options.arch_rpms_url
534 for level in [ 'arch' ]:
535 repo_url = os.path.dirname(repo_url)
536 # pass the vbuild-nightly options to vtest-init-vserver
# NOTE(review): test_env_options' initialization is on a line missing here
538 test_env_options += " -p %s"%self.options.personality
539 test_env_options += " -d %s"%self.options.pldistro
540 test_env_options += " -f %s"%self.options.fcdistro
541 if self.options.plcs_use_lxc:
542 script="vtest-init-lxc.sh"
544 script="vtest-init-vserver.sh"
545 vserver_name = self.vservername
546 vserver_options="--netdev eth0 --interface %s"%self.vserverip
# reverse-lookup the guest IP; a failure is treated as fatal (see below)
548 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
549 vserver_options += " --hostname %s"%vserver_hostname
551 print "Cannot reverse lookup %s"%self.vserverip
552 print "This is considered fatal, as this might pollute the test results"
554 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
555 return self.run_in_host(create_vserver) == 0
# Install the myplc stack in the guest through yum.
558 def plc_install(self):
559 "yum install myplc, noderepo, and the plain bootstrapfs"
561 # workaround for getting pgsql8.2 on centos5
562 if self.options.fcdistro == "centos5":
563 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
# personality -> arch mapping; the 'arch=' assignments are missing here
566 if self.options.personality == "linux32":
568 elif self.options.personality == "linux64":
571 raise Exception, "Unsupported personality %r"%self.options.personality
572 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
575 pkgs_list.append ("slicerepo-%s"%nodefamily)
576 pkgs_list.append ("myplc")
577 pkgs_list.append ("noderepo-%s"%nodefamily)
578 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
# NOTE(review): pkgs_string is built but pkgs_list is what gets passed -
# pkgs_string looks unused here; confirm against the full file
579 pkgs_string=" ".join(pkgs_list)
580 return self.yum_install (pkgs_list)
# Drive plc-config-tty in the guest from a locally generated answer file.
583 def plc_configure(self):
585 tmpname='%s.plc-config-tty'%(self.name())
586 fileconf=open(tmpname,'w')
587 for var in [ 'PLC_NAME',
592 'PLC_MAIL_SUPPORT_ADDRESS',
595 # Above line was added for integrating SFA Testing
601 'PLC_RESERVATION_GRANULARITY',
603 'PLC_OMF_XMPP_SERVER',
605 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
606 fileconf.write('w\n')
607 fileconf.write('q\n')
609 utils.system('cat %s'%tmpname)
610 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
611 utils.system('rm %s'%tmpname)
# bodies of the plc_start / plc_stop steps (their def lines are missing)
616 self.run_in_guest('service plc start')
621 self.run_in_guest('service plc stop')
# docstrings of the vs_start / vs_stop steps (their bodies are missing)
625 "start the PLC vserver"
630 "stop the PLC vserver"
634 # stores the keys from the config for further use
635 def keys_store(self):
636 "stores test users ssh keys in keys/"
637 for key_spec in self.plc_spec['keys']:
638 TestKey(self,key_spec).store_key()
# wipe the local key cache
641 def keys_clean(self):
642 "removes keys cached in keys/"
643 utils.system("rm -rf ./keys")
646 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
647 # for later direct access to the nodes
648 def keys_fetch(self):
649 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
# ensure the local keys/ dir exists, then pull the debug key pair out of
# the guest's filesystem as seen from the host box
651 if not os.path.isdir(dir):
653 vservername=self.vservername
654 vm_root=self.vm_root_in_host()
656 prefix = 'debug_ssh_key'
657 for ext in [ 'pub', 'rsa' ] :
658 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
659 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
660 if self.test_ssh.fetch(src,dst) != 0: overall=False
# body of the 'sites' step (its def line is missing from this listing)
664 "create sites with PLCAPI"
665 return self.do_sites()
667 def delete_sites (self):
668 "delete sites with PLCAPI"
669 return self.do_sites(action="delete")
# Shared worker for the sites/delete_sites steps; the else-branch lines
# between delete and create are missing from this listing.
671 def do_sites (self,action="add"):
672 for site_spec in self.plc_spec['sites']:
673 test_site = TestSite (self,site_spec)
674 if (action != "add"):
675 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
676 test_site.delete_site()
677 # deleted with the site
678 #test_site.delete_users()
681 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
682 test_site.create_site()
683 test_site.create_users()
# Brute-force cleanup through the API rather than through the spec.
686 def delete_all_sites (self):
687 "Delete all sites in PLC, and related objects"
688 print 'auth_root',self.auth_root()
689 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
690 for site_id in site_ids:
691 print 'Deleting site_id',site_id
692 self.apiserver.DeleteSite(self.auth_root(),site_id)
# body of the 'nodes' step (its def line is missing from this listing)
696 "create nodes with PLCAPI"
697 return self.do_nodes()
698 def delete_nodes (self):
699 "delete nodes with PLCAPI"
700 return self.do_nodes(action="delete")
# Shared worker for the nodes/delete_nodes steps.
702 def do_nodes (self,action="add"):
703 for site_spec in self.plc_spec['sites']:
704 test_site = TestSite (self,site_spec)
706 utils.header("Deleting nodes in site %s"%test_site.name())
707 for node_spec in site_spec['nodes']:
708 test_node=TestNode(self,test_site,node_spec)
709 utils.header("Deleting %s"%test_node.name())
710 test_node.delete_node()
712 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
713 for node_spec in site_spec['nodes']:
714 utils.pprint('Creating node %s'%node_spec,node_spec)
715 test_node = TestNode (self,test_site,node_spec)
716 test_node.create_node ()
# thin step wrappers over do_nodegroups
719 def nodegroups (self):
720 "create nodegroups with PLCAPI"
721 return self.do_nodegroups("add")
722 def delete_nodegroups (self):
723 "delete nodegroups with PLCAPI"
724 return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    # Timestamps below one year are relative (counted in units of <grain>
    # from <start>); anything larger is taken as an absolute timestamp.
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    # human-readable UTC rendering of a unix timestamp
    broken_down = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', broken_down)
# body of the 'leases' step (its def line is missing from this listing)
737 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
739 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
740 print 'API answered grain=',grain
# align 'start' on a grain boundary (integer division under python2)
741 start=(now/grain)*grain
743 # find out all nodes that are reservable
744 nodes=self.all_reservable_nodenames()
746 utils.header ("No reservable node found - proceeding without leases")
749 # attach them to the leases as specified in plc_specs
750 # this is where the 'leases' field gets interpreted as relative or absolute
751 for lease_spec in self.plc_spec['leases']:
752 # skip the ones that come with a null slice id
753 if not lease_spec['slice']: continue
754 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
755 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
756 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
757 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
758 if lease_addition['errors']:
759 utils.header("Cannot create leases, %s"%lease_addition['errors'])
762 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
763 (nodes,lease_spec['slice'],
764 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
765 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
# remove every lease known to the API (no filtering by spec)
769 def delete_leases (self):
770 "remove all leases in the myplc side"
771 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
772 utils.header("Cleaning leases %r"%lease_ids)
773 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
# list leases; without --verbose only the still-current ones are shown
776 def list_leases (self):
777 "list all leases known to the myplc"
778 leases = self.apiserver.GetLeases(self.auth_root())
781 current=l['t_until']>=now
782 if self.options.verbose or current:
783 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
784 TestPlc.timestamp_printable(l['t_from']),
785 TestPlc.timestamp_printable(l['t_until'])))
788 # create nodegroups if needed, and populate
# Two passes: (1) scan the node specs and build groups_dict
# nodegroupname -> [node names]; (2) for each group, ensure the tag type
# and nodegroup exist (action=="add") or clean them up (delete path at the
# bottom). Several try/except and else lines are missing from this listing.
789 def do_nodegroups (self, action="add"):
790 # 1st pass to scan contents
792 for site_spec in self.plc_spec['sites']:
793 test_site = TestSite (self,site_spec)
794 for node_spec in site_spec['nodes']:
795 test_node=TestNode (self,test_site,node_spec)
796 if node_spec.has_key('nodegroups'):
797 nodegroupnames=node_spec['nodegroups']
# a single name is accepted as shorthand for a one-element list
798 if isinstance(nodegroupnames,StringTypes):
799 nodegroupnames = [ nodegroupnames ]
800 for nodegroupname in nodegroupnames:
801 if not groups_dict.has_key(nodegroupname):
802 groups_dict[nodegroupname]=[]
803 groups_dict[nodegroupname].append(test_node.name())
804 auth=self.auth_root()
806 for (nodegroupname,group_nodes) in groups_dict.iteritems():
808 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
809 # first, check if the nodetagtype is here
810 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
812 tag_type_id = tag_types[0]['tag_type_id']
# tag type not found: create it
814 tag_type_id = self.apiserver.AddTagType(auth,
815 {'tagname':nodegroupname,
816 'description': 'for nodegroup %s'%nodegroupname,
818 print 'located tag (type)',nodegroupname,'as',tag_type_id
820 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
822 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
823 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
824 # set node tag on all nodes, value='yes'
825 for nodename in group_nodes:
827 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
# AddNodeTag failing is tolerated - the tag probably pre-exists
829 traceback.print_exc()
830 print 'node',nodename,'seems to already have tag',nodegroupname
833 expect_yes = self.apiserver.GetNodeTags(auth,
834 {'hostname':nodename,
835 'tagname':nodegroupname},
836 ['value'])[0]['value']
837 if expect_yes != "yes":
838 print 'Mismatch node tag on node',nodename,'got',expect_yes
841 if not self.options.dry_run:
842 print 'Cannot find tag',nodegroupname,'on node',nodename
# delete path: drop the nodegroup itself (failure tolerated, see below)
846 print 'cleaning nodegroup',nodegroupname
847 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
849 traceback.print_exc()
853 # a list of TestNode objs
# (the 'nodes=[]' initialization and the final return are missing here)
854 def all_nodes (self):
856 for site_spec in self.plc_spec['sites']:
857 test_site = TestSite (self,site_spec)
858 for node_spec in site_spec['nodes']:
859 nodes.append(TestNode (self,test_site,node_spec))
862 # return a list of tuples (nodename,qemuname)
863 def all_node_infos (self) :
865 for site_spec in self.plc_spec['sites']:
866 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
867 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # just the hostnames out of all_node_infos' (hostname,qemubox) pairs
    return [ hostname for (hostname,qemubox) in self.all_node_infos() ]
# hostnames of the nodes whose spec marks them node_type=='reservable'
# (the 'res=[]' initialization and final return are missing from this listing)
871 def all_reservable_nodenames (self):
873 for site_spec in self.plc_spec['sites']:
874 for node_spec in site_spec['nodes']:
875 node_fields=node_spec['node_fields']
876 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
877 res.append(node_fields['hostname'])
880 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
# Poll GetNodes until every (virtual) node reaches <target_boot_state>, or
# <timeout_minutes> elapse. Real nodes are tolerated in any state. Several
# loop/return/sleep lines are missing from this listing.
881 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
882 if self.options.dry_run:
886 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
887 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
888 # the nodes that haven't checked yet - start with a full list and shrink over time
889 tocheck = self.all_hostnames()
890 utils.header("checking nodes %r"%tocheck)
891 # create a dict hostname -> status
892 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
895 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
897 for array in tocheck_status:
898 hostname=array['hostname']
899 boot_state=array['boot_state']
900 if boot_state == target_boot_state:
901 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
903 # if it's a real node, never mind
904 (site_spec,node_spec)=self.locate_hostname(hostname)
905 if TestNode.is_real_model(node_spec['node_fields']['model']):
906 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
# treat the real node as done
908 boot_state = target_boot_state
909 elif datetime.datetime.now() > graceout:
910 utils.header ("%s still in '%s' state"%(hostname,boot_state))
# push graceout one day ahead so the message is not repeated
911 graceout=datetime.datetime.now()+datetime.timedelta(1)
912 status[hostname] = boot_state
914 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
917 if datetime.datetime.now() > timeout:
918 for hostname in tocheck:
919 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
921 # otherwise, sleep for a while
923 # only useful in empty plcs
# thin wrapper step
926 def nodes_booted(self):
927 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
# Poll ssh access into the nodes, with the debug key or the user key
# depending on <debug>; same timeout/graceout scheme as above.
929 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
931 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
932 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
933 vservername=self.vservername
936 local_key = "keys/%(vservername)s-debug.rsa"%locals()
939 local_key = "keys/key1.rsa"
940 node_infos = self.all_node_infos()
941 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
942 for (nodename,qemuname) in node_infos:
943 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
944 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
945 (timeout_minutes,silent_minutes,period))
947 for node_info in node_infos:
948 (hostname,qemuname) = node_info
949 # try to run 'hostname' in the node
950 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
951 # don't spam logs - show the command only after the grace period
952 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
954 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
# NOTE(review): node_infos is mutated (remove) while being iterated -
# a classic skip-an-element hazard; confirm intent against the full file
956 node_infos.remove(node_info)
958 # we will have tried real nodes once, in case they're up - but if not, just skip
959 (site_spec,node_spec)=self.locate_hostname(hostname)
960 if TestNode.is_real_model(node_spec['node_fields']['model']):
961 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
962 node_infos.remove(node_info)
965 if datetime.datetime.now() > timeout:
966 for (hostname,qemuname) in node_infos:
967 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
969 # otherwise, sleep for a while
971 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    return self.check_nodes_ssh (debug=True,
                                 timeout_minutes=10,
                                 silent_minutes=8)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    return self.check_nodes_ssh (debug=False,
                                 timeout_minutes=40,
                                 silent_minutes=38)
# Per-node steps: the docstrings double as the step descriptions. These are
# presumably decorated with @node_mapper (the decorator lines and the stub
# 'pass' bodies are missing from this listing) - the real work happens in
# the same-name TestNode methods.
983 def qemu_local_init (self):
984 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
# docstring of the bootcd step (its def line is missing)
988 "all nodes: invoke GetBootMedium and store result locally"
991 def qemu_local_config (self):
992 "all nodes: compute qemu config qemu.conf and store it locally"
995 def nodestate_reinstall (self):
996 "all nodes: mark PLCAPI boot_state as reinstall"
999 def nodestate_safeboot (self):
1000 "all nodes: mark PLCAPI boot_state as safeboot"
1003 def nodestate_boot (self):
1004 "all nodes: mark PLCAPI boot_state as boot"
1007 def nodestate_show (self):
1008 "all nodes: show PLCAPI boot_state"
1011 def qemu_export (self):
1012 "all nodes: push local node-dep directory on the qemu box"
1015 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-side hook scripts on the first node of the spec
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the slice-side hook scripts in the first sliver of the spec
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # same short-circuit as 'a and b': the sliver check only runs when
    # the node check succeeded
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
1026 def do_check_initscripts(self):
# for every slice that declares an 'initscriptstamp', verify the stamp landed
# on each of its nodes' slivers
1028 for slice_spec in self.plc_spec['slices']:
1029 if not slice_spec.has_key('initscriptstamp'):
# (elided: presumably 'continue' - slices without a stamp are skipped)
1031 stamp=slice_spec['initscriptstamp']
1032 for nodename in slice_spec['nodenames']:
1033 (site,node) = self.locate_node (nodename)
1034 # xxx - passing the wrong site - probably harmless
1035 test_site = TestSite (self,site)
1036 test_slice = TestSlice (self,test_site,slice_spec)
1037 test_node = TestNode (self,test_site,node)
1038 test_sliver = TestSliver (self, test_node, test_slice)
1039 if not test_sliver.check_initscript_stamp(stamp):
# (elided: failure accounting and the overall return value)
def check_initscripts(self):
    "check that the initscripts have triggered"
    # straight delegation to the worker method
    return self.do_check_initscripts()
1047 def initscripts (self):
1048 "create initscripts with PLCAPI"
# registers every initscript declared in the spec through the API
1049 for initscript in self.plc_spec['initscripts']:
1050 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1051 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
# (elided: presumably 'return True' - step methods return a boolean per the file header)
1054 def delete_initscripts (self):
1055 "delete initscripts with PLCAPI"
1056 for initscript in self.plc_spec['initscripts']:
1057 initscript_name = initscript['initscript_fields']['name']
1058 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
# best-effort delete - the surrounding try/except lines are elided here;
# line 1063 below reads like the except branch
1060 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1061 print initscript_name,'deleted'
1063 print 'deletion went wrong - probably did not exist'
# (the 'def slices (self):' header for this step sits on an elided line just above)
1068 "create slices with PLCAPI"
1069 return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same traversal as slice creation, with the 'delete' action selected
    return self.do_slices(action="delete")
1075 def do_slices (self, action="add"):
# worker shared by slices() and delete_slices(); 'action' picks the branch -
# the 'if'/'else' lines around the two branches below are elided here
1076 for slice in self.plc_spec['slices']:
1077 site_spec = self.locate_site (slice['sitename'])
1078 test_site = TestSite(self,site_spec)
1079 test_slice=TestSlice(self,test_site,slice)
1081 utils.header("Deleting slices in site %s"%test_site.name())
1082 test_slice.delete_slice()
1084 utils.pprint("Creating slice",slice)
1085 test_slice.create_slice()
1086 utils.header('Created Slice %s'%slice['slice_fields']['name'])
# mapper-delegated steps: only def lines and docstrings visible, bodies elided
1090 def ssh_slice(self):
1091 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1095 def keys_clear_known_hosts (self):
1096 "remove test nodes entries from the local known_hosts file"
1100 def qemu_start (self) :
1101 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1105 def timestamp_qemu (self) :
# NOTE(review): this docstring is a verbatim copy of qemu_start's - it should
# presumably describe the timestamp-recording step; confirm against the
# (elided) body before changing, as docstrings double as step descriptions
1106 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1109 def check_tcp (self):
1110 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1111 specs = self.plc_spec['tcp_test']
# (elided: the per-spec loop header and the 'port' extraction)
1116 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1117 if not s_test_sliver.run_tcp_server(port,timeout=10):
1121 # idem for the client side
# NOTE(review): the client sliver is located with the *server* spec keys
# ('server_node'/'server_slice') - looks like a copy/paste bug; the client
# side should presumably use 'client_node'/'client_slice'. Confirm against
# the tcp_test spec format before fixing.
1122 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1123 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# painfully enough, we need to allow for some time as netflow might show up last
def check_sys_slice (self):
    "all nodes: check that a system slice is alive"
    # would probably make more sense to check for netflow,
    # but that one is currently not working in the lxc distro
    # return self.check_systemslice ('netflow')
    target_slice = 'drl'
    return self.check_systemslice (target_slice)
1135 # we have the slices up already here, so it should not take too long
1136 def check_systemslice (self, slicename, timeout_minutes=5, period=15):
# poll every node until it reports the system slice, or the deadline expires
1137 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
1138 test_nodes=self.all_nodes()
# (elided: the outer 'while' loop header around this scan)
1140 for test_node in test_nodes:
1141 if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
# NOTE(review): removing from test_nodes while a 'for' iterates it skips the
# following element - presumably harmless because the outer (elided) loop
# rescans, but worth confirming
1143 test_nodes.remove(test_node)
1148 if datetime.datetime.now () > timeout:
1149 for test_node in test_nodes:
1150 utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
1155 def plcsh_stress_test (self):
1156 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1157 # install the stress-test in the plc image
1158 location = "/usr/share/plc_api/plcsh_stress_test.py"
1159 remote="%s/%s"%(self.vm_root_in_host(),location)
1160 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
# (elided: 'command' is initialized on a line not visible in this chunk)
1162 command += " -- --check"
1163 if self.options.size == 1:
1164 command += " --tiny"
1165 return ( self.run_in_guest(command) == 0)
1167 # populate runs the same utility with slightly different options
1168 # in particular runs with --preserve (dont cleanup) and without --check
1169 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # pull the whole sfa stack in one shot
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install (packages)
1175 def sfa_install_core(self):
# installs the core 'sfa' package only (the docstring line is elided here)
1177 return self.yum_install ("sfa")
def sfa_install_plc(self):
    "yum install sfa-plc"
    # the plc-side sfa package
    package = "sfa-plc"
    return self.yum_install(package)
def sfa_install_client(self):
    "yum install sfa-client"
    # the sfi client-side package
    package = "sfa-client"
    return self.yum_install(package)
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # the sfatables filtering package
    package = "sfa-sfatables"
    return self.yum_install (package)
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # try each nuke utility in turn - the name changed across sfa releases.
    # Fixes two defects in the previous version: (a) the 2nd and 3rd
    # alternatives lacked '==0', so a *failing* exit status (non-zero, hence
    # truthy) stopped the chain and was treated as success, and (b) the result
    # was discarded although step methods are expected to return a boolean
    # (see the file header comment).
    return self.run_in_guest("sfa-nuke.py")==0 or \
        self.run_in_guest("sfa-nuke-plc.py")==0 or \
        self.run_in_guest("sfaadmin.py registry nuke")==0
1198 def sfa_plcclean(self):
1199 "cleans the PLC entries that were created as a side effect of running the script"
# removes the slices and users the sfa steps created on the PLC side
1201 sfa_spec=self.plc_spec['sfa']
1203 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1204 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
# best-effort deletes; NOTE(review): the bare 'except' also swallows real API
# errors, not just 'already absent'
1205 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1206 except: print "Slice %s already absent from PLC db"%slicename
1208 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1209 try: self.apiserver.DeletePerson(self.auth_root(),username)
1210 except: print "User %s already absent from PLC db"%username
1212 print "REMEMBER TO RUN sfa_import AGAIN"
1215 def sfa_uninstall(self):
1216 "uses rpm to uninstall sfa - ignore result"
# remove the packages, then wipe state, config and logs left behind
1217 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1218 self.run_in_guest("rm -rf /var/lib/sfa")
1219 self.run_in_guest("rm -rf /etc/sfa")
1220 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
# last resort: force-remove sfa-plc without running its rpm scriptlets
1222 self.run_in_guest("rpm -e --noscripts sfa-plc")
1225 ### run unit tests for SFA
1226 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1227 # Running Transaction
1228 # Transaction couldn't start:
1229 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1230 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1231 # no matter how many Gbs are available on the testplc
1232 # could not figure out what's wrong, so...
1233 # if the yum install phase fails, consider the test is successful
1234 # other combinations will eventually run it hopefully
1235 def sfa_utest(self):
1236 "yum install sfa-tests and run SFA unittests"
1237 self.run_in_guest("yum -y install sfa-tests")
1238 # failed to install - forget it
1239 if self.run_in_guest("rpm -q sfa-tests")!=0:
1240 utils.header("WARNING: SFA unit tests failed to install, ignoring")
# (elided: presumably 'return True' here, per the rationale in the note above)
1242 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# (interior of the confdir() helper - its 'def' line is on an elided line above)
# lazily creates conf.<plcname>/ in the current directory and returns its name
1246 dirname="conf.%s"%self.plc_spec['name']
1247 if not os.path.isdir(dirname):
1248 utils.system("mkdir -p %s"%dirname)
1249 if not os.path.isdir(dirname):
# NOTE(review): raising a string is a TypeError on python >= 2.6 - this should
# raise a real exception instance, e.g. Exception("...")
1250 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of <filename> inside this plc's local config directory
    directory = self.confdir()
    return "%s/%s" % (directory, filename)
1255 def confsubdir(self,dirname,clean,dry_run=False):
# config subdir conf.<plc>/<dirname>, wiped first when 'clean' is set
# (the 'if clean:' guard itself is on an elided line)
1256 subdirname="%s/%s"%(self.confdir(),dirname)
1258 utils.system("rm -rf %s"%subdirname)
1259 if not os.path.isdir(subdirname):
1260 utils.system("mkdir -p %s"%subdirname)
1261 if not dry_run and not os.path.isdir(subdirname):
# NOTE(review): raising a string is a TypeError on python >= 2.6 - use a real
# exception instance instead
1262 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
# (elided: presumably 'return subdirname' - callers use the returned path)
def conffile_clean (self,filename):
    # remove a file from the local config dir; True when rm succeeded
    full_path = self.conffile(filename)
    return 0 == utils.system("rm -rf %s" % full_path)
1270 def sfa_configure(self):
1271 "run sfa-config-tty"
# build an answer file locally, then pipe it into sfa-config-tty in the guest
1272 tmpname=self.conffile("sfa-config-tty")
1273 fileconf=open(tmpname,'w')
1274 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1275 'SFA_INTERFACE_HRN',
1276 'SFA_REGISTRY_LEVEL1_AUTH',
1277 'SFA_REGISTRY_HOST',
1278 'SFA_AGGREGATE_HOST',
# (elided: the remainder of the variable list)
1289 if self.plc_spec['sfa'].has_key(var):
1290 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1291 # the way plc_config handles booleans just sucks..
# (elided: the boolean-variable loop header and the default value of 'val')
1294 if self.plc_spec['sfa'][var]: val='true'
1295 fileconf.write ('e %s\n%s\n'%(var,val))
# trailing commands for sfa-config-tty - presumably write/reload/quit; confirm
1296 fileconf.write('w\n')
1297 fileconf.write('R\n')
1298 fileconf.write('q\n')
# (elided: presumably fileconf.close() before the file is read back below)
1300 utils.system('cat %s'%tmpname)
1301 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element for insertion in a peer's aggregates.xml
    sfa_spec = self.plc_spec['sfa']
    hrn = sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    port = sfa_spec['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % (self.vserverip, hrn, port)
def registry_xml_line(self):
    # one <registry/> element - the registry port is hard-wired to 12345
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
1314 # a cross step that takes all other plcs in argument
1315 def cross_sfa_configure(self, other_plcs):
1316 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1317 # of course with a single plc, other_plcs is an empty list
1320 agg_fname=self.conffile("agg.xml")
# py2 'file()' builtin; the handle is never explicitly closed (relies on refcounting)
1321 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1322 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1323 utils.header ("(Over)wrote %s"%agg_fname)
1324 reg_fname=self.conffile("reg.xml")
1325 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1326 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1327 utils.header ("(Over)wrote %s"%reg_fname)
# push both files into the guest's /etc/sfa; succeeds only if both copies do
1328 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1329 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1331 def sfa_import(self):
# run whichever import utility this sfa release ships - the name changed over time
# NOTE(review): 'auth' is unused since the cp below was commented out
1333 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1334 return self.run_in_guest('sfa-import.py')==0 or \
1335 self.run_in_guest('sfa-import-plc.py')==0 or \
1336 self.run_in_guest('sfaadmin.py registry import_registry')==0
1337 # not needed anymore
1338 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1340 def sfa_start(self):
# starts the sfa service in the guest (the docstring line is elided here)
1342 return self.run_in_guest('service sfa start')==0
1344 def sfi_configure(self):
1345 "Create /root/sfi on the plc side for sfi client configuration"
1346 if self.options.dry_run:
1347 utils.header("DRY RUN - skipping step")
# (elided: presumably 'return True' as the dry-run early exit)
1349 sfa_spec=self.plc_spec['sfa']
1350 # cannot use sfa_slice_mapper to pass dir_name
1351 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1352 site_spec = self.locate_site (slice_spec['sitename'])
1353 test_site = TestSite(self,site_spec)
1354 test_slice=TestSliceSfa(self,test_site,slice_spec)
# build the per-slice sfi config locally, under conf.<plc>/dot-sfi/<slicename>
1355 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1356 test_slice.sfi_config(dir_name)
1357 # push into the remote /root/sfi area
1358 location = test_slice.sfi_path()
1359 remote="%s/%s"%(self.vm_root_in_host(),location)
1360 self.test_ssh.mkdir(remote,abs=True)
1361 # need to strip last level of remote otherwise we get an extra dir level
1362 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # wipe the whole sfi client area inside the guest
    command = "rm -rf /root/sfi"
    self.run_in_guest(command)
# thin mapper-delegated sfa steps: decorators and one-line bodies sit on
# elided lines; the docstrings double as step descriptions in the step listing
1372 def sfa_add_user(self):
1377 def sfa_update_user(self):
1381 def sfa_add_slice(self):
1382 "run sfi.py add (on Registry) from slice.xml"
1386 def sfa_discover(self):
1387 "discover resources into resouces_in.rspec"
1391 def sfa_create_slice(self):
1392 "run sfi.py create (on SM) - 1st time"
1396 def sfa_check_slice_plc(self):
1397 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1401 def sfa_update_slice(self):
1402 "run sfi.py create (on SM) on existing object"
# (the def line owning this docstring is elided)
1407 "various registry-related calls"
1411 def ssh_slice_sfa(self):
1412 "tries to ssh-enter the SFA slice"
1416 def sfa_delete_user(self):
1421 def sfa_delete_slice(self):
1422 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# (line below belongs to the sfa_stop step whose def line is elided)
# NOTE(review): the '==0' comparison result is discarded - either a deliberate
# best-effort stop or a missing 'return'; confirm against the full file
1427 self.run_in_guest('service sfa stop')==0
1430 def populate (self):
1431 "creates random entries in the PLCAPI"
1432 # install the stress-test in the plc image
1433 location = "/usr/share/plc_api/plcsh_stress_test.py"
1434 remote="%s/%s"%(self.vm_root_in_host(),location)
1435 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
# (elided: 'command' is initialized on a line not visible in this chunk)
1437 command += " -- --preserve --short-names"
1438 local = (self.run_in_guest(command) == 0);
1439 # second run with --foreign
1440 command += ' --foreign'
# NOTE(review): 'remote' is reused here, shadowing the pathname computed above
1441 remote = (self.run_in_guest(command) == 0);
1442 return ( local and remote)
1444 def gather_logs (self):
1445 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1446 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1447 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1448 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1449 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1450 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1451 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1453 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1454 self.gather_var_logs ()
1456 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1457 self.gather_pgsql_logs ()
1459 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1460 self.gather_root_sfi ()
1462 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
# qemu logs are fetched per node, straight from the node objects
1463 for site_spec in self.plc_spec['sites']:
1464 test_site = TestSite (self,site_spec)
1465 for node_spec in site_spec['nodes']:
1466 test_node=TestNode(self,test_site,node_spec)
1467 test_node.gather_qemu_logs()
1469 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1470 self.gather_nodes_var_logs()
1472 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1473 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into logs/sliver.var-log.<slivername>/
    for sliver in self.all_sliver_objs():
        tar_command = sliver.tar_var_logs()
        dest = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % dest)
        utils.system(tar_command + " | tar -C %s -xf -" % dest)
def gather_var_logs (self):
    # plc's /var/log -> logs/myplc.var-log.<plcname>/
    dest = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_from_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_from_plc + "| tar -C %s -xf -" % dest)
    # apache logs come out unreadable - open them up for inspection
    utils.system("chmod a+r,a+x %s/httpd" % dest)
def gather_pgsql_logs (self):
    # plc's postgres logs -> logs/myplc.pgsql-log.<plcname>/
    dest = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_from_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_from_plc + "| tar -C %s -xf -" % dest)
def gather_root_sfi (self):
    # plc's /root/sfi -> logs/sfi.<plcname>/
    dest = "logs/sfi.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_from_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
    utils.system(tar_from_plc + "| tar -C %s -xf -" % dest)
def gather_nodes_var_logs (self):
    # fetch each node's /var/log into logs/node.var-log.<nodename>/
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode (self, site, node_spec)
            node_ssh = TestSsh (node.name(), key="keys/key1.rsa")
            dest = "logs/node.var-log.%s" % node.name()
            fetch = node_ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C %s -xf -" % dest
            utils.system("mkdir -p %s" % dest)
            utils.system(fetch)
1516 # returns the filename to use for sql dump/restore, using options.dbname if set
1517 def dbfile (self, database):
1518 # uses options.dbname if it is found
# (elided: presumably a try/except guarding the options.dbname access)
1520 name=self.options.dbname
1521 if not isinstance(name,StringTypes):
# (elided: the fallback branch - a timestamp-derived name built from 't' below)
1524 t=datetime.datetime.now()
1527 return "/root/%s-%s.sql"%(database,name)
1529 def plc_db_dump(self):
1530 'dump the planetlab5 DB in /root in the PLC - filename has time'
# NOTE(review): "planetab5" (sic) is misspelled vs the planetlab5 database -
# it only affects the dump file name, and plc_db_restore uses the same name,
# so fixing it would orphan older dumps; confirm before changing
1531 dump=self.dbfile("planetab5")
1532 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1533 utils.header('Dumped planetlab5 database in %s'%dump)
1536 def plc_db_restore(self):
1537 'restore the planetlab5 DB - looks broken, but run -n might help'
# drops and recreates planetlab5, then feeds the dump back; httpd is
# stopped around the operation so nothing holds connections to the db
1538 dump=self.dbfile("planetab5")
1539 ##stop httpd service
1540 self.run_in_guest('service httpd stop')
1541 # xxx - need another wrapper
1542 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1543 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1544 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1545 ##starting httpd service
1546 self.run_in_guest('service httpd start')
1548 utils.header('Database restored from ' + dump)
1550 def standby_1_through_20(self):
1551 """convenience function to wait for a specified number of minutes"""
# the numbered steps below are generated sleepers; each is presumably wrapped
# by the @standby_generic decorator (defined at the top of the file) on an
# elided line - the decorator derives the minute count from the method name
1554 def standby_1(): pass
1556 def standby_2(): pass
1558 def standby_3(): pass
1560 def standby_4(): pass
1562 def standby_5(): pass
1564 def standby_6(): pass
1566 def standby_7(): pass
1568 def standby_8(): pass
1570 def standby_9(): pass
1572 def standby_10(): pass
1574 def standby_11(): pass
1576 def standby_12(): pass
1578 def standby_13(): pass
1580 def standby_14(): pass
1582 def standby_15(): pass
1584 def standby_16(): pass
1586 def standby_17(): pass
1588 def standby_18(): pass
1590 def standby_19(): pass
1592 def standby_20(): pass