1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    # Sleep for 'minutes' minutes; used by the standby_N_through_M pseudo-steps.
    utils.header('Entering StandBy for %d mn'%minutes)
    # NOTE(review): the dry_run short-circuit and the final 'return True' appear
    # to be on lines missing from this excerpt
    time.sleep(60*minutes)
def standby_generic (func):
    # Decorator factory: derives the sleep duration from the decorated
    # function's name ('standby_<minutes>_...').
    # NOTE(review): the inner 'def actual(self):' wrapper line is missing from
    # this excerpt; the two lines below are its body
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    # Decorator: turn a TestNode method into a TestPlc step that applies it
    # to every node, and-ing the results.
    def actual(self,*args, **kwds):
        # NOTE(review): 'overall=True' initialization presumably on a missing line
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        # NOTE(review): 'return overall' presumably on a missing line
    # restore the doc text
    actual.__doc__=method.__doc__
    # NOTE(review): 'return actual' presumably on a missing line
def slice_mapper (method):
    # Decorator: turn a TestSlice method into a TestPlc step applied to every
    # slice in the spec, and-ing the results.
    # NOTE(review): the inner 'def actual(self):' line and 'overall=True' are
    # missing from this excerpt; the lines below are the wrapper body
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
    # NOTE(review): 'return actual' presumably on a missing line
def slice_sfa_mapper (method):
    # Decorator: same as slice_mapper but for sfa slices (TestSliceSfa),
    # iterating over the 'sfa_slice_specs' section of the spec.
    # NOTE(review): inner 'def actual(self):' and 'overall=True' lines are
    # missing from this excerpt
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
    # NOTE(review): 'return actual' presumably on a missing line
86 'vs_delete','timestamp_vs','vs_create', SEP,
87 'plc_install', 'plc_configure', 'plc_start', SEP,
88 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
89 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
90 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
91 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
92 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
93 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
94 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
95 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
96 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
97 # but as the stress test might take a while, we sometimes missed the debug mode..
98 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
99 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
100 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
101 'check_tcp', 'check_netflow', SEP,
102 'force_gather_logs', SEP,
105 'export', 'show_boxes', SEP,
106 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
107 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
108 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
109 'delete_leases', 'list_leases', SEP,
111 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
112 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
113 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
114 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
115 'plc_db_dump' , 'plc_db_restore', SEP,
116 'standby_1_through_20',SEP,
def printable_steps (list):
    # Render a step list on one logical line, with each separator
    # (SEP / SEPSFA) turned into an escaped line break.
    flattened = " ".join(list) + " "
    for separator in (SEP, SEPSFA):
        flattened = flattened.replace(" "+separator+" "," \\\n")
    return flattened
def valid_step (step):
    # Separator markers are not actual steps.
    return step not in (SEP, SEPSFA)
127 # turn off the sfa-related steps when build has skipped SFA
128 # this is originally for centos5 as recent SFAs won't build on this platform
def check_whether_build_has_sfa (rpms_url):
    # Probe the build area for sfa packages; when absent, demote all
    # sfa-related steps from default_steps to other_steps.
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # NOTE(review): the early return for retcod==0 appears to be on a line
    # missing from this excerpt
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
def __init__ (self,plc_spec,options):
    # plc_spec : the dict describing this myplc instance
    # options : parsed command-line options (buildname, dry_run, ...)
    self.plc_spec=plc_spec
    # NOTE(review): 'self.options=options' is presumably set on a line missing
    # from this excerpt — self.options is used on the very next line
    self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    self.apiserver=TestApiserver(self.url,options.dry_run)
150 name=self.plc_spec['name']
151 return "%s.%s"%(name,self.vservername)
154 return self.plc_spec['host_box']
157 return self.test_ssh.is_local()
159 # define the API methods on this object through xmlrpc
160 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # Build the full host-side command line that runs 'command' inside the guest.
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    # Boot the guest (vserver or lxc) through the host box; returns the exit status.
    host_side = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_side))
def stop_guest (self):
    # Shut the guest down through the host box; returns the exit status.
    host_side = self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_side))
def run_in_guest (self,command):
    # Execute 'command' inside the guest and return its exit status.
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # Execute 'command' on the host box, relative to the build directory.
    status = self.test_ssh.run_in_buildname(command)
    return status
179 #command gets run in the plc's vm
def host_to_guest(self,command):
    # Wrap 'command' so that, run on the host box, it executes inside the
    # plc VM — over ssh for lxc guests, through 'vserver exec' otherwise.
    use_lxc = self.options.plcs_use_lxc
    if not use_lxc:
        return "vserver %s exec %s"%(self.vservername,command)
    return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
def vm_root_in_host(self):
    # Filesystem path, on the host box, of the guest's root filesystem.
    if not self.options.plcs_use_lxc:
        return "/vservers/%s"%(self.vservername)
    return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
def vm_timestamp_path (self):
    # Host-side path of the timestamp file, stored next to the VM root.
    if not self.options.plcs_use_lxc:
        return "/vservers/%s.timestamp"%(self.vservername)
    return "/var/lib/lxc/%s.timestamp"%(self.vservername)
198 #start/stop the vserver
def start_guest_in_host(self):
    # Host-side command that boots the guest.
    if not self.options.plcs_use_lxc:
        return "vserver %s start"%(self.vservername)
    return "lxc-start --daemon --name=%s"%(self.vservername)
def stop_guest_in_host(self):
    # Host-side command that shuts the guest down.
    if not self.options.plcs_use_lxc:
        return "vserver %s stop"%(self.vservername)
    return "lxc-stop --name=%s"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    # Pipe the output of the local command into 'remote' running inside the guest.
    remote_side = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_side)
# does a yum install in the vs, ignore yum retcod, check with rpm
def yum_install (self, rpms):
    # 'rpms' : a list of package names, or a single space-separated string
    if isinstance (rpms, list):
        # NOTE(review): the line turning the list into a space-separated
        # string (presumably rpms=" ".join(rpms)) is missing from this excerpt
    self.run_in_guest("yum -y install %s"%rpms)
    # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
    self.run_in_guest("yum-complete-transaction -y")
    # yum's exit code is unreliable, so verify installation with rpm -q instead
    return self.run_in_guest("rpm -q %s"%rpms)==0
def auth_root (self):
    # Build the PLCAPI auth struct for the myplc root account, from the spec.
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
    # NOTE(review): the closing '}' is on a line missing from this excerpt
def locate_site (self,sitename):
    # Find a site spec by name or login_base; raises when not found.
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
            # NOTE(review): 'return site' presumably on a missing line
        if site['site_fields']['login_base'] == sitename:
            # NOTE(review): 'return site' presumably on a missing line
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    # Find (site_spec,node_spec) for a node by its spec 'name'; raises when not found.
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
                # NOTE(review): 'return (site,node)' presumably on a missing line
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    # Find (site_spec,node_spec) for a node by hostname; raises when not found.
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
                # NOTE(review): 'return (site,node)' presumably on a missing line
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    # Find a key spec by name; raises when not found.
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
            # NOTE(review): 'return key' presumably on a missing line
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    # Find a slice spec by its slice_fields name; raises when not found.
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
            # NOTE(review): 'return slice' presumably on a missing line
    raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    # One TestSliver per (slice,node) pair declared in the spec.
    # NOTE(review): 'result=[]' initialization missing from this excerpt
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
    # NOTE(review): 'return result' missing from this excerpt
def locate_sliver_obj (self,nodename,slicename):
    # Build the TestSliver object for a given (nodename,slicename) pair.
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    # build objects
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)
def locate_first_node(self):
    # TestNode for the first node of the first slice in the spec.
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # NOTE(review): 'return test_node' presumably on a missing line
def locate_first_sliver (self):
    # Convenience: the sliver for the first node of the first slice.
    first_slice = self.plc_spec['slices'][0]
    first_nodename = first_slice['nodenames'][0]
    return self.locate_sliver_obj(first_nodename, first_slice['slice_fields']['name'])
295 # all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): 'tuples=[]' initialization missing from this excerpt
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only virtual (qemu-hosted) nodes have a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    # NOTE(review): 'result={}' initialization missing from this excerpt
    for (box,node) in tuples:
        if not result.has_key(box):
            # NOTE(review): 'result[box]=[]' presumably on a missing line
        result[box].append(node)
    # NOTE(review): 'return result' missing from this excerpt
314 # a step for checking this stuff
def show_boxes (self):
    'print summary of nodes location'
    # one line per host box, listing the nodes it hosts
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )
    # NOTE(review): 'return True' presumably on a missing line
321 # make this a valid step
def qemu_kill_all(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
    # NOTE(review): 'return True' presumably on a missing line
331 # make this a valid step
def qemu_list_all(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBoxQemu(box,self.options.buildname).qemu_list_all()
    # NOTE(review): 'return True' presumably on a missing line
339 # kill only the right qemus
def qemu_list_mine(self):
    'list qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
        # NOTE(review): the per-box call body is on lines missing from this excerpt
348 # kill only the right qemus
def qemu_kill_mine(self):
    'kill the qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
        # NOTE(review): the per-box call body is on lines missing from this excerpt
357 #################### display config
359 "show test configuration after localization"
360 self.display_pass (1)
361 self.display_pass (2)
365 "print cut'n paste-able stuff to export env variables to your shell"
366 # guess local domain from hostname
367 domain=socket.gethostname().split('.',1)[1]
368 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
369 print "export BUILD=%s"%self.options.buildname
370 if self.options.plcs_use_lxc:
371 print "export PLCHOSTLXC=%s"%fqdn
373 print "export PLCHOSTVS=%s"%fqdn
374 print "export GUESTNAME=%s"%self.plc_spec['vservername']
375 vplcname=self.plc_spec['vservername'].split('-')[-1]
376 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
377 # find hostname of first node
378 (hostname,qemubox) = self.all_node_infos()[0]
379 print "export KVMHOST=%s.%s"%(qemubox,domain)
380 print "export NODE=%s"%(hostname)
384 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
def display_pass (self,passno):
    # Display the plc spec; 'passno' selects which keys get printed on which pass.
    for (key,val) in self.plc_spec.iteritems():
        if not self.options.verbose and key not in TestPlc.always_display_keys: continue
        # NOTE(review): the pass/branch headers (e.g. "if key=='sites':") are
        # on lines missing from this excerpt
        self.display_site_spec(site)
        for node in site['nodes']:
            self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
        # NOTE(review): 'slices' and 'keys' branch headers missing from this excerpt
        self.display_slice_spec (slice)
        self.display_key_spec (key)
        # other keys get a single summary line
        if key not in ['sites','initscripts','slices','keys', 'sfa']:
            print '+ ',key,':',val
def display_site_spec (self,site):
    # Dump one site spec, key by key.
    print '+ ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
        if not self.options.verbose and k not in TestPlc.always_display_keys: continue
        # NOTE(review): the "if k=='nodes':" branch header is missing from this excerpt
        print '+ ','nodes : ',
        print node['node_fields']['hostname'],'',
        # NOTE(review): the 'users' branch lines are partially missing from this excerpt
        print user['name'],'',
        elif k == 'site_fields':
            print '+ login_base',':',v['login_base']
        elif k == 'address_fields':
            # NOTE(review): this branch's body is on lines missing from this excerpt
431 def display_initscript_spec (self,initscript):
432 print '+ ======== initscript',initscript['initscript_fields']['name']
434 def display_key_spec (self,key):
435 print '+ ======== key',key['name']
def display_slice_spec (self,slice):
    # Dump one slice spec, key by key.
    print '+ ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        # NOTE(review): several branches of this dispatch are on lines missing
        # from this excerpt
        elif k=='slice_fields':
            print '+ fields',':',
            print 'max_nodes=',v['max_nodes'],
459 def display_node_spec (self,node):
460 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
461 print "hostname=",node['node_fields']['hostname'],
462 print "ip=",node['interface_fields']['ip']
463 if self.options.verbose:
464 utils.pprint("node details",node,depth=3)
466 # another entry point for just showing the boxes involved
def display_mapping (self):
    # Entry point for showing the boxes involved in this plc.
    TestPlc.display_mapping_plc(self.plc_spec)
    # NOTE(review): 'return True' presumably on a missing line
472 def display_mapping_plc (plc_spec):
473 print '+ MyPLC',plc_spec['name']
474 # WARNING this would not be right for lxc-based PLC's - should be harmless though
475 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
476 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
477 for site_spec in plc_spec['sites']:
478 for node_spec in site_spec['nodes']:
479 TestPlc.display_mapping_node(node_spec)
482 def display_mapping_node (node_spec):
483 print '+ NODE %s'%(node_spec['name'])
484 print '+\tqemu box %s'%node_spec['host_box']
485 print '+\thostname=%s'%node_spec['node_fields']['hostname']
487 # write a timestamp in /vservers/<>.timestamp
488 # cannot be inside the vserver, that causes vserver .. build to cough
def timestamp_vs (self):
    # Record the current time in the VM's host-side timestamp file.
    # NOTE(review): 'now=int(time.time())' presumably on a line missing from
    # this excerpt — 'now' is used in the final command below
    # TODO-lxc check this one
    # a first approx. is to store the timestamp close to the VM root like vs does
    stamp_path=self.vm_timestamp_path ()
    stamp_dir = os.path.dirname (stamp_path)
    utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
    return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
498 # this is called inconditionnally at the beginning of the test sequence
499 # just in case this is a rerun, so if the vm is not running it's fine
501 "vserver delete the test myplc"
502 stamp_path=self.vm_timestamp_path()
503 self.run_in_host("rm -f %s"%stamp_path)
504 if self.options.plcs_use_lxc:
505 self.run_in_host("lxc-stop --name %s"%self.vservername)
506 self.run_in_host("lxc-destroy --name %s"%self.vservername)
509 self.run_in_host("vserver --silent %s delete"%self.vservername)
513 # historically the build was being fetched by the tests
514 # now the build pushes itself as a subdir of the tests workdir
515 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
def vs_create (self):
    "vserver creation (no install done)"
    # push the local build/ dir to the testplc box
    # NOTE(review): a local-vs-remote branch appears to be on lines missing
    # from this excerpt
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # NOTE(review): the remote-side branch lines are missing from this excerpt
    # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
    self.test_ssh.rmdir(build_dir)
    self.test_ssh.copy(build_dir,recursive=True)
    # the repo url is taken from arch-rpms-url
    # with the last step (i386) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pass the vbuild-nightly options to vtest-init-vserver
    # NOTE(review): 'test_env_options=""' initialization missing from this excerpt
    test_env_options += " -p %s"%self.options.personality
    test_env_options += " -d %s"%self.options.pldistro
    test_env_options += " -f %s"%self.options.fcdistro
    if self.options.plcs_use_lxc:
        script="vtest-init-lxc.sh"
    # NOTE(review): 'else:' missing from this excerpt
        script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    # NOTE(review): a 'try:' presumably wraps the reverse lookup below
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    # NOTE(review): 'except:' and a 'return False' presumably surround the
    # two fatal-error prints below
    print "Cannot reverse lookup %s"%self.vserverip
    print "This is considered fatal, as this might pollute the test results"
    # assemble and run the init script on the host box
    create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def plc_install(self):
    "yum install myplc, noderepo, and the plain bootstrapfs"
    # NOTE(review): 'pkgs_list=[]' initialization missing from this excerpt
    # workaround for getting pgsql8.2 on centos5
    if self.options.fcdistro == "centos5":
        self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
    # map personality onto an rpm arch for the nodefamily string
    if self.options.personality == "linux32":
        # NOTE(review): arch assignment (presumably arch="i386") on a missing line
    elif self.options.personality == "linux64":
        # NOTE(review): arch assignment (presumably arch="x86_64") on a missing line
    # NOTE(review): 'else:' missing from this excerpt
        raise Exception, "Unsupported personality %r"%self.options.personality
    nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
    pkgs_list.append ("slicerepo-%s"%nodefamily)
    pkgs_list.append ("myplc")
    pkgs_list.append ("noderepo-%s"%nodefamily)
    pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
    # NOTE(review): pkgs_string is computed but the install call below passes
    # the list — looks unused here; confirm against the full file
    pkgs_string=" ".join(pkgs_list)
    return self.yum_install (pkgs_list)
def plc_configure(self):
    # Feed the spec's config variables to plc-config-tty inside the guest.
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    # NOTE(review): many variable names in this list are on lines missing from
    # this excerpt, including the closing ']:' of the for statement
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
                 # Above line was added for integrating SFA Testing
                 'PLC_RESERVATION_GRANULARITY',
                 'PLC_OMF_XMPP_SERVER',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    fileconf.write('w\n')
    fileconf.write('q\n')
    # NOTE(review): 'fileconf.close()' presumably on a missing line
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)
    # NOTE(review): 'return True' presumably on a missing line
616 self.run_in_guest('service plc start')
621 self.run_in_guest('service plc stop')
625 "start the PLC vserver"
630 "stop the PLC vserver"
634 # stores the keys from the config for further use
def keys_store(self):
    "stores test users ssh keys in keys/"
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
    # NOTE(review): 'return True' presumably on a missing line
def keys_clean(self):
    "removes keys cached in keys/"
    utils.system("rm -rf ./keys")
    # NOTE(review): 'return True' presumably on a missing line
646 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
647 # for later direct access to the nodes
def keys_fetch(self):
    "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
    # NOTE(review): the local 'dir' and 'overall=True' initializations are on
    # lines missing from this excerpt
    if not os.path.isdir(dir):
        # NOTE(review): 'os.mkdir(dir)' presumably on a missing line
    vservername=self.vservername
    vm_root=self.vm_root_in_host()
    # fetch the debug ssh key, both public and private parts
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
    # NOTE(review): 'return overall' presumably on a missing line
664 "create sites with PLCAPI"
665 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # thin wrapper over do_sites in delete mode
    return self.do_sites(action="delete")
def do_sites (self,action="add"):
    # Create or delete all sites (and their users) listed in the spec.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
        # NOTE(review): the 'else:' line is missing from this excerpt
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
    # NOTE(review): 'return True' presumably on a missing line
def delete_all_sites (self):
    "Delete all sites in PLC, and related objects"
    print 'auth_root',self.auth_root()
    # one API round-trip to list, then one DeleteSite per site
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
    # NOTE(review): 'return True' presumably on a missing line
696 "create nodes with PLCAPI"
697 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # thin wrapper over do_nodes in delete mode
    return self.do_nodes(action="delete")
def do_nodes (self,action="add"):
    # Create or delete all nodes listed in the spec.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        # NOTE(review): the 'if action != "add":' line is missing from this excerpt
        utils.header("Deleting nodes in site %s"%test_site.name())
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            utils.header("Deleting %s"%test_node.name())
            test_node.delete_node()
        # NOTE(review): the 'else:' line is missing from this excerpt
        utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
        for node_spec in site_spec['nodes']:
            utils.pprint('Creating node %s'%node_spec,node_spec)
            test_node = TestNode (self,test_site,node_spec)
            test_node.create_node ()
    # NOTE(review): 'return True' presumably on a missing line
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # thin wrapper over do_nodegroups in add mode
    return self.do_nodegroups("add")
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # thin wrapper over do_nodegroups in delete mode
    return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    # Small values are relative offsets expressed in grains; large values
    # (a year's worth of seconds or more) are absolute epoch timestamps.
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    # Human-readable UTC rendering of an epoch timestamp.
    formatted = time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
    return formatted
737 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
739 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
740 print 'API answered grain=',grain
741 start=(now/grain)*grain
743 # find out all nodes that are reservable
744 nodes=self.all_reservable_nodenames()
746 utils.header ("No reservable node found - proceeding without leases")
749 # attach them to the leases as specified in plc_specs
750 # this is where the 'leases' field gets interpreted as relative of absolute
751 for lease_spec in self.plc_spec['leases']:
752 # skip the ones that come with a null slice id
753 if not lease_spec['slice']: continue
754 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
755 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
756 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
757 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
758 if lease_addition['errors']:
759 utils.header("Cannot create leases, %s"%lease_addition['errors'])
762 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
763 (nodes,lease_spec['slice'],
764 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
765 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
def delete_leases (self):
    "remove all leases in the myplc side"
    lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
    utils.header("Cleaning leases %r"%lease_ids)
    self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
    # NOTE(review): 'return True' presumably on a missing line
def list_leases (self):
    "list all leases known to the myplc"
    leases = self.apiserver.GetLeases(self.auth_root())
    # NOTE(review): the 'now=...' assignment and the 'for l in leases:' loop
    # header are on lines missing from this excerpt
        current=l['t_until']>=now
        # show only active leases unless verbose
        if self.options.verbose or current:
            utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                   TestPlc.timestamp_printable(l['t_from']),
                                                   TestPlc.timestamp_printable(l['t_until'])))
    # NOTE(review): 'return True' presumably on a missing line
788 # create nodegroups if needed, and populate
def do_nodegroups (self, action="add"):
    # Create (or delete) nodegroups, implemented as node tags, for every
    # 'nodegroups' entry found on the nodes in the spec.
    # 1st pass to scan contents
    # NOTE(review): the groups_dict initialization is on a line missing from
    # this excerpt
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # accept either a single name or a list of names
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    # NOTE(review): an 'overall=True' initialization is presumably on a missing line
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        # NOTE(review): the 'if action == "add":' branch header is missing from
        # this excerpt
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
        # NOTE(review): the 'if tag_types:' / 'else:' headers are missing from
        # this excerpt — the two assignments below are the two branches
        tag_type_id = tag_types[0]['tag_type_id']
        tag_type_id = self.apiserver.AddTagType(auth,
                                                {'tagname':nodegroupname,
                                                 'description': 'for nodegroup %s'%nodegroupname,
        # NOTE(review): the remaining AddTagType arguments and closing braces
        # are on lines missing from this excerpt
        print 'located tag (type)',nodegroupname,'as',tag_type_id
        # make sure the nodegroup itself exists
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
        # NOTE(review): an 'if not nodegroups:' guard is presumably on a missing line
        self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
        print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
            # NOTE(review): a 'try:' presumably wraps the AddNodeTag call below
            self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
            # NOTE(review): the matching 'except:' line is missing from this excerpt
            traceback.print_exc()
            print 'node',nodename,'seems to already have tag',nodegroupname
            # check anyway — NOTE(review): another 'try:' presumably here
            expect_yes = self.apiserver.GetNodeTags(auth,
                                                    {'hostname':nodename,
                                                     'tagname':nodegroupname},
                                                    ['value'])[0]['value']
            if expect_yes != "yes":
                print 'Mismatch node tag on node',nodename,'got',expect_yes
                # NOTE(review): 'overall=False' presumably on a missing line
            # NOTE(review): the matching 'except:' branch is partially missing
            if not self.options.dry_run:
                print 'Cannot find tag',nodegroupname,'on node',nodename
        # NOTE(review): the delete-action branch header and its 'try:' are
        # missing from this excerpt
        print 'cleaning nodegroup',nodegroupname
        self.apiserver.DeleteNodeGroup(auth,nodegroupname)
        # NOTE(review): 'except:' missing from this excerpt
        traceback.print_exc()
        # NOTE(review): 'overall=False' and the final 'return overall' are
        # presumably on missing lines
# a list of TestNode objs
def all_nodes (self):
    # Build one TestNode per node spec across all sites.
    # NOTE(review): 'nodes=[]' initialization missing from this excerpt
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            nodes.append(TestNode (self,test_site,node_spec))
    # NOTE(review): 'return nodes' missing from this excerpt
862 # return a list of tuples (nodename,qemuname)
def all_node_infos (self) :
    # Collect (hostname,host_box) pairs for every node in the spec.
    # NOTE(review): 'node_infos = []' initialization missing from this excerpt
    for site_spec in self.plc_spec['sites']:
        node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                        for node_spec in site_spec['nodes'] ]
    # NOTE(review): 'return node_infos' missing from this excerpt
def all_nodenames (self):
    # Hostnames only, dropping the qemu-box half of each pair.
    return [ hostname for (hostname,qemubox) in self.all_node_infos() ]
def all_reservable_nodenames (self):
    # Hostnames of the nodes whose node_type is 'reservable'.
    # NOTE(review): 'res=[]' initialization missing from this excerpt
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            node_fields=node_spec['node_fields']
            if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                res.append(node_fields['hostname'])
    # NOTE(review): 'return res' missing from this excerpt
# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    # Poll the API until every node reaches target_boot_state, or timeout.
    if self.options.dry_run:
        # NOTE(review): the dry-run early return is on lines missing from this excerpt
    # compute timeout
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
    # NOTE(review): the polling loop header (presumably 'while tocheck:') is
    # on a line missing from this excerpt
        # fetch fresh boot states in one API call
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        # update status
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
            # NOTE(review): 'else:' missing from this excerpt
                # if it's a real node, never mind
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                    # real nodes are treated as if they had reached the target state
                    boot_state = target_boot_state
                elif datetime.datetime.now() > graceout:
                    utils.header ("%s still in '%s' state"%(hostname,boot_state))
                    # push graceout far away so the message shows only once
                    graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        # keep only the nodes that have not reached the target state yet
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        # NOTE(review): the success exit ('if not tocheck: ...') is presumably
        # on missing lines
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
            # NOTE(review): 'return False' presumably on a missing line
        # otherwise, sleep for a while
        # NOTE(review): 'time.sleep(period)' presumably on a missing line
    # only useful in empty plcs
    # NOTE(review): 'return True' presumably on a missing line
921 # otherwise, sleep for a while
923 # only useful in empty plcs
def nodes_booted(self):
    # Wait up to 30 minutes for all nodes to boot, silent for the first 28.
    target_state = 'boot'
    return self.nodes_check_boot_state(target_state, timeout_minutes=30, silent_minutes=28)
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
    # Repeatedly try to ssh into every node, with the debug key (debug mode)
    # or the regular user key (boot mode), until all succeed or timeout.
    # compute timeout
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
    # NOTE(review): the 'if debug:' line and the 'message' assignments are on
    # lines missing from this excerpt — the two key paths below are the branches
        local_key = "keys/%(vservername)s-debug.rsa"%locals()
        local_key = "keys/key1.rsa"
    node_infos = self.all_node_infos()
    utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
    for (nodename,qemuname) in node_infos:
        utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
    # NOTE(review): the retry loop header (presumably 'while node_infos:') is
    # on a line missing from this excerpt
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
            # NOTE(review): the success test (presumably 'if success==0:') is
            # on a missing line
                utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                # this one is done - don't retry it
                node_infos.remove(node_info)
            # NOTE(review): 'else:' missing from this excerpt
                # we will have tried real nodes once, in case they're up - but if not, just skip
                (site_spec,node_spec)=self.locate_hostname(hostname)
                if TestNode.is_real_model(node_spec['node_fields']['model']):
                    utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                    node_infos.remove(node_info)
        # NOTE(review): the success exit ('if not node_infos: ...') is
        # presumably on missing lines
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
            # NOTE(review): 'return False' presumably on a missing line
        # otherwise, sleep for a while
        # NOTE(review): 'time.sleep(period)' presumably on a missing line
    # only useful in empty plcs
    # NOTE(review): 'return True' presumably on a missing line
969 # otherwise, sleep for a while
971 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # short timeout: debug mode is expected to come up quickly
    return self.check_nodes_ssh(timeout_minutes=10, silent_minutes=8, debug=True)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # longer timeout: a full boot (including install) can take a while
    return self.check_nodes_ssh(timeout_minutes=40, silent_minutes=38, debug=False)
def qemu_local_init (self):
    "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
    # NOTE(review): the body (presumably a node_mapper-style dispatch) is on
    # lines missing from this excerpt
988 "all nodes: invoke GetBootMedium and store result locally"
991 def qemu_local_config (self):
992 "all nodes: compute qemu config qemu.conf and store it locally"
995 def nodestate_reinstall (self):
996 "all nodes: mark PLCAPI boot_state as reinstall"
999 def nodestate_safeboot (self):
1000 "all nodes: mark PLCAPI boot_state as safeboot"
1003 def nodestate_boot (self):
1004 "all nodes: mark PLCAPI boot_state as boot"
1007 def nodestate_show (self):
1008 "all nodes: show PLCAPI boot_state"
1011 def qemu_export (self):
1012 "all nodes: push local node-dep directory on the qemu box"
1015 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # Run the node-side hook scripts on the first node.
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # Run the slice-side hook scripts on the first sliver.
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # both contexts must pass; short-circuits on the node check
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
def do_check_initscripts(self):
    # worker: for every spec'ed slice carrying an initscript stamp, verify the
    # stamp on each of its nodes via a TestSliver
    # NOTE(review): the line initializing the overall result flag is elided
    for slice_spec in self.plc_spec['slices']:
        # slices without a stamp are simply skipped
        if not slice_spec.has_key('initscriptstamp'):
        # NOTE(review): the 'continue' under this guard is elided in this chunk
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
            # NOTE(review): failure handling and the final return are elided
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper around the real scanning worker
    return self.do_check_initscripts()
def initscripts (self):
    "create initscripts with PLCAPI"
    # push every initscript described in the spec into the PLCAPI
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    # NOTE(review): trailing line(s) elided in this chunk -- presumably 'return True'
def delete_initscripts (self):
    "delete initscripts with PLCAPI"
    # best-effort removal: an already-absent initscript is not treated as an error
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        # NOTE(review): the 'try:' line is elided in this chunk
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        # NOTE(review): the 'except:' line is elided in this chunk
        print 'deletion went wrong - probably did not exist'
# NOTE(review): headless fragment -- the 'def slices (self):' line is elided
    "create slices with PLCAPI"
    return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same traversal as slice creation, just with the delete action
    return self.do_slices(action="delete")
def do_slices (self, action="add"):
    # worker shared by slices() and delete_slices(): walk the spec'ed slices
    # and create or delete each one depending on 'action'
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        # NOTE(review): the branch on 'action' is elided in this chunk
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        # (elided: the 'else:' arm selector)
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
    # NOTE(review): trailing line(s) elided -- presumably 'return True'
# NOTE(review): the bodies of the steps below are elided in this chunk
# (presumably slice_mapper / node_mapper style steps)
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def timestamp_qemu (self) :
    # NOTE(review): the original docstring was copy-pasted from qemu_start;
    # reworded to match the method name -- TODO confirm against TestNode
    "all nodes: timestamp the qemu instance"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
    # NOTE(review): lines elided here -- presumably the loop over specs and the
    # extraction of 'port' from each spec
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # (elided: failure handling)
    # idem for the client side
    # NOTE(review): suspected copy-paste bug -- the client sliver is located with
    # spec['server_node']/spec['server_slice']; expected spec['client_node']/
    # spec['client_slice']. Verify against the tcp_test spec format.
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    # (elided: failure handling and final return)
# netflow can be slow to appear, so allow for some settling time downstream
def check_netflow (self):
    "all nodes: check that the netflow slice is alive"
    # netflow is just one of the system slices; reuse the generic checker
    return self.check_systemslice('netflow')
# we have the slices up already here, so it should not take too long
def check_systemslice (self, slicename, timeout_minutes=5, period=15):
    # poll every node until the given system slice shows up, or until timeout
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    test_nodes=self.all_nodes()
    # NOTE(review): the enclosing polling loop line is elided in this chunk
    for test_node in test_nodes:
        if test_node.check_systemslice (slicename):
            # this node is done -- stop re-checking it
            # NOTE(review): removing from the list being iterated skips the next
            # element; should iterate over a copy -- flagged, not fixed here
            test_nodes.remove(test_node)
    # (elided: success test when the list is empty)
    if datetime.datetime.now () > timeout:
        for test_node in test_nodes:
            utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
        # (elided: return False, and the sleep(period) between polls)
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' is elided in this chunk
    command += " -- --check"
    # small deployments get the lightweight variant
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
# in particular it runs with --preserve (don't clean up) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # one-shot install of the whole SFA package family
    return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")
def sfa_install_core(self):
    # NOTE(review): the docstring line is elided in this chunk; by symmetry with
    # the sibling steps it would read "yum install sfa"
    return self.yum_install ("sfa")
def sfa_install_plc(self):
    "yum install sfa-plc"
    # just the plc-side SFA package
    return self.yum_install("sfa-plc")
def sfa_install_client(self):
    "yum install sfa-client"
    # just the client-side SFA package
    return self.yum_install("sfa-client")
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # just the sfatables package
    return self.yum_install("sfa-sfatables")
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # Try the successive generations of the nuke command until one succeeds;
    # run_in_guest returns a shell exit status, so '== 0' means success.
    # FIX: the second alternative was missing '== 0', so a *failing*
    # sfa-nuke-plc.py (non-zero exit, hence truthy) would wrongly short-circuit
    # the chain and prevent the sfaadmin fallback from running.
    self.run_in_guest("sfa-nuke.py") == 0 or \
        self.run_in_guest("sfa-nuke-plc.py") == 0 or \
        self.run_in_guest("sfaadmin.py registry nuke") == 0
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # (elided: blank/comment lines)
    sfa_spec=self.plc_spec['sfa']
    # remove, per sfa slice spec, the PLC-side slice and user it created;
    # the bare 'except' treats absence as the normal case (see the messages)
    for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
        slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
        try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
        except: print "Slice %s already absent from PLC db"%slicename
        username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(),username)
        except: print "User %s already absent from PLC db"%username
    # (elided lines around this reminder; presumably 'return True' follows)
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # remove the packages, then wipe state, config and logs
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # NOTE(review): intervening line elided; presumably this second erase with
    # --noscripts is a fallback in case the plain 'rpm -e' failed -- TODO confirm
    self.run_in_guest("rpm -e --noscripts sfa-plc")
1222 ### run unit tests for SFA
1223 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1224 # Running Transaction
1225 # Transaction couldn't start:
1226 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1227 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1228 # no matter how many Gbs are available on the testplc
1229 # could not figure out what's wrong, so...
1230 # if the yum install phase fails, consider the test is successful
1231 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # NOTE(review): elided line here -- presumably 'return True' (the long
        # comment above this method explains why install failure counts as success)
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): headless fragment -- the 'def confdir(self):' line is elided;
# this builds (and lazily creates) the per-plc local config directory
    dirname="conf.%s"%self.plc_spec['name']
    if not os.path.isdir(dirname):
        utils.system("mkdir -p %s"%dirname)
    if not os.path.isdir(dirname):
        # NOTE(review): raising a plain string is a TypeError on python >= 2.6;
        # should raise an Exception instance -- flagged, not fixed here
        raise "Cannot create config dir for plc %s"%self.name()
    # (elided: presumably 'return dirname')
def conffile(self,filename):
    # path of <filename> inside this plc's private config directory
    return "{0}/{1}".format(self.confdir(), filename)
def confsubdir(self,dirname,clean,dry_run=False):
    # create (and optionally wipe first) a subdir of the per-plc config dir
    subdirname="%s/%s"%(self.confdir(),dirname)
    # NOTE(review): the 'if clean:' guard line is elided in this chunk
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # NOTE(review): string raise -- TypeError on python >= 2.6 (see confdir)
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
    # (elided: presumably 'return subdirname')
def conffile_clean (self,filename):
    # remove one file from the per-plc config dir; True when rm succeeded
    path = self.conffile(filename)
    return utils.system("rm -rf %s" % path) == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # build an sfa-config-tty input script locally, then pipe it into the guest
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    # 'e VAR\nVALUE\n' is the config-tty edit command for each variable
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
# NOTE(review): the tail of this list and its closing bracket are elided in this chunk
        if self.plc_spec['sfa'].has_key(var):
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    # NOTE(review): the loop header over boolean variables is elided here
    if self.plc_spec['sfa'][var]: val='true'
    fileconf.write ('e %s\n%s\n'%(var,val))
    # w = write, R = restart-ish, q = quit (config-tty commands)
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # (elided: presumably fileconf.close())
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
    # (elided: presumably 'return True')
def aggregate_xml_line(self):
    # one <aggregate/> element advertising this plc to its peers
    sfa_spec = self.plc_spec['sfa']
    neighbour_port = sfa_spec['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, sfa_spec['SFA_REGISTRY_ROOT_AUTH'], neighbour_port)
def registry_xml_line(self):
    # one <registry/> element; the port is hard-wired to 12345
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # NOTE(review): lines elided here -- presumably an early return when empty
    agg_fname=self.conffile("agg.xml")
    # NOTE(review): py2 'file()' builtin; handle is never closed explicitly
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # push both files into the guest's /etc/sfa; both copies must succeed
    return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
       and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
def sfa_import(self):
    "import the PLC content into the SFA registry"
    # The import command was renamed across SFA releases; try the successive
    # spellings in turn until one exits 0.
    # Cleanup: removed the unused local 'auth' (only referenced by a dead,
    # commented-out command already marked 'not needed anymore').
    return self.run_in_guest('sfa-import.py')==0 or \
        self.run_in_guest('sfa-import-plc.py')==0 or \
        self.run_in_guest('sfaadmin.py registry import_registry')==0
def sfa_start(self):
    "service sfa start"
    # NOTE(review): the original docstring line is elided in this chunk; the one
    # above simply mirrors the command being run
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # (elided: presumably 'return True')
    sfa_spec=self.plc_spec['sfa']
    # cannot use sfa_slice_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        # write the sfi config files locally under conf.<plc>/dot-sfi/<slice>
        dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_config(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
    # (elided: presumably 'return True')
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # wipe the whole sfi client area inside the guest
    self.run_in_guest("rm -rf /root/sfi")
# NOTE(review): the bodies (and some docstrings) of the sfi-driven steps below
# are elided in this chunk
def sfa_add_user(self):
    # (docstring and body elided)
def sfa_update_user(self):
    # (docstring and body elided)
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
def sfa_discover(self):
    "discover resources into resouces_in.rspec"
    # NOTE(review): 'resouces_in' (sic) -- typo kept; it may match an actual filename
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
# NOTE(review): headless docstring -- its def line is elided
"various registry-related calls"
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
    # (docstring and body elided)
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# NOTE(review): headless statement -- belongs to an elided 'sfa_stop'-style
# step; the '==0' result is computed and discarded
self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' is elided in this chunk
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # NOTE(review): 'remote' is reused here, clobbering the path computed above
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # each stage delegates to a dedicated gather_* helper below
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
    # (elided: presumably 'return True')
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log as a tar stream and unpack it locally
    for sliver in self.all_sliver_objs():
        tar_command = sliver.tar_var_logs()
        target = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % target)
        utils.system(tar_command + " | tar -C %s -xf -" % target)
def gather_var_logs (self):
    # mirror the guest's /var/log under logs/myplc.var-log.<name>
    dest = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % dest)
    # open up read permissions on the httpd logs subdir
    utils.system("chmod a+r,a+x %s/httpd" % dest)
def gather_pgsql_logs (self):
    # mirror the guest's postgres log directory under logs/myplc.pgsql-log.<name>
    dest = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % dest)
def gather_nodes_var_logs (self):
    # for every spec'ed node, pull /var/log over ssh (debug key key1) into
    # logs/node.var-log.<node>
    for site_spec in self.plc_spec['sites']:
        site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self, site, node_spec)
            ssh = TestSsh(node.name(), key="keys/key1.rsa")
            dest = "logs/node.var-log.%s" % node.name()
            fetch = ssh.actual_command("tar -C /var/log -cf - .") + "| tar -C %s -xf -" % dest
            utils.system("mkdir -p %s" % dest)
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): the enclosing 'try:' line is elided in this chunk
    name=self.options.dbname
    if not isinstance(name,StringTypes):
        # (elided: presumably raise/fall through to the timestamp-based name)
    t=datetime.datetime.now()
    # (elided: strftime-style formatting of t into 'name')
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" (sic) in the dump *filename* looks like a typo,
    # but plc_db_restore uses the same spelling, so it is kept as-is
    dump = self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
    utils.header('Dumped planetlab5 database in %s' % dump)
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    dump = self.dbfile("planetab5")
    # stop httpd first -- presumably so no client keeps a connection to the DB
    self.run_in_guest('service httpd stop')
    # drop, recreate, then replay the dump
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
def standby_1_through_20(self):
    """convenience function to wait for a specified number of minutes"""
    # NOTE(review): body elided in this chunk; the standby_N stubs below get
    # their real behavior from the standby_generic decorator (see file head),
    # whose application lines are elided between the entries
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass