1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# NOTE(review): this view of the file is elided; bodies below are missing
# statements (dry_run handling, 'overall' init, inner 'def actual', returns).

# Sleep for the given number of minutes - the workhorse of the standby_* steps.
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)

# Step factory: the sleep duration is parsed from the generated method's
# name, e.g. standby_3_through_20 -> 3 minutes.
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)

# Decorator: lifts a TestNode method into a TestPlc step that applies it to
# every node of every site in the spec, and-ing the boolean results.
def node_mapper (method):
    def actual(self,*args, **kwds):
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node, *args, **kwds): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__

# Decorator: same pattern, mapping a TestSlice method over all slices
# declared in the spec (inner 'def actual' is elided from this view).
def slice_mapper (method):
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__

# Decorator: same pattern again for SFA slices (TestSliceSfa methods),
# iterating over the 'sfa_slice_specs' part of the spec.
def slice_sfa_mapper (method):
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
    # Entries of the TestPlc.default_steps class attribute (the list opening
    # is elided from this view).  Each entry names a step method on this
    # class; SEP/SEPSFA are pretty-printing separators, and a '@<n>' suffix
    # presumably restricts a step to plc #<n> - TODO confirm against the
    # step-dispatch code.
    'vs_delete','timestamp_vs','vs_create', SEP,
    'plc_install', 'plc_configure', 'plc_start', SEP,
    'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
    'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
    'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
    'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
    'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
    'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
    'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
    'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
    # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
    # but as the stress test might take a while, we sometimes missed the debug mode..
    'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
    'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
    'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
    # 'check_tcp', 'check_netflow', SEP,
    'force_gather_logs', SEP,
    # (elided here: the end of default_steps and the opening of the
    # other_steps list - steps available on demand, not run by default)
    'export', 'show_boxes', SEP,
    'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
    'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
    'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
    'delete_leases', 'list_leases', SEP,
    'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
    'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
    'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
    'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
    'plc_db_dump' , 'plc_db_restore', SEP,
    'standby_1_through_20',SEP,
def printable_steps (list):
    "render a list of step names as one string, line-breaking at separators"
    rendered = " ".join(list) + " "
    # each separator, with its surrounding spaces, becomes a continuation line
    for separator in (SEP, SEPSFA):
        rendered = rendered.replace(" " + separator + " ", " \\\n")
    return rendered
def valid_step (step):
    "a step name is valid unless it is one of the pretty-printing separators"
    return step not in (SEP, SEPSFA)
131 # turn off the sfa-related steps when build has skipped SFA
132 # this is originally for centos5 as recent SFAs won't build on this platform
# Probe the build's rpm repository for sfa packages; when absent, demote all
# sfa-related steps from default_steps to other_steps so they are skipped.
def check_whether_build_has_sfa (rpms_url):
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # (elided in this view: the early return taken when sfa rpms are found)
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
def __init__ (self,plc_spec,options):
    # plc_spec: dict describing this myplc instance (sites, nodes, slices...)
    # options: command-line options object
    self.plc_spec=plc_spec
    # (elided line: presumably self.options=options - self.options is read below)
    self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    # PLCAPI endpoint inside the guest
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    self.apiserver=TestApiserver(self.url,options.dry_run)

    # (elided: def name (self):) pretty name "<spec name>.<vservername>"
    name=self.plc_spec['name']
    return "%s.%s"%(name,self.vservername)

    # (elided: def header) returns the box hosting this plc's guest
    return self.plc_spec['host_box']

    # (elided: def header) true when the host box is the local machine
    return self.test_ssh.is_local()
163 # define the API methods on this object through xmlrpc
164 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # full ssh command line that, run locally, executes `command` inside the guest
    wrapped = self.host_to_guest(command)
    return self.test_ssh.actual_command(wrapped)

def start_guest (self):
    # start the guest VM from the host box; returns the shell exit status
    start_cmd = self.test_ssh.actual_command(self.start_guest_in_host())
    return utils.system(start_cmd)

def stop_guest (self):
    # stop the guest VM from the host box; returns the shell exit status
    stop_cmd = self.test_ssh.actual_command(self.stop_guest_in_host())
    return utils.system(stop_cmd)

def run_in_guest (self,command):
    # run `command` inside the guest; returns the shell exit status
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)

def run_in_host (self,command):
    # run `command` on the host box, through the build ssh helper
    return self.test_ssh.run_in_buildname(command)
#command gets run in the plc's vm
def host_to_guest(self,command):
    # wrap `command` so that, run on the host box, it executes in the guest
    if self.options.plcs_use_lxc:
        # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
        return "TODO-lxc TestPlc.host_to_guest"
    # vserver flavour ('else:' elided from this view)
    return "vserver %s exec %s"%(self.vservername,command)

def vm_root_in_guest(self):
    # path, on the host box, of the guest's root filesystem
    if self.options.plcs_use_lxc:
        return "TODO TestPlc.vm_root_in_guest"
    return "/vservers/%s"%self.vservername

#start/stop the vserver
def start_guest_in_host(self):
    # shell command to run on the host box to start the guest
    if self.options.plcs_use_lxc:
        # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
        return "TODO-lxc TestPlc.start_guest_in_host"
    return "vserver %s start"%(self.vservername)

def stop_guest_in_host(self):
    # shell command to run on the host box to stop the guest
    if self.options.plcs_use_lxc:
        # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
        return "TODO-lxc TestPlc.stop_guest_in_host"
    return "vserver %s stop"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    # run `local` here and pipe its stdout into `remote` executed in the guest
    remote_part = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local + " | " + remote_part)
# does a yum install in the vs, ignore yum retcod, check with rpm
def yum_install (self, rpms):
    # `rpms` may be a list (an elided line presumably joins it into one
    # space-separated string under this test - confirm) or a ready string
    if isinstance (rpms, list):
    self.run_in_guest("yum -y install %s"%rpms)
    # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
    self.run_in_guest("yum-complete-transaction -y")
    # success is judged with rpm -q, not with yum's return code
    return self.run_in_guest("rpm -q %s"%rpms)==0

def auth_root (self):
    # PLCAPI root auth struct built from the spec (closing brace elided here)
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
# locate_* helpers: scan the spec for a matching sub-dict; the success
# returns are elided from this view; all raise when nothing matches.
def locate_site (self,sitename):
    # matches either the site name or its login_base
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename

def locate_node (self,nodename):
    # presumably returns the (site,node) spec pair - see callers below
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename

def locate_hostname (self,hostname):
    # same as locate_node but keyed on the PLCAPI hostname
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname

def locate_key (self,keyname):
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname

def locate_slice (self, slicename):
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename

def all_sliver_objs (self):
    # build a TestSliver for every (slice,node) pair in the spec
    # (elided: 'result' list init and final return)
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    # resolve the specs, then wrap them in Test* objects down to a TestSliver
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)

def locate_first_node(self):
    # TestNode for the first node of the first slice
    # (the trailing 'return test_node' is elided from this view)
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    "TestSliver for the first node of the first slice in the spec"
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # (elided in this view: 'tuples' init, 'result' init and final return)
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu (non-real) nodes live on a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)

# a step for checking this stuff
def show_boxes (self):
    'print summary of nodes location'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )

# make this a valid step
def qemu_kill_all(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)

# make this a valid step
def qemu_list_all(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBoxQemu(box,self.options.buildname).qemu_list_all()

# kill only the right qemus
def qemu_list_mine(self):
    'list qemu instances for our nodes'
    # (the per-node loop bodies below are elided from this view)
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version

# kill only the right qemus
def qemu_kill_mine(self):
    'kill the qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
#################### display config
    # (elided: def show (self):) dump the localized configuration in 2 passes
    "show test configuration after localization"
    self.display_pass (1)
    self.display_pass (2)

    # (elided: def export (self):)
    "print cut'n paste-able stuff to export env variables to your shell"
    # guess local domain from hostname
    domain=socket.gethostname().split('.',1)[1]
    fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
    print "export BUILD=%s"%self.options.buildname
    print "export PLCHOST=%s"%fqdn
    print "export GUEST=%s"%self.plc_spec['vservername']
    # find hostname of first node
    (hostname,qemubox) = self.all_node_infos()[0]
    print "export KVMHOST=%s.%s"%(qemubox,domain)
    print "export NODE=%s"%(hostname)

# spec keys shown even without --verbose
always_display_keys=['PLC_WWW_HOST','nodes','sites',]
def display_pass (self,passno):
    # walk the spec dict; much of the per-key/per-pass dispatch is elided here
    for (key,val) in self.plc_spec.iteritems():
        if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            # (elided dispatch) sites and their nodes
            self.display_site_spec(site)
            for node in site['nodes']:
                self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
            self.display_slice_spec (slice)
            self.display_key_spec (key)
        if key not in ['sites','initscripts','slices','keys', 'sfa']:
            print '+ ',key,':',val
def display_site_spec (self,site):
    # one-site summary; much of the per-key dispatch is elided from this view
    print '+ ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
        if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            # (elided) nodes: hostnames printed on a single line
            print '+ ','nodes : ',
            print node['node_fields']['hostname'],'',
            # (elided) users: names printed on a single line
            print user['name'],'',
        elif k == 'site_fields':
            print '+ login_base',':',v['login_base']
        elif k == 'address_fields':

def display_initscript_spec (self,initscript):
    print '+ ======== initscript',initscript['initscript_fields']['name']

def display_key_spec (self,key):
    print '+ ======== key',key['name']

def display_slice_spec (self,slice):
    # per-key dispatch partially elided from this view
    print '+ ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '+ fields',':',
            print 'max_nodes=',v['max_nodes'],

def display_node_spec (self,node):
    # one-line node summary, plus the full spec when --verbose
    print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
    if self.options.verbose:
        utils.pprint("node details",node,depth=3)
# another entry point for just showing the boxes involved
def display_mapping (self):
    TestPlc.display_mapping_plc(self.plc_spec)

# presumably a @staticmethod (decorator line elided) - called via the class
def display_mapping_plc (plc_spec):
    print '+ MyPLC',plc_spec['name']
    # WARNING this would not be right for lxc-based PLC's - should be harmless though
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)

# presumably a @staticmethod (decorator line elided)
def display_mapping_node (node_spec):
    print '+ NODE %s'%(node_spec['name'])
    print '+\tqemu box %s'%node_spec['host_box']
    print '+\thostname=%s'%node_spec['node_fields']['hostname']
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
def timestamp_vs (self):
    # (elided: 'now' is set earlier in this method - presumably int(time.time()))
    # TODO-lxc check this one
    # a first approx. is to store the timestamp close to the VM root like vs does
    stamp_path="%s.timestamp"%self.vm_root_in_guest()
    return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

# this is called inconditionnally at the beginning of the test sequence
# just in case this is a rerun, so if the vm is not running it's fine
    # (elided: def vs_delete (self):)
    "vserver delete the test myplc"
    stamp_path="%s.timestamp"%self.vm_root_in_guest()
    self.run_in_host("rm -f %s"%stamp_path)
    if self.options.plcs_use_lxc:
        # TODO-lxc : how to trash a VM altogether and the related timestamp as well
        # might make sense to test that this has been done - unlike for vs
        print "TODO TestPlc.vs_delete"
    # vserver flavour ('else:' elided from this view)
    self.run_in_host("vserver --silent %s delete"%self.vservername)
# historically the build was being fetched by the tests
# now the build pushes itself as a subdir of the tests workdir
# so that the tests do not have to worry about extracting the build (svn, git, or whatever)
def vs_create (self):
    "vserver creation (no install done)"
    # push the local build/ dir to the testplc box
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
    self.test_ssh.rmdir(build_dir)
    self.test_ssh.copy(build_dir,recursive=True)
    # the repo url is taken from arch-rpms-url
    # with the last step (i386) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pass the vbuild-nightly options to vtest-init-vserver
    # (elided: test_env_options initialisation)
    test_env_options += " -p %s"%self.options.personality
    test_env_options += " -d %s"%self.options.pldistro
    test_env_options += " -f %s"%self.options.fcdistro
    if self.options.plcs_use_lxc:
        # TODO-lxc : might need some tweaks
        script="vtest-init-lxc.sh"
    # vserver flavour ('else:' elided from this view)
        script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    # reverse-lookup the guest IP; the surrounding try/except is elided here
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    print "Cannot reverse lookup %s"%self.vserverip
    print "This is considered fatal, as this might pollute the test results"
    # assemble and run the init script on the host box
    create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def plc_install(self):
    "yum install myplc, noderepo, and the plain bootstrapfs"
    # workaround for getting pgsql8.2 on centos5
    if self.options.fcdistro == "centos5":
        self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
    # compute the node family from personality+distro
    # (elided: the 'arch' assignments under the two supported branches)
    if self.options.personality == "linux32":
    elif self.options.personality == "linux64":
        raise Exception, "Unsupported personality %r"%self.options.personality
    nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
    # (elided: pkgs_list initialisation)
    pkgs_list.append ("slicerepo-%s"%nodefamily)
    pkgs_list.append ("myplc")
    pkgs_list.append ("noderepo-%s"%nodefamily)
    pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
    pkgs_string=" ".join(pkgs_list)
    # NOTE(review): pkgs_string is computed but yum_install is called with the
    # list - yum_install accepts both, so pkgs_string looks unused; confirm
    return self.yum_install (pkgs_list)
def plc_configure(self):
    # drive plc-config-tty non-interactively: write an edit script into a
    # temp file, pipe it into the guest, then remove the temp file
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    # (most PLC_* variable names in this list are elided from this view)
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
                 # Above line was added for integrating SFA Testing
                 'PLC_RESERVATION_GRANULARITY',
                 'PLC_OMF_XMPP_SERVER',
        # 'e <var>' selects the variable, the next line feeds its value
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    fileconf.write('w\n')
    fileconf.write('q\n')
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)

    # (elided: def plc_start / def plc_stop step wrappers)
    self.run_in_guest('service plc start')
    self.run_in_guest('service plc stop')

    # (elided: def vs_start (self):)
    "start the PLC vserver"
    # (elided: def vs_stop (self):)
    "stop the PLC vserver"
# stores the keys from the config for further use
def keys_store(self):
    "stores test users ssh keys in keys/"
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()

def keys_clean(self):
    "removes keys cached in keys/"
    utils.system("rm -rf ./keys")

# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def keys_fetch(self):
    "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
    # (elided: 'dir' and 'overall' init; presumably creates the local dir
    # when missing and returns overall - confirm)
    if not os.path.isdir(dir):
    vservername=self.vservername
    vm_root=self.vm_root_in_guest()
    # only the debug key pair is fetched here
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
661 "create sites with PLCAPI"
662 return self.do_sites()
664 def delete_sites (self):
665 "delete sites with PLCAPI"
666 return self.do_sites(action="delete")
668 def do_sites (self,action="add"):
669 for site_spec in self.plc_spec['sites']:
670 test_site = TestSite (self,site_spec)
671 if (action != "add"):
672 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
673 test_site.delete_site()
674 # deleted with the site
675 #test_site.delete_users()
678 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
679 test_site.create_site()
680 test_site.create_users()
683 def delete_all_sites (self):
684 "Delete all sites in PLC, and related objects"
685 print 'auth_root',self.auth_root()
686 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
687 for site_id in site_ids:
688 print 'Deleting site_id',site_id
689 self.apiserver.DeleteSite(self.auth_root(),site_id)
693 "create nodes with PLCAPI"
694 return self.do_nodes()
695 def delete_nodes (self):
696 "delete nodes with PLCAPI"
697 return self.do_nodes(action="delete")
699 def do_nodes (self,action="add"):
700 for site_spec in self.plc_spec['sites']:
701 test_site = TestSite (self,site_spec)
703 utils.header("Deleting nodes in site %s"%test_site.name())
704 for node_spec in site_spec['nodes']:
705 test_node=TestNode(self,test_site,node_spec)
706 utils.header("Deleting %s"%test_node.name())
707 test_node.delete_node()
709 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
710 for node_spec in site_spec['nodes']:
711 utils.pprint('Creating node %s'%node_spec,node_spec)
712 test_node = TestNode (self,test_site,node_spec)
713 test_node.create_node ()
716 def nodegroups (self):
717 "create nodegroups with PLCAPI"
718 return self.do_nodegroups("add")
719 def delete_nodegroups (self):
720 "delete nodegroups with PLCAPI"
721 return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    # values below one year are relative (counted in grains from start),
    # anything larger is taken as an absolute timestamp
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    # render an absolute UNIX timestamp as a short human-readable UTC string
    utc_struct = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', utc_struct)
734 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
736 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
737 print 'API answered grain=',grain
738 start=(now/grain)*grain
740 # find out all nodes that are reservable
741 nodes=self.all_reservable_nodenames()
743 utils.header ("No reservable node found - proceeding without leases")
746 # attach them to the leases as specified in plc_specs
747 # this is where the 'leases' field gets interpreted as relative of absolute
748 for lease_spec in self.plc_spec['leases']:
749 # skip the ones that come with a null slice id
750 if not lease_spec['slice']: continue
751 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
752 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
753 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
754 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
755 if lease_addition['errors']:
756 utils.header("Cannot create leases, %s"%lease_addition['errors'])
759 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
760 (nodes,lease_spec['slice'],
761 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
762 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
766 def delete_leases (self):
767 "remove all leases in the myplc side"
768 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
769 utils.header("Cleaning leases %r"%lease_ids)
770 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
773 def list_leases (self):
774 "list all leases known to the myplc"
775 leases = self.apiserver.GetLeases(self.auth_root())
778 current=l['t_until']>=now
779 if self.options.verbose or current:
780 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
781 TestPlc.timestamp_printable(l['t_from']),
782 TestPlc.timestamp_printable(l['t_until'])))
# create nodegroups if needed, and populate
# NOTE(review): large parts of the control flow (if/else, try/except,
# the add-vs-delete dispatch) are elided from this view
def do_nodegroups (self, action="add"):
    # 1st pass to scan contents
    # (elided: groups_dict initialisation)
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # a bare string is accepted as well as a list of names
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    # 2nd pass: create/populate (or delete) each nodegroup through PLCAPI
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
        # reuse the existing tag type when found
        tag_type_id = tag_types[0]['tag_type_id']
        # otherwise create it (the 'else:' and call closing are elided)
        tag_type_id = self.apiserver.AddTagType(auth,
                                                {'tagname':nodegroupname,
                                                 'description': 'for nodegroup %s'%nodegroupname,
        print 'located tag (type)',nodegroupname,'as',tag_type_id
        # create the nodegroup itself when missing
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
        self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
        print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
            self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
            # pre-existing tags are tolerated (except branch elided here)
            traceback.print_exc()
            print 'node',nodename,'seems to already have tag',nodegroupname
            # check anyway that the tag value really is 'yes'
            expect_yes = self.apiserver.GetNodeTags(auth,
                                                    {'hostname':nodename,
                                                     'tagname':nodegroupname},
                                                    ['value'])[0]['value']
            if expect_yes != "yes":
                print 'Mismatch node tag on node',nodename,'got',expect_yes
        if not self.options.dry_run:
            print 'Cannot find tag',nodegroupname,'on node',nodename
        # deletion branch (its dispatch is elided from this view)
        print 'cleaning nodegroup',nodegroupname
        self.apiserver.DeleteNodeGroup(auth,nodegroupname)
        traceback.print_exc()
# return a list of tuples (nodename,qemuname)
def all_node_infos (self) :
    # (elided: 'node_infos' init and final return)
    for site_spec in self.plc_spec['sites']:
        node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                        for node_spec in site_spec['nodes'] ]

# convenience: hostnames only
def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

def all_reservable_nodenames (self):
    # hostnames of nodes whose node_type is 'reservable'
    # (elided: 'res' init and final return)
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            node_fields=node_spec['node_fields']
            if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                res.append(node_fields['hostname'])
# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    # poll GetNodes until every node reaches target_boot_state or timeout;
    # the polling loop header and sleep/return lines are elided from this view
    if self.options.dry_run:
        # (elided: dry-run shortcut)
    # absolute deadline and end of the quiet grace period
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
    tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
    # refresh the per-hostname status from the API answer
    for array in tocheck_status:
        hostname=array['hostname']
        boot_state=array['boot_state']
        if boot_state == target_boot_state:
            utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
            # if it's a real node, never mind
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                # fake the target state so the node drops out of tocheck
                boot_state = target_boot_state
            elif datetime.datetime.now() > graceout:
                utils.header ("%s still in '%s' state"%(hostname,boot_state))
                # once reported, stay silent about this node for another day
                graceout=datetime.datetime.now()+datetime.timedelta(1)
        status[hostname] = boot_state
    # keep only the nodes that have not reached the target state yet
    tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
    # otherwise, sleep for a while
# only useful in empty plcs

def nodes_booted(self):
    return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
    # repeatedly try to ssh into every node until all succeed or timeout;
    # debug=True uses the debug ssh key, production mode uses the user key
    # (elided: 'message' assignment and the polling loop header)
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
    # debug branch: the key fetched by keys_fetch
    local_key = "keys/%(vservername)s-debug.rsa"%locals()
    # production branch ('else:' elided from this view)
    local_key = "keys/key1.rsa"
    node_infos = self.all_node_infos()
    utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
    for (nodename,qemuname) in node_infos:
        utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                 (timeout_minutes,silent_minutes,period))
    for node_info in node_infos:
        (hostname,qemuname) = node_info
        # try to run 'hostname' in the node
        command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
        # don't spam logs - show the command only after the grace period
        success = utils.system ( command, silent=datetime.datetime.now() < graceout)
        # success branch (its 'if' header is elided from this view)
        utils.header('Successfully entered root@%s (%s)'%(hostname,message))
        # NOTE(review): removing from node_infos while iterating it skips the
        # next entry - confirm against the elided outer loop structure
        node_infos.remove(node_info)
        # failure branch ('else:' elided from this view)
        # we will have tried real nodes once, in case they're up - but if not, just skip
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
            node_infos.remove(node_info)
    if datetime.datetime.now() > timeout:
        for (hostname,qemuname) in node_infos:
            utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
    # otherwise, sleep for a while
# only useful in empty plcs

def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)

def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
# The step methods below only show a docstring here; they are presumably
# decorated with @node_mapper (decorator lines elided) so the docstring
# doubles as the step description - confirm against the full file.
def qemu_local_init (self):
    "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
    # (elided: def bootcd (self):)
    "all nodes: invoke GetBootMedium and store result locally"
def qemu_local_config (self):
    "all nodes: compute qemu config qemu.conf and store it locally"
def nodestate_reinstall (self):
    "all nodes: mark PLCAPI boot_state as reinstall"
def nodestate_safeboot (self):
    "all nodes: mark PLCAPI boot_state as safeboot"
def nodestate_boot (self):
    "all nodes: mark PLCAPI boot_state as boot"
def nodestate_show (self):
    "all nodes: show PLCAPI boot_state"
def qemu_export (self):
    "all nodes: push local node-dep directory on the qemu box"

### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    return self.locate_first_node().check_hooks()
def check_hooks_sliver (self) :
    return self.locate_first_sliver().check_hooks()

def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    return self.check_hooks_node() and self.check_hooks_sliver()
# Checks each (slice,node) pair carrying an 'initscriptstamp' for the stamp
# left by its initscript.  NOTE(review): the lines that initialize and flip
# the success accumulator and the final return sit in listing gaps
# (1015, 1018, 1028+) - code kept byte-identical.
1014 def do_check_initscripts(self):
1016 for slice_spec in self.plc_spec['slices']:
1017 if not slice_spec.has_key('initscriptstamp'):
1019 stamp=slice_spec['initscriptstamp']
1020 for nodename in slice_spec['nodenames']:
1021 (site,node) = self.locate_node (nodename)
1022 # xxx - passing the wrong site - probably harmless
1023 test_site = TestSite (self,site)
1024 test_slice = TestSlice (self,test_site,slice_spec)
1025 test_node = TestNode (self,test_site,node)
1026 test_sliver = TestSliver (self, test_node, test_slice)
1027 if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper around the actual checker
    outcome = self.do_check_initscripts()
    return outcome
1035 def initscripts (self):
1036 "create initscripts with PLCAPI"
# registers every initscript of the spec through the API; NOTE(review): the
# 'return True' expected by the step convention sits in a listing gap (1040+)
1037 for initscript in self.plc_spec['initscripts']:
1038 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1039 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1042 def delete_initscripts (self):
1043 "delete initscripts with PLCAPI"
# best-effort deletion; NOTE(review): the try:/except: wrapper lines and the
# final return sit in listing gaps (1047, 1050, 1052+) - Python-2 print
# statements throughout, code kept byte-identical
1044 for initscript in self.plc_spec['initscripts']:
1045 initscript_name = initscript['initscript_fields']['name']
1046 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1048 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1049 print initscript_name,'deleted'
1051 print 'deletion went wrong - probably did not exist'
# fragment: the 'def slices (self):' line sits in a listing gap just above
1056 "create slices with PLCAPI"
1057 return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same engine as slices(), driven in deletion mode
    return self.do_slices(action="delete")
1063 def do_slices (self, action="add"):
# creates or deletes every slice of the spec depending on 'action';
# NOTE(review): the branch lines selecting delete vs create (1068, 1071) and
# the final return sit in listing gaps - code kept byte-identical
1064 for slice in self.plc_spec['slices']:
1065 site_spec = self.locate_site (slice['sitename'])
1066 test_site = TestSite(self,site_spec)
1067 test_slice=TestSlice(self,test_site,slice)
1069 utils.header("Deleting slices in site %s"%test_site.name())
1070 test_slice.delete_slice()
1072 utils.pprint("Creating slice",slice)
1073 test_slice.create_slice()
1074 utils.header('Created Slice %s'%slice['slice_fields']['name'])
# slice_mapper-style step; its body/decorator sit in listing gaps
1078 def ssh_slice(self):
1079 "tries to ssh-enter the slice with the user key, to ensure slice creation"
def check_netflow (self):
    "all nodes: check that the netflow slice is alive"
    # netflow is just one particular system slice
    target = 'netflow'
    return self.check_systemslice (target)
1087 def check_systemslice (self, slicename):
# body missing from this listing (gap 1088-1090) - kept byte-identical
1091 def keys_clear_known_hosts (self):
1092 "remove test nodes entries from the local known_hosts file"
# node_mapper-style stubs below: bodies/decorators sit in listing gaps
1096 def qemu_start (self) :
1097 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1101 def timestamp_qemu (self) :
# NOTE(review): this docstring is a verbatim copy of qemu_start's; the method
# name suggests it records the qemu start timestamp instead - body is missing
# from this listing so it cannot be confirmed. TODO confirm and reword.
1102 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1105 def check_tcp (self):
1106 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
# NOTE(review): the loop header over specs, the success accumulator and the
# failure branches sit in listing gaps (1108-1111, 1114-1116, 1120+) - code
# kept byte-identical
1107 specs = self.plc_spec['tcp_test']
1112 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1113 if not s_test_sliver.run_tcp_server(port,timeout=10):
1117 # idem for the client side
# NOTE(review): SUSPECTED BUG - the client sliver is located with the
# *server* spec keys ('server_node'/'server_slice'); per the comment above it
# should presumably use spec['client_node']/spec['client_slice'].  Cannot be
# fixed safely with the surrounding lines missing - confirm against the full
# file and fix.
1118 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1119 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1123 def plcsh_stress_test (self):
1124 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1125 # install the stress-test in the plc image
1126 location = "/usr/share/plc_api/plcsh_stress_test.py"
1128 remote="%s/%s"%(self.vm_root_in_guest(),location)
1129 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
# NOTE(review): 'command' is appended to before any visible assignment - its
# initialization (presumably command=location) sits in a listing gap (1130)
1131 command += " -- --check"
1132 if self.options.size == 1:
1133 command += " --tiny"
1134 return ( self.run_in_guest(command) == 0)
1136 # populate runs the same utility without slightly different options
1137 # in particular runs with --preserve (dont cleanup) and without --check
1138 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # install the whole SFA package set in one shot
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install (packages)
1144 def sfa_install_core(self):
    "yum install sfa"
1146 return self.yum_install ("sfa")
def sfa_install_plc(self):
    "yum install sfa-plc"
    # the PLC driver package only
    package = "sfa-plc"
    return self.yum_install(package)
def sfa_install_client(self):
    "yum install sfa-client"
    # the sfi client-side package only
    package = "sfa-client"
    return self.yum_install(package)
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # the sfatables filtering package only
    package = "sfa-sfatables"
    return self.yum_install (package)
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # try the successive names of the nuke utility across sfa releases and
    # stop at the first one that succeeds (exit status 0).
    # BUG FIX: the second command was tested without '==0', so a *failing*
    # (non-zero) exit status counted as success and masked the third
    # fallback; also the expression's value was discarded - the step
    # convention at the top of this file says steps return a boolean.
    return self.run_in_guest("sfa-nuke.py")==0 or \
        self.run_in_guest("sfa-nuke-plc.py")==0 or \
        self.run_in_guest("sfaadmin.py registry nuke")==0
1167 def sfa_plcclean(self):
1168 "cleans the PLC entries that were created as a side effect of running the script"
# removes the slices and users the SFA tests created in the PLC db;
# some blank/comment lines sit in listing gaps (1169, 1171, 1176, 1180) and
# the final return is not visible - Python-2 print/except syntax throughout
1170 sfa_spec=self.plc_spec['sfa']
1172 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1173 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
# bare except: deliberately best-effort - the entry may legitimately be absent
1174 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1175 except: print "Slice %s already absent from PLC db"%slicename
1177 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1178 try: self.apiserver.DeletePerson(self.auth_root(),username)
1179 except: print "User %s already absent from PLC db"%username
1181 print "REMEMBER TO RUN sfa_import AGAIN"
1184 def sfa_uninstall(self):
1185 "uses rpm to uninstall sfa - ignore result"
# removes packages then wipes state, config and logs; results deliberately
# ignored as the docstring says
1186 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1187 self.run_in_guest("rm -rf /var/lib/sfa")
1188 self.run_in_guest("rm -rf /etc/sfa")
1189 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
# belt-and-braces removal of sfa-plc even if the grouped rpm -e above failed;
# NOTE(review): one line sits in a listing gap (1190)
1191 self.run_in_guest("rpm -e --noscripts sfa-plc")
1194 ### run unit tests for SFA
1195 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1196 # Running Transaction
1197 # Transaction couldn't start:
1198 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1199 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1200 # no matter how many Gbs are available on the testplc
1201 # could not figure out what's wrong, so...
1202 # if the yum install phase fails, consider the test is successful
1203 # other combinations will eventually run it hopefully
1204 def sfa_utest(self):
1205 "yum install sfa-tests and run SFA unittests"
1206 self.run_in_guest("yum -y install sfa-tests")
1207 # failed to install - forget it
1208 if self.run_in_guest("rpm -q sfa-tests")!=0:
1209 utils.header("WARNING: SFA unit tests failed to install, ignoring")
# NOTE(review): the early 'return True' of this tolerated-failure branch sits
# in a listing gap (1210)
1211 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# fragment: the 'def confdir(self):' line sits in a listing gap just above;
# ensures the per-plc local config dir conf.<plcname> exists
1215 dirname="conf.%s"%self.plc_spec['name']
1216 if not os.path.isdir(dirname):
1217 utils.system("mkdir -p %s"%dirname)
1218 if not os.path.isdir(dirname):
# NOTE(review): raising a plain string is invalid - it raises TypeError
# instead of the intended message; should be e.g.
# raise Exception("Cannot create config dir for plc %s"%self.name())
1219 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of a file living under this plc's local config dir
    base = self.confdir()
    return "%s/%s" % (base, filename)
1224 def confsubdir(self,dirname,clean,dry_run=False):
# ensures conf.<plc>/<dirname> exists, optionally wiping it first;
# NOTE(review): the 'if clean:' guard around the rm -rf sits in a listing gap
# (1226), and the presumed 'return subdirname' is not visible either
1225 subdirname="%s/%s"%(self.confdir(),dirname)
1227 utils.system("rm -rf %s"%subdirname)
1228 if not os.path.isdir(subdirname):
1229 utils.system("mkdir -p %s"%subdirname)
1230 if not dry_run and not os.path.isdir(subdirname):
# NOTE(review): raising a plain string is invalid (TypeError) - should raise
# a real Exception instance
1231 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    # remove the given file from the local conf dir; True when rm succeeded
    target = self.conffile(filename)
    command = "rm -rf %s" % target
    return utils.system(command) == 0
1239 def sfa_configure(self):
1240 "run sfa-config-tty"
# builds an sfa-config-tty input script from plc_spec['sfa'] and pipes it
# into the guest; NOTE(review): part of the variable list and the loop over
# boolean variables sit in listing gaps (1248-1257, 1261-1262) - code kept
# byte-identical
1241 tmpname=self.conffile("sfa-config-tty")
1242 fileconf=open(tmpname,'w')
1243 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1244 'SFA_INTERFACE_HRN',
1245 'SFA_REGISTRY_LEVEL1_AUTH',
1246 'SFA_REGISTRY_HOST',
1247 'SFA_AGGREGATE_HOST',
1258 if self.plc_spec['sfa'].has_key(var):
1259 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1260 # the way plc_config handles booleans just sucks..
1263 if self.plc_spec['sfa'][var]: val='true'
1264 fileconf.write ('e %s\n%s\n'%(var,val))
# trailing commands fed to the config tool - presumably w=write, q=quit;
# TODO confirm what 'R' does in sfa-config-tty
1265 fileconf.write('w\n')
1266 fileconf.write('R\n')
1267 fileconf.write('q\n')
1269 utils.system('cat %s'%tmpname)
1270 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element describing this plc, for cross_sfa_configure
    sfa_spec = self.plc_spec['sfa']
    hrn = sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    port = sfa_spec['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % (self.vserverip, hrn, port)
def registry_xml_line(self):
    # one <registry/> element describing this plc, for cross_sfa_configure;
    # the registry port is hard-wired to 12345
    root_auth = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, root_auth)
1283 # a cross step that takes all other plcs in argument
1284 def cross_sfa_configure(self, other_plcs):
1285 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1286 # of course with a single plc, other_plcs is an empty list
1289 agg_fname=self.conffile("agg.xml")
1290 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1291 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1292 utils.header ("(Over)wrote %s"%agg_fname)
1293 reg_fname=self.conffile("reg.xml")
1294 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1295 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1296 utils.header ("(Over)wrote %s"%reg_fname)
1297 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_guest())==0 \
1298 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_guest())==0
1300 def sfa_import(self):
    "run the sfa import utility - tries the names used by successive sfa releases"
# first command that exits 0 wins
1302 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
# NOTE(review): 'auth' is only referenced by the commented-out line below -
# a dead assignment as far as this listing shows
1303 return self.run_in_guest('sfa-import.py')==0 or \
1304 self.run_in_guest('sfa-import-plc.py')==0 or \
1305 self.run_in_guest('sfaadmin.py registry import_registry')==0
1306 # not needed anymore
1307 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1309 def sfa_start(self):
    "service sfa start"
1311 return self.run_in_guest('service sfa start')==0
1313 def sfi_configure(self):
1314 "Create /root/sfi on the plc side for sfi client configuration"
1315 if self.options.dry_run:
1316 utils.header("DRY RUN - skipping step")
# NOTE(review): the early return of this dry-run branch sits in a listing
# gap (1317), as does the step's final return (1332+)
1318 sfa_spec=self.plc_spec['sfa']
1319 # cannot use sfa_slice_mapper to pass dir_name
1320 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1321 site_spec = self.locate_site (slice_spec['sitename'])
1322 test_site = TestSite(self,site_spec)
1323 test_slice=TestSliceSfa(self,test_site,slice_spec)
# generate the sfi config files locally under conf.<plc>/dot-sfi/<slice>/
1324 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1325 test_slice.sfi_config(dir_name)
1326 # push into the remote /root/sfi area
1327 location = test_slice.sfi_path()
1328 remote="%s/%s"%(self.vm_root_in_guest(),location)
1329 self.test_ssh.mkdir(remote,abs=True)
1330 # need to strip last level or remote otherwise we get an extra dir level
1331 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # wipe the sfi client area pushed by sfi_configure
    command = "rm -rf /root/sfi"
    self.run_in_guest(command)
# The SFA steps below are mapper-decorated stubs; their bodies, decorators
# and some docstrings sit in listing gaps - kept byte-identical.
1341 def sfa_add_user(self):
1346 def sfa_update_user(self):
1350 def sfa_add_slice(self):
1351 "run sfi.py add (on Registry) from slice.xml"
1355 def sfa_discover(self):
1356 "discover resources into resouces_in.rspec"
1360 def sfa_create_slice(self):
1361 "run sfi.py create (on SM) - 1st time"
1365 def sfa_check_slice_plc(self):
1366 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1370 def sfa_update_slice(self):
1371 "run sfi.py create (on SM) on existing object"
# fragment: docstring of a registry-related step whose def is in a gap
1376 "various registry-related calls"
1380 def ssh_slice_sfa(self):
1381 "tries to ssh-enter the SFA slice"
1385 def sfa_delete_user(self):
1390 def sfa_delete_slice(self):
1391 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# fragment of the sfa_stop step (its def sits in a gap): stop the sfa service
1396 self.run_in_guest('service sfa stop')==0
1399 def populate (self):
1400 "creates random entries in the PLCAPI"
1401 # install the stress-test in the plc image
1402 location = "/usr/share/plc_api/plcsh_stress_test.py"
1403 remote="%s/%s"%(self.vm_root_in_guest(),location)
1404 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
# NOTE(review): 'command' is appended to before any visible assignment - its
# initialization (presumably command=location) sits in a listing gap (1405)
1406 command += " -- --preserve --short-names"
1407 local = (self.run_in_guest(command) == 0);
1408 # second run with --foreign
1409 command += ' --foreign'
# NOTE(review): 'remote' (the file path built above) is reused here as a
# boolean result - confusing, though harmless as far as this listing shows
1410 remote = (self.run_in_guest(command) == 0);
1411 return ( local and remote)
1413 def gather_logs (self):
1414 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1415 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1416 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1417 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1418 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1419 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
# Python-2 print statements; NOTE(review): the presumed final 'return True'
# sits in a listing gap after 1438
1421 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1422 self.gather_var_logs ()
1424 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1425 self.gather_pgsql_logs ()
1427 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1428 for site_spec in self.plc_spec['sites']:
1429 test_site = TestSite (self,site_spec)
1430 for node_spec in site_spec['nodes']:
1431 test_node=TestNode(self,test_site,node_spec)
1432 test_node.gather_qemu_logs()
1434 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1435 self.gather_nodes_var_logs()
1437 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1438 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/ locally
    for sliver in self.all_sliver_objs():
        tar_source = sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s" % sliver.name())
        utils.system(tar_source + " | tar -C logs/sliver.var-log.%s -xf -" % sliver.name())
def gather_var_logs (self):
    # copy the plc guest's /var/log into logs/myplc.var-log.<plc>/ locally
    utils.system("mkdir -p logs/myplc.var-log.%s" % self.name())
    tar_cmd = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_cmd + "| tar -C logs/myplc.var-log.%s -xf -" % self.name())
    # make the httpd logs readable by non-root users
    utils.system("chmod a+r,a+x logs/myplc.var-log.%s/httpd" % self.name())
def gather_pgsql_logs (self):
    # copy the plc guest's postgres logs into logs/myplc.pgsql-log.<plc>/
    utils.system("mkdir -p logs/myplc.pgsql-log.%s" % self.name())
    tar_cmd = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_cmd + "| tar -C logs/myplc.pgsql-log.%s -xf -" % self.name())
def gather_nodes_var_logs (self):
    # pull /var/log from every node (over ssh with the debug key key1)
    # into logs/node.var-log.<nodename>/ locally
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode (self, site, node_spec)
            node_ssh = TestSsh (node.name(), key="keys/key1.rsa")
            pipeline = node_ssh.actual_command("tar -C /var/log -cf - .")
            pipeline = pipeline + "| tar -C logs/node.var-log.%s -xf -" % node.name()
            utils.system("mkdir -p logs/node.var-log.%s" % node.name())
            utils.system(pipeline)
1475 # returns the filename to use for sql dump/restore, using options.dbname if set
1476 def dbfile (self, database):
1477 # uses options.dbname if it is found
# NOTE(review): the try:/except: wrapper and the date-based fallback
# assignments sit in listing gaps (1478, 1481-1482, 1484-1485) - code kept
# byte-identical
1479 name=self.options.dbname
1480 if not isinstance(name,StringTypes):
1483 t=datetime.datetime.now()
1486 return "/root/%s-%s.sql"%(database,name)
1488 def plc_db_dump(self):
1489 'dump the planetlab5 DB in /root in the PLC - filename has time'
# NOTE(review): "planetab5" (sic) is only the dump *filename* stem, not the
# database name; plc_db_restore uses the same spelling, so fix both together
# or not at all.  The presumed 'return True' sits in a listing gap (1493+).
1490 dump=self.dbfile("planetab5")
1491 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1492 utils.header('Dumped planetlab5 database in %s'%dump)
1495 def plc_db_restore(self):
1496 'restore the planetlab5 DB - looks broken, but run -n might help'
# same "planetab5" (sic) filename stem as plc_db_dump - keep the two in sync
1497 dump=self.dbfile("planetab5")
1498 ##stop httpd service
1499 self.run_in_guest('service httpd stop')
1500 # xxx - need another wrapper
# drop, recreate, then reload the database from the dump file
1501 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1502 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1503 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1504 ##starting httpd service
1505 self.run_in_guest('service httpd start')
1507 utils.header('Database restored from ' + dump)
1509 def standby_1_through_20(self):
1510 """convenience function to wait for a specified number of minutes"""
# Each stub's name encodes the number of minutes to wait (see standby_generic
# near the top of the file, which parses the name); NOTE(review): the
# @standby_generic decorator lines sit in the listing gaps between the defs.
1513 def standby_1(): pass
1515 def standby_2(): pass
1517 def standby_3(): pass
1519 def standby_4(): pass
1521 def standby_5(): pass
1523 def standby_6(): pass
1525 def standby_7(): pass
1527 def standby_8(): pass
1529 def standby_9(): pass
1531 def standby_10(): pass
1533 def standby_11(): pass
1535 def standby_12(): pass
1537 def standby_13(): pass
1539 def standby_14(): pass
1541 def standby_15(): pass
1543 def standby_16(): pass
1545 def standby_17(): pass
1547 def standby_18(): pass
1549 def standby_19(): pass
1551 def standby_20(): pass