1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# wait in place for the given number of minutes (used by the standby_* steps)
# NOTE(review): lines appear to be missing from this view - dry_run is accepted
# but never tested before sleeping, and there is no visible return; confirm
# against the full file
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
# factory that turns a placeholder method named like standby_<N> into an
# actual N-minute wait
# NOTE(review): the nested wrapper's 'def' line is not visible in this view -
# the 'self' below belongs to that missing inner function; confirm in the full file
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
# decorator-style helper: run the same-named TestNode method on every node
# declared in the plc spec, one TestSite/TestNode wrapper pair per node
# NOTE(review): the initialization of 'overall' and the final 'return actual'
# are not visible in this view; confirm in the full file
def node_mapper (method):
    def actual(self,*args, **kwds):
        # look the target method up by name on TestNode
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node, *args, **kwds): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# same pattern as node_mapper, but maps the TestSlice method of the same name
# over every slice declared in the plc spec
# NOTE(review): the nested wrapper's 'def actual(self)' line, the 'overall'
# initialization and the final 'return actual' are not visible in this view
def slice_mapper (method):
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# sfa flavour of slice_mapper: maps a TestSliceSfa method over the sfa slice specs
# NOTE(review): the nested wrapper's 'def actual(self)' line, the 'overall'
# initialization and the final 'return actual' are not visible in this view
def slice_sfa_mapper (method):
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
        # ---- class-level step lists ----
        # NOTE(review): the opening 'default_steps = [' / 'other_steps = ['
        # lines and the closing brackets are not visible in this view; the
        # entries below are kept verbatim. SEP/SEPSFA are display separators.
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_netflow', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'force_gather_logs', SEP,
        # ---- entries below presumably belong to other_steps (manual steps) ----
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20',SEP,
123 def printable_steps (list):
124 single_line=" ".join(list)+" "
125 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
127 def valid_step (step):
128 return step != SEP and step != SEPSFA
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        # probe the build's rpm repository for any sfa- package
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        # NOTE(review): the early return taken when retcod==0 is not visible in
        # this view; as shown, the sfa steps would always be moved
        # move all steps containing 'sfa' from default_steps to other_steps
        sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
        TestPlc.other_steps += sfa_steps
        for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        # plc_spec: the config dict describing this myplc instance
        # options: parsed command-line options (buildname, dry_run, ...)
        self.plc_spec=plc_spec
        # NOTE(review): self.options is read on the next line but its
        # assignment (presumably 'self.options=options') is not visible here
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
        # NOTE(review): the 'def' headers of the small accessors below (the
        # plc name, host_box and is_local one-liners) are not visible in this
        # view; the surviving body lines are kept as-is
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)
        return self.plc_spec['host_box']
        return self.test_ssh.is_local()
162 # define the API methods on this object through xmlrpc
163 # would help, but not strictly necessary
167 def actual_command_in_guest (self,command):
168 return self.test_ssh.actual_command(self.host_to_guest(command))
170 def start_guest (self):
171 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
173 def stop_guest (self):
174 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
176 def run_in_guest (self,command):
177 return utils.system(self.actual_command_in_guest(command))
179 def run_in_host (self,command):
180 return self.test_ssh.run_in_buildname(command)
    #command gets run in the plc's vm
    # NOTE(review): in this method and the three below, an 'else:' line appears
    # to be missing from this view before the final return; behavior reads the
    # same either way since the 'if' branch returns
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.host_to_guest"
        return "vserver %s exec %s"%(self.vservername,command)

    # path of the VM root, as seen from the host box
    def vm_root_in_guest(self):
        if self.options.plcs_use_lxc:
            return "TODO TestPlc.vm_root_in_guest"
        return "/vservers/%s"%self.vservername

    #start/stop the vserver
    def start_guest_in_host(self):
        # the host-side command that boots the guest
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.start_guest_in_host"
        return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        # the host-side command that shuts the guest down
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.stop_guest_in_host"
        return "vserver %s stop"%(self.vservername)
213 def run_in_guest_piped (self,local,remote):
214 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        # rpms: a single package name, or a list of names
        # NOTE(review): the body of this 'if' (presumably joining the list into
        # one space-separated string) is not visible in this view
        if isinstance (rpms, list):
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        # success is judged by 'rpm -q', not by yum's exit code
        return self.run_in_guest("rpm -q %s"%rpms)==0
    def auth_root (self):
        # build the PLCAPI auth struct for the root account, from the plc spec
        # NOTE(review): the closing brace of this dict literal is not visible
        # in this view
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
    # NOTE(review): throughout the locate_* helpers below, the 'return'
    # statements taken on a successful match - and the init/return of 'result'
    # in all_sliver_objs - are not visible in this view; kept as-is
    def locate_site (self,sitename):
        # find a site spec by its name or its login_base
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        # find a node spec (with its site) by the node's spec name
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        # find a node spec (with its site) by the node's PLC hostname
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        # find a key spec by name
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        # find a slice spec by slice name
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename

    def all_sliver_objs (self):
        # build a TestSliver for every (slice,node) pair declared in the spec
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
273 def locate_sliver_obj (self,nodename,slicename):
274 (site,node) = self.locate_node(nodename)
275 slice = self.locate_slice (slicename)
277 test_site = TestSite (self, site)
278 test_node = TestNode (self, test_site,node)
279 # xxx the slice site is assumed to be the node site - mhh - probably harmless
280 test_slice = TestSlice (self, test_site, slice)
281 return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        # the first node of the first slice in the spec, wrapped as a TestNode
        # NOTE(review): the final 'return test_node' is not visible in this view
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
290 def locate_first_sliver (self):
291 slice_spec=self.plc_spec['slices'][0]
292 slicename=slice_spec['slice_fields']['name']
293 nodename=slice_spec['nodenames'][0]
294 return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): the initializations of 'tuples' and 'result', the body
        # of the has_key branch, and the final return are not visible here
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                # real (physical) nodes have no qemu host box to track
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)

    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        # NOTE(review): the 'return True' expected by the step machinery is not
        # visible here (same for the qemu_* steps below)
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()

    # kill only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        # NOTE(review): the per-box listing call is not visible in this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        # NOTE(review): the per-box kill call is not visible in this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
    #################### display config
    # NOTE(review): several structural lines in this section (the 'def' headers
    # of show and export, and a number of for/elif lines in display_pass,
    # display_site_spec and display_slice_spec) are not visible in this view;
    # the surviving fragments are kept verbatim
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)

        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        print "export PLCHOST=%s"%fqdn
        print "export GUEST=%s"%self.plc_spec['vservername']
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)

    # spec keys that get displayed even without --verbose
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        # walk the plc spec and pretty-print its entries; 'passno' selects
        # which kind of entries get shown on this pass
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
                    self.display_site_spec(site)
                    for node in site['nodes']:
                        self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                        self.display_slice_spec (slice)
                        self.display_key_spec (key)
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val

    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
                print '+ ','nodes : ',
                    print node['node_fields']['hostname'],'',
                    print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':

    def display_initscript_spec (self,initscript):
        # one-liner: just the initscript name
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        # one-liner: just the key name
        print '+ ======== key',key['name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],

    def display_node_spec (self,node):
        # compact one-line summary; full dump when --verbose
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)

    # NOTE(review): presumably a @staticmethod - decorator not visible here
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    # NOTE(review): presumably a @staticmethod - decorator not visible here
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        # NOTE(review): the computation of 'now' (presumably the current unix
        # time) is not visible in this view
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

    # this is called inconditionnally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    # NOTE(review): the 'def vs_delete(self):' header, the else: branch and the
    # final return are not visible in this view
        "vserver delete the test myplc"
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            # TODO-lxc : how to trash a VM altogether and the related timestamp as well
            # might make sense to test that this has been done - unlike for vs
            print "TODO TestPlc.vs_delete"
        self.run_in_host("vserver --silent %s delete"%self.vservername)
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    # NOTE(review): a handful of lines are not visible in this view - notably
    # the initialization of test_env_options, the else: before the vserver
    # script choice, and the try/except around the reverse DNS lookup
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            # TODO-lxc : might need some tweaks
            script="vtest-init-lxc.sh"
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # reverse-lookup the guest IP so the vserver gets a proper hostname
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        print "Cannot reverse lookup %s"%self.vserverip
        print "This is considered fatal, as this might pollute the test results"
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"
        # NOTE(review): the arch assignments for the two personalities, the
        # initialization of pkgs_list, and the final else: are not visible here
        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        # NOTE(review): pkgs_string is computed but unused below - yum_install
        # receives the list; looks like leftover code, confirm before removing
        pkgs_string=" ".join(pkgs_list)
        return self.yum_install (pkgs_list)

    # feed the spec's PLC_* variables to plc-config-tty inside the guest
    def plc_configure(self):
        # NOTE(review): most of the PLC_* names in the variable list, plus the
        # docstring and return of this step, are not visible in this view
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        # echo the config locally for the logs, then pipe it into the guest
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)

        # NOTE(review): the 'def' headers of the four start/stop one-liner
        # steps below (plc_start, plc_stop, vs_start, vs_stop) are not visible
        # in this view
        self.run_in_guest('service plc start')
        self.run_in_guest('service plc stop')
        "start the PLC vserver"
        "stop the PLC vserver"
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        # NOTE(review): the 'return True' expected by the step machinery is not
        # visible here (same for keys_clean and keys_fetch below)
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): the assignment of 'dir', the mkdir in the if-branch,
        # and the initialization of 'overall' are not visible in this view
        if not os.path.isdir(dir):
        vservername=self.vservername
        vm_root=self.vm_root_in_guest()
        # the debug key pair that myplc generates for safeboot-mode ssh
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False

        # NOTE(review): the 'def sites(self):' header is not visible here
        "create sites with PLCAPI"
        return self.do_sites()
663 def delete_sites (self):
664 "delete sites with PLCAPI"
665 return self.do_sites(action="delete")
    # shared worker behind the sites/delete_sites steps
    def do_sites (self,action="add"):
        # NOTE(review): the else: before the creation branch and the final
        # return are not visible in this view
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()

    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        # NOTE(review): the final return of this step is not visible here
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)

        # NOTE(review): the 'def nodes(self):' header is not visible here
        "create nodes with PLCAPI"
        return self.do_nodes()
694 def delete_nodes (self):
695 "delete nodes with PLCAPI"
696 return self.do_nodes(action="delete")
    # shared worker behind the nodes/delete_nodes steps
    def do_nodes (self,action="add"):
        # NOTE(review): the branch test on 'action', the matching else:, and
        # the final return are not visible in this view
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
715 def nodegroups (self):
716 "create nodegroups with PLCAPI"
717 return self.do_nodegroups("add")
718 def delete_nodegroups (self):
719 "delete nodegroups with PLCAPI"
720 return self.do_nodegroups("delete")
724 def translate_timestamp (start,grain,timestamp):
725 if timestamp < TestPlc.YEAR: return start+timestamp*grain
726 else: return timestamp
729 def timestamp_printable (timestamp):
730 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
    # NOTE(review): the 'def leases(self):' header, the computation of 'now',
    # and several control-flow lines of this step are not visible in this view
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        # the lease granularity is decided by the myplc side
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        # align the schedule start on a grain boundary
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
            utils.header ("No reservable node found - proceeding without leases")
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative of absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                             (nodes,lease_spec['slice'],
                              lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                              lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))

    def delete_leases (self):
        "remove all leases in the myplc side"
        # NOTE(review): the 'return True' expected by the step machinery is
        # not visible in this view
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)

    def list_leases (self):
        "list all leases known to the myplc"
        # NOTE(review): the computation of 'now' and the loop header binding
        # 'l' over 'leases' are not visible in this view
        leases = self.apiserver.GetLeases(self.auth_root())
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
    # create nodegroups if needed, and populate
    # NOTE(review): many structural lines of this method (the groups_dict
    # initialization, several if/else branches, try/except headers, and the
    # delete-mode branch) are not visible in this view; the surviving
    # statements are kept as-is
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # accept a single name as well as a list of names
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            tag_type_id = tag_types[0]['tag_type_id']
            tag_type_id = self.apiserver.AddTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            # make sure the nodegroup itself exists, backed by that tag
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                traceback.print_exc()
                print 'node',nodename,'seems to already have tag',nodegroupname
                # check anyway that the tag reads back as 'yes'
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['value'])[0]['value']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            traceback.print_exc()
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        # NOTE(review): the initialization of 'node_infos' and the final
        # return are not visible in this view
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
857 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        # hostnames of the nodes whose node_type is 'reservable'
        # NOTE(review): the initialization of 'res' and the final return are
        # not visible in this view
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
    # poll the PLCAPI until every node reaches the target boot state
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    # NOTE(review): the outer polling loop header, the dry-run return, the
    # sleep between rounds and the success/failure returns are not visible in
    # this view
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # record it as if it had reached the target state
                        boot_state = target_boot_state
                elif datetime.datetime.now() > graceout:
                    utils.header ("%s still in '%s' state"%(hostname,boot_state))
                    # push graceout a day ahead so the warning prints only once
                    graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # keep only the nodes that have not reached the target state yet
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
            # otherwise, sleep for a while
        # only useful in empty plcs

    def nodes_booted(self):
        # convenience step: wait up to 30 mn (silent for 20) for state 'boot'
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    # try to ssh into every node until all respond or the timeout expires
    # NOTE(review): the outer retry loop, the debug/boot branch headers, the
    # 'message' assignment, the success test and the sleeps/returns are not
    # visible in this view
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
            # debug mode uses the myplc-generated debug key
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
            # boot mode uses the first key from the config
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
            # NOTE(review): removing entries from node_infos while iterating
            # over it (below) can skip elements - pre-existing issue, left as-is
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    node_infos.remove(node_info)
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
            # otherwise, sleep for a while
        # only useful in empty plcs
958 # only useful in empty plcs
961 def ssh_node_debug(self):
962 "Tries to ssh into nodes in debug mode with the debug ssh key"
963 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
965 def ssh_node_boot(self):
966 "Tries to ssh into nodes in production mode with the root ssh key"
967 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
    # node_mapper-style steps: the bodies that dispatch to TestNode are not
    # visible in this view - only the headers and their step docstrings survive
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        "all nodes: invoke GetBootMedium and store result locally"
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
1002 ### check hooks : invoke scripts from hooks/{node,slice}
1003 def check_hooks_node (self):
1004 return self.locate_first_node().check_hooks()
1005 def check_hooks_sliver (self) :
1006 return self.locate_first_sliver().check_hooks()
1008 def check_hooks (self):
1009 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1010 return self.check_hooks_node() and self.check_hooks_sliver()
    # worker for check_initscripts: verify the initscript stamp on every sliver
    def do_check_initscripts(self):
        # NOTE(review): the initialization of 'overall', the 'continue' taken
        # for slices without a stamp, the failure branch and the final return
        # are not visible in this view
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
1030 def check_initscripts(self):
1031 "check that the initscripts have triggered"
1032 return self.do_check_initscripts()
1034 def initscripts (self):
1035 "create initscripts with PLCAPI"
1036 for initscript in self.plc_spec['initscripts']:
1037 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1038 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1041 def delete_initscripts (self):
1042 "delete initscripts with PLCAPI"
1043 for initscript in self.plc_spec['initscripts']:
1044 initscript_name = initscript['initscript_fields']['name']
1045 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1047 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1048 print initscript_name,'deleted'
1050 print 'deletion went wrong - probably did not exist'
1055 "create slices with PLCAPI"
1056 return self.do_slices()
1058 def delete_slices (self):
1059 "delete slices with PLCAPI"
1060 return self.do_slices("delete")
1062 def do_slices (self, action="add"):
1063 for slice in self.plc_spec['slices']:
1064 site_spec = self.locate_site (slice['sitename'])
1065 test_site = TestSite(self,site_spec)
1066 test_slice=TestSlice(self,test_site,slice)
1068 utils.header("Deleting slices in site %s"%test_site.name())
1069 test_slice.delete_slice()
1071 utils.pprint("Creating slice",slice)
1072 test_slice.create_slice()
1073 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1077 def ssh_slice(self):
1078 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1081 def check_netflow (self):
1082 "all nodes: check that the netflow slice is alive"
1083 return self.check_systemslice ('netflow')
1086 def check_systemslice (self, slicename):
1090 def keys_clear_known_hosts (self):
1091 "remove test nodes entries from the local known_hosts file"
1095 def qemu_start (self) :
1096 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    def timestamp_qemu (self) :
        # NOTE(review): the docstring below is duplicated from qemu_start; the
        # body is not visible in this chunk, but the name suggests this step
        # records a timestamp on the qemu box - confirm and reword.
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
            # server side: locate the sliver and start a tcp server in it
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
            # idem for the client side
            # NOTE(review): this looks up spec['server_node']/spec['server_slice']
            # again - the comment above says 'client side', so this most likely
            # should read spec['client_node'] and spec['client_slice']; confirm
            # against the tcp_test spec before fixing (body partly elided here).
            c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1122 def plcsh_stress_test (self):
1123 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1124 # install the stress-test in the plc image
1125 location = "/usr/share/plc_api/plcsh_stress_test.py"
1127 remote="%s/%s"%(self.vm_root_in_guest(),location)
1128 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1130 command += " -- --check"
1131 if self.options.size == 1:
1132 command += " --tiny"
1133 return ( self.run_in_guest(command) == 0)
    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (don't cleanup) and without --check
1137 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1139 def sfa_install_all (self):
1140 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1141 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")
1147 def sfa_install_plc(self):
1148 "yum install sfa-plc"
1149 return self.yum_install("sfa-plc")
1151 def sfa_install_client(self):
1152 "yum install sfa-client"
1153 return self.yum_install("sfa-client")
1155 def sfa_install_sfatables(self):
1156 "yum install sfa-sfatables"
1157 return self.yum_install ("sfa-sfatables")
1159 def sfa_dbclean(self):
1160 "thoroughly wipes off the SFA database"
1161 self.run_in_guest("sfa-nuke.py")==0 or \
1162 self.run_in_guest("sfa-nuke-plc.py") or \
1163 self.run_in_guest("sfaadmin.py registry nuke")
1166 def sfa_plcclean(self):
1167 "cleans the PLC entries that were created as a side effect of running the script"
1169 sfa_spec=self.plc_spec['sfa']
1171 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1172 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
1173 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1174 except: print "Slice %s already absent from PLC db"%slicename
1176 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1177 try: self.apiserver.DeletePerson(self.auth_root(),username)
1178 except: print "User %s already absent from PLC db"%username
1180 print "REMEMBER TO RUN sfa_import AGAIN"
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        # best-effort cleanup: return codes are deliberately ignored
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # --noscripts retry: presumably to force sfa-plc out when its rpm
        # scriptlets fail - confirm (a line is elided from this chunk above)
        self.run_in_guest("rpm -e --noscripts sfa-plc")
1193 ### run unit tests for SFA
1194 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1195 # Running Transaction
1196 # Transaction couldn't start:
1197 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1198 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1199 # no matter how many Gbs are available on the testplc
1200 # could not figure out what's wrong, so...
1201 # if the yum install phase fails, consider the test is successful
1202 # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            # NOTE(review): an early 'return True' appears to be elided from
            # this chunk right here - without it the tests run anyway
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            # NOTE(review): raising a plain string is not legal - since
            # python 2.6 this raises TypeError rather than carrying the
            # message; should be e.g. raise Exception("Cannot create ...")
            raise "Cannot create config dir for plc %s"%self.name()
1221 def conffile(self,filename):
1222 return "%s/%s"%(self.confdir(),filename)
1223 def confsubdir(self,dirname,clean,dry_run=False):
1224 subdirname="%s/%s"%(self.confdir(),dirname)
1226 utils.system("rm -rf %s"%subdirname)
1227 if not os.path.isdir(subdirname):
1228 utils.system("mkdir -p %s"%subdirname)
1229 if not dry_run and not os.path.isdir(subdirname):
1230 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1233 def conffile_clean (self,filename):
1234 filename=self.conffile(filename)
1235 return utils.system("rm -rf %s"%filename)==0
1238 def sfa_configure(self):
1239 "run sfa-config-tty"
1240 tmpname=self.conffile("sfa-config-tty")
1241 fileconf=open(tmpname,'w')
1242 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1243 'SFA_INTERFACE_HRN',
1244 'SFA_REGISTRY_LEVEL1_AUTH',
1245 'SFA_REGISTRY_HOST',
1246 'SFA_AGGREGATE_HOST',
1257 if self.plc_spec['sfa'].has_key(var):
1258 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1259 # the way plc_config handles booleans just sucks..
1262 if self.plc_spec['sfa'][var]: val='true'
1263 fileconf.write ('e %s\n%s\n'%(var,val))
1264 fileconf.write('w\n')
1265 fileconf.write('R\n')
1266 fileconf.write('q\n')
1268 utils.system('cat %s'%tmpname)
1269 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1272 def aggregate_xml_line(self):
1273 port=self.plc_spec['sfa']['neighbours-port']
1274 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1275 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1277 def registry_xml_line(self):
1278 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1279 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1282 # a cross step that takes all other plcs in argument
1283 def cross_sfa_configure(self, other_plcs):
1284 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1285 # of course with a single plc, other_plcs is an empty list
1288 agg_fname=self.conffile("agg.xml")
1289 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1290 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1291 utils.header ("(Over)wrote %s"%agg_fname)
1292 reg_fname=self.conffile("reg.xml")
1293 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1294 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1295 utils.header ("(Over)wrote %s"%reg_fname)
1296 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_guest())==0 \
1297 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_guest())==0
    def sfa_import(self):
        "seed the SFA registry from the PLC db - tries the import command's successive names"
        # NOTE(review): 'auth' is unused in the visible code - it only served
        # the commented-out key-copying line below
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import.py')==0 or \
            self.run_in_guest('sfa-import-plc.py')==0 or \
            self.run_in_guest('sfaadmin.py registry import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
1312 def sfi_configure(self):
1313 "Create /root/sfi on the plc side for sfi client configuration"
1314 if self.options.dry_run:
1315 utils.header("DRY RUN - skipping step")
1317 sfa_spec=self.plc_spec['sfa']
1318 # cannot use sfa_slice_mapper to pass dir_name
1319 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1320 site_spec = self.locate_site (slice_spec['sitename'])
1321 test_site = TestSite(self,site_spec)
1322 test_slice=TestSliceSfa(self,test_site,slice_spec)
1323 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1324 test_slice.sfi_config(dir_name)
1325 # push into the remote /root/sfi area
1326 location = test_slice.sfi_path()
1327 remote="%s/%s"%(self.vm_root_in_guest(),location)
1328 self.test_ssh.mkdir(remote,abs=True)
1329 # need to strip last level or remote otherwise we get an extra dir level
1330 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1334 def sfi_clean (self):
1335 "clean up /root/sfi on the plc side"
1336 self.run_in_guest("rm -rf /root/sfi")
1340 def sfa_add_user(self):
1345 def sfa_update_user(self):
1349 def sfa_add_slice(self):
1350 "run sfi.py add (on Registry) from slice.xml"
1354 def sfa_discover(self):
1355 "discover resources into resouces_in.rspec"
1359 def sfa_create_slice(self):
1360 "run sfi.py create (on SM) - 1st time"
1364 def sfa_check_slice_plc(self):
1365 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1369 def sfa_update_slice(self):
1370 "run sfi.py create (on SM) on existing object"
1375 "various registry-related calls"
1379 def ssh_slice_sfa(self):
1380 "tries to ssh-enter the SFA slice"
1384 def sfa_delete_user(self):
1389 def sfa_delete_slice(self):
1390 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
1395 self.run_in_guest('service sfa stop')==0
1398 def populate (self):
1399 "creates random entries in the PLCAPI"
1400 # install the stress-test in the plc image
1401 location = "/usr/share/plc_api/plcsh_stress_test.py"
1402 remote="%s/%s"%(self.vm_root_in_guest(),location)
1403 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1405 command += " -- --preserve --short-names"
1406 local = (self.run_in_guest(command) == 0);
1407 # second run with --foreign
1408 command += ' --foreign'
1409 remote = (self.run_in_guest(command) == 0);
1410 return ( local and remote)
1412 def gather_logs (self):
1413 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1414 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1415 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1416 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1417 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1418 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1420 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1421 self.gather_var_logs ()
1423 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1424 self.gather_pgsql_logs ()
1426 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1427 for site_spec in self.plc_spec['sites']:
1428 test_site = TestSite (self,site_spec)
1429 for node_spec in site_spec['nodes']:
1430 test_node=TestNode(self,test_site,node_spec)
1431 test_node.gather_qemu_logs()
1433 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1434 self.gather_nodes_var_logs()
1436 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1437 self.gather_slivers_var_logs()
1440 def gather_slivers_var_logs(self):
1441 for test_sliver in self.all_sliver_objs():
1442 remote = test_sliver.tar_var_logs()
1443 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1444 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1445 utils.system(command)
1448 def gather_var_logs (self):
1449 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1450 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1451 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1452 utils.system(command)
1453 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1454 utils.system(command)
1456 def gather_pgsql_logs (self):
1457 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1458 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1459 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1460 utils.system(command)
1462 def gather_nodes_var_logs (self):
1463 for site_spec in self.plc_spec['sites']:
1464 test_site = TestSite (self,site_spec)
1465 for node_spec in site_spec['nodes']:
1466 test_node=TestNode(self,test_site,node_spec)
1467 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1468 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1469 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1470 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1471 utils.system(command)
1474 # returns the filename to use for sql dump/restore, using options.dbname if set
1475 def dbfile (self, database):
1476 # uses options.dbname if it is found
1478 name=self.options.dbname
1479 if not isinstance(name,StringTypes):
1482 t=datetime.datetime.now()
1485 return "/root/%s-%s.sql"%(database,name)
1487 def plc_db_dump(self):
1488 'dump the planetlab5 DB in /root in the PLC - filename has time'
1489 dump=self.dbfile("planetab5")
1490 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1491 utils.header('Dumped planetlab5 database in %s'%dump)
1494 def plc_db_restore(self):
1495 'restore the planetlab5 DB - looks broken, but run -n might help'
1496 dump=self.dbfile("planetab5")
1497 ##stop httpd service
1498 self.run_in_guest('service httpd stop')
1499 # xxx - need another wrapper
1500 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1501 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1502 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1503 ##starting httpd service
1504 self.run_in_guest('service httpd start')
1506 utils.header('Database restored from ' + dump)
1508 def standby_1_through_20(self):
1509 """convenience function to wait for a specified number of minutes"""
1512 def standby_1(): pass
1514 def standby_2(): pass
1516 def standby_3(): pass
1518 def standby_4(): pass
1520 def standby_5(): pass
1522 def standby_6(): pass
1524 def standby_7(): pass
1526 def standby_8(): pass
1528 def standby_9(): pass
1530 def standby_10(): pass
1532 def standby_11(): pass
1534 def standby_12(): pass
1536 def standby_13(): pass
1538 def standby_14(): pass
1540 def standby_15(): pass
1542 def standby_16(): pass
1544 def standby_17(): pass
1546 def standby_18(): pass
1548 def standby_19(): pass
1550 def standby_20(): pass