# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import sys
import os, os.path
import time
import datetime
import socket
import traceback
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestSliceSfa import TestSliceSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if not dry_run:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
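
# a minimal usage sketch (illustrative - the standby_N steps at the bottom of
# this file are produced exactly this way):
#   @standby_generic
#   def standby_5(): pass
# the decorated step, when invoked on a TestPlc instance, waits 5 minutes,
# or returns immediately under --dry-run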
def node_mapper (method):
    def actual(self):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
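
# illustration - a step declared as:
#   @node_mapper
#   def qemu_kill_mine (self): pass
# ends up calling TestNode.qemu_kill_mine on every node of every site in the
# spec, and reports failure if any per-node call fails (qemu_kill_mine is a
# real step from default_steps; the body of the decorated method is ignored)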
def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_sfa_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
# separators used when pretty-printing step lists (exact marker values are assumed;
# any two distinct strings that cannot clash with step names would do)
SEP='<sep>'
SEPSFA='<sep_sfa>'

class TestPlc:

    default_steps = [
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20',SEP,
        ]
    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
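    # e.g. printable_steps(['vs_create',SEP,'plc_install']) -> "vs_create \\\nplc_install "
    # i.e. the separators render as line breaks when the step list gets echoed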
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod!=0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    # accessor for the box that hosts this plc (method name assumed)
    def host_box (self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)
    #command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.host_to_guest"
        else:
            return "vserver %s exec %s"%(self.vservername,command)
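    # e.g. host_to_guest("ls") -> "vserver <vservername> exec ls" in vserver mode
    # (angle brackets stand for the actual vservername from the plc spec)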
    def vm_root_in_guest(self):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc
            return "TODO TestPlc.vm_root_in_guest"
        else:
            return "/vservers/%s"%self.vservername

    #start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.start_guest_in_host"
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.stop_guest_in_host"
        else:
            return "vserver %s stop"%(self.vservername)
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
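    # e.g. self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty') feeds a locally
    # generated answer file to plc-config-tty inside the guest - see plc_configure below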
    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.run_in_guest("rpm -q %s"%rpms)==0
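    # usage sketch: self.yum_install("sfa-plc") and self.yum_install(pkgs_list) both
    # work; success is decided by the trailing 'rpm -q', as yum's exit code is unreliable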
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # build objects
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[]
            result[box].append(node)
        return result
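    # e.g. two qemu nodes hosted on the same box come back as
    # { 'testbox1.example.org' : [ test_node_1, test_node_2 ] } (hostname illustrative)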
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True
    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True

    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)
        return True

    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        print "export PLCHOST=%s"%fqdn
        print "export GUEST=%s"%self.plc_spec['vservername']
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key_spec in val:
                        self.display_key_spec (key_spec)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ ','users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ',
                utils.pprint(k,v)

    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
            else:
                print '+ ',k,v
    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            # TODO-lxc : how to trash a VM altogether and the related timestamp as well
            # might make sense to test that this has been done - unlike for vs
            print "TODO TestPlc.vs_delete"
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            # TODO-lxc : might need some tweaks
            script="vtest-init-lxc.sh"
        else:
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
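    # the assembled command looks roughly like this (names and URL illustrative):
    #   build/vtest-init-vserver.sh -p linux64 -d planetlab -f f14 \
    #       vplc01 http://build.example.org/rpms -- --netdev eth0 --interface 1.2.3.4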
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute the node family from personality/pldistro/fcdistro
        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        # yum_install accepts a list and joins it itself
        return self.yum_install (pkgs_list)
    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True

    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_guest()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
    def sites (self):
        "create sites with PLCAPI"
        return self.do_sites()

    def delete_sites (self):
        "delete sites with PLCAPI"
        return self.do_sites(action="delete")

    def do_sites (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            else:
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        return True

    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
    def nodes (self):
        "create nodes with PLCAPI"
        return self.do_nodes()
    def delete_nodes (self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True

    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")
    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")
    # timestamps in the 'leases' spec can be either relative (in grain units)
    # or absolute epoch dates; anything smaller than a year is taken as
    # relative (the YEAR value below is an assumption)
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp
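    # e.g. with grain=1800 (as returned by GetLeaseGranularity) a lease spec of
    # 't_from':0,'t_until':2 means "from start until one hour later", while a
    # full epoch timestamp like 1300000000 goes through unchanged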
    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
    def leases (self):
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        now=int(time.time())
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        if not nodes:
            utils.header ("No reservable node found - proceeding without leases")
            return True
        ok=True
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative or absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
                ok=False
            else:
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                             (nodes,lease_spec['slice'],
                              lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                              lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
        return ok

    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # then create the nodegroup itself if needed
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                # anything other than 'add' means delete
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # let's cheat
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh the list of nodes still to be checked
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # compute timeout
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
        while node_infos:
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    # refresh node_infos
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
    @node_mapper
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        pass

    @node_mapper
    def bootcd (self):
        "all nodes: invoke GetBootMedium and store result locally"
        pass

    @node_mapper
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        pass

    @node_mapper
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        pass

    @node_mapper
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        pass

    @node_mapper
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
        pass

    @node_mapper
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
        pass

    @node_mapper
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
        pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
    def slices (self):
        "create slices with PLCAPI"
        return self.do_slices()

    def delete_slices (self):
        "delete slices with PLCAPI"
        return self.do_slices("delete")

    def do_slices (self, action="add"):
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action != "add":
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
            else:
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
        return True
    @slice_mapper
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
        pass

    @node_mapper
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"
        pass

    @node_mapper
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
        pass

    @node_mapper
    def timestamp_qemu (self) :
        "all nodes: write a timestamp on the qemu box"
        pass
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # client side - fall back on the server side when no client is specified (loopback)
            c_test_sliver = self.locate_sliver_obj (spec.get('client_node',spec['server_node']),
                                                    spec.get('client_slice',spec['server_slice']))
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (dont cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_client(self):
        "yum install sfa-client"
        return self.yum_install("sfa-client")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")

    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        # try the various nuke commands in turn, newest first
        return self.run_in_guest("sfa-nuke.py")==0 or \
            self.run_in_guest("sfa-nuke-plc.py")==0 or \
            self.run_in_guest("sfaadmin.py registry nuke")==0
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # ignore result
        sfa_spec=self.plc_spec['sfa']

        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
            except: print "Slice %s already absent from PLC db"%slicename

            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            except: print "User %s already absent from PLC db"%username

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # double-check with rpm
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)
    def confsubdir(self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        # (the exact list of boolean settings is assumed here)
        for var in [ 'SFA_API_DEBUG', ]:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True

    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                      " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                      " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_guest())==0 \
            and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_guest())==0

    def sfa_import(self):
        "import the registry content - tries the various import commands in turn"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import.py')==0 or \
            self.run_in_guest('sfa-import-plc.py')==0 or \
            self.run_in_guest('sfaadmin.py registry import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0

    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use sfa_slice_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_config(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_guest(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level of remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @slice_sfa_mapper
    def sfa_add_user(self):
        "run sfi.py add (on Registry) from person.xml"
        pass

    @slice_sfa_mapper
    def sfa_update_user(self):
        "run sfi.py update (on Registry) from person.xml"
        pass

    @slice_sfa_mapper
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
        pass

    @slice_sfa_mapper
    def sfa_discover(self):
        "discover resources into resources_in.rspec"
        pass

    @slice_sfa_mapper
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
        pass

    @slice_sfa_mapper
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"
        pass

    @slice_sfa_mapper
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"
        pass

    @slice_sfa_mapper
    def sfa_view(self):
        "various registry-related calls"
        pass

    @slice_sfa_mapper
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        pass

    @slice_sfa_mapper
    def sfa_delete_user(self):
        "run sfi.py remove (on Registry) to clean the user"
        pass

    @slice_sfa_mapper
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True

    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
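    # e.g. dbfile("planetlab5") -> "/root/planetlab5-2011-03-27.sql" when --dbname is
    # not set (the date part is illustrative), or "/root/planetlab5-<dbname>.sql" otherwise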
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass

    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass