# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import os, os.path
import sys
import time
import datetime
import socket
import traceback
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestSliceSfa import TestSliceSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True
def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
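
# for instance, applied to the standby_10 stub at the end of this file,
# standby_generic builds a step whose name parses into minutes=10, so that
# calling the step amounts to standby(10, self.options.dry_run)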
def node_mapper (method):
    def actual(self):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self,test_site,node_spec)
                if not node_method(test_node): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
def slice_sfa_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
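
# these mappers let a step be declared as a mere stub, e.g.
#   @node_mapper
#   def qemu_start (self): "docstring shown when listing steps"
# the stub body never runs: the actual work is the same-named method on
# TestNode (resp. TestSlice, TestSliceSfa), invoked once per object, and
# the step fails as soon as one of them fails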
SEP='<sep>'
SEPSFA='<sep-sfa>'

class TestPlc:

    default_steps = [
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20',SEP,
        ]
    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
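
    # printable_steps renders a steps list for display: each separator becomes
    # an escaped line break, so e.g. ['plc_install','plc_configure',SEP,'keys_fetch']
    # comes out as
    #   plc_install plc_configure \
    #   keys_fetch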
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod!=0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()
    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)
    # command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.host_to_guest"
        else:
            return "vserver %s exec %s"%(self.vservername,command)

    def vm_root_in_guest(self):
        if self.options.plcs_use_lxc:
            return "TODO TestPlc.vm_root_in_guest"
        else:
            return "/vservers/%s"%self.vservername

    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.start_guest_in_host"
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.stop_guest_in_host"
        else:
            return "vserver %s stop"%(self.vservername)
    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
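
    # usage sketch: feed local output into a command run in the guest, e.g.
    #   self.run_in_guest_piped('cat dump.sql','psql -U pgsqluser planetlab5')
    # this is how plc_configure and plc_db_restore below push data into the vm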
    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.run_in_guest("rpm -q %s"%rpms)==0
    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result
    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site, node)
        return test_node
    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box):
                result[box]=[node]
            else:
                result[box].append(node)
        return result
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True
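
    # typical output line (hypothetical hostnames):
    #   testbox1.example.org : node1 + node2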
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True
    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True
    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - assumes TestNode exposes list_qemu
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version - assumes TestNode exposes kill_qemu
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)
        return True

    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        print "export PLCHOST=%s"%fqdn
        print "export GUEST=%s"%self.plc_spec['vservername']
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ ','users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ',k,':',v
    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ ','nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ ','users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
            else:
                print '+ ',k,':',v
    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            # TODO-lxc : how to trash a VM altogether and the related timestamp as well
            # might make sense to test that this has been done - unlike for vs
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            # TODO-lxc : might need some tweaks
            script="vtest-init-lxc.sh"
        else:
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
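
    # the assembled command looks like (hypothetical values):
    #   build/vtest-init-vserver.sh -p linux64 -d planetlab -f f14 \
    #     vplc01 http://build.example.org/2012.01 -- \
    #     --netdev eth0 --interface 10.0.0.1 --hostname vplc01.example.org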
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute the nodefamily from the personality
        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
        return self.yum_install (pkgs_list)
    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True
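
    # the temp file is a plc-config-tty script; its contents look like
    # (hypothetical values):
    #   e PLC_NAME
    #   TestLab
    #   w
    #   q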
    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_guest()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
657 "create sites with PLCAPI"
658 return self.do_sites()
660 def delete_sites (self):
661 "delete sites with PLCAPI"
662 return self.do_sites(action="delete")
664 def do_sites (self,action="add"):
665 for site_spec in self.plc_spec['sites']:
666 test_site = TestSite (self,site_spec)
667 if (action != "add"):
668 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
669 test_site.delete_site()
670 # deleted with the site
671 #test_site.delete_users()
674 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
675 test_site.create_site()
676 test_site.create_users()
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        site_ids = [ s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id']) ]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True
689 "create nodes with PLCAPI"
690 return self.do_nodes()
691 def delete_nodes (self):
692 "delete nodes with PLCAPI"
693 return self.do_nodes(action="delete")
695 def do_nodes (self,action="add"):
696 for site_spec in self.plc_spec['sites']:
697 test_site = TestSite (self,site_spec)
699 utils.header("Deleting nodes in site %s"%test_site.name())
700 for node_spec in site_spec['nodes']:
701 test_node=TestNode(self,test_site,node_spec)
702 utils.header("Deleting %s"%test_node.name())
703 test_node.delete_node()
705 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
706 for node_spec in site_spec['nodes']:
707 utils.pprint('Creating node %s'%node_spec,node_spec)
708 test_node = TestNode (self,test_site,node_spec)
709 test_node.create_node ()
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    # timestamps in the 'leases' spec that are under one year are taken as
    # relative to the test start; anything larger is an absolute epoch date
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp
    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
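
    # worked example: with start=1000000 and grain=1800, a relative timestamp
    # of 2 translates into 1000000+2*1800=1003600, whereas 1300000000 (greater
    # than YEAR) is taken as an absolute epoch timestamp and returned unchanged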
730 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
732 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
733 print 'API answered grain=',grain
734 start=(now/grain)*grain
736 # find out all nodes that are reservable
737 nodes=self.all_reservable_nodenames()
739 utils.header ("No reservable node found - proceeding without leases")
742 # attach them to the leases as specified in plc_specs
743 # this is where the 'leases' field gets interpreted as relative of absolute
744 for lease_spec in self.plc_spec['leases']:
745 # skip the ones that come with a null slice id
746 if not lease_spec['slice']: continue
747 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
748 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
749 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
750 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
751 if lease_addition['errors']:
752 utils.header("Cannot create leases, %s"%lease_addition['errors'])
755 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
756 (nodes,lease_spec['slice'],
757 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
758 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases on the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root()) ]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True
    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             })
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # then create the nodegroup itself if needed
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes, period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # pretend the node reached the target state
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh the list of nodes still to check
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                break
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        while node_infos:
            # iterate on a copy so we can safely remove entries
            for node_info in node_infos[:]:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                break
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True
    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
    @node_mapper
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
    @node_mapper
    def bootcd (self):
        "all nodes: invoke GetBootMedium and store result locally"
    @node_mapper
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
    @node_mapper
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
    @node_mapper
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
    @node_mapper
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
    @node_mapper
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
    @node_mapper
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1052 "create slices with PLCAPI"
1053 return self.do_slices()
1055 def delete_slices (self):
1056 "delete slices with PLCAPI"
1057 return self.do_slices("delete")
1059 def do_slices (self, action="add"):
1060 for slice in self.plc_spec['slices']:
1061 site_spec = self.locate_site (slice['sitename'])
1062 test_site = TestSite(self,site_spec)
1063 test_slice=TestSlice(self,test_site,slice)
1065 utils.header("Deleting slices in site %s"%test_site.name())
1066 test_slice.delete_slice()
1068 utils.pprint("Creating slice",slice)
1069 test_slice.create_slice()
1070 utils.header('Created Slice %s'%slice['slice_fields']['name'])
    @slice_mapper
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"

    @node_mapper
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"

    @node_mapper
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"

    @node_mapper
    def timestamp_qemu (self) :
        "all nodes: store a timestamp for the qemu instance"
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0 )
    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_client(self):
        "yum install sfa-client"
        return self.yum_install("sfa-client")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")
    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        # try the various names this command has had across SFA releases
        return self.run_in_guest("sfa-nuke.py")==0 or \
            self.run_in_guest("sfa-nuke-plc.py")==0 or \
            self.run_in_guest("sfaadmin.py registry nuke")==0
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec=self.plc_spec['sfa']
        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
            except: print "Slice %s already absent from PLC db"%slicename

            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            except: print "User %s already absent from PLC db"%username

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # make sure the rpm is gone even if its uninstall scripts fail
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails with:
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)

    def confsubdir(self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
        return subdirname
    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in [ ]:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True
    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
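
    # these render one line each in aggregates.xml / registries.xml, e.g.
    # (hypothetical values):
    #   <registry addr="10.0.0.1" hrn="plc.onelab" port="12345"/>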
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_guest())==0 \
            and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_guest())==0
    def sfa_import(self):
        "runs the SFA import script - its name has changed across SFA releases"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import.py')==0 or \
            self.run_in_guest('sfa-import-plc.py')==0 or \
            self.run_in_guest('sfaadmin.py registry import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use sfa_slice_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_config(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_guest(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level of remote, otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True
    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @slice_sfa_mapper
    def sfa_add_user(self):
        "run sfi.py add (on Registry)"

    @slice_sfa_mapper
    def sfa_update_user(self):
        "run sfi.py update (on Registry)"

    @slice_sfa_mapper
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"

    @slice_sfa_mapper
    def sfa_discover(self):
        "discover resources into resources_in.rspec"

    @slice_sfa_mapper
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"

    @slice_sfa_mapper
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"

    @slice_sfa_mapper
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"

    @slice_sfa_mapper
    def sfa_view(self):
        "various registry-related calls"

    @slice_sfa_mapper
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"

    @slice_sfa_mapper
    def sfa_delete_user(self):
        "run sfi.py delete (on Registry)"

    @slice_sfa_mapper
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return ( local and remote )
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)
        return True
    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)
    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            name=t.strftime('%Y-%m-%d-%H-%M')
        return "/root/%s-%s.sql"%(database,name)
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        # stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        # restart httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass