1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep for <minutes> minutes; this is the actual work behind the
# standby_<N>_through_<M> steps.
# NOTE(review): lines between the header and the sleep are elided from
# this view - presumably a dry_run guard and a trailing 'return True'
# (steps must return a boolean); confirm against the full file.
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
# Factory for the standby_* steps: derives the duration from the
# function name (e.g. 'standby_5_...' -> 5 minutes) and delegates to
# standby().
# NOTE(review): the wrapper line ('def actual(self):') and the final
# 'return actual' are elided from this view - as shown, 'self' below
# would be unresolved; the two statements belong to the inner wrapper.
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
# Decorator factory: lift a TestNode method into a TestPlc step that
# applies it to every node of every site, and-ing the results.
# NOTE(review): the 'def actual(self):' wrapper line, the 'overall=True'
# initialization, and the 'return overall' / 'return actual' lines are
# elided from this view; the loop below belongs to the inner wrapper.
def node_mapper (method):
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator factory: lift a TestSlice method into a TestPlc step that
# applies it to every slice declared in the plc spec, and-ing results.
# NOTE(review): the 'def actual(self):' wrapper line, the 'overall'
# initialization, and the trailing returns are elided from this view.
def slice_mapper (method):
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Same pattern as slice_mapper, but for SFA slices: lift a TestSliceSfa
# method into a step that iterates over the sfa_slice_specs.
# NOTE(review): the 'def actual(self):' wrapper line, the 'overall'
# initialization, and the trailing returns are elided from this view.
def slice_sfa_mapper (method):
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'vs_delete','timestamp_vs','vs_create', SEP,
90 'plc_install', 'plc_configure', 'plc_start', SEP,
91 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
96 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
97 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
98 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
99 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
100 # but as the stress test might take a while, we sometimes missed the debug mode..
101 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
102 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
103 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'force_gather_logs', SEP,
108 'export', 'show_boxes', SEP,
109 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
110 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
111 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
112 'delete_leases', 'list_leases', SEP,
114 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
115 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
116 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
117 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
118 'plc_db_dump' , 'plc_db_restore', SEP,
119 'standby_1_through_20',SEP,
def printable_steps (list):
    """Render a step list as shell-style continued lines, breaking at each separator."""
    padded=" ".join(list)+" "
    # each separator (regular and sfa-specific) becomes a backslash-newline
    for separator in (SEP,SEPSFA):
        padded=padded.replace(" "+separator+" "," \\\n")
    return padded
def valid_step (step):
    """A step name is valid unless it is one of the visual separators."""
    return step not in (SEP, SEPSFA)
130 # turn off the sfa-related steps when build has skipped SFA
131 # this is originally for centos5 as recent SFAs won't build on this platform
# Probe the build's rpm repository for sfa packages; if the build was
# made without SFA, demote every sfa-related step from the default set
# into the optional set.
# NOTE(review): the early return taken when retcod==0 (full build) is
# elided from this view, so as shown the demotion would always run.
def check_whether_build_has_sfa (rpms_url):
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
def __init__ (self,plc_spec,options):
    # Bind the plc spec and precompute the ssh helper and API proxy.
    # NOTE(review): the 'self.options=options' assignment is elided from
    # this view although self.options is read on the TestSsh line below -
    # confirm it exists in the full file.
    self.plc_spec=plc_spec
    self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    # the PLCAPI endpoint of the myplc running inside the vserver
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    self.apiserver=TestApiserver(self.url,options.dry_run)
153 name=self.plc_spec['name']
154 return "%s.%s"%(name,self.vservername)
157 return self.plc_spec['host_box']
160 return self.test_ssh.is_local()
162 # define the API methods on this object through xmlrpc
163 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Return the full ssh command line that runs <command> inside the guest vserver."""
    guest_command=self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    """Start the guest vserver through the host box; returns the shell status."""
    start_command=self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(start_command))
def stop_guest (self):
    """Stop the guest vserver through the host box; returns the shell status."""
    stop_command=self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(stop_command))
def run_in_guest (self,command):
    """Run <command> inside the guest vserver; returns the shell exit status."""
    full_command=self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # Run <command> on the host box (not in the guest), via the test ssh
    # helper; returns whatever run_in_buildname returns (a shell status).
    return self.test_ssh.run_in_buildname(command)
# wrap a command so that, run on the host box, it executes in the vserver
def host_to_guest(self,command):
    """Return the host-side command line that runs <command> inside the guest."""
    wrapped="vserver %s exec %s"%(self.vservername,command)
    return wrapped
# host-side command to boot the vserver
def start_guest_in_host(self):
    """Return the command that, run on the host box, starts the guest vserver."""
    command="vserver %s start"%self.vservername
    return command
def stop_guest_in_host(self):
    """Return the command that, run on the host box, stops the guest vserver."""
    command="vserver %s stop"%self.vservername
    return command
def run_in_guest_piped (self,local,remote):
    """Run <local> on the host and pipe its stdout into <remote> inside the guest."""
    remote_command=self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
# does a yum install in the vs, ignore yum retcod, check with rpm
# NOTE(review): the body of the isinstance branch is elided from this
# view - presumably it joins a list argument into a space-separated
# string (rpms=" ".join(rpms)) and the install lines below sit at method
# level, not under the 'if'; confirm against the full file.
def yum_install (self, rpms):
    if isinstance (rpms, list):
        # (elided branch body - see note above)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        # trust 'rpm -q' rather than yum's exit code
        return self.run_in_guest("rpm -q %s"%rpms)==0
def auth_root (self):
    # Root authentication struct for PLCAPI calls, built from the spec.
    # NOTE(review): the closing brace of this dict literal is elided from
    # this view.
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
def locate_site (self,sitename):
    # Find a site spec by site name or by login_base; raises when absent.
    # NOTE(review): the two 'return site' lines under the matching tests
    # are elided from this view.
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    # Find a node spec by its symbolic name; raises when absent.
    # NOTE(review): the 'return (site,node)' line under the match is
    # elided from this view (callers unpack a (site,node) pair).
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    # Find a node spec by hostname; raises when absent.
    # NOTE(review): the return under the match is elided from this view
    # (callers unpack a (site_spec,node_spec) pair).
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    # Find a key spec by name; raises when absent.
    # NOTE(review): the 'return key' under the match is elided from this view.
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    # Find a slice spec by its slice name; raises when absent.
    # NOTE(review): the 'return slice' under the match is elided from this view.
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    # Build a TestSliver for every (node,slice) pair declared in the spec.
    # NOTE(review): the 'result=[]' initialization and the final
    # 'return result' are elided from this view.
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    """Build the TestSliver object for the given (node,slice) pair."""
    (site_spec,node_spec) = self.locate_node(nodename)
    sliver_slice_spec = self.locate_slice (slicename)
    sliver_site = TestSite (self, site_spec)
    sliver_node = TestNode (self, sliver_site, node_spec)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    sliver_slice = TestSlice (self, sliver_site, sliver_slice_spec)
    return TestSliver (self, sliver_node, sliver_slice)
def locate_first_node(self):
    # Build a TestNode for the first node of the first declared slice.
    # NOTE(review): the trailing 'return test_node' is elided from this view.
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    """Return the sliver for the first node of the first declared slice."""
    first_slice=self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): the 'tuples=[]' and 'result={}' initializations, the
    # 'result[box]=[]' branch body, and the final 'return result' are
    # elided from this view.
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # real (non-qemu) nodes have no host box to track
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
# a step for checking this stuff
def show_boxes (self):
    'print summary of nodes location'
    # NOTE(review): the trailing 'return True' (steps return a boolean)
    # is elided from this view.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )
# make this a valid step
def qemu_kill_all(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # NOTE(review): the trailing 'return True' is elided from this view.
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
# make this a valid step
def qemu_list_all(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    # NOTE(review): the trailing 'return True' is elided from this view.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBoxQemu(box,self.options.buildname).qemu_list_all()
# kill only the right qemus
def qemu_list_mine(self):
    'list qemu instances for our nodes'
    # NOTE(review): the per-box loop body (the fine-grain listing calls)
    # and the trailing return are elided from this view.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
# kill only the right qemus
def qemu_kill_mine(self):
    'kill the qemu instances for our nodes'
    # NOTE(review): the per-box loop body (the fine-grain kill calls)
    # and the trailing return are elided from this view.
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
339 #################### display config
341 "show test configuration after localization"
342 self.display_pass (1)
343 self.display_pass (2)
347 "print cut'n paste-able stuff to export env variables to your shell"
348 # these work but the shell prompt does not get displayed..
349 command1="ssh %s vserver %s enter"%(self.plc_spec['host_box'],self.plc_spec['vservername'])
350 command2="ssh root@%s %s"%(socket.gethostname(),command1)
351 # guess local domain from hostname
352 domain=socket.gethostname().split('.',1)[1]
353 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
354 print "export BUILD=%s"%self.options.buildname
355 print "export PLCHOST=%s"%fqdn
356 print "export GUEST=%s"%self.plc_spec['vservername']
357 # find hostname of first node
358 (hostname,qemubox) = self.all_node_infos()[0]
359 print "export KVMHOST=%s.%s"%(qemubox,domain)
360 print "export NODE=%s"%(hostname)
364 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
365 def display_pass (self,passno):
366 for (key,val) in self.plc_spec.iteritems():
367 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
371 self.display_site_spec(site)
372 for node in site['nodes']:
373 self.display_node_spec(node)
374 elif key=='initscripts':
375 for initscript in val:
376 self.display_initscript_spec (initscript)
379 self.display_slice_spec (slice)
382 self.display_key_spec (key)
384 if key not in ['sites','initscripts','slices','keys', 'sfa']:
385 print '+ ',key,':',val
387 def display_site_spec (self,site):
388 print '+ ======== site',site['site_fields']['name']
389 for (k,v) in site.iteritems():
390 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
393 print '+ ','nodes : ',
395 print node['node_fields']['hostname'],'',
401 print user['name'],'',
403 elif k == 'site_fields':
404 print '+ login_base',':',v['login_base']
405 elif k == 'address_fields':
411 def display_initscript_spec (self,initscript):
412 print '+ ======== initscript',initscript['initscript_fields']['name']
414 def display_key_spec (self,key):
415 print '+ ======== key',key['name']
417 def display_slice_spec (self,slice):
418 print '+ ======== slice',slice['slice_fields']['name']
419 for (k,v) in slice.iteritems():
432 elif k=='slice_fields':
433 print '+ fields',':',
434 print 'max_nodes=',v['max_nodes'],
def display_node_spec (self,node):
    # One-line summary of a node spec; the trailing commas keep the three
    # prints on a single output line (python2 print semantics).
    print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
    # full dump only in verbose mode
    if self.options.verbose:
        utils.pprint("node details",node,depth=3)
446 # another entry point for just showing the boxes involved
447 def display_mapping (self):
448 TestPlc.display_mapping_plc(self.plc_spec)
def display_mapping_plc (plc_spec):
    # Static helper: dump where the plc and each of its nodes are hosted.
    # NOTE(review): the decorator line (presumably @staticmethod) is
    # elided from this view.
    print '+ MyPLC',plc_spec['name']
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)
def display_mapping_node (node_spec):
    # Static helper: dump one node's name, qemu box and hostname.
    # NOTE(review): the decorator line (presumably @staticmethod) is
    # elided from this view.
    print '+ NODE %s'%(node_spec['name'])
    print '+\tqemu box %s'%node_spec['host_box']
    print '+\thostname=%s'%node_spec['node_fields']['hostname']
466 # write a timestamp in /vservers/<>.timestamp
467 # cannot be inside the vserver, that causes vserver .. build to cough
468 def timestamp_vs (self):
470 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s.timestamp"%(now,self.vservername)))==0
472 # def local_pre (self):
473 # "run site-dependant pre-test script as defined in LocalTestResources"
474 # from LocalTestResources import local_resources
475 # return local_resources.step_pre(self)
477 # def local_post (self):
478 # "run site-dependant post-test script as defined in LocalTestResources"
479 # from LocalTestResources import local_resources
480 # return local_resources.step_post(self)
482 # def local_list (self):
483 # "run site-dependant list script as defined in LocalTestResources"
484 # from LocalTestResources import local_resources
485 # return local_resources.step_list(self)
487 # def local_rel (self):
488 # "run site-dependant release script as defined in LocalTestResources"
489 # from LocalTestResources import local_resources
490 # return local_resources.step_release(self)
492 # def local_rel_plc (self):
493 # "run site-dependant release script as defined in LocalTestResources"
494 # from LocalTestResources import local_resources
495 # return local_resources.step_release_plc(self)
497 # def local_rel_qemu (self):
498 # "run site-dependant release script as defined in LocalTestResources"
499 # from LocalTestResources import local_resources
500 # return local_resources.step_release_qemu(self)
503 "vserver delete the test myplc"
504 self.run_in_host("vserver --silent %s delete"%self.vservername)
505 self.run_in_host("rm -f /vservers/%s.timestamp"%self.vservername)
509 # historically the build was being fetched by the tests
510 # now the build pushes itself as a subdir of the tests workdir
511 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
512 def vs_create (self):
513 "vserver creation (no install done)"
514 # push the local build/ dir to the testplc box
516 # a full path for the local calls
517 build_dir=os.path.dirname(sys.argv[0])
518 # sometimes this is empty - set to "." in such a case
519 if not build_dir: build_dir="."
520 build_dir += "/build"
522 # use a standard name - will be relative to remote buildname
524 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
525 self.test_ssh.rmdir(build_dir)
526 self.test_ssh.copy(build_dir,recursive=True)
527 # the repo url is taken from arch-rpms-url
528 # with the last step (i386) removed
529 repo_url = self.options.arch_rpms_url
530 for level in [ 'arch' ]:
531 repo_url = os.path.dirname(repo_url)
532 # pass the vbuild-nightly options to vtest-init-vserver
534 test_env_options += " -p %s"%self.options.personality
535 test_env_options += " -d %s"%self.options.pldistro
536 test_env_options += " -f %s"%self.options.fcdistro
537 script="vtest-init-vserver.sh"
538 vserver_name = self.vservername
539 vserver_options="--netdev eth0 --interface %s"%self.vserverip
541 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
542 vserver_options += " --hostname %s"%vserver_hostname
544 print "Cannot reverse lookup %s"%self.vserverip
545 print "This is considered fatal, as this might pollute the test results"
547 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
548 return self.run_in_host(create_vserver) == 0
551 def plc_install(self):
552 "yum install myplc, noderepo, and the plain bootstrapfs"
554 # workaround for getting pgsql8.2 on centos5
555 if self.options.fcdistro == "centos5":
556 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
559 if self.options.personality == "linux32":
561 elif self.options.personality == "linux64":
564 raise Exception, "Unsupported personality %r"%self.options.personality
565 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
568 pkgs_list.append ("slicerepo-%s"%nodefamily)
569 pkgs_list.append ("myplc")
570 pkgs_list.append ("noderepo-%s"%nodefamily)
571 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
572 pkgs_string=" ".join(pkgs_list)
573 return self.yum_install (pkgs_list)
576 def plc_configure(self):
578 tmpname='%s.plc-config-tty'%(self.name())
579 fileconf=open(tmpname,'w')
580 for var in [ 'PLC_NAME',
585 'PLC_MAIL_SUPPORT_ADDRESS',
588 # Above line was added for integrating SFA Testing
594 'PLC_RESERVATION_GRANULARITY',
596 'PLC_OMF_XMPP_SERVER',
598 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
599 fileconf.write('w\n')
600 fileconf.write('q\n')
602 utils.system('cat %s'%tmpname)
603 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
604 utils.system('rm %s'%tmpname)
609 self.run_in_guest('service plc start')
614 self.run_in_guest('service plc stop')
618 "start the PLC vserver"
623 "stop the PLC vserver"
627 # stores the keys from the config for further use
# stores the keys from the config for further use
def keys_store(self):
    "stores test users ssh keys in keys/"
    # NOTE(review): the trailing 'return True' is elided from this view.
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
def keys_clean(self):
    "removes keys cached in keys/"
    # NOTE(review): the trailing 'return True' is elided from this view.
    utils.system("rm -rf ./keys")
639 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
640 # for later direct access to the nodes
641 def keys_fetch(self):
642 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
644 if not os.path.isdir(dir):
646 vservername=self.vservername
648 prefix = 'debug_ssh_key'
649 for ext in [ 'pub', 'rsa' ] :
650 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
651 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
652 if self.test_ssh.fetch(src,dst) != 0: overall=False
656 "create sites with PLCAPI"
657 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # thin wrapper: the docstring above doubles as the step's user-visible
    # description, so it is kept verbatim; the work happens in do_sites
    return self.do_sites(action="delete")
663 def do_sites (self,action="add"):
664 for site_spec in self.plc_spec['sites']:
665 test_site = TestSite (self,site_spec)
666 if (action != "add"):
667 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
668 test_site.delete_site()
669 # deleted with the site
670 #test_site.delete_users()
673 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
674 test_site.create_site()
675 test_site.create_users()
678 def delete_all_sites (self):
679 "Delete all sites in PLC, and related objects"
680 print 'auth_root',self.auth_root()
681 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
682 for site_id in site_ids:
683 print 'Deleting site_id',site_id
684 self.apiserver.DeleteSite(self.auth_root(),site_id)
688 "create nodes with PLCAPI"
689 return self.do_nodes()
690 def delete_nodes (self):
691 "delete nodes with PLCAPI"
692 return self.do_nodes(action="delete")
694 def do_nodes (self,action="add"):
695 for site_spec in self.plc_spec['sites']:
696 test_site = TestSite (self,site_spec)
698 utils.header("Deleting nodes in site %s"%test_site.name())
699 for node_spec in site_spec['nodes']:
700 test_node=TestNode(self,test_site,node_spec)
701 utils.header("Deleting %s"%test_node.name())
702 test_node.delete_node()
704 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
705 for node_spec in site_spec['nodes']:
706 utils.pprint('Creating node %s'%node_spec,node_spec)
707 test_node = TestNode (self,test_site,node_spec)
708 test_node.create_node ()
711 def nodegroups (self):
712 "create nodegroups with PLCAPI"
713 return self.do_nodegroups("add")
714 def delete_nodegroups (self):
715 "delete nodegroups with PLCAPI"
716 return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    """Interpret <timestamp> as absolute when large, else as a grain count from <start>."""
    # anything beyond a year's worth of seconds is taken as an absolute stamp
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start+timestamp*grain
def timestamp_printable (timestamp):
    """Render a UNIX timestamp as 'MM-DD HH:MM:SS UTC'."""
    utc_tuple=time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC',utc_tuple)
729 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
731 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
732 print 'API answered grain=',grain
733 start=(now/grain)*grain
735 # find out all nodes that are reservable
736 nodes=self.all_reservable_nodenames()
738 utils.header ("No reservable node found - proceeding without leases")
741 # attach them to the leases as specified in plc_specs
742 # this is where the 'leases' field gets interpreted as relative of absolute
743 for lease_spec in self.plc_spec['leases']:
744 # skip the ones that come with a null slice id
745 if not lease_spec['slice']: continue
746 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
747 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
748 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
749 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
750 if lease_addition['errors']:
751 utils.header("Cannot create leases, %s"%lease_addition['errors'])
754 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
755 (nodes,lease_spec['slice'],
756 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
757 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
761 def delete_leases (self):
762 "remove all leases in the myplc side"
763 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
764 utils.header("Cleaning leases %r"%lease_ids)
765 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
768 def list_leases (self):
769 "list all leases known to the myplc"
770 leases = self.apiserver.GetLeases(self.auth_root())
773 current=l['t_until']>=now
774 if self.options.verbose or current:
775 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
776 TestPlc.timestamp_printable(l['t_from']),
777 TestPlc.timestamp_printable(l['t_until'])))
780 # create nodegroups if needed, and populate
781 def do_nodegroups (self, action="add"):
782 # 1st pass to scan contents
784 for site_spec in self.plc_spec['sites']:
785 test_site = TestSite (self,site_spec)
786 for node_spec in site_spec['nodes']:
787 test_node=TestNode (self,test_site,node_spec)
788 if node_spec.has_key('nodegroups'):
789 nodegroupnames=node_spec['nodegroups']
790 if isinstance(nodegroupnames,StringTypes):
791 nodegroupnames = [ nodegroupnames ]
792 for nodegroupname in nodegroupnames:
793 if not groups_dict.has_key(nodegroupname):
794 groups_dict[nodegroupname]=[]
795 groups_dict[nodegroupname].append(test_node.name())
796 auth=self.auth_root()
798 for (nodegroupname,group_nodes) in groups_dict.iteritems():
800 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
801 # first, check if the nodetagtype is here
802 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
804 tag_type_id = tag_types[0]['tag_type_id']
806 tag_type_id = self.apiserver.AddTagType(auth,
807 {'tagname':nodegroupname,
808 'description': 'for nodegroup %s'%nodegroupname,
810 print 'located tag (type)',nodegroupname,'as',tag_type_id
812 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
814 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
815 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
816 # set node tag on all nodes, value='yes'
817 for nodename in group_nodes:
819 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
821 traceback.print_exc()
822 print 'node',nodename,'seems to already have tag',nodegroupname
825 expect_yes = self.apiserver.GetNodeTags(auth,
826 {'hostname':nodename,
827 'tagname':nodegroupname},
828 ['value'])[0]['value']
829 if expect_yes != "yes":
830 print 'Mismatch node tag on node',nodename,'got',expect_yes
833 if not self.options.dry_run:
834 print 'Cannot find tag',nodegroupname,'on node',nodename
838 print 'cleaning nodegroup',nodegroupname
839 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
841 traceback.print_exc()
845 # return a list of tuples (nodename,qemuname)
846 def all_node_infos (self) :
848 for site_spec in self.plc_spec['sites']:
849 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
850 for node_spec in site_spec['nodes'] ]
853 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
854 def all_reservable_nodenames (self):
856 for site_spec in self.plc_spec['sites']:
857 for node_spec in site_spec['nodes']:
858 node_fields=node_spec['node_fields']
859 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
860 res.append(node_fields['hostname'])
863 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
864 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
865 if self.options.dry_run:
869 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
870 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
871 # the nodes that haven't checked yet - start with a full list and shrink over time
872 tocheck = self.all_hostnames()
873 utils.header("checking nodes %r"%tocheck)
874 # create a dict hostname -> status
875 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
878 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
880 for array in tocheck_status:
881 hostname=array['hostname']
882 boot_state=array['boot_state']
883 if boot_state == target_boot_state:
884 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
886 # if it's a real node, never mind
887 (site_spec,node_spec)=self.locate_hostname(hostname)
888 if TestNode.is_real_model(node_spec['node_fields']['model']):
889 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
891 boot_state = target_boot_state
892 elif datetime.datetime.now() > graceout:
893 utils.header ("%s still in '%s' state"%(hostname,boot_state))
894 graceout=datetime.datetime.now()+datetime.timedelta(1)
895 status[hostname] = boot_state
897 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
900 if datetime.datetime.now() > timeout:
901 for hostname in tocheck:
902 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
904 # otherwise, sleep for a while
906 # only useful in empty plcs
909 def nodes_booted(self):
910 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
912 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
914 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
915 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
916 vservername=self.vservername
919 local_key = "keys/%(vservername)s-debug.rsa"%locals()
922 local_key = "keys/key1.rsa"
923 node_infos = self.all_node_infos()
924 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
925 for (nodename,qemuname) in node_infos:
926 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
927 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
928 (timeout_minutes,silent_minutes,period))
930 for node_info in node_infos:
931 (hostname,qemuname) = node_info
932 # try to run 'hostname' in the node
933 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
934 # don't spam logs - show the command only after the grace period
935 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
937 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
939 node_infos.remove(node_info)
941 # we will have tried real nodes once, in case they're up - but if not, just skip
942 (site_spec,node_spec)=self.locate_hostname(hostname)
943 if TestNode.is_real_model(node_spec['node_fields']['model']):
944 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
945 node_infos.remove(node_info)
948 if datetime.datetime.now() > timeout:
949 for (hostname,qemuname) in node_infos:
950 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
952 # otherwise, sleep for a while
954 # only useful in empty plcs
957 def ssh_node_debug(self):
958 "Tries to ssh into nodes in debug mode with the debug ssh key"
959 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
961 def ssh_node_boot(self):
962 "Tries to ssh into nodes in production mode with the root ssh key"
963 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
966 def qemu_local_init (self):
967 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
971 "all nodes: invoke GetBootMedium and store result locally"
974 def qemu_local_config (self):
975 "all nodes: compute qemu config qemu.conf and store it locally"
978 def nodestate_reinstall (self):
979 "all nodes: mark PLCAPI boot_state as reinstall"
982 def nodestate_safeboot (self):
983 "all nodes: mark PLCAPI boot_state as safeboot"
986 def nodestate_boot (self):
987 "all nodes: mark PLCAPI boot_state as boot"
990 def nodestate_show (self):
991 "all nodes: show PLCAPI boot_state"
994 def qemu_export (self):
995 "all nodes: push local node-dep directory on the qemu box"
998 ### check hooks : invoke scripts from hooks/{node,slice}
999 def check_hooks_node (self):
1000 return self.locate_first_node().check_hooks()
1001 def check_hooks_sliver (self) :
1002 return self.locate_first_sliver().check_hooks()
1004 def check_hooks (self):
1005 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1006 return self.check_hooks_node() and self.check_hooks_sliver()
def do_check_initscripts(self):
    # For each (slice, node) pair whose slice spec carries an
    # 'initscriptstamp', check on the sliver that the initscript left its
    # stamp.
    # NOTE(review): several original lines are missing from this excerpt —
    # at least the initialization of the overall result, the suite of the
    # has_key guard (presumably 'continue'), the failure handling after
    # check_initscript_stamp, and the final return.
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptstamp'):   # Python-2 idiom; suite missing from excerpt
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
                # (failure handling missing from excerpt)
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper around the actual checker
    result = self.do_check_initscripts()
    return result
def initscripts (self):
    "create initscripts with PLCAPI"
    # Push every initscript from the spec into the PLC through the API.
    # NOTE(review): the original's closing line(s) — presumably
    # 'return True' per the step-method convention — are missing from this
    # excerpt.
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
def delete_initscripts (self):
    "delete initscripts with PLCAPI"
    # Best-effort removal of every initscript named in the spec.
    # NOTE(review): the try/except lines wrapping the Delete call and the
    # final return are missing from this excerpt; the last print below was
    # presumably in the except branch.
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
# NOTE(review): the 'def slices (self):' line is missing from this excerpt;
# the two lines below are the docstring and body of that step method.
"create slices with PLCAPI"
return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same machinery as slice creation, driven by the "delete" action
    action = "delete"
    return self.do_slices(action)
def do_slices (self, action="add"):
    # Create or delete every slice in the spec depending on 'action'.
    # NOTE(review): the branch lines selecting between the delete path and
    # the create path (and the final return) are missing from this excerpt —
    # as shown, both paths appear to run unconditionally, which cannot be the
    # real control flow.
    for slice in self.plc_spec['slices']:   # 'slice' shadows the builtin
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
# NOTE(review): this excerpt is gappy — the decorators (slice_mapper /
# node_mapper, defined at the top of the file) and the one-line bodies of
# these step methods are not visible here.
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def timestamp_qemu (self) :
    # NOTE(review): this docstring duplicates qemu_start's — most likely a
    # copy/paste slip; judging by the name, the method records a qemu
    # timestamp rather than starting qemu.  Confirm against the full file.
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # NOTE(review): lines are missing from this excerpt — at least the loop
    # over 'specs' (which binds 'spec'), the port selection (which binds
    # 'port'), and the failure returns.
    specs = self.plc_spec['tcp_test']
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
        # (failure handling missing from excerpt)
    # idem for the client side
    # NOTE(review): BUG? the client sliver is located with the *server* spec
    # keys ('server_node'/'server_slice'); the client side presumably should
    # use 'client_node'/'client_slice' — in loopback mode this is harmless,
    # in two-slice mode the client never runs in the client slice.  Confirm
    # against the tcp_test spec format before fixing.
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably
    # 'command = location') is missing from this excerpt.
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    # step convention: return a boolean
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
# in particular runs with --preserve (dont cleanup) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # all four SFA packages in one transaction
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install(packages)
def sfa_install_core(self):
    # core 'sfa' package only
    package = "sfa"
    return self.yum_install(package)
def sfa_install_plc(self):
    "yum install sfa-plc"
    # the PLC driver package
    package = "sfa-plc"
    return self.yum_install(package)
def sfa_install_client(self):
    "yum install sfa-client"
    # the sfi client-side package
    package = "sfa-client"
    return self.yum_install(package)
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # the sfatables filtering package
    package = "sfa-sfatables"
    return self.yum_install(package)
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # Try the nuke utilities in order (names changed across SFA releases),
    # stopping at the first that succeeds.
    # NOTE(review): only the first call is compared '==0'; the next two use
    # the raw exit code, where 0 (success) is falsy — the or-chain looks
    # inconsistent.  Confirm intent before relying on the result.
    # NOTE(review): trailing line(s) — presumably 'return True' — are
    # missing from this excerpt.
    self.run_in_guest("sfa-nuke.py")==0 or \
    self.run_in_guest("sfa-nuke-plc.py") or \
    self.run_in_guest("sfaadmin.py registry nuke")
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # NOTE(review): a few original lines are missing from this excerpt.
    sfa_spec=self.plc_spec['sfa']
    for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
        # remove the slice itself
        slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
        # the bare excepts make this best-effort: the messages show the
        # entries may legitimately be absent already
        try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
        except: print "Slice %s already absent from PLC db"%slicename
        # and the regular user attached to it
        username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(),username)
        except: print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # remove the packages first, then leftover state and logs
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # NOTE(review): a line is missing from the excerpt before this one; the
    # second 'rpm -e --noscripts' pass presumably retries a package whose
    # scriptlets fail.  The final 'return True' also appears to be missing.
    self.run_in_guest("rpm -e --noscripts sfa-plc")
1180 ### run unit tests for SFA
1181 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1182 # Running Transaction
1183 # Transaction couldn't start:
1184 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1185 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1186 # no matter how many Gbs are available on the testplc
1187 # could not figure out what's wrong, so...
1188 # if the yum install phase fails, consider the test is successful
1189 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # NOTE(review): the rest of this suite — presumably 'return True' to
        # ignore the install failure, per the comment in the full file about
        # sfa-tests failing to install on some platforms — is missing from
        # this excerpt.
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): the 'def confdir (self):' line and the trailing
# 'return dirname' are missing from this excerpt; this is the interior of
# that method — it ensures a per-plc local config directory exists.
dirname="conf.%s"%self.plc_spec['name']
if not os.path.isdir(dirname):
    utils.system("mkdir -p %s"%dirname)
if not os.path.isdir(dirname):
    # Python-2-only string exception; py3 would need a real Exception instance
    raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of a file inside this plc's local config directory
    return "/".join([self.confdir(), filename])
def confsubdir(self,dirname,clean,dry_run=False):
    # Ensure a subdirectory of the config dir exists; 'clean' wipes it first.
    # NOTE(review): the 'if clean:' guard before the rm -rf, and the final
    # 'return subdirname', are missing from this excerpt.
    subdirname="%s/%s"%(self.confdir(),dirname)
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    # under dry_run, tolerate the directory not actually being created
    if not dry_run and not os.path.isdir(subdirname):
        # Python-2-only string exception
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    # remove a local config file (given by its short name); True on success
    target = self.conffile(filename)
    return utils.system("rm -rf %s"%target) == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # Build an answer file for sfa-config-tty locally, then pipe it into the
    # tool inside the guest.
    # NOTE(review): several lines are missing from this excerpt — the rest of
    # the variable-name list, the loop header over the boolean variables and
    # its val='false' default, and the close of the temp file.
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
        # 'e VAR\nVALUE' is the config-tty "edit" dialogue
        if self.plc_spec['sfa'].has_key(var):   # Python-2 idiom
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # echo locally for the test log, then replay inside the guest
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element describing this plc, for aggregates.xml
    sfa_spec = self.plc_spec['sfa']
    port = sfa_spec['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, sfa_spec['SFA_REGISTRY_ROOT_AUTH'], port)
def registry_xml_line(self):
    # one <registry/> element for registries.xml; the port is hard-wired
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
1269 # a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # NOTE(review): one or two lines are missing from this excerpt right
    # after the comment above.
    agg_fname=self.conffile("agg.xml")
    # 'file()' is the Python-2 builtin alias for open(); the handle is never
    # explicitly closed
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # push both files into the guest's /etc/sfa; both copies must succeed
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def sfa_import(self):
    # reading the root auth up front is kept from the original: it fails
    # early if the spec lacks it, and the commented-out key-copy below
    # refers to it
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # the importer was renamed across SFA releases; try each name in turn
    # and stop at the first that exits 0
    for importer in ('sfa-import.py',
                     'sfa-import-plc.py',
                     'sfaadmin.py registry import_registry'):
        if self.run_in_guest(importer) == 0:
            return True
    return False
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    # start the sfa service inside the guest; the step passes on exit code 0
    status = self.run_in_guest('service sfa start')
    return status == 0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    # NOTE(review): a few lines are missing from this excerpt — at least an
    # early 'return' after the dry-run header (otherwise the loop would run
    # anyway) and the closing lines of the method.
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # (early return missing from excerpt)
    sfa_spec=self.plc_spec['sfa']
    # cannot use sfa_slice_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        # build the per-slice dot-sfi config locally...
        dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_config(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # wipe the whole sfi configuration area inside the guest
    target = "/root/sfi"
    self.run_in_guest("rm -rf %s" % target)
# NOTE(review): this region is heavily elided — the decorators (presumably
# @slice_sfa_mapper, defined at the top of the file), the one-line bodies,
# some docstrings, and at least two whole 'def' lines are missing from this
# excerpt.
def sfa_add_user(self):
def sfa_update_user(self):
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
def sfa_discover(self):
    "discover resources into resouces_in.rspec"
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
# orphan docstring: its 'def' line is missing from the excerpt
"various registry-related calls"
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# tail of sfa_stop: its 'def' line is missing from the excerpt
self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably
    # 'command = location') is missing from this excerpt.
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # rebinds 'remote' from the path above to a boolean result
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # NOTE(review): a few original lines — most likely blank lines or
    # section markers — are missing from this excerpt.
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log and unpack it under logs/sliver.var-log.<name>
    for sliver in self.all_sliver_objs():
        remote_tar = sliver.tar_var_logs()
        local_dir = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % local_dir)
        utils.system(remote_tar + " | tar -C %s -xf -" % local_dir)
def gather_var_logs (self):
    # pull the plc's /var/log into logs/myplc.var-log.<name>
    local_dir = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % local_dir)
    # relax permissions so the apache logs are browsable
    utils.system("chmod a+r,a+x %s/httpd" % local_dir)
def gather_pgsql_logs (self):
    # pull the plc's postgres logs into logs/myplc.pgsql-log.<name>
    local_dir = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % local_dir)
def gather_nodes_var_logs (self):
    # ssh (key1) into every node and unpack its /var/log under
    # logs/node.var-log.<node>
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode (self, site, node_spec)
            ssh = TestSsh (node.name(), key="keys/key1.rsa")
            fetch = ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C logs/node.var-log.%s -xf -" % node.name()
            utils.system("mkdir -p logs/node.var-log.%s" % node.name())
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): lines are missing from this excerpt — most likely a
    # try/except around the options.dbname access and a timestamp-based
    # fallback name built from 't' (which is otherwise unused below).
    name=self.options.dbname
    if not isinstance(name,StringTypes):   # StringTypes comes from the Python-2 'types' module
        # (fallback branch missing from excerpt)
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" (sic) — the dump *file* label is misspelled
    # while the database actually dumped is 'planetlab5'.  plc_db_restore
    # uses the same misspelled label, so the pair is self-consistent: fix
    # both together or not at all.
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # NOTE(review): trailing line(s) — presumably 'return True' — are
    # missing from this excerpt.
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # NOTE(review): "planetab5" (sic) — must stay in sync with the label
    # used by plc_db_dump, which shares the same misspelling.
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    # drop and recreate the database, then replay the dump
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
def standby_1_through_20(self):
    """convenience function to wait for a specified number of minutes"""
# NOTE(review): the @standby_generic decorators on the standby_<n> defs
# below (and possibly blank/decorator lines between them) are missing from
# this excerpt; standby_generic (top of file) derives the number of minutes
# to sleep from the method name, so the 'pass' bodies are intentional.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass