1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep-based step: parks the testbed for `minutes` minutes.
# NOTE(review): the dry_run short-circuit and return value are elided in this view.
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)

# Factory for the standby_<n> steps: extracts <n> from the generated
# method name and delegates to standby().
# NOTE(review): the inner wrapper function ('actual') is elided here.
def standby_generic (func):
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)

# Lifts a TestNode method into a plc-level step that runs it on every
# node of every site, and-ing the results into 'overall'.
def node_mapper (method):
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__

# Same idea for TestSlice methods: one call per slice spec.
def slice_mapper (method):
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__

# SFA variant: iterates over the sfa slice specs instead.
def slice_sfa_mapper (method):
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# ---- default step sequence (list fragment; surrounding brackets elided) ----
'display', 'resources_pre', SEP,
'delete_vs','create_vs','install', 'configure', 'start', SEP,
'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
'nodestate_reinstall', 'init_node','bootcd', 'configure_qemu', 'export_qemu', 'kill_all_qemus', 'start_node', SEP,
# SFA-related steps; the '@1' suffix presumably restricts a step to the first plc - TODO confirm
'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
'configure_sfi@1', 'add_user_sfa@1', 'add_sfa@1', 'create_sfa@1', SEPSFA,
'update_user_sfa@1', 'update_sfa@1', 'view_sfa@1', SEPSFA,
'install_unittest_sfa@1','unittest_sfa@1',SEPSFA,
# we used to run plcsh_stress_test, and then nodes_ssh_debug and nodes_ssh_boot
# but as the stress test might take a while, we sometimes missed the debug mode..
'nodes_ssh_debug', 'plcsh_stress_test@1', SEP,
'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
'check_slice_sfa@1', 'delete_slice_sfa@1', 'delete_user_sfa@1', SEPSFA,
'check_tcp', 'check_hooks@1', SEP,
'force_gather_logs', 'force_resources_post', SEP,
# ---- on-demand steps, not part of the default run (list fragment) ----
'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
'stop', 'vs_start', SEP,
'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
'clean_leases', 'list_leases', SEP,
'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
'plcclean_sfa', 'dbclean_sfa', 'stop_sfa','uninstall_sfa', 'clean_sfi', SEP,
'db_dump' , 'db_restore', SEP,
'standby_1 through 20',SEP,
121 def printable_steps (list):
122 single_line=" ".join(list)+" "
123 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
125 def valid_step (step):
126 return step != SEP and step != SEPSFA
128 # turn off the sfa-related steps when build has skipped SFA
# this is originally for centos5 as recent SFAs won't build on this platform
# Probe the build's rpms repository; when it contains no sfa- package,
# demote every sfa-related step from the default list to the on-demand list.
def check_whether_build_has_sfa (rpms_url):
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # NOTE(review): the early return taken when retcod==0 is elided in this view
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
def __init__ (self,plc_spec,options):
    # keep the whole spec around; nearly every step reads from it
    self.plc_spec=plc_spec
    # ssh driver towards the plc box (remote workdir named after the build)
    self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    # NOTE(review): this raise sits in an elided branch (non-vserver/chroot mode)
    raise Exception,'chroot-based myplc testing is deprecated'
    self.apiserver=TestApiserver(self.url,options.dry_run)

    # printable name: <plc name>.<vserver name>
    name=self.plc_spec['name']
    return "%s.%s"%(name,self.vservername)

    # hostname of the plc box
    return self.plc_spec['hostname']

    # whether the plc box is the local machine
    return self.test_ssh.is_local()
164 # define the API methods on this object through xmlrpc
165 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    """Full shell command that runs `command` inside the vserver guest."""
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    """Start the guest vserver through ssh; returns the command's exit code."""
    start_command = self.test_ssh.actual_command(self.start_guest_in_host())
    return utils.system(start_command)
def run_in_guest (self,command):
    """Run `command` inside the guest vserver and return its exit code."""
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    """Run `command` on the plc host itself (not inside the guest)."""
    # delegate to the ssh driver, which knows the remote build directory
    return self.test_ssh.run_in_buildname(command)
181 #command gets run in the vserver
def host_to_guest(self,command):
    """Wrap `command` so that, run on the host, it executes in the vserver."""
    return "vserver %s exec %s"%(self.vservername,command)
#command (to be run in the host) that starts the vserver
def start_guest_in_host(self):
    """Host-side command that boots the guest vserver."""
    return "vserver %s start"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    """Pipe the output of the local command `local` into `remote` run in the guest."""
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
# Credentials struct for PLCAPI calls as the root user, built from the spec.
# NOTE(review): the closing brace of the returned dict is elided in this view.
def auth_root (self):
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
# Spec lookup helpers: each scans plc_spec and raises when nothing matches.
# NOTE(review): the 'return <match>' lines inside the if-branches are elided here.
def locate_site (self,sitename):
    # a site matches either on its display name or on its login_base
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename

def locate_node (self,nodename):
    # matches on the spec-level node name
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename

def locate_hostname (self,hostname):
    # matches on the PLCAPI hostname field
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname

def locate_key (self,keyname):
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname

def locate_slice (self, slicename):
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
# One TestSliver per (slice,node) pair found in the spec.
# NOTE(review): the accumulator init and final 'return result' are elided here.
def all_sliver_objs (self):
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))

# Resolve the specs then wrap them into Test* objects.
def locate_sliver_obj (self,nodename,slicename):
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)

# TestNode for the first node of the first slice in the spec.
# NOTE(review): the return line is elided in this view.
def locate_first_node(self):
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    """Sliver object for the first node of the first slice in the spec."""
    first_slice = self.plc_spec['slices'][0]
    first_nodename = first_slice['nodenames'][0]
    return self.locate_sliver_obj(first_nodename,first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): the init of 'tuples'/'result' and the return are elided here
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu (non-real) nodes live on a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)

# a step for checking this stuff
def show_boxes (self):
    'print summary of nodes location'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )

# make this a valid step
def kill_all_qemus(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBox(box,self.options.buildname).kill_all_qemus(nodedir)

# make this a valid step
def list_all_qemus(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBox(box,self.options.buildname).list_all_qemus()

# kill only the right qemus
def list_qemus(self):
    'list qemu instances for our nodes'
    # NOTE(review): the per-box body is elided in this view
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version

# kill only the right qemus
def kill_qemus(self):
    'kill the qemu instances for our nodes'
    # NOTE(review): the per-box body is elided in this view
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
#################### display config
    # body of the 'display' step (def line elided): two-pass dump of the spec
    "show test configuration after localization"
    self.display_pass (1)
    self.display_pass (2)

# keys shown even without --verbose
always_display_keys=['PLC_WWW_HOST','nodes','sites',]
# One display pass over the whole plc_spec; dispatches per spec kind.
# NOTE(review): the pass-number tests and several branch headers are elided here.
def display_pass (self,passno):
    for (key,val) in self.plc_spec.iteritems():
        if not self.options.verbose and key not in TestPlc.always_display_keys: continue
        self.display_site_spec(site)
        for node in site['nodes']:
            self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
        self.display_slice_spec (slice)
        self.display_key_spec (key)
        if key not in ['sites','initscripts','slices','keys', 'sfa']:
            print '+ ',key,':',val

# Pretty-print one site spec; per-key formatting, gated by --verbose.
# NOTE(review): the branch headers for 'nodes'/'users' are elided here.
def display_site_spec (self,site):
    print '+ ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
        if not self.options.verbose and k not in TestPlc.always_display_keys: continue
        print '+ ','nodes : ',
        print node['node_fields']['hostname'],'',
        print user['name'],'',
        elif k == 'site_fields':
            print '+ login_base',':',v['login_base']
        elif k == 'address_fields':
381 def display_initscript_spec (self,initscript):
382 print '+ ======== initscript',initscript['initscript_fields']['name']
384 def display_key_spec (self,key):
385 print '+ ======== key',key['name']
# Pretty-print one slice spec.
# NOTE(review): several per-key branches of this loop are elided in this view.
def display_slice_spec (self,slice):
    print '+ ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '+ fields',':',
            print 'max_nodes=',v['max_nodes'],
409 def display_node_spec (self,node):
410 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
411 print "hostname=",node['node_fields']['hostname'],
412 print "ip=",node['interface_fields']['ip']
413 if self.options.verbose:
414 utils.pprint("node details",node,depth=3)
# another entry point for just showing the boxes involved
def display_mapping (self):
    TestPlc.display_mapping_plc(self.plc_spec)

# Summarize one plc (vserver address, IP) and all its nodes.
def display_mapping_plc (plc_spec):
    print '+ MyPLC',plc_spec['name']
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)
431 def display_mapping_node (node_spec):
432 print '+ NODE %s'%(node_spec['name'])
433 print '+\tqemu box %s'%node_spec['host_box']
434 print '+\thostname=%s'%node_spec['node_fields']['hostname']
def resources_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    # deferred import: LocalTestResources is an optional, site-provided module
    from LocalTestResources import local_resources as resources
    return resources.step_pre(self)
def resources_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    # deferred import: LocalTestResources is an optional, site-provided module
    from LocalTestResources import local_resources as resources
    return resources.step_post(self)
def resources_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    # deferred import: LocalTestResources is an optional, site-provided module
    from LocalTestResources import local_resources as resources
    return resources.step_list(self)
def resources_release (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources is an optional, site-provided module
    from LocalTestResources import local_resources as resources
    return resources.step_release(self)
def resources_release_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources is an optional, site-provided module
    from LocalTestResources import local_resources as resources
    return resources.step_release_plc(self)
def resources_release_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources is an optional, site-provided module
    from LocalTestResources import local_resources as resources
    return resources.step_release_qemu(self)
467 "vserver delete the test myplc"
468 self.run_in_host("vserver --silent %s delete"%self.vservername)
472 # historically the build was being fetched by the tests
473 # now the build pushes itself as a subdir of the tests workdir
474 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
475 def create_vs (self):
476 "vserver creation (no install done)"
477 # push the local build/ dir to the testplc box
479 # a full path for the local calls
480 build_dir=os.path.dirname(sys.argv[0])
481 # sometimes this is empty - set to "." in such a case
482 if not build_dir: build_dir="."
483 build_dir += "/build"
485 # use a standard name - will be relative to remote buildname
487 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
488 self.test_ssh.rmdir(build_dir)
489 self.test_ssh.copy(build_dir,recursive=True)
490 # the repo url is taken from arch-rpms-url
491 # with the last step (i386) removed
492 repo_url = self.options.arch_rpms_url
493 for level in [ 'arch' ]:
494 repo_url = os.path.dirname(repo_url)
495 # pass the vbuild-nightly options to vtest-init-vserver
497 test_env_options += " -p %s"%self.options.personality
498 test_env_options += " -d %s"%self.options.pldistro
499 test_env_options += " -f %s"%self.options.fcdistro
500 script="vtest-init-vserver.sh"
501 vserver_name = self.vservername
502 vserver_options="--netdev eth0 --interface %s"%self.vserverip
504 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
505 vserver_options += " --hostname %s"%vserver_hostname
507 print "Cannot reverse lookup %s"%self.vserverip
508 print "This is considered fatal, as this might pollute the test results"
510 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
511 return self.run_in_host(create_vserver) == 0
515 "yum install myplc, noderepo, and the plain bootstrapfs"
517 # workaround for getting pgsql8.2 on centos5
518 if self.options.fcdistro == "centos5":
519 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
522 if self.options.personality == "linux32":
524 elif self.options.personality == "linux64":
527 raise Exception, "Unsupported personality %r"%self.options.personality
528 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
531 pkgs_list.append ("slicerepo-%s"%nodefamily)
532 pkgs_list.append ("myplc")
533 pkgs_list.append ("noderepo-%s"%nodefamily)
534 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
535 pkgs_string=" ".join(pkgs_list)
536 self.run_in_guest("yum -y install %s"%pkgs_string)
537 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
    # body of the 'configure' step (def line elided): generate a script for
    # plc-config-tty, pipe it into the guest, then clean up the local file
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    # one 'e <var>\n<value>\n' stanza per configuration variable from the spec
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
                 # Above line was added for integrating SFA Testing
                 'PLC_RESERVATION_GRANULARITY',
                 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    # 'w' writes the config, 'q' quits plc-config-tty
    fileconf.write('w\n')
    fileconf.write('q\n')
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)
    # body of the 'start' step (def line elided)
    self.run_in_guest('service plc start')

    # body of the 'stop' step (def line elided)
    self.run_in_guest('service plc stop')

    # docstring of the 'vs_start' step (def line elided)
    "start the PLC vserver"

# stores the keys from the config for further use
def store_keys(self):
    "stores test users ssh keys in keys/"
    # one TestKey object per key spec; each knows how to save itself
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()
def clean_keys(self):
    "removes keys cached in keys/"
    # bugfix: the original called os.path(sys.argv[0]) - os.path is a module
    # and is not callable, so this step always raised TypeError; the intent
    # is clearly the directory this script runs from, i.e. os.path.dirname
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def fetch_keys(self):
    "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
    # NOTE(review): the init of 'dir'/'overall' and the return are elided here
    if not os.path.isdir(dir):
    vservername=self.vservername
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False

    # body of the 'sites' step (def line elided)
    "create sites with PLCAPI"
    return self.do_sites()
def clean_sites (self):
    "delete sites with PLCAPI"
    # thin wrapper: same worker as the creation step, in delete mode
    return self.do_sites(action="delete")
# Shared worker behind sites()/clean_sites(): create or delete every site.
# NOTE(review): the 'else:' introducing the creation branch is elided here.
def do_sites (self,action="add"):
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
635 def clean_all_sites (self):
636 "Delete all sites in PLC, and related objects"
637 print 'auth_root',self.auth_root()
638 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
639 for site_id in site_ids:
640 print 'Deleting site_id',site_id
641 self.apiserver.DeleteSite(self.auth_root(),site_id)
644 "create nodes with PLCAPI"
645 return self.do_nodes()
def clean_nodes (self):
    "delete nodes with PLCAPI"
    # thin wrapper: same worker as the creation step, in delete mode
    return self.do_nodes(action="delete")
# Shared worker behind nodes()/clean_nodes(): create or delete every node.
# NOTE(review): the 'if action != add:' / 'else:' branch headers are elided here.
def do_nodes (self,action="add"):
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        utils.header("Deleting nodes in site %s"%test_site.name())
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            utils.header("Deleting %s"%test_node.name())
            test_node.delete_node()
        utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
        for node_spec in site_spec['nodes']:
            utils.pprint('Creating node %s'%node_spec,node_spec)
            test_node = TestNode (self,test_site,node_spec)
            test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # delegate to the shared worker in 'add' mode
    return self.do_nodegroups("add")
def clean_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # delegate to the shared worker in 'delete' mode
    return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    # values below one year are relative offsets, counted in grains from start;
    # anything larger is taken as an absolute unix timestamp
    if timestamp < TestPlc.YEAR:
        return start + timestamp*grain
    else:
        return timestamp
def timestamp_printable (timestamp):
    """Human-readable UTC rendering (month-day hh:mm:ss) of a unix timestamp."""
    utc_tuple = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', utc_tuple)
685 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
687 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
688 print 'API answered grain=',grain
689 start=(now/grain)*grain
691 # find out all nodes that are reservable
692 nodes=self.all_reservable_nodenames()
694 utils.header ("No reservable node found - proceeding without leases")
697 # attach them to the leases as specified in plc_specs
698 # this is where the 'leases' field gets interpreted as relative of absolute
699 for lease_spec in self.plc_spec['leases']:
700 # skip the ones that come with a null slice id
701 if not lease_spec['slice']: continue
702 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
703 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
704 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
705 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
706 if lease_addition['errors']:
707 utils.header("Cannot create leases, %s"%lease_addition['errors'])
710 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
711 (nodes,lease_spec['slice'],
712 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
713 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
717 def clean_leases (self):
718 "remove all leases in the myplc side"
719 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
720 utils.header("Cleaning leases %r"%lease_ids)
721 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
def list_leases (self):
    "list all leases known to the myplc"
    # NOTE(review): the 'now=' assignment and the 'for l in leases:' header are elided here
    leases = self.apiserver.GetLeases(self.auth_root())
    current=l['t_until']>=now
    # expired leases only shown with --verbose
    if self.options.verbose or current:
        utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                               TestPlc.timestamp_printable(l['t_from']),
                                               TestPlc.timestamp_printable(l['t_until'])))
# create nodegroups if needed, and populate
# Shared worker behind nodegroups()/clean_nodegroups(); drives tag types,
# nodegroups and node tags through PLCAPI.
# NOTE(review): the init of 'groups_dict', the action tests, and several
# try/except headers are elided in this view - the bare 'traceback.print_exc()'
# lines below belong to those elided except branches.
def do_nodegroups (self, action="add"):
    # 1st pass to scan contents
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # accept either one name or a list of names
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
        tag_type_id = tag_types[0]['tag_type_id']
        # tag type gets created when the lookup above came back empty
        tag_type_id = self.apiserver.AddTagType(auth,
                                                {'tagname':nodegroupname,
                                                 'description': 'for nodegroup %s'%nodegroupname,
        print 'located tag (type)',nodegroupname,'as',tag_type_id
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
        self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
        print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
            self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
            traceback.print_exc()
            print 'node',nodename,'seems to already have tag',nodegroupname
            # check anyway that the tag really reads 'yes'
            expect_yes = self.apiserver.GetNodeTags(auth,
                                                    {'hostname':nodename,
                                                     'tagname':nodegroupname},
                                                    ['value'])[0]['value']
            if expect_yes != "yes":
                print 'Mismatch node tag on node',nodename,'got',expect_yes
            if not self.options.dry_run:
                print 'Cannot find tag',nodegroupname,'on node',nodename
        # delete path (action != add)
        print 'cleaning nodegroup',nodegroupname
        self.apiserver.DeleteNodeGroup(auth,nodegroupname)
        traceback.print_exc()
# return a list of tuples (nodename,qemuname)
# NOTE(review): the init of 'node_infos' and its return are elided in this view
def all_node_infos (self) :
    for site_spec in self.plc_spec['sites']:
        node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                        for node_spec in site_spec['nodes'] ]
809 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
# Hostnames of the nodes whose node_type is 'reservable'.
# NOTE(review): the init of 'res' and its return are elided in this view.
def all_reservable_nodenames (self):
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            node_fields=node_spec['node_fields']
            if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                res.append(node_fields['hostname'])
819 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
820 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
821 if self.options.dry_run:
825 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
826 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
827 # the nodes that haven't checked yet - start with a full list and shrink over time
828 tocheck = self.all_hostnames()
829 utils.header("checking nodes %r"%tocheck)
830 # create a dict hostname -> status
831 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
834 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
836 for array in tocheck_status:
837 hostname=array['hostname']
838 boot_state=array['boot_state']
839 if boot_state == target_boot_state:
840 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
842 # if it's a real node, never mind
843 (site_spec,node_spec)=self.locate_hostname(hostname)
844 if TestNode.is_real_model(node_spec['node_fields']['model']):
845 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
847 boot_state = target_boot_state
848 elif datetime.datetime.now() > graceout:
849 utils.header ("%s still in '%s' state"%(hostname,boot_state))
850 graceout=datetime.datetime.now()+datetime.timedelta(1)
851 status[hostname] = boot_state
853 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
856 if datetime.datetime.now() > timeout:
857 for hostname in tocheck:
858 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
860 # otherwise, sleep for a while
862 # only useful in empty plcs
def nodes_booted(self):
    # shortcut: wait for every node to reach the 'boot' state
    target = 'boot'
    return self.nodes_check_boot_state(target,timeout_minutes=30,silent_minutes=20)
# Polls ssh access into every node until success or timeout.
# NOTE(review): the polling loop header, the debug/boot key selection branches,
# the success test, and the sleep/returns are elided in this view.
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
    # debug mode: use the plc-generated debug ssh key
    local_key = "keys/%(vservername)s-debug.rsa"%locals()
    # boot mode: use the regular user key
    local_key = "keys/key1.rsa"
    node_infos = self.all_node_infos()
    utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
    for (nodename,qemuname) in node_infos:
        utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                 (timeout_minutes,silent_minutes,period))
    for node_info in node_infos:
        (hostname,qemuname) = node_info
        # try to run 'hostname' in the node
        command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
        # don't spam logs - show the command only after the grace period
        success = utils.system ( command, silent=datetime.datetime.now() < graceout)
        utils.header('Successfully entered root@%s (%s)'%(hostname,message))
        # NOTE(review): node_infos is mutated while being iterated - relies on
        # the outer polling loop re-listing; worth confirming in the full source
        node_infos.remove(node_info)
        # we will have tried real nodes once, in case they're up - but if not, just skip
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
            node_infos.remove(node_info)
    if datetime.datetime.now() > timeout:
        for (hostname,qemuname) in node_infos:
            utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
    # otherwise, sleep for a while
    # only useful in empty plcs
def nodes_ssh_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # shorter timeout than the boot check: debug mode comes up quickly
    return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
def nodes_ssh_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # longer timeout: a full node install/boot can take a while
    return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
# Per-node steps; bodies and decorator lines are elided in this view -
# presumably each delegates to the matching TestNode method via node_mapper.
def init_node (self):
    "all nodes : init a clean local directory for holding node-dep stuff like iso image..."

    # docstring of the 'bootcd' step (def line elided)
    "all nodes: invoke GetBootMedium and store result locally"

def configure_qemu (self):
    "all nodes: compute qemu config qemu.conf and store it locally"

def nodestate_reinstall (self):
    "all nodes: mark PLCAPI boot_state as reinstall"

def nodestate_safeboot (self):
    "all nodes: mark PLCAPI boot_state as safeboot"

def nodestate_boot (self):
    "all nodes: mark PLCAPI boot_state as boot"

def nodestate_show (self):
    "all nodes: show PLCAPI boot_state"

def export_qemu (self):
    "all nodes: push local node-dep directory on the qemu box"
954 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-context hook scripts on the first node only
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the slice-context hook scripts on the first sliver only
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # both contexts must pass; short-circuits on the node check
    return self.check_hooks_node() and self.check_hooks_sliver()
# Worker behind check_initscripts: verify every sliver that declares an
# initscript has actually triggered it.
# NOTE(review): the 'overall' accumulator, the 'continue' under the has_key
# test, and the final return are elided in this view.
def do_check_initscripts(self):
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptname'):
        initscript=slice_spec['initscriptname']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin wrapper around the worker
    return self.do_check_initscripts()
def initscripts (self):
    "create initscripts with PLCAPI"
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])

# NOTE(review): the try/except wrapping each deletion is elided in this view -
# the last print below belongs to the except branch.
def clean_initscripts (self):
    "delete initscripts with PLCAPI"
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
1007 "create slices with PLCAPI"
1008 return self.do_slices()
def clean_slices (self):
    "delete slices with PLCAPI"
    # thin wrapper: same worker as the creation step, in delete mode
    return self.do_slices("delete")
def do_slices (self, action="add"):
    """Create (action="add") or delete (any other action) every slice in the spec.

    Shared implementation behind the slices / clean_slices steps.
    NOTE(review): the if/else branch lines were missing from this fragment and
    have been restored; the loop variable is also renamed from 'slice', which
    shadowed the builtin.
    """
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if action != "add":
            utils.header("Deleting slices in site %s"%test_site.name())
            test_slice.delete_slice()
        else:
            utils.pprint("Creating slice",slice_spec)
            test_slice.create_slice()
            utils.header('Created Slice %s'%slice_spec['slice_fields']['name'])
    return True
def check_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
    # NOTE(review): the body is not visible in this fragment; by the pattern at
    # the top of the file this is presumably fanned out per-slice via
    # slice_mapper -- TODO confirm
def clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
    # NOTE(review): the body is not visible in this fragment -- TODO confirm
    # how the node entries are located and stripped
def start_node (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
    # NOTE(review): the body is not visible in this fragment; presumably fanned
    # out per-node via node_mapper like the other node steps -- TODO confirm
def check_tcp (self):
    """check TCP connectivity between 2 slices (or in loopback if only one is defined)

    For each entry of the 'tcp_test' spec, start a TCP server in the server
    sliver and connect to it from the client sliver.
    """
    specs = self.plc_spec['tcp_test']
    overall = True
    for spec in specs:
        port = spec['port']
        # locate the server-side sliver and fire the server
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
            overall = False
            # no point in running the client against a dead server
            break
        # idem for the client side
        # BUG FIX: the client sliver used to be located from the *server* spec
        # entries, so the client always ran inside the server sliver
        c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
            overall = False
    return overall
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # run it in checking mode; the 'command = location' initialization was
    # missing from this fragment (mirrors populate() below) and is restored
    command = location
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
# in particular runs with --preserve (don't cleanup) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
def install_sfa(self):
    "yum install sfa, sfa-plc and sfa-client"
    # install the packages, then confirm through rpm -q that they all landed
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    rpm_check = self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")
    return rpm_check == 0
def dbclean_sfa(self):
    "thoroughly wipes off the SFA database"
    # BUG FIX: the ==0 comparison used to be computed and dropped, so the
    # method always returned None; propagate the success boolean like the
    # other steps do
    return self.run_in_guest("sfa-nuke-plc.py")==0
def plcclean_sfa(self):
    """cleans the PLC entries that were created as a side effect of running the script

    Deletion failures are treated as 'already absent' and only reported.
    """
    sfa_spec=self.plc_spec['sfa']
    # the slice created by the sfa scenario: <login_base>_<slicename>
    slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
    # narrowed from bare 'except:' which would also swallow KeyboardInterrupt;
    # print(...) with a single argument behaves identically under python 2 & 3
    try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
    except Exception: print("Slice %s already absent from PLC db"%slicename)
    # the user created by the sfa scenario: <regularuser>@<domain>
    username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
    try: self.apiserver.DeletePerson(self.auth_root(),username)
    except Exception: print("User %s already absent from PLC db"%username)
    print("REMEMBER TO RUN import_sfa AGAIN")
    return True
def uninstall_sfa(self):
    "uses rpm to uninstall sfa - ignore result"
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    # wipe state, config and logs left behind by the packages
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # forced second removal of sfa-plc; the lines just before this one are not
    # visible in this fragment -- TODO confirm the intent from the full file
    self.run_in_guest("rpm -e --noscripts sfa-plc")
def install_unittest_sfa(self):
    "yum install sfa-tests"
    # pull the package, then verify its presence with rpm -q
    self.run_in_guest("yum -y install sfa-tests")
    return 0 == self.run_in_guest("rpm -q sfa-tests")
def unittest_sfa(self):
    "run the test suite shipped with sfa-tests inside the guest"
    # a nonzero exit status from testAll.py means failure
    status = self.run_in_guest("/usr/share/sfa/tests/testAll.py")
    return status == 0
def confdir(self):
    """Return (creating it if needed) the local per-plc config dir conf.<plcname>.

    NOTE(review): the 'def' line and trailing 'return dirname' were missing
    from this fragment and have been restored from the callers (conffile).
    """
    dirname="conf.%s"%self.plc_spec['name']
    if not os.path.isdir(dirname):
        utils.system("mkdir -p %s"%dirname)
    if not os.path.isdir(dirname):
        # raising a plain string is not a valid exception -- use a real one
        raise Exception("Cannot create config dir for plc %s"%self.name())
    return dirname
def conffile(self,filename):
    # path of a file inside this plc's local config directory
    return self.confdir() + "/" + filename
def confsubdir(self,dirname,clean):
    """Return (creating it if needed) a subdir of the local config directory.

    clean -- when true, wipe any previous contents first.
    NOTE(review): the 'if clean:' guard and 'return subdirname' lines were
    missing from this fragment and have been restored (the caller
    configure_sfi uses both the keyword and the returned path).
    """
    subdirname="%s/%s"%(self.confdir(),dirname)
    if clean:
        utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not os.path.isdir(subdirname):
        # raising a plain string is not a valid exception -- use a real one
        raise Exception("Cannot create config subdir %s for plc %s"%(dirname,self.name()))
    return subdirname
def conffile_clean (self,filename):
    # remove the given file from the local config directory
    full_path = self.conffile(filename)
    return 0 == utils.system("rm -rf %s"%full_path)
def configure_sfa(self):
    "run sfa-config-tty"
    # build a scripted input file for sfa-config-tty ('e VAR' edit commands
    # followed by the value, then w/R/q) and replay it inside the guest
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    # NOTE(review): several entries of this variable list fell in the gap of
    # this fragment (between SFA_AGGREGATE_HOST and SFA_PLC_DB_PASSWORD), and
    # the closing ']:' of the for statement is not visible either
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_PLC_DB_PASSWORD',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    for var in ['SFA_API_DEBUG']:
        # NOTE(review): the initialization of 'val' (presumably val='false')
        # is missing from this fragment -- TODO confirm
        if self.plc_spec['sfa'][var]: val='true'
        fileconf.write ('e %s\n%s\n'%(var,val))
    # w = write, R and q close the sfa-config-tty session
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # show the script locally, then feed it to sfa-config-tty in the guest
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # one <aggregate/> element describing this plc, consumed by
    # cross_configure_sfa when building the other plcs' aggregates.xml
    addr = self.vserverip
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="12346"/>' % (addr, hrn)
def registry_xml_line(self):
    # one <registry/> element describing this plc, consumed by
    # cross_configure_sfa when building the other plcs' registries.xml
    addr = self.vserverip
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (addr, hrn)
# a cross step that takes all other plcs in argument
def cross_configure_sfa(self, other_plcs):
    # of course with a single plc, other_plcs is an empty list
    # NOTE(review): one or two lines are missing from this fragment right here
    # (possibly an early return on an empty other_plcs) -- check the full file
    # build aggregates.xml locally from the other plcs' xml lines...
    agg_fname=self.conffile("agg.xml")
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    # ...same for registries.xml
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # push both files into the guest's /etc/sfa; succeed only if both copies do
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def import_sfa(self):
    "import the plc entries into the SFA registry with sfa-import-plc.py"
    # the 'auth' local that used to be computed here was never used -- dropped
    return self.run_in_guest('sfa-import-plc.py')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def start_sfa(self):
    "service sfa start"
    # success means the init script exited with status 0
    return 0 == self.run_in_guest('service sfa start')
def configure_sfi(self):
    sfa_spec=self.plc_spec['sfa']
    # NOTE(review): this bare string is NOT a docstring (it is not the first
    # statement of the function) and is a no-op -- consider moving it up.
    # Also, the fileconf.close() calls between sections are not visible in
    # this fragment -- confirm against the full file.
    "sfi client configuration"
    # lay out a local dot-sfi directory, then push it to /root/.sfi in the guest
    dir_name=self.confsubdir("dot-sfi",clean=True)
    # private key of the PI user, stored as <piuser>.pkey
    file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
    fileconf=open(file_name,'w')
    fileconf.write (self.plc_spec['keys'][0]['private'])
    utils.header ("(Over)wrote %s"%file_name)
    # sfi_config: auth / user / registry / slice-manager settings
    file_name=dir_name + os.sep + 'sfi_config'
    fileconf=open(file_name,'w')
    SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
    fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
    fileconf.write('\n')
    SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
    fileconf.write ("SFI_USER='%s'"%SFI_USER)
    fileconf.write('\n')
    SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
    fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
    fileconf.write('\n')
    SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
    fileconf.write ("SFI_SM='%s'"%SFI_SM)
    fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # person.xml: one record per entry of sfa_person_xml
    file_name=dir_name + os.sep + 'person.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_person_xml']:
        person_record=record
        fileconf.write(person_record)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice.xml: one record per entry of sfa_slice_xml
    file_name=dir_name + os.sep + 'slice.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_slice_xml']:
        # NOTE(review): the line assigning slice_record (presumably
        # slice_record=record) is missing from this fragment -- TODO confirm
        #slice_record=sfa_spec['sfa_slice_xml']
        fileconf.write(slice_record)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # slice.rspec built from the sfa_slice_rspec dict
    file_name=dir_name + os.sep + 'slice.rspec'
    fileconf=open(file_name,'w')
    for (key, value) in sfa_spec['sfa_slice_rspec'].items():
        # NOTE(review): the line building slice_rspec from (key, value) is
        # missing from this fragment -- TODO confirm
        fileconf.write(slice_rspec)
        fileconf.write('\n')
    utils.header ("(Over)wrote %s"%file_name)
    # push to the remote root's .sfi
    location = "root/.sfi"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs(dir_name, remote, recursive=True)
def clean_sfi (self):
    "remove the guest-side /root/.sfi directory"
    # exit status is not checked, matching the other cleanup steps
    self.run_in_guest("rm -rf /root/.sfi")
def add_user_sfa(self):
    # delegate to a one-shot sfa user test object
    user = TestUserSfa(self)
    return user.add_user()
1292 "run sfi.py add (on Registry)"
def create_sfa(self):
    "run sfi.py create (on SM) for 1st-time creation"
    # NOTE(review): the body is not visible in this fragment -- TODO confirm
def update_user_sfa(self):
    # delegate to a one-shot sfa user test object
    user = TestUserSfa(self)
    return user.update_user()
def update_sfa(self):
    "run sfi.py create (on SM) on existing object"
    # NOTE(review): the body is not visible in this fragment -- TODO confirm
def check_sfa(self):
    "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
    # NOTE(review): the 'def' line and the leading 'return' of this step fell
    # in a gap of this fragment; restored here under the name check_sfa --
    # confirm the name against the full file
    sfa_spec=self.plc_spec['sfa']
    auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    return \
        self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
def check_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
    # NOTE(review): the body is not visible in this fragment; presumably fanned
    # out per-slice via slice_sfa_mapper (see top of file) -- TODO confirm
def delete_user_sfa(self):
    "run sfi.py delete (on SM) for user"
    # inline the one-shot helper object
    return TestUserSfa(self).delete_user()
def delete_slice_sfa(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    # NOTE(review): the body is not visible in this fragment -- TODO confirm
def stop_sfa(self):
    "service sfa stop"
    # NOTE(review): the 'def' line fell in a gap of this fragment and has been
    # restored; the ==0 result also used to be computed and dropped -- return
    # it as the step status, like the other service steps
    return self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # first run: preserve contents, no --check; the 'command = location'
    # initialization was missing from this fragment and is restored
    command = location
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0)
    # second run with --foreign; renamed the result from 'remote', which
    # shadowed the remote path computed above
    command += ' --foreign'
    remote_ok = (self.run_in_guest(command) == 0)
    return local and remote_ok
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2) iterate all nodes of all sites for their qemu logs
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # store each sliver's /var/log under logs/sliver.var-log.<sliver>/
    for sliver in self.all_sliver_objs():
        tar_cmd = sliver.tar_var_logs()
        dest = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % dest)
        # pipe the remote tar stream into a local extraction
        utils.system("%s | tar -C %s -xf -" % (tar_cmd, dest))
def gather_var_logs (self):
    # fetch the plc guest's /var/log under logs/myplc.var-log.<plc>/
    dest = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_stream = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_stream + "| tar -C %s -xf -" % dest)
    # open up the httpd subdir so anyone can browse the fetched logs
    utils.system("chmod a+r,a+x %s/httpd" % dest)
def gather_pgsql_logs (self):
    # fetch the plc guest's postgres logs under logs/myplc.pgsql-log.<plc>/
    dest = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    tar_stream = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_stream + "| tar -C %s -xf -" % dest)
def gather_nodes_var_logs (self):
    # fetch every node's /var/log under logs/node.var-log.<node>/
    for site_spec in self.plc_spec['sites']:
        site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self, site, node_spec)
            # ssh straight to the node with the test key
            node_ssh = TestSsh(node.name(), key="keys/key1.rsa")
            fetch = node_ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C logs/node.var-log.%s -xf -" % node.name()
            utils.system("mkdir -p logs/node.var-log.%s" % node.name())
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): the try:/except: lines of this method are missing from the
    # fragment; a missing or non-string options.dbname presumably falls back
    # to a date-based name built from 't' -- confirm against the full file
    name=self.options.dbname
    if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): the 'def' line was missing from this fragment and has been
    # restored.  The dump file is named 'planetab5' (sic) -- left unchanged
    # since db_restore below reads the exact same name.
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
def db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # same (sic) filename as produced by db_dump
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    # drop, recreate, then reload the database from the dump file
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
# 20 no-op placeholders, one per "standby" minute slot.
# NOTE(review): the alternating lines missing from this fragment presumably
# carry the @standby_generic decorators (see standby_generic at the top of the
# file) that turn each of these into a real timed-wait step -- TODO confirm
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass