1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    # Pause the whole sequence for <minutes> minutes - used by the
    # standby_* pseudo-steps to park a run between two phases.
    # NOTE(review): dry_run is unused in the lines visible here; the elided
    # lines presumably short-circuit the sleep when dry_run is set - confirm.
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
55 def slice_mapper (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
69 def slice_sfa_mapper (method):
72 slice_method = TestSliceSfa.__dict__[method.__name__]
73 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
74 site_spec = self.locate_site (slice_spec['sitename'])
75 test_site = TestSite(self,site_spec)
76 test_slice=TestSliceSfa(self,test_site,slice_spec)
77 if not slice_method(test_slice,self.options): overall=False
79 # restore the doc text
80 actual.__doc__=method.__doc__
89 'display', 'resources_pre', SEP,
90 'delete_vs','create_vs','install', 'configure', 'start', SEP,
91 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'nodestate_reinstall', 'init_node','bootcd', 'configure_qemu', 'export_qemu', 'kill_all_qemus', 'start_node', SEP,
94 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
95 'configure_sfi@1', 'add_user_sfa@1', 'add_sfa@1', 'create_sfa@1', SEPSFA,
96 'update_user_sfa@1', 'update_sfa@1', 'view_sfa@1', SEPSFA,
97 'install_unittest_sfa@1','unittest_sfa@1',SEPSFA,
98 # we used to run plcsh_stress_test, and then nodes_ssh_debug and nodes_ssh_boot
99 # but as the stress test might take a while, we sometimes missed the debug mode..
100 'nodes_ssh_debug', 'plcsh_stress_test@1', SEP,
101 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
102 'check_slice_sfa@1', 'delete_slice_sfa@1', 'delete_user_sfa@1', SEPSFA,
103 'check_tcp', 'check_hooks@1', SEP,
104 'force_gather_logs', 'force_resources_post', SEP,
107 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
108 'stop', 'vs_start', SEP,
109 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
110 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
111 'clean_leases', 'list_leases', SEP,
113 'nodestate_safeboot','nodestate_boot','list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
114 'plcclean_sfa', 'dbclean_sfa', 'stop_sfa','uninstall_sfa', 'clean_sfi', SEP,
115 'db_dump' , 'db_restore', SEP,
116 'standby_1 through 20',SEP,
def printable_steps (list):
    # Render the step names as a single blank-separated string, then break
    # the line after each separator marker so the listing stays readable.
    flat = " ".join(list) + " "
    for marker in (SEP, SEPSFA):
        flat = flat.replace(" " + marker + " ", " \\\n")
    return flat
def valid_step (step):
    # separator markers are layout decoration, not runnable steps
    return step not in (SEP, SEPSFA)
127 # turn off the sfa-related steps when build has skipped SFA
128 # this is originally for centos5 as recent SFAs won't build on this platformb
130 def check_whether_build_has_sfa (rpms_url):
131 # warning, we're now building 'sface' so let's be a bit more picky
132 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
133 # full builds are expected to return with 0 here
135 # move all steps containing 'sfa' from default_steps to other_steps
136 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
137 TestPlc.other_steps += sfa_steps
138 for step in sfa_steps: TestPlc.default_steps.remove(step)
140 def __init__ (self,plc_spec,options):
141 self.plc_spec=plc_spec
143 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
145 self.vserverip=plc_spec['vserverip']
146 self.vservername=plc_spec['vservername']
147 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
150 raise Exception,'chroot-based myplc testing is deprecated'
151 self.apiserver=TestApiserver(self.url,options.dry_run)
154 name=self.plc_spec['name']
155 return "%s.%s"%(name,self.vservername)
158 return self.plc_spec['hostname']
161 return self.test_ssh.is_local()
163 # define the API methods on this object through xmlrpc
164 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # Full ssh command line that would run `command` inside the guest vserver.
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    # Boot the vserver guest by running the start command on the host box;
    # returns the shell exit status from utils.system.
    start_command = self.test_ssh.actual_command(self.start_guest_in_host())
    return utils.system(start_command)
def run_in_guest (self,command):
    # Run `command` inside the guest vserver; returns the shell exit status.
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # Run `command` on the plc host box itself (not inside the guest),
    # through the per-build ssh helper.
    return self.test_ssh.run_in_buildname(command)
# wrap a command so that, when run on the host, it executes in the vserver
def host_to_guest(self,command):
    return " ".join(["vserver", self.vservername, "exec", command])
# command meant to be run on the host box, to boot the vserver guest
def start_guest_in_host(self):
    return "vserver " + self.vservername + " start"
def run_in_guest_piped (self,local,remote):
    # Run `local` on the build box and pipe its stdout into `remote`
    # executed inside the guest vserver (keep_stdin keeps the pipe open).
    remote_part = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_part)
192 def auth_root (self):
193 return {'Username':self.plc_spec['PLC_ROOT_USER'],
194 'AuthMethod':'password',
195 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
196 'Role' : self.plc_spec['role']
198 def locate_site (self,sitename):
199 for site in self.plc_spec['sites']:
200 if site['site_fields']['name'] == sitename:
202 if site['site_fields']['login_base'] == sitename:
204 raise Exception,"Cannot locate site %s"%sitename
206 def locate_node (self,nodename):
207 for site in self.plc_spec['sites']:
208 for node in site['nodes']:
209 if node['name'] == nodename:
211 raise Exception,"Cannot locate node %s"%nodename
213 def locate_hostname (self,hostname):
214 for site in self.plc_spec['sites']:
215 for node in site['nodes']:
216 if node['node_fields']['hostname'] == hostname:
218 raise Exception,"Cannot locate hostname %s"%hostname
220 def locate_key (self,keyname):
221 for key in self.plc_spec['keys']:
222 if key['name'] == keyname:
224 raise Exception,"Cannot locate key %s"%keyname
226 def locate_slice (self, slicename):
227 for slice in self.plc_spec['slices']:
228 if slice['slice_fields']['name'] == slicename:
230 raise Exception,"Cannot locate slice %s"%slicename
232 def all_sliver_objs (self):
234 for slice_spec in self.plc_spec['slices']:
235 slicename = slice_spec['slice_fields']['name']
236 for nodename in slice_spec['nodenames']:
237 result.append(self.locate_sliver_obj (nodename,slicename))
240 def locate_sliver_obj (self,nodename,slicename):
241 (site,node) = self.locate_node(nodename)
242 slice = self.locate_slice (slicename)
244 test_site = TestSite (self, site)
245 test_node = TestNode (self, test_site,node)
246 # xxx the slice site is assumed to be the node site - mhh - probably harmless
247 test_slice = TestSlice (self, test_site, slice)
248 return TestSliver (self, test_node, test_slice)
250 def locate_first_node(self):
251 nodename=self.plc_spec['slices'][0]['nodenames'][0]
252 (site,node) = self.locate_node(nodename)
253 test_site = TestSite (self, site)
254 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    # Convenience: the sliver for the first node of the first declared slice.
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
263 # all different hostboxes used in this plc
264 def gather_hostBoxes(self):
265 # maps on sites and nodes, return [ (host_box,test_node) ]
267 for site_spec in self.plc_spec['sites']:
268 test_site = TestSite (self,site_spec)
269 for node_spec in site_spec['nodes']:
270 test_node = TestNode (self, test_site, node_spec)
271 if not test_node.is_real():
272 tuples.append( (test_node.host_box(),test_node) )
273 # transform into a dict { 'host_box' -> [ test_node .. ] }
275 for (box,node) in tuples:
276 if not result.has_key(box):
279 result[box].append(node)
282 # a step for checking this stuff
283 def show_boxes (self):
284 'print summary of nodes location'
285 for (box,nodes) in self.gather_hostBoxes().iteritems():
286 print box,":"," + ".join( [ node.name() for node in nodes ] )
# make this a valid step
def kill_all_qemus(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # brute force: wipe every qemu running on each host box involved
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # any node's dir will do here - template-qemu is not pushed on testboxes
        first_nodedir = nodes[0].nodedir()
        TestBox(box,self.options.buildname).kill_all_qemus(first_nodedir)
# make this a valid step
def list_all_qemus(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    # brute force: ask every involved host box for all the qemus it runs
    for (box,_nodes) in self.gather_hostBoxes().iteritems():
        TestBox(box,self.options.buildname).list_all_qemus()
307 # kill only the right qemus
308 def list_qemus(self):
309 'list qemu instances for our nodes'
310 for (box,nodes) in self.gather_hostBoxes().iteritems():
311 # the fine-grain version
316 # kill only the right qemus
317 def kill_qemus(self):
318 'kill the qemu instances for our nodes'
319 for (box,nodes) in self.gather_hostBoxes().iteritems():
320 # the fine-grain version
325 #################### display config
327 "show test configuration after localization"
328 self.display_pass (1)
329 self.display_pass (2)
333 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
334 def display_pass (self,passno):
335 for (key,val) in self.plc_spec.iteritems():
336 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
340 self.display_site_spec(site)
341 for node in site['nodes']:
342 self.display_node_spec(node)
343 elif key=='initscripts':
344 for initscript in val:
345 self.display_initscript_spec (initscript)
348 self.display_slice_spec (slice)
351 self.display_key_spec (key)
353 if key not in ['sites','initscripts','slices','keys', 'sfa']:
354 print '+ ',key,':',val
356 def display_site_spec (self,site):
357 print '+ ======== site',site['site_fields']['name']
358 for (k,v) in site.iteritems():
359 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
362 print '+ ','nodes : ',
364 print node['node_fields']['hostname'],'',
370 print user['name'],'',
372 elif k == 'site_fields':
373 print '+ login_base',':',v['login_base']
374 elif k == 'address_fields':
380 def display_initscript_spec (self,initscript):
381 print '+ ======== initscript',initscript['initscript_fields']['name']
383 def display_key_spec (self,key):
384 print '+ ======== key',key['name']
386 def display_slice_spec (self,slice):
387 print '+ ======== slice',slice['slice_fields']['name']
388 for (k,v) in slice.iteritems():
401 elif k=='slice_fields':
402 print '+ fields',':',
403 print 'max_nodes=',v['max_nodes'],
def display_node_spec (self,node):
    # One-line summary of a node spec; the trailing commas keep the three
    # print statements on a single output line (python2 softspace).
    print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
    # full dump only when asked for
    if self.options.verbose:
        utils.pprint("node details",node,depth=3)
# another entry point for just showing the boxes involved
def display_mapping (self):
    # print the plc -> vserver and node -> qemu-box mapping for this instance
    TestPlc.display_mapping_plc(self.plc_spec)
def display_mapping_plc (plc_spec):
    # Takes a plain spec dict, no self - presumably a @staticmethod
    # (decorator not visible in this view - confirm).
    print '+ MyPLC',plc_spec['name']
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    # then one entry per node, across all sites
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)
def display_mapping_node (node_spec):
    # Show where a given (qemu) node lives: its name, host box, and hostname.
    print '+ NODE %s'%(node_spec['name'])
    print '+\tqemu box %s'%node_spec['host_box']
    print '+\thostname=%s'%node_spec['node_fields']['hostname']
def resources_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    # deferred import: LocalTestResources is a site-local module that may
    # not exist on every deployment
    import LocalTestResources
    return LocalTestResources.local_resources.step_pre(self)
def resources_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    # deferred import of the site-local resources module
    import LocalTestResources
    return LocalTestResources.local_resources.step_post(self)
def resources_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    # deferred import of the site-local resources module
    import LocalTestResources
    return LocalTestResources.local_resources.step_list(self)
def resources_release (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import of the site-local resources module
    import LocalTestResources
    return LocalTestResources.local_resources.step_release(self)
def resources_release_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import of the site-local resources module
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_plc(self)
def resources_release_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import of the site-local resources module
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_qemu(self)
466 "vserver delete the test myplc"
467 self.run_in_host("vserver --silent %s delete"%self.vservername)
471 # historically the build was being fetched by the tests
472 # now the build pushes itself as a subdir of the tests workdir
473 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
474 def create_vs (self):
475 "vserver creation (no install done)"
476 # push the local build/ dir to the testplc box
478 # a full path for the local calls
479 build_dir=os.path.dirname(sys.argv[0])
480 # sometimes this is empty - set to "." in such a case
481 if not build_dir: build_dir="."
482 build_dir += "/build"
484 # use a standard name - will be relative to remote buildname
486 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
487 self.test_ssh.rmdir(build_dir)
488 self.test_ssh.copy(build_dir,recursive=True)
489 # the repo url is taken from arch-rpms-url
490 # with the last step (i386) removed
491 repo_url = self.options.arch_rpms_url
492 for level in [ 'arch' ]:
493 repo_url = os.path.dirname(repo_url)
494 # pass the vbuild-nightly options to vtest-init-vserver
496 test_env_options += " -p %s"%self.options.personality
497 test_env_options += " -d %s"%self.options.pldistro
498 test_env_options += " -f %s"%self.options.fcdistro
499 script="vtest-init-vserver.sh"
500 vserver_name = self.vservername
501 vserver_options="--netdev eth0 --interface %s"%self.vserverip
503 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
504 vserver_options += " --hostname %s"%vserver_hostname
506 print "Cannot reverse lookup %s"%self.vserverip
507 print "This is considered fatal, as this might pollute the test results"
509 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
510 return self.run_in_host(create_vserver) == 0
514 "yum install myplc, noderepo, and the plain bootstrapfs"
516 # workaround for getting pgsql8.2 on centos5
517 if self.options.fcdistro == "centos5":
518 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
521 if self.options.personality == "linux32":
523 elif self.options.personality == "linux64":
526 raise Exception, "Unsupported personality %r"%self.options.personality
527 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
530 pkgs_list.append ("slicerepo-%s"%nodefamily)
531 pkgs_list.append ("myplc")
532 pkgs_list.append ("noderepo-%s"%nodefamily)
533 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
534 pkgs_string=" ".join(pkgs_list)
535 self.run_in_guest("yum -y install %s"%pkgs_string)
536 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
541 tmpname='%s.plc-config-tty'%(self.name())
542 fileconf=open(tmpname,'w')
543 for var in [ 'PLC_NAME',
548 'PLC_MAIL_SUPPORT_ADDRESS',
551 # Above line was added for integrating SFA Testing
557 'PLC_RESERVATION_GRANULARITY',
560 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
561 fileconf.write('w\n')
562 fileconf.write('q\n')
564 utils.system('cat %s'%tmpname)
565 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
566 utils.system('rm %s'%tmpname)
571 self.run_in_guest('service plc start')
576 self.run_in_guest('service plc stop')
580 "start the PLC vserver"
# cache the config-declared ssh keys under keys/ for later steps
def store_keys(self):
    "stores test users ssh keys in keys/"
    for spec in self.plc_spec['keys']:
        TestKey(self,spec).store_key()
def clean_keys(self):
    "removes keys cached in keys/"
    # BUG FIX: the original used os.path(sys.argv[0]) - os.path is a module,
    # not a callable, so that raised TypeError at runtime. The intent (as in
    # create_vs, which uses os.path.dirname(sys.argv[0])) is the directory
    # holding the running script.
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
595 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
596 # for later direct access to the nodes
597 def fetch_keys(self):
598 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
600 if not os.path.isdir(dir):
602 vservername=self.vservername
604 prefix = 'debug_ssh_key'
605 for ext in [ 'pub', 'rsa' ] :
606 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
607 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
608 if self.test_ssh.fetch(src,dst) != 0: overall=False
612 "create sites with PLCAPI"
613 return self.do_sites()
def clean_sites (self):
    "delete sites with PLCAPI"
    # step wrapper: same traversal as the creation step, in delete mode
    return self.do_sites(action="delete")
619 def do_sites (self,action="add"):
620 for site_spec in self.plc_spec['sites']:
621 test_site = TestSite (self,site_spec)
622 if (action != "add"):
623 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
624 test_site.delete_site()
625 # deleted with the site
626 #test_site.delete_users()
629 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
630 test_site.create_site()
631 test_site.create_users()
def clean_all_sites (self):
    "Delete all sites in PLC, and related objects"
    # show the credentials in use - helpful when the API call fails
    print 'auth_root',self.auth_root()
    # fetch every site id known to the API, not just the ones in plc_spec
    site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
    for site_id in site_ids:
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
643 "create nodes with PLCAPI"
644 return self.do_nodes()
def clean_nodes (self):
    "delete nodes with PLCAPI"
    # step wrapper: same traversal as nodes(), in delete mode
    return self.do_nodes(action="delete")
649 def do_nodes (self,action="add"):
650 for site_spec in self.plc_spec['sites']:
651 test_site = TestSite (self,site_spec)
653 utils.header("Deleting nodes in site %s"%test_site.name())
654 for node_spec in site_spec['nodes']:
655 test_node=TestNode(self,test_site,node_spec)
656 utils.header("Deleting %s"%test_node.name())
657 test_node.delete_node()
659 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
660 for node_spec in site_spec['nodes']:
661 utils.pprint('Creating node %s'%node_spec,node_spec)
662 test_node = TestNode (self,test_site,node_spec)
663 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # thin step wrapper around do_nodegroups
    return self.do_nodegroups("add")
def clean_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # thin step wrapper around do_nodegroups in delete mode
    return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    # Values of at least a year are taken as absolute UNIX timestamps;
    # anything smaller is a relative offset counted in grains from `start`.
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    # Render a UNIX timestamp as a short human-readable UTC string.
    utc_tuple = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', utc_tuple)
684 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
686 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
687 print 'API answered grain=',grain
688 start=(now/grain)*grain
690 # find out all nodes that are reservable
691 nodes=self.all_reservable_nodenames()
693 utils.header ("No reservable node found - proceeding without leases")
696 # attach them to the leases as specified in plc_specs
697 # this is where the 'leases' field gets interpreted as relative of absolute
698 for lease_spec in self.plc_spec['leases']:
699 # skip the ones that come with a null slice id
700 if not lease_spec['slice']: continue
701 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
702 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
703 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
704 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
705 if lease_addition['errors']:
706 utils.header("Cannot create leases, %s"%lease_addition['errors'])
709 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
710 (nodes,lease_spec['slice'],
711 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
712 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
def clean_leases (self):
    "remove all leases in the myplc side"
    # collect every lease id known to the API, then drop them in one call
    lease_ids = [ lease['lease_id'] for lease in self.apiserver.GetLeases(self.auth_root()) ]
    utils.header("Cleaning leases %r"%lease_ids)
    self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
723 def list_leases (self):
724 "list all leases known to the myplc"
725 leases = self.apiserver.GetLeases(self.auth_root())
728 current=l['t_until']>=now
729 if self.options.verbose or current:
730 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
731 TestPlc.timestamp_printable(l['t_from']),
732 TestPlc.timestamp_printable(l['t_until'])))
735 # create nodegroups if needed, and populate
736 def do_nodegroups (self, action="add"):
737 # 1st pass to scan contents
739 for site_spec in self.plc_spec['sites']:
740 test_site = TestSite (self,site_spec)
741 for node_spec in site_spec['nodes']:
742 test_node=TestNode (self,test_site,node_spec)
743 if node_spec.has_key('nodegroups'):
744 nodegroupnames=node_spec['nodegroups']
745 if isinstance(nodegroupnames,StringTypes):
746 nodegroupnames = [ nodegroupnames ]
747 for nodegroupname in nodegroupnames:
748 if not groups_dict.has_key(nodegroupname):
749 groups_dict[nodegroupname]=[]
750 groups_dict[nodegroupname].append(test_node.name())
751 auth=self.auth_root()
753 for (nodegroupname,group_nodes) in groups_dict.iteritems():
755 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
756 # first, check if the nodetagtype is here
757 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
759 tag_type_id = tag_types[0]['tag_type_id']
761 tag_type_id = self.apiserver.AddTagType(auth,
762 {'tagname':nodegroupname,
763 'description': 'for nodegroup %s'%nodegroupname,
765 print 'located tag (type)',nodegroupname,'as',tag_type_id
767 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
769 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
770 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
771 # set node tag on all nodes, value='yes'
772 for nodename in group_nodes:
774 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
776 traceback.print_exc()
777 print 'node',nodename,'seems to already have tag',nodegroupname
780 expect_yes = self.apiserver.GetNodeTags(auth,
781 {'hostname':nodename,
782 'tagname':nodegroupname},
783 ['value'])[0]['value']
784 if expect_yes != "yes":
785 print 'Mismatch node tag on node',nodename,'got',expect_yes
788 if not self.options.dry_run:
789 print 'Cannot find tag',nodegroupname,'on node',nodename
793 print 'cleaning nodegroup',nodegroupname
794 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
796 traceback.print_exc()
800 # return a list of tuples (nodename,qemuname)
801 def all_node_infos (self) :
803 for site_spec in self.plc_spec['sites']:
804 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
805 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # keep only the hostname out of each (nodename,qemuname) pair
    return [ nodename for (nodename,_qemuname) in self.all_node_infos() ]
809 def all_reservable_nodenames (self):
811 for site_spec in self.plc_spec['sites']:
812 for node_spec in site_spec['nodes']:
813 node_fields=node_spec['node_fields']
814 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
815 res.append(node_fields['hostname'])
818 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
819 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
820 if self.options.dry_run:
824 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
825 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
826 # the nodes that haven't checked yet - start with a full list and shrink over time
827 tocheck = self.all_hostnames()
828 utils.header("checking nodes %r"%tocheck)
829 # create a dict hostname -> status
830 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
833 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
835 for array in tocheck_status:
836 hostname=array['hostname']
837 boot_state=array['boot_state']
838 if boot_state == target_boot_state:
839 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
841 # if it's a real node, never mind
842 (site_spec,node_spec)=self.locate_hostname(hostname)
843 if TestNode.is_real_model(node_spec['node_fields']['model']):
844 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
846 boot_state = target_boot_state
847 elif datetime.datetime.now() > graceout:
848 utils.header ("%s still in '%s' state"%(hostname,boot_state))
849 graceout=datetime.datetime.now()+datetime.timedelta(1)
850 status[hostname] = boot_state
852 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
855 if datetime.datetime.now() > timeout:
856 for hostname in tocheck:
857 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
859 # otherwise, sleep for a while
861 # only useful in empty plcs
def nodes_booted(self):
    # convenience wrapper: wait until every node reaches the 'boot' state
    # (30 minutes max, stay silent for the first 20)
    return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
867 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
869 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
870 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
871 vservername=self.vservername
874 local_key = "keys/%(vservername)s-debug.rsa"%locals()
877 local_key = "keys/key1.rsa"
878 node_infos = self.all_node_infos()
879 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
880 for (nodename,qemuname) in node_infos:
881 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
882 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
883 (timeout_minutes,silent_minutes,period))
885 for node_info in node_infos:
886 (hostname,qemuname) = node_info
887 # try to run 'hostname' in the node
888 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
889 # don't spam logs - show the command only after the grace period
890 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
892 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
894 node_infos.remove(node_info)
896 # we will have tried real nodes once, in case they're up - but if not, just skip
897 (site_spec,node_spec)=self.locate_hostname(hostname)
898 if TestNode.is_real_model(node_spec['node_fields']['model']):
899 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
900 node_infos.remove(node_info)
903 if datetime.datetime.now() > timeout:
904 for (hostname,qemuname) in node_infos:
905 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
907 # otherwise, sleep for a while
909 # only useful in empty plcs
def nodes_ssh_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # debug mode comes up first, so a shorter timeout: 10 mn max, quiet for 5
    return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
def nodes_ssh_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # full boot takes longer: 40 mn max, quiet for the first 15
    return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
921 def init_node (self):
922 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
926 "all nodes: invoke GetBootMedium and store result locally"
929 def configure_qemu (self):
930 "all nodes: compute qemu config qemu.conf and store it locally"
933 def nodestate_reinstall (self):
934 "all nodes: mark PLCAPI boot_state as reinstall"
937 def nodestate_safeboot (self):
938 "all nodes: mark PLCAPI boot_state as safeboot"
941 def nodestate_boot (self):
942 "all nodes: mark PLCAPI boot_state as safeboot"
945 def export_qemu (self):
946 "all nodes: push local node-dep directory on the qemu box"
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-level hook scripts on the first node only
    return self.locate_first_node().check_hooks()
def check_hooks_sliver (self) :
    # run the slice-level hook scripts on the first sliver only
    return self.locate_first_sliver().check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # both the node-level and the sliver-level hooks must pass
    return self.check_hooks_node() and self.check_hooks_sliver()
960 def do_check_initscripts(self):
962 for slice_spec in self.plc_spec['slices']:
963 if not slice_spec.has_key('initscriptname'):
965 initscript=slice_spec['initscriptname']
966 for nodename in slice_spec['nodenames']:
967 (site,node) = self.locate_node (nodename)
968 # xxx - passing the wrong site - probably harmless
969 test_site = TestSite (self,site)
970 test_slice = TestSlice (self,test_site,slice_spec)
971 test_node = TestNode (self,test_site,node)
972 test_sliver = TestSliver (self, test_node, test_slice)
973 if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # step wrapper around the sliver-by-sliver check
    return self.do_check_initscripts()
981 def initscripts (self):
982 "create initscripts with PLCAPI"
983 for initscript in self.plc_spec['initscripts']:
984 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
985 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
988 def clean_initscripts (self):
989 "delete initscripts with PLCAPI"
990 for initscript in self.plc_spec['initscripts']:
991 initscript_name = initscript['initscript_fields']['name']
992 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
994 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
995 print initscript_name,'deleted'
997 print 'deletion went wrong - probably did not exist'
1002 "create slices with PLCAPI"
1003 return self.do_slices()
def clean_slices (self):
    "delete slices with PLCAPI"
    # step wrapper around do_slices in delete mode
    return self.do_slices("delete")
# Shared worker for slices/clean_slices: locates each slice's site and
# either deletes or creates the slice.  The if/else on 'action' is on
# elided lines.  NOTE(review): the loop variable 'slice' shadows the
# builtin of the same name.
1009 def do_slices (self, action="add"):
1010 for slice in self.plc_spec['slices']:
1011 site_spec = self.locate_site (slice['sitename'])
1012 test_site = TestSite(self,site_spec)
1013 test_slice=TestSlice(self,test_site,slice)
1015 utils.header("Deleting slices in site %s"%test_site.name())
1016 test_slice.delete_slice()
1018 utils.pprint("Creating slice",slice)
1019 test_slice.create_slice()
1020 utils.header('Created Slice %s'%slice['slice_fields']['name'])
# Step stubs whose real work happens in TestSlice/TestBox/TestNode via the
# mapper decorators (decorator lines are elided here); the docstrings serve
# as the step descriptions shown by the framework.
1024 def check_slice(self):
1025 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1029 def clear_known_hosts (self):
1030 "remove test nodes entries from the local known_hosts file"
1034 def start_node (self) :
1035 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
# Step: for each spec in plc_spec['tcp_test'], start a TCP server in one
# sliver and connect to it from another (the loop header and failure
# branches are elided in this listing).
1038 def check_tcp (self):
1039 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1040 specs = self.plc_spec['tcp_test']
1045 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1046 if not s_test_sliver.run_tcp_server(port,timeout=10):
1050 # idem for the client side
# FIXME(review): the client sliver is located with the *server* keys
# (server_node/server_slice); this looks like it should read
# spec['client_node'] / spec['client_slice'] -- as written the check can
# only ever exercise loopback.  Confirm against the spec format and fix.
1051 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1052 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# Step: pushes plcsh_stress_test.py into the guest image and runs it with
# '--check'; '--tiny' is added for size-1 deployments.  The line that
# initializes 'command' (presumably from 'location') is elided.
1056 def plcsh_stress_test (self):
1057 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1058 # install the stress-test in the plc image
1059 location = "/usr/share/plc_api/plcsh_stress_test.py"
1060 remote="/vservers/%s/%s"%(self.vservername,location)
1061 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1063 command += " -- --check"
1064 if self.options.size == 1:
1065 command += " --tiny"
1066 return ( self.run_in_guest(command) == 0)
1068 # populate runs the same utility with slightly different options
1069 # in particular runs with --preserve (dont cleanup) and without --check
1070 # also it gets run twice, once with the --foreign option for creating fake foreign entries
# Step: yum-install the SFA packages inside the guest, then verify the
# install with rpm -q (its exit status is the step's result).
1073 def install_sfa(self):
1074 "yum install sfa, sfa-plc and sfa-client"
1076 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1077 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
# Step: wipe the SFA database via sfa-nuke-plc.py.
# NOTE(review): the '==0' comparison result is computed but discarded, and
# in the visible lines nothing is returned -- as shown the step would
# evaluate to None; a trailing 'return True' may be on an elided line.
1080 def dbclean_sfa(self):
1081 "thoroughly wipes off the SFA database"
1082 self.run_in_guest("sfa-nuke-plc.py")==0
# Step: deletes the slice and user that the SFA scenario created in the
# PLC database.  The bare 'except' clauses deliberately swallow "already
# absent" errors -- but they would also hide any other API failure.
1085 def plcclean_sfa(self):
1086 "cleans the PLC entries that were created as a side effect of running the script"
1088 sfa_spec=self.plc_spec['sfa']
1090 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1091 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1092 except: print "Slice %s already absent from PLC db"%slicename
1094 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1095 try: self.apiserver.DeletePerson(self.auth_root(),username)
1096 except: print "User %s already absent from PLC db"%username
1098 print "REMEMBER TO RUN import_sfa AGAIN"
# Step: rpm-erase SFA and wipe its state directories; all results are
# ignored on purpose ("ignore result" per the docstring).  The final
# '--noscripts' erase retries sfa-plc in case its uninstall scripts fail.
1101 def uninstall_sfa(self):
1102 "uses rpm to uninstall sfa - ignore result"
1103 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1104 self.run_in_guest("rm -rf /var/lib/sfa")
1105 self.run_in_guest("rm -rf /etc/sfa")
1106 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1108 self.run_in_guest("rpm -e --noscripts sfa-plc")
# sfa-tests install step, and the unittest step that runs its suite inside
# the guest (unittest_sfa's docstring line is elided in this listing).
1112 def install_unittest_sfa(self):
1113 "yum install sfa-tests"
1115 self.run_in_guest("yum -y install sfa-tests")
1116 return self.run_in_guest("rpm -q sfa-tests")==0
1118 def unittest_sfa(self):
1120 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# Body of confdir() (its def line is elided above): lazily creates the
# per-plc conf.<name> directory and errors out if that fails.
# NOTE(review): raising a plain string is invalid in Python 3 and
# deprecated in Python 2 -- should raise an Exception instance.
1124 dirname="conf.%s"%self.plc_spec['name']
1125 if not os.path.isdir(dirname):
1126 utils.system("mkdir -p %s"%dirname)
1127 if not os.path.isdir(dirname):
1128 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    """Return the full path of <filename> inside this plc's config dir."""
    # delegate to confdir() so the base-directory logic stays in one place
    base = self.confdir()
    return base + "/" + filename
# Create (and optionally wipe first) a subdirectory of the plc's config
# dir.  NOTE(review): the 'rm -rf' below is presumably guarded by an
# 'if clean:' on an elided line, and a 'return subdirname' likely follows
# (configure_sfi uses the return value) -- confirm against the full file.
# Same string-raise defect as confdir.
1133 def confsubdir(self,dirname,clean):
1134 subdirname="%s/%s"%(self.confdir(),dirname)
1136 utils.system("rm -rf %s"%subdirname)
1137 if not os.path.isdir(subdirname):
1138 utils.system("mkdir -p %s"%subdirname)
1139 if not os.path.isdir(subdirname):
1140 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    """Remove the named config file; return True when the rm succeeded."""
    target = self.conffile(filename)
    # utils.system returns the shell exit status; 0 means success
    status = utils.system("rm -rf %s" % target)
    return status == 0
# Step: generates an input script for sfa-config-tty from plc_spec['sfa']
# and pipes it into sfa-config-tty inside the guest.  Several entries of
# the variable list below are on elided lines.
1148 def configure_sfa(self):
1149 "run sfa-config-tty"
1150 tmpname=self.conffile("sfa-config-tty")
1151 fileconf=open(tmpname,'w')
1152 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1153 'SFA_INTERFACE_HRN',
1154 # 'SFA_REGISTRY_LEVEL1_AUTH',
1155 'SFA_REGISTRY_HOST',
1156 'SFA_AGGREGATE_HOST',
1162 'SFA_PLC_DB_PASSWORD',
# each variable becomes an 'e <name>\n<value>\n' edit command for the tool
1165 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1166 # the way plc_config handles booleans just sucks..
1167 for var in ['SFA_API_DEBUG']:
# the 'val' fallback for a false value is on an elided line -- confirm
1169 if self.plc_spec['sfa'][var]: val='true'
1170 fileconf.write ('e %s\n%s\n'%(var,val))
# trailing commands for sfa-config-tty: write, reload(?), quit -- confirm
1171 fileconf.write('w\n')
1172 fileconf.write('R\n')
1173 fileconf.write('q\n')
1175 utils.system('cat %s'%tmpname)
1176 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    """Return the <aggregate/> XML fragment describing this plc (port 12346)."""
    addr = self.vserverip
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="12346"/>' % (addr, hrn)
def registry_xml_line(self):
    """Return the <registry/> XML fragment describing this plc (port 12345)."""
    addr = self.vserverip
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (addr, hrn)
1188 # a cross step that takes all other plcs in argument
# Writes aggregates.xml / registries.xml listing every *other* plc's
# aggregate/registry line and pushes both files into this plc's /etc/sfa
# (federation setup).  Uses py2's file() builtin; handles without close()
# rely on refcounting to flush.
1189 def cross_configure_sfa(self, other_plcs):
1190 # of course with a single plc, other_plcs is an empty list
1193 agg_fname=self.conffile("agg.xml")
1194 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1195 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1196 utils.header ("(Over)wrote %s"%agg_fname)
1197 reg_fname=self.conffile("reg.xml")
1198 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1199 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1200 utils.header ("(Over)wrote %s"%reg_fname)
1201 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1202 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
# import_sfa: runs the plc-to-sfa importer inside the guest; start_sfa:
# starts the sfa service (their docstring lines are elided here).
1204 def import_sfa(self):
1206 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
# NOTE(review): 'auth' is unused in the visible lines -- it only served
# the commented-out cp below
1207 return self.run_in_guest('sfa-import-plc.py')==0
1208 # not needed anymore
1209 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1211 def start_sfa(self):
1213 return self.run_in_guest('service sfa start')==0
# Builds the local dot-sfi/ configuration directory (PI private key,
# sfi_config, person.xml, slice.xml, slice.rspec) and pushes the whole
# directory to /root/.sfi inside the guest.
1215 def configure_sfi(self):
1216 sfa_spec=self.plc_spec['sfa']
# NOTE(review): this string sits after a statement, so it is NOT actually
# the method's docstring
1217 "sfi client configuration"
1218 dir_name=self.confsubdir("dot-sfi",clean=True)
1219 file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
1220 fileconf=open(file_name,'w')
1221 fileconf.write (self.plc_spec['keys'][0]['private'])
1223 utils.header ("(Over)wrote %s"%file_name)
1225 file_name=dir_name + os.sep + 'sfi_config'
1226 fileconf=open(file_name,'w')
1227 SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
1228 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1229 fileconf.write('\n')
1230 SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
1231 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1232 fileconf.write('\n')
# registry on 12345, slice manager on 12347, matching the xml_line ports
1233 SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
1234 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1235 fileconf.write('\n')
1236 SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
1237 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1238 fileconf.write('\n')
1240 utils.header ("(Over)wrote %s"%file_name)
1242 file_name=dir_name + os.sep + 'person.xml'
1243 fileconf=open(file_name,'w')
1244 for record in sfa_spec['sfa_person_xml']:
1245 person_record=record
1246 fileconf.write(person_record)
1247 fileconf.write('\n')
1249 utils.header ("(Over)wrote %s"%file_name)
1251 file_name=dir_name + os.sep + 'slice.xml'
1252 fileconf=open(file_name,'w')
1253 for record in sfa_spec['sfa_slice_xml']:
# NOTE(review): the assignment of slice_record is on an elided line (the
# commented-out variant below suggests slice_record=record)
1255 #slice_record=sfa_spec['sfa_slice_xml']
1256 fileconf.write(slice_record)
1257 fileconf.write('\n')
1258 utils.header ("(Over)wrote %s"%file_name)
1261 file_name=dir_name + os.sep + 'slice.rspec'
1262 fileconf=open(file_name,'w')
# NOTE(review): the line building 'slice_rspec' from (key, value) is elided
1264 for (key, value) in sfa_spec['sfa_slice_rspec'].items():
1266 fileconf.write(slice_rspec)
1267 fileconf.write('\n')
1269 utils.header ("(Over)wrote %s"%file_name)
1271 # push to the remote root's .sfi
1272 location = "root/.sfi"
1273 remote="/vservers/%s/%s"%(self.vservername,location)
1274 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
# Step: removes the sfi configuration pushed by configure_sfi.
1278 def clean_sfi (self):
1279 self.run_in_guest("rm -rf /root/.sfi")
# SFA user/slice step stubs: the real work lives in TestUserSfa /
# TestSliceSfa; decorators and some bodies are elided in this listing, and
# the docstrings are the framework-visible step descriptions.
1282 def add_user_sfa(self):
1283 return TestUserSfa(self).add_user()
1287 "run sfi.py add (on Registry)"
1291 def create_sfa(self):
1292 "run sfi.py create (on SM) for 1st-time creation"
1295 def update_user_sfa(self):
1296 return TestUserSfa(self).update_user()
1299 def update_sfa(self):
1300 "run sfi.py create (on SM) on existing object"
# check_sfa step (its def line and the 'return \' line are elided):
# exercises sfi.py list/show/slices/resources and ANDs their exit codes.
1304 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1305 sfa_spec=self.plc_spec['sfa']
1306 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1308 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1309 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1310 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1311 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
# Deletion/teardown steps for the SFA scenario (some bodies and the
# stop_sfa def line are elided in this listing).
1314 def check_slice_sfa(self):
1315 "tries to ssh-enter the SFA slice"
1318 def delete_user_sfa(self):
1319 "run sfi.py delete (on SM) for user"
1320 test_user_sfa=TestUserSfa(self)
1321 return test_user_sfa.delete_user()
1324 def delete_slice_sfa(self):
1325 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# NOTE(review): this '==0' result is discarded; the line likely belongs to
# stop_sfa (def elided) and may be followed by an elided return
1330 self.run_in_guest('service sfa stop')==0
# Step: runs plcsh_stress_test twice in --preserve mode (no cleanup, no
# --check), the second time with --foreign to create fake foreign entries;
# succeeds only when both runs exit 0.  The line initializing 'command' is
# elided.
1333 def populate (self):
1334 "creates random entries in the PLCAPI"
1335 # install the stress-test in the plc image
1336 location = "/usr/share/plc_api/plcsh_stress_test.py"
1337 remote="/vservers/%s/%s"%(self.vservername,location)
1338 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1340 command += " -- --preserve --short-names"
1341 local = (self.run_in_guest(command) == 0);
1342 # second run with --foreign
1343 command += ' --foreign'
# NOTE(review): 'remote' is reused here as a boolean after holding a path
1344 remote = (self.run_in_guest(command) == 0);
1345 return ( local and remote)
# Umbrella log-collection step: plc /var/log, plc pgsql logs, per-node
# qemu logs, per-node /var/log, and per-sliver /var/log (py2 prints).
1347 def gather_logs (self):
1348 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1349 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1350 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1351 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1352 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1353 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1355 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1356 self.gather_var_logs ()
1358 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1359 self.gather_pgsql_logs ()
1361 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1362 for site_spec in self.plc_spec['sites']:
1363 test_site = TestSite (self,site_spec)
1364 for node_spec in site_spec['nodes']:
1365 test_node=TestNode(self,test_site,node_spec)
1366 test_node.gather_qemu_logs()
1368 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1369 self.gather_nodes_var_logs()
1371 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1372 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    """Retrieve each sliver's /var/log into logs/sliver.var-log.<name>/."""
    for sliver in self.all_sliver_objs():
        # remote side: a command that tars up the sliver's /var/log
        tar_command = sliver.tar_var_logs()
        local_dir = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % local_dir)
        # pipe the remote tar stream into a local untar
        utils.system(tar_command + " | tar -C %s -xf -" % local_dir)
def gather_var_logs (self):
    """Copy the plc guest's /var/log into logs/myplc.var-log.<plc>/."""
    local_dir = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p " + local_dir)
    # run tar inside the guest and untar the stream locally
    guest_tar = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(guest_tar + "| tar -C %s -xf -" % local_dir)
    # make the httpd logs readable by everyone
    utils.system("chmod a+r,a+x %s/httpd" % local_dir)
def gather_pgsql_logs (self):
    """Copy the guest's postgres pg_log/ into logs/myplc.pgsql-log.<plc>/."""
    local_dir = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p " + local_dir)
    # tar inside the guest, untar locally
    guest_tar = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(guest_tar + "| tar -C %s -xf -" % local_dir)
def gather_nodes_var_logs (self):
    """Pull /var/log from every node into logs/node.var-log.<node>/."""
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self, site, node_spec)
            # ssh to the node with the test key and tar up its /var/log
            ssh = TestSsh (node.name(), key="keys/key1.rsa")
            fetch = ssh.actual_command("tar -C /var/log -cf - .")
            local_dir = "logs/node.var-log.%s" % node.name()
            fetch = fetch + "| tar -C %s -xf -" % local_dir
            utils.system("mkdir -p %s" % local_dir)
            utils.system(fetch)
1409 # returns the filename to use for sql dump/restore, using options.dbname if set
1410 def dbfile (self, database):
1411 # uses options.dbname if it is found
1413 name=self.options.dbname
# fall back to a timestamp-based name when dbname is not a string
# (py2 types.StringTypes covers both str and unicode)
1414 if not isinstance(name,StringTypes):
1417 t=datetime.datetime.now()
# NOTE(review): the lines formatting the timestamp into 'name' are elided
1420 return "/root/%s-%s.sql"%(database,name)
# db_dump step (its def line is elided): dumps the planetlab5 database to
# /root inside the guest.  Note the dump file prefix is spelled
# 'planetab5' (missing an 'l') -- harmless as long as db_restore uses the
# same spelling, which it does.
1423 'dump the planetlab5 DB in /root in the PLC - filename has time'
1424 dump=self.dbfile("planetab5")
1425 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1426 utils.header('Dumped planetlab5 database in %s'%dump)
# Recreates planetlab5 from the last dump: stop httpd, drop + recreate the
# database, replay the dump with psql, restart httpd.
1429 def db_restore(self):
1430 'restore the planetlab5 DB - looks broken, but run -n might help'
1431 dump=self.dbfile("planetab5")
1432 ##stop httpd service
1433 self.run_in_guest('service httpd stop')
1434 # xxx - need another wrapper
1435 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1436 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1437 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1438 ##starting httpd service
1439 self.run_in_guest('service httpd start')
1441 utils.header('Database restored from ' + dump)
# Placeholder steps: the @standby_generic decorator (its lines are elided
# between each def) replaces each body with a sleep of N minutes, N being
# parsed from the method name.
1444 def standby_1(): pass
1446 def standby_2(): pass
1448 def standby_3(): pass
1450 def standby_4(): pass
1452 def standby_5(): pass
1454 def standby_6(): pass
1456 def standby_7(): pass
1458 def standby_8(): pass
1460 def standby_9(): pass
1462 def standby_10(): pass
1464 def standby_11(): pass
1466 def standby_12(): pass
1468 def standby_13(): pass
1470 def standby_14(): pass
1472 def standby_15(): pass
1474 def standby_16(): pass
1476 def standby_17(): pass
1478 def standby_18(): pass
1480 def standby_19(): pass
1482 def standby_20(): pass