1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
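# a minimal, purely illustrative sketch of what a conforming step could look like
# (the name 'example_step' and its body are made up and not part of the suite):
#
#    def example_step (self):
#        "one-line description shown when listing steps"
#        utils.header('running example_step on %s'%self.plc_spec['name'])
#        return self.run_in_host('true') == 0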
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
55 def slice_mapper (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
69 def slice_sfa_mapper (method):
72 slice_method = TestSliceSfa.__dict__[method.__name__]
73 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
74 site_spec = self.locate_site (slice_spec['sitename'])
75 test_site = TestSite(self,site_spec)
76 test_slice=TestSliceSfa(self,test_site,slice_spec)
77 if not slice_method(test_slice,self.options): overall=False
79 # restore the doc text
80 actual.__doc__=method.__doc__
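# hedged sketch of how such a mapper would typically be applied - the step name below
# is hypothetical, and TestNode would need to define a method of the same name; the
# mapper then loops that method over every node declared in the plc spec
#
#    def some_node_step (self): pass
#    some_node_step = node_mapper(some_node_step)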
89 'display', 'resources_pre', SEP,
90 'delete_vs','create_vs','install', 'configure', 'start', SEP,
91 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu', 'kill_all_qemus', 'start_node', SEP,
94 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
95 'configure_sfi@1', 'add_user_sfa@1', 'add_sfa@1', 'create_sfa@1', SEPSFA,
96 'update_user_sfa@1', 'update_sfa@1', 'view_sfa@1', SEPSFA,
97 'install_unittest_sfa','unittest_sfa',SEPSFA,
98 # we used to run plcsh_stress_test, and then nodes_ssh_debug and nodes_ssh_boot
99 # but as the stress test can take a while, we would sometimes miss the nodes' debug window
100 'nodes_ssh_debug', 'plcsh_stress_test@1', SEP,
101 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEP,
102 'check_slice_sfa@1', 'delete_slice_sfa@1', 'delete_user_sfa@1', SEPSFA,
103 'check_tcp', 'check_hooks@1', SEP,
104 'force_gather_logs', 'force_resources_post', SEP,
107 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
108 'stop', 'vs_start', SEP,
109 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
110 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
111 'clean_leases', 'list_leases', SEP,
113 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
114 'plcclean_sfa', 'dbclean_sfa', 'stop_sfa','uninstall_sfa', 'clean_sfi', SEP,
115 'db_dump' , 'db_restore', SEP,
116 'standby_1 through 20',SEP,
120 def printable_steps (list):
121 single_line=" ".join(list)+" "
122 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
124 def valid_step (step):
125 return step != SEP and step != SEPSFA
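# quick illustration of printable_steps - the SEP/SEPSFA markers become a backslash
# plus newline, so the step list prints over several lines; the step names below are
# just a sample:
#
#    >>> print TestPlc.printable_steps(['display','resources_pre',SEP,'delete_vs','create_vs'])
#    display resources_pre \
#    delete_vs create_vs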
127 # turn off the sfa-related steps when build has skipped SFA
128 # this was originally for centos5, as recent SFAs won't build on that platform
130 def check_whether_build_has_sfa (rpms_url):
131 # warning, we're now building 'sface' so let's be a bit more picky
132 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
133 # full builds are expected to return with 0 here
135 # move all steps containing 'sfa' from default_steps to other_steps
136 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
137 TestPlc.other_steps += sfa_steps
138 for step in sfa_steps: TestPlc.default_steps.remove(step)
140 def __init__ (self,plc_spec,options):
141 self.plc_spec=plc_spec
143 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
145 self.vserverip=plc_spec['vserverip']
146 self.vservername=plc_spec['vservername']
147 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
150 raise Exception,'chroot-based myplc testing is deprecated'
151 self.apiserver=TestApiserver(self.url,options.dry_run)
154 name=self.plc_spec['name']
155 return "%s.%s"%(name,self.vservername)
158 return self.plc_spec['hostname']
161 return self.test_ssh.is_local()
163 # define the API methods on this object through xmlrpc
164 # would help, but not strictly necessary
168 def actual_command_in_guest (self,command):
169 return self.test_ssh.actual_command(self.host_to_guest(command))
171 def start_guest (self):
172 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
174 def run_in_guest (self,command):
175 return utils.system(self.actual_command_in_guest(command))
177 def run_in_host (self,command):
178 return self.test_ssh.run_in_buildname(command)
180 #command gets run in the vserver
181 def host_to_guest(self,command):
182 return "vserver %s exec %s"%(self.vservername,command)
184 #command gets run in the vserver
185 def start_guest_in_host(self):
186 return "vserver %s start"%(self.vservername)
189 def run_in_guest_piped (self,local,remote):
190 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
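# to recap how commands get nested (a hedged example - hostname and vserver name are
# made up): run_in_guest("service plc start") wraps the command with host_to_guest and
# hands it to test_ssh, so what actually runs from the test master is roughly
#
#    ssh root@testplc.example.org "vserver vtest-f12 exec service plc start"
#
# run_in_guest_piped(local,remote) builds the same remote half, and pipes the local
# command's stdout into that ssh invocation (keep_stdin=True)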
192 def auth_root (self):
193 return {'Username':self.plc_spec['PLC_ROOT_USER'],
194 'AuthMethod':'password',
195 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
196 'Role' : self.plc_spec['role']
198 def locate_site (self,sitename):
199 for site in self.plc_spec['sites']:
200 if site['site_fields']['name'] == sitename:
202 if site['site_fields']['login_base'] == sitename:
204 raise Exception,"Cannot locate site %s"%sitename
206 def locate_node (self,nodename):
207 for site in self.plc_spec['sites']:
208 for node in site['nodes']:
209 if node['name'] == nodename:
211 raise Exception,"Cannot locate node %s"%nodename
213 def locate_hostname (self,hostname):
214 for site in self.plc_spec['sites']:
215 for node in site['nodes']:
216 if node['node_fields']['hostname'] == hostname:
218 raise Exception,"Cannot locate hostname %s"%hostname
220 def locate_key (self,keyname):
221 for key in self.plc_spec['keys']:
222 if key['name'] == keyname:
224 raise Exception,"Cannot locate key %s"%keyname
226 def locate_slice (self, slicename):
227 for slice in self.plc_spec['slices']:
228 if slice['slice_fields']['name'] == slicename:
230 raise Exception,"Cannot locate slice %s"%slicename
232 def all_sliver_objs (self):
234 for slice_spec in self.plc_spec['slices']:
235 slicename = slice_spec['slice_fields']['name']
236 for nodename in slice_spec['nodenames']:
237 result.append(self.locate_sliver_obj (nodename,slicename))
240 def locate_sliver_obj (self,nodename,slicename):
241 (site,node) = self.locate_node(nodename)
242 slice = self.locate_slice (slicename)
244 test_site = TestSite (self, site)
245 test_node = TestNode (self, test_site,node)
246 # xxx the slice site is assumed to be the node site - mhh - probably harmless
247 test_slice = TestSlice (self, test_site, slice)
248 return TestSliver (self, test_node, test_slice)
250 def locate_first_node(self):
251 nodename=self.plc_spec['slices'][0]['nodenames'][0]
252 (site,node) = self.locate_node(nodename)
253 test_site = TestSite (self, site)
254 test_node = TestNode (self, test_site,node)
257 def locate_first_sliver (self):
258 slice_spec=self.plc_spec['slices'][0]
259 slicename=slice_spec['slice_fields']['name']
260 nodename=slice_spec['nodenames'][0]
261 return self.locate_sliver_obj(nodename,slicename)
263 # all different hostboxes used in this plc
264 def gather_hostBoxes(self):
265 # maps on sites and nodes, return [ (host_box,test_node) ]
267 for site_spec in self.plc_spec['sites']:
268 test_site = TestSite (self,site_spec)
269 for node_spec in site_spec['nodes']:
270 test_node = TestNode (self, test_site, node_spec)
271 if not test_node.is_real():
272 tuples.append( (test_node.host_box(),test_node) )
273 # transform into a dict { 'host_box' -> [ test_node .. ] }
275 for (box,node) in tuples:
276 if not result.has_key(box):
279 result[box].append(node)
282 # a step for checking this stuff
283 def show_boxes (self):
284 'print summary of nodes location'
285 for (box,nodes) in self.gather_hostBoxes().iteritems():
286 print box,":"," + ".join( [ node.name() for node in nodes ] )
289 # make this a valid step
290 def kill_all_qemus(self):
291 'kill all qemu instances on the qemu boxes involved in this setup'
292 # this is the brute force version, kill all qemus on that host box
293 for (box,nodes) in self.gather_hostBoxes().iteritems():
294 # pass the first nodename, as we don't push template-qemu on testboxes
295 nodedir=nodes[0].nodedir()
296 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
299 # make this a valid step
300 def list_all_qemus(self):
301 'list all qemu instances on the qemu boxes involved in this setup'
302 for (box,nodes) in self.gather_hostBoxes().iteritems():
303 # this is the brute force version, list all qemus on that host box
304 TestBox(box,self.options.buildname).list_all_qemus()
307 # kill only the right qemus
308 def list_qemus(self):
309 'list qemu instances for our nodes'
310 for (box,nodes) in self.gather_hostBoxes().iteritems():
311 # the fine-grain version
316 # kill only the right qemus
317 def kill_qemus(self):
318 'kill the qemu instances for our nodes'
319 for (box,nodes) in self.gather_hostBoxes().iteritems():
320 # the fine-grain version
325 #################### display config
327 "show test configuration after localization"
328 self.display_pass (1)
329 self.display_pass (2)
333 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
334 def display_pass (self,passno):
335 for (key,val) in self.plc_spec.iteritems():
336 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
340 self.display_site_spec(site)
341 for node in site['nodes']:
342 self.display_node_spec(node)
343 elif key=='initscripts':
344 for initscript in val:
345 self.display_initscript_spec (initscript)
348 self.display_slice_spec (slice)
351 self.display_key_spec (key)
353 if key not in ['sites','initscripts','slices','keys', 'sfa']:
354 print '+ ',key,':',val
356 def display_site_spec (self,site):
357 print '+ ======== site',site['site_fields']['name']
358 for (k,v) in site.iteritems():
359 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
362 print '+ ','nodes : ',
364 print node['node_fields']['hostname'],'',
370 print user['name'],'',
372 elif k == 'site_fields':
373 print '+ login_base',':',v['login_base']
374 elif k == 'address_fields':
380 def display_initscript_spec (self,initscript):
381 print '+ ======== initscript',initscript['initscript_fields']['name']
383 def display_key_spec (self,key):
384 print '+ ======== key',key['name']
386 def display_slice_spec (self,slice):
387 print '+ ======== slice',slice['slice_fields']['name']
388 for (k,v) in slice.iteritems():
401 elif k=='slice_fields':
402 print '+ fields',':',
403 print 'max_nodes=',v['max_nodes'],
408 def display_node_spec (self,node):
409 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
410 print "hostname=",node['node_fields']['hostname'],
411 print "ip=",node['interface_fields']['ip']
412 if self.options.verbose:
413 utils.pprint("node details",node,depth=3)
415 # another entry point for just showing the boxes involved
416 def display_mapping (self):
417 TestPlc.display_mapping_plc(self.plc_spec)
421 def display_mapping_plc (plc_spec):
422 print '+ MyPLC',plc_spec['name']
423 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
424 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
425 for site_spec in plc_spec['sites']:
426 for node_spec in site_spec['nodes']:
427 TestPlc.display_mapping_node(node_spec)
430 def display_mapping_node (node_spec):
431 print '+ NODE %s'%(node_spec['name'])
432 print '+\tqemu box %s'%node_spec['host_box']
433 print '+\thostname=%s'%node_spec['node_fields']['hostname']
435 def resources_pre (self):
436 "run site-dependent pre-test script as defined in LocalTestResources"
437 from LocalTestResources import local_resources
438 return local_resources.step_pre(self)
440 def resources_post (self):
441 "run site-dependent post-test script as defined in LocalTestResources"
442 from LocalTestResources import local_resources
443 return local_resources.step_post(self)
445 def resources_list (self):
446 "run site-dependent list script as defined in LocalTestResources"
447 from LocalTestResources import local_resources
448 return local_resources.step_list(self)
450 def resources_release (self):
451 "run site-dependent release script as defined in LocalTestResources"
452 from LocalTestResources import local_resources
453 return local_resources.step_release(self)
455 def resources_release_plc (self):
456 "run site-dependent release script (plc side) as defined in LocalTestResources"
457 from LocalTestResources import local_resources
458 return local_resources.step_release_plc(self)
460 def resources_release_qemu (self):
461 "run site-dependent release script (qemu side) as defined in LocalTestResources"
462 from LocalTestResources import local_resources
463 return local_resources.step_release_qemu(self)
466 "vserver delete the test myplc"
467 self.run_in_host("vserver --silent %s delete"%self.vservername)
471 # historically the build was being fetched by the tests
472 # now the build pushes itself as a subdir of the tests workdir
473 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
474 def create_vs (self):
475 "vserver creation (no install done)"
476 # push the local build/ dir to the testplc box
478 # a full path for the local calls
479 build_dir=os.path.dirname(sys.argv[0])
480 # sometimes this is empty - set to "." in such a case
481 if not build_dir: build_dir="."
482 build_dir += "/build"
484 # use a standard name - will be relative to remote buildname
486 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
487 self.test_ssh.rmdir(build_dir)
488 self.test_ssh.copy(build_dir,recursive=True)
489 # the repo url is taken from arch-rpms-url
490 # with the last step (i386) removed
491 repo_url = self.options.arch_rpms_url
492 for level in [ 'arch' ]:
493 repo_url = os.path.dirname(repo_url)
494 # pass the vbuild-nightly options to vtest-init-vserver
496 test_env_options += " -p %s"%self.options.personality
497 test_env_options += " -d %s"%self.options.pldistro
498 test_env_options += " -f %s"%self.options.fcdistro
499 script="vtest-init-vserver.sh"
500 vserver_name = self.vservername
501 vserver_options="--netdev eth0 --interface %s"%self.vserverip
503 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
504 vserver_options += " --hostname %s"%vserver_hostname
506 print "Cannot reverse lookup %s"%self.vserverip
507 print "This is considered fatal, as this might pollute the test results"
509 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
510 return self.run_in_host(create_vserver) == 0
514 "yum install myplc, noderepo, and the plain bootstrapfs"
516 # workaround for getting pgsql8.2 on centos5
517 if self.options.fcdistro == "centos5":
518 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
521 if self.options.personality == "linux32":
523 elif self.options.personality == "linux64":
526 raise Exception, "Unsupported personality %r"%self.options.personality
527 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
530 pkgs_list.append ("slicerepo-%s"%nodefamily)
531 pkgs_list.append ("myplc")
532 pkgs_list.append ("noderepo-%s"%nodefamily)
533 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
534 pkgs_string=" ".join(pkgs_list)
535 self.run_in_guest("yum -y install %s"%pkgs_string)
536 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
541 tmpname='%s.plc-config-tty'%(self.name())
542 fileconf=open(tmpname,'w')
543 for var in [ 'PLC_NAME',
548 'PLC_MAIL_SUPPORT_ADDRESS',
551 # Above line was added for integrating SFA Testing
557 'PLC_RESERVATION_GRANULARITY',
560 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
561 fileconf.write('w\n')
562 fileconf.write('q\n')
564 utils.system('cat %s'%tmpname)
565 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
566 utils.system('rm %s'%tmpname)
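# for the record, the temporary file piped into plc-config-tty is just a scripted tty
# session - a hedged example of its contents (variable values are fictitious):
#
#    e PLC_NAME
#    TestLab
#    e PLC_MAIL_SUPPORT_ADDRESS
#    support@example.org
#    w
#    q
#
# i.e. 'e VAR' followed by the value for each variable, then 'w' to write and 'q' to quit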
571 self.run_in_guest('service plc start')
576 self.run_in_guest('service plc stop')
580 "start the PLC vserver"
584 # stores the keys from the config for further use
585 def store_keys(self):
586 "stores test users ssh keys in keys/"
587 for key_spec in self.plc_spec['keys']:
588 TestKey(self,key_spec).store_key()
591 def clean_keys(self):
592 "removes keys cached in keys/"
593 utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
595 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
596 # for later direct access to the nodes
597 def fetch_keys(self):
598 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
600 if not os.path.isdir(dir):
602 vservername=self.vservername
604 prefix = 'debug_ssh_key'
605 for ext in [ 'pub', 'rsa' ] :
606 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
607 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
608 if self.test_ssh.fetch(src,dst) != 0: overall=False
612 "create sites with PLCAPI"
613 return self.do_sites()
615 def clean_sites (self):
616 "delete sites with PLCAPI"
617 return self.do_sites(action="delete")
619 def do_sites (self,action="add"):
620 for site_spec in self.plc_spec['sites']:
621 test_site = TestSite (self,site_spec)
622 if (action != "add"):
623 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
624 test_site.delete_site()
625 # deleted with the site
626 #test_site.delete_users()
629 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
630 test_site.create_site()
631 test_site.create_users()
634 def clean_all_sites (self):
635 "Delete all sites in PLC, and related objects"
636 print 'auth_root',self.auth_root()
637 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
638 for site_id in site_ids:
639 print 'Deleting site_id',site_id
640 self.apiserver.DeleteSite(self.auth_root(),site_id)
643 "create nodes with PLCAPI"
644 return self.do_nodes()
645 def clean_nodes (self):
646 "delete nodes with PLCAPI"
647 return self.do_nodes(action="delete")
649 def do_nodes (self,action="add"):
650 for site_spec in self.plc_spec['sites']:
651 test_site = TestSite (self,site_spec)
653 utils.header("Deleting nodes in site %s"%test_site.name())
654 for node_spec in site_spec['nodes']:
655 test_node=TestNode(self,test_site,node_spec)
656 utils.header("Deleting %s"%test_node.name())
657 test_node.delete_node()
659 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
660 for node_spec in site_spec['nodes']:
661 utils.pprint('Creating node %s'%node_spec,node_spec)
662 test_node = TestNode (self,test_site,node_spec)
663 test_node.create_node ()
666 def nodegroups (self):
667 "create nodegroups with PLCAPI"
668 return self.do_nodegroups("add")
669 def clean_nodegroups (self):
670 "delete nodegroups with PLCAPI"
671 return self.do_nodegroups("delete")
675 def translate_timestamp (start,grain,timestamp):
676 if timestamp < TestPlc.YEAR: return start+timestamp*grain
677 else: return timestamp
680 def timestamp_printable (timestamp):
681 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
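# hedged worked example of translate_timestamp: assuming GetLeaseGranularity returned
# grain=1800 (30 minutes) and start is the current grain boundary, a small value in the
# 'leases' spec is taken as relative:
#    translate_timestamp(start,1800,2)          -> start + 2*1800   (two grains from now)
# whereas a full unix timestamp, being larger than TestPlc.YEAR, is kept as absolute:
#    translate_timestamp(start,1800,1262304000) -> 1262304000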
684 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
686 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
687 print 'API answered grain=',grain
688 start=(now/grain)*grain
690 # find out all nodes that are reservable
691 nodes=self.all_reservable_nodenames()
693 utils.header ("No reservable node found - proceeding without leases")
696 # attach the reservable nodes to the leases as specified in the plc spec
697 # this is where the 'leases' field gets interpreted as relative or absolute
698 for lease_spec in self.plc_spec['leases']:
699 # skip the ones that come with a null slice id
700 if not lease_spec['slice']: continue
701 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
702 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
703 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
704 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
705 if lease_addition['errors']:
706 utils.header("Cannot create leases, %s"%lease_addition['errors'])
709 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
710 (nodes,lease_spec['slice'],
711 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
712 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
716 def clean_leases (self):
717 "remove all leases on the myplc side"
718 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
719 utils.header("Cleaning leases %r"%lease_ids)
720 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
723 def list_leases (self):
724 "list all leases known to the myplc"
725 leases = self.apiserver.GetLeases(self.auth_root())
728 current=l['t_until']>=now
729 if self.options.verbose or current:
730 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
731 TestPlc.timestamp_printable(l['t_from']),
732 TestPlc.timestamp_printable(l['t_until'])))
735 # create nodegroups if needed, and populate
736 def do_nodegroups (self, action="add"):
737 # 1st pass to scan contents
739 for site_spec in self.plc_spec['sites']:
740 test_site = TestSite (self,site_spec)
741 for node_spec in site_spec['nodes']:
742 test_node=TestNode (self,test_site,node_spec)
743 if node_spec.has_key('nodegroups'):
744 nodegroupnames=node_spec['nodegroups']
745 if isinstance(nodegroupnames,StringTypes):
746 nodegroupnames = [ nodegroupnames ]
747 for nodegroupname in nodegroupnames:
748 if not groups_dict.has_key(nodegroupname):
749 groups_dict[nodegroupname]=[]
750 groups_dict[nodegroupname].append(test_node.name())
751 auth=self.auth_root()
753 for (nodegroupname,group_nodes) in groups_dict.iteritems():
755 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
756 # first, check if the nodetagtype is here
757 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
759 tag_type_id = tag_types[0]['tag_type_id']
761 tag_type_id = self.apiserver.AddTagType(auth,
762 {'tagname':nodegroupname,
763 'description': 'for nodegroup %s'%nodegroupname,
766 print 'located tag (type)',nodegroupname,'as',tag_type_id
768 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
770 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
771 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
772 # set node tag on all nodes, value='yes'
773 for nodename in group_nodes:
775 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
777 traceback.print_exc()
778 print 'node',nodename,'seems to already have tag',nodegroupname
781 expect_yes = self.apiserver.GetNodeTags(auth,
782 {'hostname':nodename,
783 'tagname':nodegroupname},
784 ['value'])[0]['value']
785 if expect_yes != "yes":
786 print 'Mismatch node tag on node',nodename,'got',expect_yes
789 if not self.options.dry_run:
790 print 'Cannot find tag',nodegroupname,'on node',nodename
794 print 'cleaning nodegroup',nodegroupname
795 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
797 traceback.print_exc()
801 # return a list of tuples (nodename,qemuname)
802 def all_node_infos (self) :
804 for site_spec in self.plc_spec['sites']:
805 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
806 for node_spec in site_spec['nodes'] ]
809 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
810 def all_reservable_nodenames (self):
812 for site_spec in self.plc_spec['sites']:
813 for node_spec in site_spec['nodes']:
814 node_fields=node_spec['node_fields']
815 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
816 res.append(node_fields['hostname'])
819 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
820 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
821 if self.options.dry_run:
825 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
826 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
827 # the nodes that haven't been checked yet - start with a full list and shrink over time
828 tocheck = self.all_hostnames()
829 utils.header("checking nodes %r"%tocheck)
830 # create a dict hostname -> status
831 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
834 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
836 for array in tocheck_status:
837 hostname=array['hostname']
838 boot_state=array['boot_state']
839 if boot_state == target_boot_state:
840 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
842 # if it's a real node, never mind
843 (site_spec,node_spec)=self.locate_hostname(hostname)
844 if TestNode.is_real_model(node_spec['node_fields']['model']):
845 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
847 boot_state = target_boot_state
848 elif datetime.datetime.now() > graceout:
849 utils.header ("%s still in '%s' state"%(hostname,boot_state))
850 graceout=datetime.datetime.now()+datetime.timedelta(1)
851 status[hostname] = boot_state
853 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
856 if datetime.datetime.now() > timeout:
857 for hostname in tocheck:
858 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
860 # otherwise, sleep for a while
862 # only useful in empty plcs
865 def nodes_booted(self):
866 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
868 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
870 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
871 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
872 vservername=self.vservername
875 local_key = "keys/%(vservername)s-debug.rsa"%locals()
878 local_key = "keys/key1.rsa"
879 node_infos = self.all_node_infos()
880 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
881 for (nodename,qemuname) in node_infos:
882 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
883 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
884 (timeout_minutes,silent_minutes,period))
886 for node_info in node_infos:
887 (hostname,qemuname) = node_info
888 # try to run 'hostname' in the node
889 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
890 # don't spam logs - show the command only after the grace period
891 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
893 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
895 node_infos.remove(node_info)
897 # we will have tried real nodes once, in case they're up - but if not, just skip
898 (site_spec,node_spec)=self.locate_hostname(hostname)
899 if TestNode.is_real_model(node_spec['node_fields']['model']):
900 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
901 node_infos.remove(node_info)
904 if datetime.datetime.now() > timeout:
905 for (hostname,qemuname) in node_infos:
906 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
908 # otherwise, sleep for a while
910 # only useful in empty plcs
913 def nodes_ssh_debug(self):
914 "Tries to ssh into nodes in debug mode with the debug ssh key"
915 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
917 def nodes_ssh_boot(self):
918 "Tries to ssh into nodes in production mode with the root ssh key"
919 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
922 def init_node (self):
923 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
927 "all nodes: invoke GetBootMedium and store result locally"
930 def configure_qemu (self):
931 "all nodes: compute qemu config qemu.conf and store it locally"
934 def reinstall_node (self):
935 "all nodes: mark PLCAPI boot_state as reinstall"
938 def export_qemu (self):
939 "all nodes: push local node-dep directory on the qemu box"
942 ### check hooks : invoke scripts from hooks/{node,slice}
943 def check_hooks_node (self):
944 return self.locate_first_node().check_hooks()
945 def check_hooks_sliver (self) :
946 return self.locate_first_sliver().check_hooks()
948 def check_hooks (self):
949 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
950 return self.check_hooks_node() and self.check_hooks_sliver()
953 def do_check_initscripts(self):
955 for slice_spec in self.plc_spec['slices']:
956 if not slice_spec.has_key('initscriptname'):
958 initscript=slice_spec['initscriptname']
959 for nodename in slice_spec['nodenames']:
960 (site,node) = self.locate_node (nodename)
961 # xxx - passing the wrong site - probably harmless
962 test_site = TestSite (self,site)
963 test_slice = TestSlice (self,test_site,slice_spec)
964 test_node = TestNode (self,test_site,node)
965 test_sliver = TestSliver (self, test_node, test_slice)
966 if not test_sliver.check_initscript(initscript):
970 def check_initscripts(self):
971 "check that the initscripts have triggered"
972 return self.do_check_initscripts()
974 def initscripts (self):
975 "create initscripts with PLCAPI"
976 for initscript in self.plc_spec['initscripts']:
977 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
978 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
981 def clean_initscripts (self):
982 "delete initscripts with PLCAPI"
983 for initscript in self.plc_spec['initscripts']:
984 initscript_name = initscript['initscript_fields']['name']
985 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
987 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
988 print initscript_name,'deleted'
990 print 'deletion went wrong - probably did not exist'
995 "create slices with PLCAPI"
996 return self.do_slices()
998 def clean_slices (self):
999 "delete slices with PLCAPI"
1000 return self.do_slices("delete")
1002 def do_slices (self, action="add"):
1003 for slice in self.plc_spec['slices']:
1004 site_spec = self.locate_site (slice['sitename'])
1005 test_site = TestSite(self,site_spec)
1006 test_slice=TestSlice(self,test_site,slice)
1008 utils.header("Deleting slices in site %s"%test_site.name())
1009 test_slice.delete_slice()
1011 utils.pprint("Creating slice",slice)
1012 test_slice.create_slice()
1013 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1017 def check_slice(self):
1018 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1022 def clear_known_hosts (self):
1023 "remove test nodes entries from the local known_hosts file"
1027 def start_node (self) :
1028 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1031 def check_tcp (self):
1032 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1033 specs = self.plc_spec['tcp_test']
1038 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1039 if not s_test_sliver.run_tcp_server(port,timeout=10):
1043 # idem for the client side
1044 c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
1045 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1049 def plcsh_stress_test (self):
1050 "runs the PLCAPI stress test, which checks Add/Update/Delete on all types - preserves contents"
1051 # install the stress-test in the plc image
1052 location = "/usr/share/plc_api/plcsh_stress_test.py"
1053 remote="/vservers/%s/%s"%(self.vservername,location)
1054 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1056 command += " -- --check"
1057 if self.options.size == 1:
1058 command += " --tiny"
1059 return ( self.run_in_guest(command) == 0)
1061 # populate runs the same utility with slightly different options
1062 # in particular it runs with --preserve (don't clean up) and without --check
1063 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1066 def install_sfa(self):
1067 "yum install sfa, sfa-plc and sfa-client"
1069 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1070 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
1073 def dbclean_sfa(self):
1074 "thoroughly wipes off the SFA database"
1075 return self.run_in_guest("sfa-nuke-plc.py")==0
1078 def plcclean_sfa(self):
1079 "cleans the PLC entries that were created as a side effect of running the script"
1081 sfa_spec=self.plc_spec['sfa']
1083 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1084 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1085 except: print "Slice %s already absent from PLC db"%slicename
1087 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1088 try: self.apiserver.DeletePerson(self.auth_root(),username)
1089 except: print "User %s already absent from PLC db"%username
1091 print "REMEMBER TO RUN import_sfa AGAIN"
1094 def uninstall_sfa(self):
1095 "uses rpm to uninstall sfa - ignore result"
1096 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1097 self.run_in_guest("rm -rf /var/lib/sfa")
1098 self.run_in_guest("rm -rf /etc/sfa")
1099 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1101 self.run_in_guest("rpm -e --noscripts sfa-plc")
1105 def install_unittest_sfa(self):
1106 "yum install sfa-tests"
1108 self.run_in_guest("yum -y install sfa-tests")
1109 return self.run_in_guest("rpm -q sfa-tests")==0
1111 def unittest_sfa(self):
1113 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1117 dirname="conf.%s"%self.plc_spec['name']
1118 if not os.path.isdir(dirname):
1119 utils.system("mkdir -p %s"%dirname)
1120 if not os.path.isdir(dirname):
1121 raise Exception,"Cannot create config dir for plc %s"%self.name()
1124 def conffile(self,filename):
1125 return "%s/%s"%(self.confdir(),filename)
1126 def confsubdir(self,dirname,clean):
1127 subdirname="%s/%s"%(self.confdir(),dirname)
1129 utils.system("rm -rf %s"%subdirname)
1130 if not os.path.isdir(subdirname):
1131 utils.system("mkdir -p %s"%subdirname)
1132 if not os.path.isdir(subdirname):
1133 raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
1136 def conffile_clean (self,filename):
1137 filename=self.conffile(filename)
1138 return utils.system("rm -rf %s"%filename)==0
1141 def configure_sfa(self):
1142 "run sfa-config-tty"
1143 tmpname=self.conffile("sfa-config-tty")
1144 fileconf=open(tmpname,'w')
1145 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1146 'SFA_INTERFACE_HRN',
1147 # 'SFA_REGISTRY_LEVEL1_AUTH',
1148 'SFA_REGISTRY_HOST',
1149 'SFA_AGGREGATE_HOST',
1155 'SFA_PLC_DB_PASSWORD',
1158 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1159 # the way plc_config handles booleans just sucks..
1160 for var in ['SFA_API_DEBUG']:
1162 if self.plc_spec['sfa'][var]: val='true'
1163 fileconf.write ('e %s\n%s\n'%(var,val))
1164 fileconf.write('w\n')
1165 fileconf.write('R\n')
1166 fileconf.write('q\n')
1168 utils.system('cat %s'%tmpname)
1169 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1172 def aggregate_xml_line(self):
1173 return '<aggregate addr="%s" hrn="%s" port="12346"/>' % \
1174 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1176 def registry_xml_line(self):
1177 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1178 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1181 # a cross step that takes all the other plcs as argument
1182 def cross_configure_sfa(self, other_plcs):
1183 # of course with a single plc, other_plcs is an empty list
1186 agg_fname=self.conffile("agg.xml")
1187 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1188 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1189 utils.header ("(Over)wrote %s"%agg_fname)
1190 reg_fname=self.conffile("reg.xml")
1191 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1192 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1193 utils.header ("(Over)wrote %s"%reg_fname)
1194 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1195 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
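# hedged example of what ends up in /etc/sfa/aggregates.xml when one other plc is in
# the mix (address and hrn below are invented):
#
#    <aggregates><aggregate addr="192.168.0.10" hrn="plc2" port="12346"/></aggregates>
#
# registries.xml is produced the same way from registry_xml_line()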
1197 def import_sfa(self):
1199 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1200 return self.run_in_guest('sfa-import-plc.py')==0
1201 # not needed anymore
1202 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1204 def start_sfa(self):
1206 return self.run_in_guest('service sfa start')==0
1208 def configure_sfi(self):
1209 "sfi client configuration"
1210 sfa_spec=self.plc_spec['sfa']
1211 dir_name=self.confsubdir("dot-sfi",clean=True)
1212 file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
1213 fileconf=open(file_name,'w')
1214 fileconf.write (self.plc_spec['keys'][0]['private'])
1216 utils.header ("(Over)wrote %s"%file_name)
1218 file_name=dir_name + os.sep + 'sfi_config'
1219 fileconf=open(file_name,'w')
1220 SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
1221 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1222 fileconf.write('\n')
1223 SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
1224 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1225 fileconf.write('\n')
1226 SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
1227 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1228 fileconf.write('\n')
1229 SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
1230 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1231 fileconf.write('\n')
1233 utils.header ("(Over)wrote %s"%file_name)
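# hedged example of the generated sfi_config (values depend on the sfa spec - the ones
# below are made up):
#
#    SFI_AUTH='plc1.main'
#    SFI_USER='plc1.main.fake-pi'
#    SFI_REGISTRY='http://plc1.example.org:12345/'
#    SFI_SM='http://plc1.example.org:12347/'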
1235 file_name=dir_name + os.sep + 'person.xml'
1236 fileconf=open(file_name,'w')
1237 for record in sfa_spec['sfa_person_xml']:
1238 person_record=record
1239 fileconf.write(person_record)
1240 fileconf.write('\n')
1242 utils.header ("(Over)wrote %s"%file_name)
1244 file_name=dir_name + os.sep + 'slice.xml'
1245 fileconf=open(file_name,'w')
1246 for record in sfa_spec['sfa_slice_xml']:
1248 #slice_record=sfa_spec['sfa_slice_xml']
1249 fileconf.write(slice_record)
1250 fileconf.write('\n')
1251 utils.header ("(Over)wrote %s"%file_name)
1254 file_name=dir_name + os.sep + 'slice.rspec'
1255 fileconf=open(file_name,'w')
1257 for (key, value) in sfa_spec['sfa_slice_rspec'].items():
1259 fileconf.write(slice_rspec)
1260 fileconf.write('\n')
1262 utils.header ("(Over)wrote %s"%file_name)
1264 # push to the remote root's .sfi
1265 location = "root/.sfi"
1266 remote="/vservers/%s/%s"%(self.vservername,location)
1267 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
1271 def clean_sfi (self):
1272 self.run_in_guest("rm -rf /root/.sfi")
1275 def add_user_sfa(self):
1276 return TestUserSfa(self).add_user()
1280 "run sfi.py add (on Registry)"
1284 def create_sfa(self):
1285 "run sfi.py create (on SM) for 1st-time creation"
1288 def update_user_sfa(self):
1289 return TestUserSfa(self).update_user()
1292 def update_sfa(self):
1293 "run sfi.py create (on SM) on existing object"
1297 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1298 sfa_spec=self.plc_spec['sfa']
1299 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1301 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1302 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1303 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1304 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
1307 def check_slice_sfa(self):
1308 "tries to ssh-enter the SFA slice"
1311 def delete_user_sfa(self):
1312 "run sfi.py delete (on SM) for user"
1313 test_user_sfa=TestUserSfa(self)
1314 return test_user_sfa.delete_user()
1317 def delete_slice_sfa(self):
1318 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
1323 return self.run_in_guest('service sfa stop')==0
1326 def populate (self):
1327 "creates random entries in the PLCAPI"
1328 # install the stress-test in the plc image
1329 location = "/usr/share/plc_api/plcsh_stress_test.py"
1330 remote="/vservers/%s/%s"%(self.vservername,location)
1331 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1333 command += " -- --preserve --short-names"
1334 local = (self.run_in_guest(command) == 0)
1335 # second run with --foreign
1336 command += ' --foreign'
1337 remote = (self.run_in_guest(command) == 0)
1338 return ( local and remote)
1340 def gather_logs (self):
1341 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1342 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1343 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1344 # (2) get all the nodes' qemu logs and store them as logs/node.qemu.<node>.log
1345 # (3) get the nodes' /var/log and store it as logs/node.var-log.<node>/*
1346 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1348 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1349 self.gather_var_logs ()
1351 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
1352 self.gather_pgsql_logs ()
1354 print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
1355 for site_spec in self.plc_spec['sites']:
1356 test_site = TestSite (self,site_spec)
1357 for node_spec in site_spec['nodes']:
1358 test_node=TestNode(self,test_site,node_spec)
1359 test_node.gather_qemu_logs()
1361 print "-------------------- TestPlc.gather_logs : nodes' /var/log"
1362 self.gather_nodes_var_logs()
1364 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1365 self.gather_slivers_var_logs()
1368 def gather_slivers_var_logs(self):
1369 for test_sliver in self.all_sliver_objs():
1370 remote = test_sliver.tar_var_logs()
1371 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1372 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1373 utils.system(command)
1376 def gather_var_logs (self):
1377 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1378 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1379 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1380 utils.system(command)
1381 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1382 utils.system(command)
1384 def gather_pgsql_logs (self):
1385 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1386 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1387 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1388 utils.system(command)
1390 def gather_nodes_var_logs (self):
1391 for site_spec in self.plc_spec['sites']:
1392 test_site = TestSite (self,site_spec)
1393 for node_spec in site_spec['nodes']:
1394 test_node=TestNode(self,test_site,node_spec)
1395 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1396 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1397 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1398 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1399 utils.system(command)
1402 # returns the filename to use for sql dump/restore, using options.dbname if set
1403 def dbfile (self, database):
1404 # uses options.dbname if it is found
1406 name=self.options.dbname
1407 if not isinstance(name,StringTypes):
1410 t=datetime.datetime.now()
1413 return "/root/%s-%s.sql"%(database,name)
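# hedged examples of the resulting name: with options.dbname set to 'before-sfa' this
# returns /root/planetlab5-before-sfa.sql; without it, the name falls back to the
# current time, so something like /root/planetlab5-2010-06-22-14-03.sql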
1416 'dump the planetlab5 DB in /root in the PLC - the filename embeds a timestamp'
1417 dump=self.dbfile("planetlab5")
1418 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1419 utils.header('Dumped planetlab5 database in %s'%dump)
1422 def db_restore(self):
1423 'restore the planetlab5 DB - looks broken, but run -n might help'
1424 dump=self.dbfile("planetlab5")
1425 ##stop httpd service
1426 self.run_in_guest('service httpd stop')
1427 # xxx - need another wrapper
1428 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1429 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1430 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1431 ##starting httpd service
1432 self.run_in_guest('service httpd start')
1434 utils.header('Database restored from ' + dump)
1437 def standby_1(): pass
1439 def standby_2(): pass
1441 def standby_3(): pass
1443 def standby_4(): pass
1445 def standby_5(): pass
1447 def standby_6(): pass
1449 def standby_7(): pass
1451 def standby_8(): pass
1453 def standby_9(): pass
1455 def standby_10(): pass
1457 def standby_11(): pass
1459 def standby_12(): pass
1461 def standby_13(): pass
1463 def standby_14(): pass
1465 def standby_15(): pass
1467 def standby_16(): pass
1469 def standby_17(): pass
1471 def standby_18(): pass
1473 def standby_19(): pass
1475 def standby_20(): pass