1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
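# illustrative sketch, not an actual step of the harness - a typical step method looks like
#     def some_step (self):
#         "one-line description, picked up by the steps listing"
#         return self.run_in_guest('some-command') == 0
# the name and command here are made up; only the (self) signature and the boolean result
# are required, as stated above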
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
55 def slice_mapper_options (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
69 def slice_mapper_options_sfa (method):
73 slice_method = TestSliceSfa.__dict__[method.__name__]
74 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
75 site_spec = self.locate_site (slice_spec['sitename'])
76 test_site = TestSite(self,site_spec)
77 test_slice=TestSliceSfa(self,test_site,slice_spec)
78 if not slice_method(test_slice,self.options): overall=False
80 # restore the doc text
81 actual.__doc__=method.__doc__
90 'display', 'resources_pre', SEP,
91 'delete_vs','create_vs','install', 'configure', 'start', SEP,
92 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
93 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
94 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
95 'kill_all_qemus', 'start_node', SEP,
96 # better use of time: do this now that the nodes are taking off
97 'plcsh_stress_test', SEP,
98 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
99 # xxx tmp - working on focusing on one side only
100 # 'configure_sfi@1', 'add_sfa@1', 'update_sfa', 'view_sfa', SEPSFA,
101 'configure_sfi', 'add_sfa', 'update_sfa', 'view_sfa', SEPSFA,
102 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEPSFA,
103 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEPSFA,
104 'check_tcp', 'check_hooks', SEP,
105 'force_gather_logs', 'force_resources_post', SEP,
108 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
109 'stop', 'vs_start', SEP,
110 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
111 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
112 'clean_leases', 'list_leases', SEP,
114 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
115 'plcclean_sfa', 'dbclean_sfa', 'logclean_sfa', 'uninstall_sfa', 'clean_sfi', SEP,
116 'db_dump' , 'db_restore', SEP,
117 'standby_1 through 20',SEP,
121 def printable_steps (list):
122 single_line=" ".join(list)+" "
123 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
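# illustrative example with made-up step names:
#     printable_steps(['install','configure',SEP,'start'])
# returns "install configure \" followed by a newline and then "start " - each SEP/SEPSFA
# marker becomes a backslash-newline so the step list prints one group per line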
125 def valid_step (step):
126 return step != SEP and step != SEPSFA
128 # turn off the sfa-related steps when build has skipped SFA
129 # this is originally for centos5 as recent SFAs won't build on this platform
131 def check_whether_build_has_sfa (rpms_url):
132 # warning, we're now building 'sface' so let's be a bit more picky
133 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
134 # full builds are expected to return with 0 here
136 # move all steps containing 'sfa' from default_steps to other_steps
137 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
138 TestPlc.other_steps += sfa_steps
139 for step in sfa_steps: TestPlc.default_steps.remove(step)
141 def __init__ (self,plc_spec,options):
142 self.plc_spec=plc_spec
144 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
146 self.vserverip=plc_spec['vserverip']
147 self.vservername=plc_spec['vservername']
148 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
151 raise Exception,'chroot-based myplc testing is deprecated'
152 self.apiserver=TestApiserver(self.url,options.dry_run)
155 name=self.plc_spec['name']
156 return "%s.%s"%(name,self.vservername)
159 return self.plc_spec['hostname']
162 return self.test_ssh.is_local()
164 # define the API methods on this object through xmlrpc
165 # would help, but not strictly necessary
169 def actual_command_in_guest (self,command):
170 return self.test_ssh.actual_command(self.host_to_guest(command))
172 def start_guest (self):
173 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
175 def run_in_guest (self,command):
176 return utils.system(self.actual_command_in_guest(command))
178 def run_in_host (self,command):
179 return self.test_ssh.run_in_buildname(command)
181 #command gets run in the vserver
182 def host_to_guest(self,command):
183 return "vserver %s exec %s"%(self.vservername,command)
185 #command gets run in the vserver
186 def start_guest_in_host(self):
187 return "vserver %s start"%(self.vservername)
190 def run_in_guest_piped (self,local,remote):
191 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
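# illustrative sketch with a made-up vservername and command: with vservername 'vplc01',
#     self.run_in_guest('service plc start')
# goes through host_to_guest to become 'vserver vplc01 exec service plc start', gets wrapped
# in the ssh invocation computed by TestSsh for the test box, and is finally run through
# utils.system on the local side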
193 def auth_root (self):
194 return {'Username':self.plc_spec['PLC_ROOT_USER'],
195 'AuthMethod':'password',
196 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
197 'Role' : self.plc_spec['role']
199 def locate_site (self,sitename):
200 for site in self.plc_spec['sites']:
201 if site['site_fields']['name'] == sitename:
203 if site['site_fields']['login_base'] == sitename:
205 raise Exception,"Cannot locate site %s"%sitename
207 def locate_node (self,nodename):
208 for site in self.plc_spec['sites']:
209 for node in site['nodes']:
210 if node['name'] == nodename:
212 raise Exception,"Cannot locate node %s"%nodename
214 def locate_hostname (self,hostname):
215 for site in self.plc_spec['sites']:
216 for node in site['nodes']:
217 if node['node_fields']['hostname'] == hostname:
219 raise Exception,"Cannot locate hostname %s"%hostname
221 def locate_key (self,keyname):
222 for key in self.plc_spec['keys']:
223 if key['name'] == keyname:
225 raise Exception,"Cannot locate key %s"%keyname
227 def locate_slice (self, slicename):
228 for slice in self.plc_spec['slices']:
229 if slice['slice_fields']['name'] == slicename:
231 raise Exception,"Cannot locate slice %s"%slicename
233 def all_sliver_objs (self):
235 for slice_spec in self.plc_spec['slices']:
236 slicename = slice_spec['slice_fields']['name']
237 for nodename in slice_spec['nodenames']:
238 result.append(self.locate_sliver_obj (nodename,slicename))
241 def locate_sliver_obj (self,nodename,slicename):
242 (site,node) = self.locate_node(nodename)
243 slice = self.locate_slice (slicename)
245 test_site = TestSite (self, site)
246 test_node = TestNode (self, test_site,node)
247 # xxx the slice site is assumed to be the node site - mhh - probably harmless
248 test_slice = TestSlice (self, test_site, slice)
249 return TestSliver (self, test_node, test_slice)
251 def locate_first_node(self):
252 nodename=self.plc_spec['slices'][0]['nodenames'][0]
253 (site,node) = self.locate_node(nodename)
254 test_site = TestSite (self, site)
255 test_node = TestNode (self, test_site,node)
258 def locate_first_sliver (self):
259 slice_spec=self.plc_spec['slices'][0]
260 slicename=slice_spec['slice_fields']['name']
261 nodename=slice_spec['nodenames'][0]
262 return self.locate_sliver_obj(nodename,slicename)
264 # all different hostboxes used in this plc
265 def gather_hostBoxes(self):
266 # maps on sites and nodes, return [ (host_box,test_node) ]
268 for site_spec in self.plc_spec['sites']:
269 test_site = TestSite (self,site_spec)
270 for node_spec in site_spec['nodes']:
271 test_node = TestNode (self, test_site, node_spec)
272 if not test_node.is_real():
273 tuples.append( (test_node.host_box(),test_node) )
274 # transform into a dict { 'host_box' -> [ test_node .. ] }
276 for (box,node) in tuples:
277 if not result.has_key(box):
280 result[box].append(node)
283 # a step for checking this stuff
284 def show_boxes (self):
285 'print summary of nodes location'
286 for (box,nodes) in self.gather_hostBoxes().iteritems():
287 print box,":"," + ".join( [ node.name() for node in nodes ] )
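# illustrative output with made-up names:
#     qemubox1.example.org : node1 + node2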
290 # make this a valid step
291 def kill_all_qemus(self):
292 'kill all qemu instances on the qemu boxes involved in this setup'
293 # this is the brute force version, kill all qemus on that host box
294 for (box,nodes) in self.gather_hostBoxes().iteritems():
295 # pass the first nodename, as we don't push template-qemu on testboxes
296 nodedir=nodes[0].nodedir()
297 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
300 # make this a valid step
301 def list_all_qemus(self):
302 'list all qemu instances on the qemu boxes involved in this setup'
303 for (box,nodes) in self.gather_hostBoxes().iteritems():
304 # this is the brute force version, list all qemus on that host box
305 TestBox(box,self.options.buildname).list_all_qemus()
308 # list only the right qemus
309 def list_qemus(self):
310 'list qemu instances for our nodes'
311 for (box,nodes) in self.gather_hostBoxes().iteritems():
312 # the fine-grain version
317 # kill only the right qemus
318 def kill_qemus(self):
319 'kill the qemu instances for our nodes'
320 for (box,nodes) in self.gather_hostBoxes().iteritems():
321 # the fine-grain version
326 #################### display config
328 "show test configuration after localization"
329 self.display_pass (1)
330 self.display_pass (2)
334 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
335 def display_pass (self,passno):
336 for (key,val) in self.plc_spec.iteritems():
337 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
341 self.display_site_spec(site)
342 for node in site['nodes']:
343 self.display_node_spec(node)
344 elif key=='initscripts':
345 for initscript in val:
346 self.display_initscript_spec (initscript)
349 self.display_slice_spec (slice)
352 self.display_key_spec (key)
354 if key not in ['sites','initscripts','slices','keys', 'sfa']:
355 print '+ ',key,':',val
357 def display_site_spec (self,site):
358 print '+ ======== site',site['site_fields']['name']
359 for (k,v) in site.iteritems():
360 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
363 print '+ ','nodes : ',
365 print node['node_fields']['hostname'],'',
371 print user['name'],'',
373 elif k == 'site_fields':
374 print '+ login_base',':',v['login_base']
375 elif k == 'address_fields':
381 def display_initscript_spec (self,initscript):
382 print '+ ======== initscript',initscript['initscript_fields']['name']
384 def display_key_spec (self,key):
385 print '+ ======== key',key['name']
387 def display_slice_spec (self,slice):
388 print '+ ======== slice',slice['slice_fields']['name']
389 for (k,v) in slice.iteritems():
402 elif k=='slice_fields':
403 print '+ fields',':',
404 print 'max_nodes=',v['max_nodes'],
409 def display_node_spec (self,node):
410 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
411 print "hostname=",node['node_fields']['hostname'],
412 print "ip=",node['interface_fields']['ip']
413 if self.options.verbose:
414 utils.pprint("node details",node,depth=3)
416 # another entry point for just showing the boxes involved
417 def display_mapping (self):
418 TestPlc.display_mapping_plc(self.plc_spec)
422 def display_mapping_plc (plc_spec):
423 print '+ MyPLC',plc_spec['name']
424 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
425 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
426 for site_spec in plc_spec['sites']:
427 for node_spec in site_spec['nodes']:
428 TestPlc.display_mapping_node(node_spec)
431 def display_mapping_node (node_spec):
432 print '+ NODE %s'%(node_spec['name'])
433 print '+\tqemu box %s'%node_spec['host_box']
434 print '+\thostname=%s'%node_spec['node_fields']['hostname']
436 def resources_pre (self):
437 "run site-dependant pre-test script as defined in LocalTestResources"
438 from LocalTestResources import local_resources
439 return local_resources.step_pre(self)
441 def resources_post (self):
442 "run site-dependant post-test script as defined in LocalTestResources"
443 from LocalTestResources import local_resources
444 return local_resources.step_post(self)
446 def resources_list (self):
447 "run site-dependant list script as defined in LocalTestResources"
448 from LocalTestResources import local_resources
449 return local_resources.step_list(self)
451 def resources_release (self):
452 "run site-dependant release script as defined in LocalTestResources"
453 from LocalTestResources import local_resources
454 return local_resources.step_release(self)
456 def resources_release_plc (self):
457 "run site-dependant release script as defined in LocalTestResources"
458 from LocalTestResources import local_resources
459 return local_resources.step_release_plc(self)
461 def resources_release_qemu (self):
462 "run site-dependant release script as defined in LocalTestResources"
463 from LocalTestResources import local_resources
464 return local_resources.step_release_qemu(self)
467 "vserver delete the test myplc"
468 self.run_in_host("vserver --silent %s delete"%self.vservername)
472 # historically the build was being fetched by the tests
473 # now the build pushes itself as a subdir of the tests workdir
474 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
475 def create_vs (self):
476 "vserver creation (no install done)"
477 # push the local build/ dir to the testplc box
479 # a full path for the local calls
480 build_dir=os.path.dirname(sys.argv[0])
481 # sometimes this is empty - set to "." in such a case
482 if not build_dir: build_dir="."
483 build_dir += "/build"
485 # use a standard name - will be relative to remote buildname
487 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
488 self.test_ssh.rmdir(build_dir)
489 self.test_ssh.copy(build_dir,recursive=True)
490 # the repo url is taken from arch-rpms-url
491 # with the last step (i386) removed
492 repo_url = self.options.arch_rpms_url
493 for level in [ 'arch' ]:
494 repo_url = os.path.dirname(repo_url)
495 # pass the vbuild-nightly options to vtest-init-vserver
497 test_env_options += " -p %s"%self.options.personality
498 test_env_options += " -d %s"%self.options.pldistro
499 test_env_options += " -f %s"%self.options.fcdistro
500 script="vtest-init-vserver.sh"
501 vserver_name = self.vservername
502 vserver_options="--netdev eth0 --interface %s"%self.vserverip
504 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
505 vserver_options += " --hostname %s"%vserver_hostname
507 print "Cannot reverse lookup %s"%self.vserverip
508 print "This is considered fatal, as this might pollute the test results"
510 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
511 return self.run_in_host(create_vserver) == 0
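# illustrative sketch of the assembled command, with all values made up:
#     ./build/vtest-init-vserver.sh -p linux64 -d planetlab -f f12 vplc01 \
#         http://build.example.org/2010.06.20--tag -- --netdev eth0 --interface 10.0.0.2 --hostname vplc01.example.org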
515 "yum install myplc, noderepo, and the plain bootstrapfs"
517 # workaround for getting pgsql8.2 on centos5
518 if self.options.fcdistro == "centos5":
519 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
522 if self.options.personality == "linux32":
524 elif self.options.personality == "linux64":
527 raise Exception, "Unsupported personality %r"%self.options.personality
528 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
531 pkgs_list.append ("slicerepo-%s"%nodefamily)
532 pkgs_list.append ("myplc")
533 pkgs_list.append ("noderepo-%s"%nodefamily)
534 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
535 pkgs_string=" ".join(pkgs_list)
536 self.run_in_guest("yum -y install %s"%pkgs_string)
537 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
542 tmpname='%s.plc-config-tty'%(self.name())
543 fileconf=open(tmpname,'w')
544 for var in [ 'PLC_NAME',
549 'PLC_MAIL_SUPPORT_ADDRESS',
552 # Above line was added for integrating SFA Testing
558 'PLC_RESERVATION_GRANULARITY',
561 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
562 fileconf.write('w\n')
563 fileconf.write('q\n')
565 utils.system('cat %s'%tmpname)
566 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
567 utils.system('rm %s'%tmpname)
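# illustrative sketch of the generated plc-config-tty input, with the variable list abridged
# and values made up:
#     e PLC_NAME
#     TestLab
#     e PLC_MAIL_SUPPORT_ADDRESS
#     support@example.org
#     ...
#     w
#     q
# one 'e <var>' / value pair per configured variable, then write and quit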
572 self.run_in_guest('service plc start')
577 self.run_in_guest('service plc stop')
581 "start the PLC vserver"
585 # stores the keys from the config for further use
586 def store_keys(self):
587 "stores test users ssh keys in keys/"
588 for key_spec in self.plc_spec['keys']:
589 TestKey(self,key_spec).store_key()
592 def clean_keys(self):
593 "removes keys cached in keys/"
594 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
596 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
597 # for later direct access to the nodes
598 def fetch_keys(self):
599 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
601 if not os.path.isdir(dir):
603 vservername=self.vservername
605 prefix = 'debug_ssh_key'
606 for ext in [ 'pub', 'rsa' ] :
607 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
608 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
609 if self.test_ssh.fetch(src,dst) != 0: overall=False
613 "create sites with PLCAPI"
614 return self.do_sites()
616 def clean_sites (self):
617 "delete sites with PLCAPI"
618 return self.do_sites(action="delete")
620 def do_sites (self,action="add"):
621 for site_spec in self.plc_spec['sites']:
622 test_site = TestSite (self,site_spec)
623 if (action != "add"):
624 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
625 test_site.delete_site()
626 # deleted with the site
627 #test_site.delete_users()
630 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
631 test_site.create_site()
632 test_site.create_users()
635 def clean_all_sites (self):
636 "Delete all sites in PLC, and related objects"
637 print 'auth_root',self.auth_root()
638 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
639 for site_id in site_ids:
640 print 'Deleting site_id',site_id
641 self.apiserver.DeleteSite(self.auth_root(),site_id)
644 "create nodes with PLCAPI"
645 return self.do_nodes()
646 def clean_nodes (self):
647 "delete nodes with PLCAPI"
648 return self.do_nodes(action="delete")
650 def do_nodes (self,action="add"):
651 for site_spec in self.plc_spec['sites']:
652 test_site = TestSite (self,site_spec)
654 utils.header("Deleting nodes in site %s"%test_site.name())
655 for node_spec in site_spec['nodes']:
656 test_node=TestNode(self,test_site,node_spec)
657 utils.header("Deleting %s"%test_node.name())
658 test_node.delete_node()
660 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
661 for node_spec in site_spec['nodes']:
662 utils.pprint('Creating node %s'%node_spec,node_spec)
663 test_node = TestNode (self,test_site,node_spec)
664 test_node.create_node ()
667 def nodegroups (self):
668 "create nodegroups with PLCAPI"
669 return self.do_nodegroups("add")
670 def clean_nodegroups (self):
671 "delete nodegroups with PLCAPI"
672 return self.do_nodegroups("delete")
676 def translate_timestamp (start,grain,timestamp):
677 if timestamp < TestPlc.YEAR: return start+timestamp*grain
678 else: return timestamp
681 def timestamp_printable (timestamp):
682 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
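# illustrative example, assuming grain=1800 and start already aligned on the grain:
#     translate_timestamp(start,1800,2)          -> start+3600   # small values are relative, counted in grains
#     translate_timestamp(start,1800,1300000000) -> 1300000000   # values beyond TestPlc.YEAR (presumably about a year
#                                                                # in seconds) are kept as absolute epoch times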
685 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
687 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
688 print 'API answered grain=',grain
689 start=(now/grain)*grain
691 # find out all nodes that are reservable
692 nodes=self.all_reservable_nodenames()
694 utils.header ("No reservable node found - proceeding without leases")
697 # attach them to the leases as specified in plc_specs
698 # this is where the 'leases' field gets interpreted as relative or absolute
699 for lease_spec in self.plc_spec['leases']:
700 # skip the ones that come with a null slice id
701 if not lease_spec['slice']: continue
702 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
703 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
704 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
705 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
706 if lease_addition['errors']:
707 utils.header("Cannot create leases, %s"%lease_addition['errors'])
710 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
711 (nodes,lease_spec['slice'],
712 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
713 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
717 def clean_leases (self):
718 "remove all leases in the myplc side"
719 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
720 utils.header("Cleaning leases %r"%lease_ids)
721 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
724 def list_leases (self):
725 "list all leases known to the myplc"
726 leases = self.apiserver.GetLeases(self.auth_root())
729 current=l['t_until']>=now
730 if self.options.verbose or current:
731 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
732 TestPlc.timestamp_printable(l['t_from']),
733 TestPlc.timestamp_printable(l['t_until'])))
736 # create nodegroups if needed, and populate
737 def do_nodegroups (self, action="add"):
738 # 1st pass to scan contents
740 for site_spec in self.plc_spec['sites']:
741 test_site = TestSite (self,site_spec)
742 for node_spec in site_spec['nodes']:
743 test_node=TestNode (self,test_site,node_spec)
744 if node_spec.has_key('nodegroups'):
745 nodegroupnames=node_spec['nodegroups']
746 if isinstance(nodegroupnames,StringTypes):
747 nodegroupnames = [ nodegroupnames ]
748 for nodegroupname in nodegroupnames:
749 if not groups_dict.has_key(nodegroupname):
750 groups_dict[nodegroupname]=[]
751 groups_dict[nodegroupname].append(test_node.name())
752 auth=self.auth_root()
754 for (nodegroupname,group_nodes) in groups_dict.iteritems():
756 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
757 # first, check if the nodetagtype is here
758 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
760 tag_type_id = tag_types[0]['tag_type_id']
762 tag_type_id = self.apiserver.AddTagType(auth,
763 {'tagname':nodegroupname,
764 'description': 'for nodegroup %s'%nodegroupname,
767 print 'located tag (type)',nodegroupname,'as',tag_type_id
769 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
771 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
772 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
773 # set node tag on all nodes, value='yes'
774 for nodename in group_nodes:
776 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
778 traceback.print_exc()
779 print 'node',nodename,'seems to already have tag',nodegroupname
782 expect_yes = self.apiserver.GetNodeTags(auth,
783 {'hostname':nodename,
784 'tagname':nodegroupname},
785 ['value'])[0]['value']
786 if expect_yes != "yes":
787 print 'Mismatch node tag on node',nodename,'got',expect_yes
790 if not self.options.dry_run:
791 print 'Cannot find tag',nodegroupname,'on node',nodename
795 print 'cleaning nodegroup',nodegroupname
796 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
798 traceback.print_exc()
802 # return a list of tuples (nodename,qemuname)
803 def all_node_infos (self) :
805 for site_spec in self.plc_spec['sites']:
806 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
807 for node_spec in site_spec['nodes'] ]
810 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
811 def all_reservable_nodenames (self):
813 for site_spec in self.plc_spec['sites']:
814 for node_spec in site_spec['nodes']:
815 node_fields=node_spec['node_fields']
816 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
817 res.append(node_fields['hostname'])
820 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
821 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
822 if self.options.dry_run:
826 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
827 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
828 # the nodes that haven't checked yet - start with a full list and shrink over time
829 tocheck = self.all_hostnames()
830 utils.header("checking nodes %r"%tocheck)
831 # create a dict hostname -> status
832 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
835 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
837 for array in tocheck_status:
838 hostname=array['hostname']
839 boot_state=array['boot_state']
840 if boot_state == target_boot_state:
841 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
843 # if it's a real node, never mind
844 (site_spec,node_spec)=self.locate_hostname(hostname)
845 if TestNode.is_real_model(node_spec['node_fields']['model']):
846 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
848 boot_state = target_boot_state
849 elif datetime.datetime.now() > graceout:
850 utils.header ("%s still in '%s' state"%(hostname,boot_state))
851 graceout=datetime.datetime.now()+datetime.timedelta(1)
852 status[hostname] = boot_state
854 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
857 if datetime.datetime.now() > timeout:
858 for hostname in tocheck:
859 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
861 # otherwise, sleep for a while
863 # only useful in empty plcs
866 def nodes_booted(self):
867 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
869 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
871 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
872 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
873 vservername=self.vservername
876 local_key = "keys/%(vservername)s-debug.rsa"%locals()
879 local_key = "keys/key1.rsa"
880 node_infos = self.all_node_infos()
881 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
882 for (nodename,qemuname) in node_infos:
883 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
884 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
885 (timeout_minutes,silent_minutes,period))
887 for node_info in node_infos:
888 (hostname,qemuname) = node_info
889 # try to run 'hostname' in the node
890 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
891 # don't spam logs - show the command only after the grace period
892 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
894 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
896 node_infos.remove(node_info)
898 # we will have tried real nodes once, in case they're up - but if not, just skip
899 (site_spec,node_spec)=self.locate_hostname(hostname)
900 if TestNode.is_real_model(node_spec['node_fields']['model']):
901 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
902 node_infos.remove(node_info)
905 if datetime.datetime.now() > timeout:
906 for (hostname,qemuname) in node_infos:
907 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
909 # otherwise, sleep for a while
911 # only useful in empty plcs
914 def nodes_ssh_debug(self):
915 "Tries to ssh into nodes in debug mode with the debug ssh key"
916 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
918 def nodes_ssh_boot(self):
919 "Tries to ssh into nodes in production mode with the root ssh key"
920 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
923 def init_node (self):
924 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
928 "all nodes: invoke GetBootMedium and store result locally"
931 def configure_qemu (self):
932 "all nodes: compute qemu config qemu.conf and store it locally"
935 def reinstall_node (self):
936 "all nodes: mark PLCAPI boot_state as reinstall"
939 def export_qemu (self):
940 "all nodes: push local node-dep directory on the qemu box"
943 ### check hooks : invoke scripts from hooks/{node,slice}
944 def check_hooks_node (self):
945 return self.locate_first_node().check_hooks()
946 def check_hooks_sliver (self) :
947 return self.locate_first_sliver().check_hooks()
949 def check_hooks (self):
950 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
951 return self.check_hooks_node() and self.check_hooks_sliver()
954 def do_check_initscripts(self):
956 for slice_spec in self.plc_spec['slices']:
957 if not slice_spec.has_key('initscriptname'):
959 initscript=slice_spec['initscriptname']
960 for nodename in slice_spec['nodenames']:
961 (site,node) = self.locate_node (nodename)
962 # xxx - passing the wrong site - probably harmless
963 test_site = TestSite (self,site)
964 test_slice = TestSlice (self,test_site,slice_spec)
965 test_node = TestNode (self,test_site,node)
966 test_sliver = TestSliver (self, test_node, test_slice)
967 if not test_sliver.check_initscript(initscript):
971 def check_initscripts(self):
972 "check that the initscripts have triggered"
973 return self.do_check_initscripts()
975 def initscripts (self):
976 "create initscripts with PLCAPI"
977 for initscript in self.plc_spec['initscripts']:
978 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
979 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
982 def clean_initscripts (self):
983 "delete initscripts with PLCAPI"
984 for initscript in self.plc_spec['initscripts']:
985 initscript_name = initscript['initscript_fields']['name']
986 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
988 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
989 print initscript_name,'deleted'
991 print 'deletion went wrong - probably did not exist'
996 "create slices with PLCAPI"
997 return self.do_slices()
999 def clean_slices (self):
1000 "delete slices with PLCAPI"
1001 return self.do_slices("delete")
1003 def do_slices (self, action="add"):
1004 for slice in self.plc_spec['slices']:
1005 site_spec = self.locate_site (slice['sitename'])
1006 test_site = TestSite(self,site_spec)
1007 test_slice=TestSlice(self,test_site,slice)
1009 utils.header("Deleting slices in site %s"%test_site.name())
1010 test_slice.delete_slice()
1012 utils.pprint("Creating slice",slice)
1013 test_slice.create_slice()
1014 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1017 @slice_mapper_options
1018 def check_slice(self):
1019 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1023 def clear_known_hosts (self):
1024 "remove test nodes entries from the local known_hosts file"
1028 def start_node (self) :
1029 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1032 def check_tcp (self):
1033 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1034 specs = self.plc_spec['tcp_test']
1039 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1040 if not s_test_sliver.run_tcp_server(port,timeout=10):
1044 # idem for the client side
1045 c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
1046 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1050 def plcsh_stress_test (self):
1051 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1052 # install the stress-test in the plc image
1053 location = "/usr/share/plc_api/plcsh_stress_test.py"
1054 remote="/vservers/%s/%s"%(self.vservername,location)
1055 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1057 command += " -- --check"
1058 if self.options.size == 1:
1059 command += " --tiny"
1060 return ( self.run_in_guest(command) == 0)
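# illustrative sketch: the guest command assembled in the elided line above is presumably the
# remote script path, so the stress test boils down to running, inside the vserver,
#     /usr/share/plc_api/plcsh_stress_test.py -- --check [--tiny]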
1062 # populate runs the same utility with slightly different options
1063 # in particular runs with --preserve (don't cleanup) and without --check
1064 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1067 def install_sfa(self):
1068 "yum install sfa, sfa-plc and sfa-client"
1070 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1071 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
1074 def dbclean_sfa(self):
1075 "thoroughly wipes off the SFA database"
1076 return self.run_in_guest("sfa-nuke-plc.py")==0
1078 def plcclean_sfa(self):
1079 "cleans the PLC entries that were created as a side effect of running the script"
1081 sfa_spec=self.plc_spec['sfa']
1083 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1084 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1085 except: print "Slice %s already absent from PLC db"%slicename
1087 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1088 try: self.apiserver.DeletePerson(self.auth_root(),username)
1089 except: print "User %s already absent from PLC db"%username
1091 print "REMEMBER TO RUN import_sfa AGAIN"
1094 def logclean_sfa(self):
1095 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1098 def uninstall_sfa(self):
1099 "uses rpm to uninstall sfa - ignore result"
1100 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1101 self.run_in_guest("rm -rf /var/lib/sfa")
1106 dirname="conf.%s"%self.plc_spec['name']
1107 if not os.path.isdir(dirname):
1108 utils.system("mkdir -p %s"%dirname)
1109 if not os.path.isdir(dirname):
1110 raise "Cannot create config dir for plc %s"%self.name()
1113 def conffile(self,filename):
1114 return "%s/%s"%(self.confdir(),filename)
1115 def confsubdir(self,dirname,clean):
1116 subdirname="%s/%s"%(self.confdir(),dirname)
1118 utils.system("rm -rf %s"%subdirname)
1119 if not os.path.isdir(subdirname):
1120 utils.system("mkdir -p %s"%subdirname)
1121 if not os.path.isdir(subdirname):
1122 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1125 def conffile_clean (self,filename):
1126 filename=self.conffile(filename)
1127 return utils.system("rm -rf %s"%filename)==0
1130 def configure_sfa(self):
1131 "run sfa-config-tty"
1132 tmpname=self.conffile("sfa-config-tty")
1133 fileconf=open(tmpname,'w')
1134 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1135 'SFA_INTERFACE_HRN',
1136 # 'SFA_REGISTRY_LEVEL1_AUTH',
1137 'SFA_REGISTRY_HOST',
1138 'SFA_AGGREGATE_HOST',
1144 'SFA_PLC_DB_PASSWORD',
1147 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1148 fileconf.write('w\n')
1149 fileconf.write('R\n')
1150 fileconf.write('q\n')
1152 utils.system('cat %s'%tmpname)
1153 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1156 def aggregate_xml_line(self):
1157 return '<aggregate addr="%s" hrn="%s" port="12346"/>' % \
1158 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1160 def registry_xml_line(self):
1161 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1162 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1165 # a cross step that takes all other plcs as argument
1166 def cross_configure_sfa(self, other_plcs):
1167 # of course with a single plc, other_plcs is an empty list
1170 agg_fname=self.conffile("agg.xml")
1171 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1172 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1173 utils.header ("(Over)wrote %s"%agg_fname)
1174 reg_fname=self.conffile("reg.xml")
1175 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1176 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1177 utils.header ("(Over)wrote %s"%reg_fname)
1178 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1179 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
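# illustrative sketch of the files produced, with made-up address and hrn: with one other plc,
# agg.xml would contain
#     <aggregates><aggregate addr="10.0.0.3" hrn="plc2test" port="12346"/></aggregates>
# and reg.xml the matching <registries> element; both then land in the guest's /etc/sfa/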
1181 def import_sfa(self):
1183 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1184 return self.run_in_guest('sfa-import-plc.py')==0
1185 # not needed anymore
1186 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1188 def start_sfa(self):
1190 return self.run_in_guest('service sfa start')==0
1192 def configure_sfi(self):
1193 "sfi client configuration"
1194 sfa_spec=self.plc_spec['sfa']
1195 dir_name=self.confsubdir("dot-sfi",clean=True)
1196 file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
1197 fileconf=open(file_name,'w')
1198 fileconf.write (self.plc_spec['keys'][0]['private'])
1201 file_name=dir_name + os.sep + 'sfi_config'
1202 fileconf=open(file_name,'w')
1203 SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
1204 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1205 fileconf.write('\n')
1206 SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
1207 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1208 fileconf.write('\n')
1209 SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
1210 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1211 fileconf.write('\n')
1212 SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
1213 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1214 fileconf.write('\n')
1217 file_name=dir_name + os.sep + 'person.xml'
1218 fileconf=open(file_name,'w')
1219 for record in sfa_spec['sfa_person_xml']:
1220 person_record=record
1221 fileconf.write(person_record)
1222 fileconf.write('\n')
1225 file_name=dir_name + os.sep + 'slice.xml'
1226 fileconf=open(file_name,'w')
1227 for record in sfa_spec['sfa_slice_xml']:
1228 slice_record=record
1229 #slice_record=sfa_spec['sfa_slice_xml']
1230 fileconf.write(slice_record)
1231 fileconf.write('\n')
1234 file_name=dir_name + os.sep + 'slice.rspec'
1235 fileconf=open(file_name,'w')
1237 for (key, value) in sfa_spec['sfa_slice_rspec'].items():
1239 fileconf.write(slice_rspec)
1240 fileconf.write('\n')
1242 location = "root/.sfi"
1243 remote="/vservers/%s/%s"%(self.vservername,location)
1244 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
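# illustrative sketch of the generated sfi_config, with made-up values:
#     SFI_AUTH='plctest.main'
#     SFI_USER='plctest.main.fake-pi'
#     SFI_REGISTRY='http://plc1.example.org:12345/'
#     SFI_SM='http://plc1.example.org:12347/'
# together with the <piuser>.pkey, person.xml, slice.xml and slice.rspec files, the whole
# dot-sfi directory ends up as /root/.sfi inside the vserver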
1248 def clean_sfi (self):
1249 self.run_in_guest("rm -rf /root/.sfi")
1253 "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
1254 test_user_sfa=TestUserSfa(self)
1255 if not test_user_sfa.add_user(): return False
1257 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1258 site_spec = self.locate_site (slice_spec['sitename'])
1259 test_site = TestSite(self,site_spec)
1260 test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
1261 if not test_slice_sfa.add_slice(): return False
1262 if not test_slice_sfa.create_slice(): return False
1265 def update_sfa(self):
1266 "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
1267 test_user_sfa=TestUserSfa(self)
1268 if not test_user_sfa.update_user(): return False
1270 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1271 site_spec = self.locate_site (slice_spec['sitename'])
1272 test_site = TestSite(self,site_spec)
1273 test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
1274 if not test_slice_sfa.update_slice(): return False
1278 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1279 sfa_spec=self.plc_spec['sfa']
1280 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1282 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1283 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1284 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1285 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
1287 @slice_mapper_options_sfa
1288 def check_slice_sfa(self):
1289 "tries to ssh-enter the SFA slice"
1292 def delete_sfa(self):
1293 "run sfi.py delete (on SM), sfi.py remove (on Registry)"
1294 test_user_sfa=TestUserSfa(self)
1295 success1=test_user_sfa.delete_user()
1296 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1297 site_spec = self.locate_site (slice_spec['sitename'])
1298 test_site = TestSite(self,site_spec)
1299 test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
1300 success2=test_slice_sfa.delete_slice()
1302 return success1 and success2
1306 return self.run_in_guest('service sfa stop')==0
1308 def populate (self):
1309 "creates random entries in the PLCAPI"
1310 # install the stress-test in the plc image
1311 location = "/usr/share/plc_api/plcsh_stress_test.py"
1312 remote="/vservers/%s/%s"%(self.vservername,location)
1313 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1315 command += " -- --preserve --short-names"
1316 local = (self.run_in_guest(command) == 0);
1317 # second run with --foreign
1318 command += ' --foreign'
1319 remote = (self.run_in_guest(command) == 0);
1320 return ( local and remote)
1322 def gather_logs (self):
1323 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1324 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1325 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1326 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1327 # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
1328 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1330 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1331 self.gather_var_logs ()
1333 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1334 self.gather_pgsql_logs ()
1336 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1337 for site_spec in self.plc_spec['sites']:
1338 test_site = TestSite (self,site_spec)
1339 for node_spec in site_spec['nodes']:
1340 test_node=TestNode(self,test_site,node_spec)
1341 test_node.gather_qemu_logs()
1343 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1344 self.gather_nodes_var_logs()
1346 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1347 self.gather_slivers_var_logs()
1350 def gather_slivers_var_logs(self):
1351 for test_sliver in self.all_sliver_objs():
1352 remote = test_sliver.tar_var_logs()
1353 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1354 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1355 utils.system(command)
1358 def gather_var_logs (self):
1359 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1360 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1361 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1362 utils.system(command)
1363 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1364 utils.system(command)
1366 def gather_pgsql_logs (self):
1367 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1368 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1369 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1370 utils.system(command)
1372 def gather_nodes_var_logs (self):
1373 for site_spec in self.plc_spec['sites']:
1374 test_site = TestSite (self,site_spec)
1375 for node_spec in site_spec['nodes']:
1376 test_node=TestNode(self,test_site,node_spec)
1377 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1378 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1379 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1380 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1381 utils.system(command)
1384 # returns the filename to use for sql dump/restore, using options.dbname if set
1385 def dbfile (self, database):
1386 # uses options.dbname if it is found
1388 name=self.options.dbname
1389 if not isinstance(name,StringTypes):
1392 t=datetime.datetime.now()
1395 return "/root/%s-%s.sql"%(database,name)
1398 'dump the planetlab5 DB in /root in the PLC - filename includes a timestamp'
1399 dump=self.dbfile("planetab5")
1400 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1401 utils.header('Dumped planetlab5 database in %s'%dump)
1404 def db_restore(self):
1405 'restore the planetlab5 DB - looks broken, but run -n might help'
1406 dump=self.dbfile("planetab5")
1407 ##stop httpd service
1408 self.run_in_guest('service httpd stop')
1409 # xxx - need another wrapper
1410 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1411 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1412 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1413 ##starting httpd service
1414 self.run_in_guest('service httpd start')
1416 utils.header('Database restored from ' + dump)
1419 def standby_1(): pass
1421 def standby_2(): pass
1423 def standby_3(): pass
1425 def standby_4(): pass
1427 def standby_5(): pass
1429 def standby_6(): pass
1431 def standby_7(): pass
1433 def standby_8(): pass
1435 def standby_9(): pass
1437 def standby_10(): pass
1439 def standby_11(): pass
1441 def standby_12(): pass
1443 def standby_13(): pass
1445 def standby_14(): pass
1447 def standby_15(): pass
1449 def standby_16(): pass
1451 def standby_17(): pass
1453 def standby_18(): pass
1455 def standby_19(): pass
1457 def standby_20(): pass