1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    """Sleep for *minutes* minutes -- backs the standby_<n>_mn pseudo-steps.

    NOTE(review): the handling of *dry_run* is not visible in this view
    (lines elided); presumably the sleep is skipped when it is set -- confirm.
    """
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    """Decorator factory for the standby_<n>_mn steps.

    NOTE(review): the inner wrapper 'def' line is elided in this view --
    the two lines below run inside it (hence the 'self' reference).
    """
    # derive the duration from the step name, e.g. standby_10_mn -> 10
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    """Decorator: apply the same-named TestNode method to every node in the spec.

    NOTE(review): the wrapper function definition and its overall/return
    bookkeeping are elided in this view; the loop below runs inside it.
    """
    # look up the TestNode method carrying the same name as the decorated step
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            # a single node failure makes the whole step fail
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_mapper (method):
    """Decorator: apply the same-named TestSlice method to every slice in the spec.

    NOTE(review): the wrapper function definition and its overall/return
    bookkeeping are elided in this view; the loop below runs inside it.
    """
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        # each slice spec names its site; resolve it to build the objects
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_sfa_mapper (method):
    """Decorator: apply the same-named TestSliceSfa method to every SFA slice spec.

    NOTE(review): the wrapper function definition and its overall/return
    bookkeeping are elided in this view; the loop below runs inside it.
    """
    slice_method = TestSliceSfa.__dict__[method.__name__]
    # SFA slice specs live under the 'sfa' sub-spec, unlike plain slices
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
89 'display', 'resources_pre', SEP,
90 'delete_vs','create_vs','install', 'configure', 'start', SEP,
91 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu', 'kill_all_qemus', 'start_node', SEP,
94 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
95 'configure_sfi@1', 'add_sfa_user@1', 'add_sfa@1', 'create_sfa@1', SEPSFA,
96 'update_sfa_user@1', 'update_sfa@1', 'view_sfa@1', SEPSFA,
97 # better use of time: do this now that the nodes are taking off
98 'plcsh_stress_test', SEP,
99 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEPSFA,
100 'check_slice_sfa@1', 'delete_sfa_user@1', 'delete_sfa_slices@1', SEPSFA,
101 'check_tcp', 'check_hooks', SEP,
102 'force_gather_logs', 'force_resources_post', SEP,
105 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
106 'stop', 'vs_start', SEP,
107 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
108 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
109 'clean_leases', 'list_leases', SEP,
111 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
112 'plcclean_sfa', 'dbclean_sfa', 'stop_sfa','uninstall_sfa', 'clean_sfi', SEP,
113 'db_dump' , 'db_restore', SEP,
114 'standby_1 through 20',SEP,
118 def printable_steps (list):
119 single_line=" ".join(list)+" "
120 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
122 def valid_step (step):
123 return step != SEP and step != SEPSFA
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    def check_whether_build_has_sfa (rpms_url):
        """Probe *rpms_url* for sfa- rpms; when none are found, demote every
        sfa step from default_steps into other_steps.

        NOTE(review): the early exit taken when the curl/grep probe succeeds
        is elided in this view.
        """
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        # move all steps containing 'sfa' from default_steps to other_steps
        sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
        TestPlc.other_steps += sfa_steps
        for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        """Attach the plc spec; set up the ssh helper, vserver coordinates and API proxy.

        NOTE(review): a couple of assignments (notably self.options) and the
        conditional guarding the 'chroot deprecated' raise are elided in this view.
        """
        self.plc_spec=plc_spec
        # ssh access to the host box, under the build's name
        self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        # API entry point always goes through the vserver's IP
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        # NOTE(review): sits under a non-visible branch (chroot vs vserver)
        raise Exception,'chroot-based myplc testing is deprecated'
        self.apiserver=TestApiserver(self.url,options.dry_run)
152 name=self.plc_spec['name']
153 return "%s.%s"%(name,self.vservername)
156 return self.plc_spec['hostname']
159 return self.test_ssh.is_local()
161 # define the API methods on this object through xmlrpc
162 # would help, but not strictly necessary
166 def actual_command_in_guest (self,command):
167 return self.test_ssh.actual_command(self.host_to_guest(command))
169 def start_guest (self):
170 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
172 def run_in_guest (self,command):
173 return utils.system(self.actual_command_in_guest(command))
175 def run_in_host (self,command):
176 return self.test_ssh.run_in_buildname(command)
178 #command gets run in the vserver
179 def host_to_guest(self,command):
180 return "vserver %s exec %s"%(self.vservername,command)
182 #command gets run in the vserver
183 def start_guest_in_host(self):
184 return "vserver %s start"%(self.vservername)
187 def run_in_guest_piped (self,local,remote):
188 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def auth_root (self):
        """Return the PLCAPI credential struct for the root user, from the plc spec."""
        # NOTE(review): the closing brace of this dict is elided in this view
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
    def locate_site (self,sitename):
        """Find a site spec by its name or its login_base; raise when not found.

        NOTE(review): the 'return site' bodies of both tests are elided in this view.
        """
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        """Find the node spec named *nodename*; raise when not found.

        NOTE(review): the return statement (presumably '(site,node)', per
        callers that unpack a pair) is elided in this view.
        """
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        """Find the node spec whose node_fields hostname matches; raise when not found.

        NOTE(review): the return statement (callers unpack '(site_spec,node_spec)')
        is elided in this view.
        """
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        """Find a key spec by name; raise when not found.

        NOTE(review): the 'return key' body of the test is elided in this view.
        """
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        """Find a slice spec by its slice_fields name; raise when not found.

        NOTE(review): the 'return slice' body of the test is elided in this view.
        """
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        """Build a TestSliver for every (slice,node) pair in the spec.

        NOTE(review): the 'result' list initialization and the final return
        are elided in this view.
        """
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        """Build the TestSliver object for the (nodename, slicename) pair."""
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # wrap the raw specs into their Test* counterparts
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        """Build a TestNode for the first node of the first slice in the spec.

        NOTE(review): the final 'return test_node' is elided in this view.
        """
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
255 def locate_first_sliver (self):
256 slice_spec=self.plc_spec['slices'][0]
257 slicename=slice_spec['slice_fields']['name']
258 nodename=slice_spec['nodenames'][0]
259 return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        """Group the qemu-based test nodes by the host box that carries them.

        NOTE(review): the initializations of 'tuples' and 'result', the
        result[box]=[] branch body, and the final return are elided in this view.
        """
        # maps on sites and nodes, return [ (host_box,test_node) ]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                # real (non-qemu) nodes have no host box and are skipped
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        # one line per host box, listing the nodes it carries
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
    # make this a valid step
    def kill_all_qemus(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
    # make this a valid step
    def list_all_qemus(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # brute force version: list every qemu on that host box
            TestBox(box,self.options.buildname).list_all_qemus()
    # list only the right qemus
    def list_qemus(self):
        'list qemu instances for our nodes'
        # NOTE(review): the per-node loop body is elided in this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
    # kill only the right qemus
    def kill_qemus(self):
        'kill the qemu instances for our nodes'
        # NOTE(review): the per-node loop body is elided in this view
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
323 #################### display config
325 "show test configuration after localization"
326 self.display_pass (1)
327 self.display_pass (2)
    # spec keys printed even when --verbose is off
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        """Dump the plc spec; two passes print different subsets of the keys.

        NOTE(review): several branch headers (the 'sites'/'slices'/'keys'
        dispatch and the pass selection) are elided in this view -- the
        indentation below is approximate.
        """
        for (key,val) in self.plc_spec.iteritems():
            # terse mode only shows a whitelist of keys
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
                self.display_site_spec(site)
                for node in site['nodes']:
                    self.display_node_spec(node)
            elif key=='initscripts':
                for initscript in val:
                    self.display_initscript_spec (initscript)
                    self.display_slice_spec (slice)
                    self.display_key_spec (key)
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        """Print a summary of one site spec (nodes, users, login_base, address).

        NOTE(review): several branch headers and loop lines are elided in this
        view -- the indentation below is approximate.
        """
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
                print '+ ','nodes : ',
                    print node['node_fields']['hostname'],'',
                    print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
378 def display_initscript_spec (self,initscript):
379 print '+ ======== initscript',initscript['initscript_fields']['name']
381 def display_key_spec (self,key):
382 print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        """Print a summary of one slice spec.

        NOTE(review): most branches of the key dispatch are elided in this view.
        """
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        """Print a one-line node summary; in verbose mode also pretty-print the whole spec."""
        # trailing commas keep the three prints on one output line
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        """Step-like entry point: print this plc's box/vserver/node mapping."""
        TestPlc.display_mapping_plc(self.plc_spec)
    def display_mapping_plc (plc_spec):
        """Print the plc's host/vserver/IP coordinates, then each node's mapping."""
        print '+ MyPLC',plc_spec['name']
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        # one line per node, across all sites
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        """Print which qemu box carries this node, and its hostname."""
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # the resources_* steps all delegate to the site-dependant
    # LocalTestResources module (imported lazily, so the tests still load
    # on hosts that do not provide it)
    def resources_pre (self):
        "run site-dependant pre-test script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_pre(self)

    def resources_post (self):
        "run site-dependant post-test script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_post(self)

    def resources_list (self):
        "run site-dependant list script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_list(self)

    def resources_release (self):
        "run site-dependant release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release(self)

    def resources_release_plc (self):
        "run site-dependant release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release_plc(self)

    def resources_release_qemu (self):
        "run site-dependant release script as defined in LocalTestResources"
        from LocalTestResources import local_resources
        return local_resources.step_release_qemu(self)
464 "vserver delete the test myplc"
465 self.run_in_host("vserver --silent %s delete"%self.vservername)
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def create_vs (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        # NOTE(review): the initialization of test_env_options is elided in this view
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # NOTE(review): the try/except wrapping this reverse DNS lookup is
        # elided in this view; the two prints below belong to its failure path
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        print "Cannot reverse lookup %s"%self.vserverip
        print "This is considered fatal, as this might pollute the test results"
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
512 "yum install myplc, noderepo, and the plain bootstrapfs"
514 # workaround for getting pgsql8.2 on centos5
515 if self.options.fcdistro == "centos5":
516 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
519 if self.options.personality == "linux32":
521 elif self.options.personality == "linux64":
524 raise Exception, "Unsupported personality %r"%self.options.personality
525 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
528 pkgs_list.append ("slicerepo-%s"%nodefamily)
529 pkgs_list.append ("myplc")
530 pkgs_list.append ("noderepo-%s"%nodefamily)
531 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
532 pkgs_string=" ".join(pkgs_list)
533 self.run_in_guest("yum -y install %s"%pkgs_string)
534 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
539 tmpname='%s.plc-config-tty'%(self.name())
540 fileconf=open(tmpname,'w')
541 for var in [ 'PLC_NAME',
546 'PLC_MAIL_SUPPORT_ADDRESS',
549 # Above line was added for integrating SFA Testing
555 'PLC_RESERVATION_GRANULARITY',
558 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
559 fileconf.write('w\n')
560 fileconf.write('q\n')
562 utils.system('cat %s'%tmpname)
563 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
564 utils.system('rm %s'%tmpname)
569 self.run_in_guest('service plc start')
574 self.run_in_guest('service plc stop')
578 "start the PLC vserver"
582 # stores the keys from the config for further use
583 def store_keys(self):
584 "stores test users ssh keys in keys/"
585 for key_spec in self.plc_spec['keys']:
586 TestKey(self,key_spec).store_key()
589 def clean_keys(self):
590 "removes keys cached in keys/"
591 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def fetch_keys(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): the 'dir' setup, mkdir branch, 'overall' init and
        # final return are elided in this view
        if not os.path.isdir(dir):
        vservername=self.vservername
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            # a single failed fetch fails the whole step
            if self.test_ssh.fetch(src,dst) != 0: overall=False
610 "create sites with PLCAPI"
611 return self.do_sites()
    def clean_sites (self):
        "delete sites with PLCAPI"
        # converse of the 'sites' step
        return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        """Create ('add') or delete the spec'ed sites and their users through PLCAPI.

        NOTE(review): the 'else:' introducing the creation branch is elided
        in this view -- the last three statements belong to it.
        """
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
    def clean_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        # fetch only the ids, that's all DeleteSite needs
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
641 "create nodes with PLCAPI"
642 return self.do_nodes()
    def clean_nodes (self):
        "delete nodes with PLCAPI"
        # converse of the 'nodes' step
        return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        """Create or delete the spec'ed nodes through PLCAPI, site by site.

        NOTE(review): the 'if action != "add":' / 'else:' headers around the
        two branches are elided in this view -- indentation is approximate.
        """
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        # thin wrapper over do_nodegroups
        return self.do_nodegroups("add")
    def clean_nodegroups (self):
        "delete nodegroups with PLCAPI"
        # converse of the 'nodegroups' step
        return self.do_nodegroups("delete")
673 def translate_timestamp (start,grain,timestamp):
674 if timestamp < TestPlc.YEAR: return start+timestamp*grain
675 else: return timestamp
678 def timestamp_printable (timestamp):
679 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
682 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
684 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
685 print 'API answered grain=',grain
686 start=(now/grain)*grain
688 # find out all nodes that are reservable
689 nodes=self.all_reservable_nodenames()
691 utils.header ("No reservable node found - proceeding without leases")
694 # attach them to the leases as specified in plc_specs
695 # this is where the 'leases' field gets interpreted as relative of absolute
696 for lease_spec in self.plc_spec['leases']:
697 # skip the ones that come with a null slice id
698 if not lease_spec['slice']: continue
699 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
700 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
701 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
702 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
703 if lease_addition['errors']:
704 utils.header("Cannot create leases, %s"%lease_addition['errors'])
707 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
708 (nodes,lease_spec['slice'],
709 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
710 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
714 def clean_leases (self):
715 "remove all leases in the myplc side"
716 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
717 utils.header("Cleaning leases %r"%lease_ids)
718 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
    def list_leases (self):
        """List leases known to the myplc; terse mode shows only current ones.

        NOTE(review): the 'now' computation and the 'for l in leases:' loop
        header are elided in this view.
        """
        leases = self.apiserver.GetLeases(self.auth_root())
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        """Create (or delete) the nodegroups implied by the node specs, via node tags.

        NOTE(review): many structural lines are elided in this view -- the
        groups_dict init, the add/delete dispatch, the if/else around tag-type
        and nodegroup existence, the try/except around tag addition/cleanup,
        and the overall/return bookkeeping. Indentation below is approximate.
        """
        # 1st pass to scan contents
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # accept a single name as well as a list of names
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        # 2nd pass: act on each gathered nodegroup
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                tag_type_id = tag_types[0]['tag_type_id']
                tag_type_id = self.apiserver.AddTagType(auth,
                                                        {'tagname':nodegroupname,
                                                         'description': 'for nodegroup %s'%nodegroupname,
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            # create the nodegroup itself when missing
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                    self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    traceback.print_exc()
                    print 'node',nodename,'seems to already have tag',nodegroupname
                # check that the tag really reads 'yes' now
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['value'])[0]['value']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
                # deletion path, best-effort
                print 'cleaning nodegroup',nodegroupname
                self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                traceback.print_exc()
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        """Collect (hostname, host_box) pairs for every node in the spec.

        NOTE(review): the 'node_infos' initialization and the final return
        are elided in this view.
        """
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
807 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        """Hostnames of the nodes whose node_type is 'reservable'.

        NOTE(review): the 'res' initialization and the final return are
        elided in this view.
        """
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        """Poll PLCAPI until every node reaches *target_boot_state*, or give up
        after *timeout_minutes*; be quiet during the first *silent_minutes*.

        NOTE(review): several structural lines are elided in this view -- the
        dry_run branch body, the polling 'while' header, the 'else:' opposite
        the target-state test, the time.sleep and the returns. Indentation
        below is approximate.
        """
        if self.options.dry_run:
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # pretend the target state was reached so it stops being checked
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        # push the grace period far away so the message shows only once
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # keep only the nodes that have not reached the target state yet
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
            # otherwise, sleep for a while
        # only useful in empty plcs
    def nodes_booted(self):
        """Step: wait (up to 30mn, quiet for 20) for all nodes to reach 'boot'."""
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        """Try to ssh into every node until all respond or *timeout_minutes* elapse.

        NOTE(review): several structural lines are elided in this view -- the
        debug/boot mode dispatch (which also sets 'message'), the retry loop
        header, the success test, the timeout returns and the sleep.
        Indentation below is approximate.
        """
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        # debug mode uses the plc-generated debug key fetched by fetch_keys...
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        # ...while production mode uses the config's first user key
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    # this node answers -- stop probing it
                    node_infos.remove(node_info)
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
            # otherwise, sleep for a while
        # only useful in empty plcs
    def nodes_ssh_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        # short timeout: the debug state comes up first
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
    def nodes_ssh_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        # longer timeout: full boot takes a while after debug mode
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
920 def init_node (self):
921 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
925 "all nodes: invoke GetBootMedium and store result locally"
928 def configure_qemu (self):
929 "all nodes: compute qemu config qemu.conf and store it locally"
932 def reinstall_node (self):
933 "all nodes: mark PLCAPI boot_state as reinstall"
936 def export_qemu (self):
937 "all nodes: push local node-dep directory on the qemu box"
940 ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        """Run the hooks/node unit tests on the first node."""
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        """Run the hooks/slice unit tests on the first sliver."""
        return self.locate_first_sliver().check_hooks()
946 def check_hooks (self):
947 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
948 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        """Check that each slice's spec'ed initscript has run on every one of its nodes.

        NOTE(review): the 'overall' bookkeeping, the 'continue' under the
        has_key test, and the final return are elided in this view.
        """
        for slice_spec in self.plc_spec['slices']:
            # slices without an initscript are skipped
            if not slice_spec.has_key('initscriptname'):
            initscript=slice_spec['initscriptname']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript(initscript):
    def check_initscripts(self):
        "check that the initscripts have triggered"
        # thin wrapper over do_check_initscripts
        return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    def clean_initscripts (self):
        """Delete the spec'ed initscripts through PLCAPI, best-effort.

        NOTE(review): the try:/except: headers around the deletion are elided
        in this view -- the last print belongs to the failure path.
        """
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
                print 'deletion went wrong - probably did not exist'
993 "create slices with PLCAPI"
994 return self.do_slices()
    def clean_slices (self):
        "delete slices with PLCAPI"
        # converse of the 'slices' step
        return self.do_slices("delete")
    def do_slices (self, action="add"):
        """Create or delete the spec'ed slices through PLCAPI.

        NOTE(review): the 'if action ...:' / 'else:' headers around the two
        branches are elided in this view -- indentation is approximate.
        """
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
                utils.header("Deleting slices in site %s"%test_site.name())
                test_slice.delete_slice()
                utils.pprint("Creating slice",slice)
                test_slice.create_slice()
                utils.header('Created Slice %s'%slice['slice_fields']['name'])
1015 def check_slice(self):
1016 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1020 def clear_known_hosts (self):
1021 "remove test nodes entries from the local known_hosts file"
1025 def start_node (self) :
1026 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1029 def check_tcp (self):
1030 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1031 specs = self.plc_spec['tcp_test']
1036 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1037 if not s_test_sliver.run_tcp_server(port,timeout=10):
1041 # idem for the client side
1042 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1043 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1047 def plcsh_stress_test (self):
1048 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1049 # install the stress-test in the plc image
1050 location = "/usr/share/plc_api/plcsh_stress_test.py"
1051 remote="/vservers/%s/%s"%(self.vservername,location)
1052 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1054 command += " -- --check"
1055 if self.options.size == 1:
1056 command += " --tiny"
1057 return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
1060 # in particular runs with --preserve (dont cleanup) and without --check
1061 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1064 def install_sfa(self):
1065 "yum install sfa, sfa-plc and sfa-client"
1067 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1068 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
1071 def dbclean_sfa(self):
1072 "thoroughly wipes off the SFA database"
1073 self.run_in_guest("sfa-nuke-plc.py")==0
1076 def plcclean_sfa(self):
1077 "cleans the PLC entries that were created as a side effect of running the script"
1079 sfa_spec=self.plc_spec['sfa']
1081 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1082 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1083 except: print "Slice %s already absent from PLC db"%slicename
1085 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1086 try: self.apiserver.DeletePerson(self.auth_root(),username)
1087 except: print "User %s already absent from PLC db"%username
1089 print "REMEMBER TO RUN import_sfa AGAIN"
1092 def uninstall_sfa(self):
1093 "uses rpm to uninstall sfa - ignore result"
1094 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1095 self.run_in_guest("rm -rf /var/lib/sfa")
1096 self.run_in_guest("rm -rf /etc/sfa")
1097 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1099 self.run_in_guest("rpm -e --noscripts sfa-plc")
1104 dirname="conf.%s"%self.plc_spec['name']
1105 if not os.path.isdir(dirname):
1106 utils.system("mkdir -p %s"%dirname)
1107 if not os.path.isdir(dirname):
1108 raise "Cannot create config dir for plc %s"%self.name()
1111 def conffile(self,filename):
1112 return "%s/%s"%(self.confdir(),filename)
1113 def confsubdir(self,dirname,clean):
1114 subdirname="%s/%s"%(self.confdir(),dirname)
1116 utils.system("rm -rf %s"%subdirname)
1117 if not os.path.isdir(subdirname):
1118 utils.system("mkdir -p %s"%subdirname)
1119 if not os.path.isdir(subdirname):
1120 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1123 def conffile_clean (self,filename):
1124 filename=self.conffile(filename)
1125 return utils.system("rm -rf %s"%filename)==0
    def configure_sfa(self):
        "run sfa-config-tty"
        # Drive sfa-config-tty non-interactively: build a script of
        # "e <var>\n<value>\n" pairs in a local temp file, then pipe it into
        # the tool inside the guest.
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        # NOTE(review): several entries of this list - and its closing "]:" -
        # are elided in this copy of the file; confirm against the full source
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     # 'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_PLC_DB_PASSWORD',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in ['SFA_API_DEBUG']:
            # NOTE(review): the default assignment (presumably val='false')
            # is elided in this copy
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        # w = write, R = restart services, q = quit
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        # NOTE(review): fileconf.close() appears to be elided here
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1159 def aggregate_xml_line(self):
1160 return '<aggregate addr="%s" hrn="%s" port="12346"/>' % \
1161 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1163 def registry_xml_line(self):
1164 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1165 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1168 # a cross step that takes all other plcs in argument
1169 def cross_configure_sfa(self, other_plcs):
1170 # of course with a single plc, other_plcs is an empty list
1173 agg_fname=self.conffile("agg.xml")
1174 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1175 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1176 utils.header ("(Over)wrote %s"%agg_fname)
1177 reg_fname=self.conffile("reg.xml")
1178 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1179 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1180 utils.header ("(Over)wrote %s"%reg_fname)
1181 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1182 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
1184 def import_sfa(self):
1186 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1187 return self.run_in_guest('sfa-import-plc.py')==0
1188 # not needed anymore
1189 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def start_sfa(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0
    def configure_sfi(self):
        # NOTE(review): because an assignment precedes it, the string on the
        # next line is NOT a docstring - it is evaluated and discarded, so the
        # framework that harvests step doc text will not see it
        sfa_spec=self.plc_spec['sfa']
        "sfi client configuration"
        # NOTE(review): the fileconf.close() calls between sections appear to
        # be elided throughout this copy of the file
        # (1) the PI's private key
        dir_name=self.confsubdir("dot-sfi",clean=True)
        file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
        fileconf=open(file_name,'w')
        fileconf.write (self.plc_spec['keys'][0]['private'])
        utils.header ("(Over)wrote %s"%file_name)
        # (2) sfi_config, the client-side settings
        file_name=dir_name + os.sep + 'sfi_config'
        fileconf=open(file_name,'w')
        SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
        fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
        fileconf.write('\n')
        SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
        fileconf.write ("SFI_USER='%s'"%SFI_USER)
        fileconf.write('\n')
        # registry listens on 12345, slice manager on 12347
        SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
        fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
        fileconf.write('\n')
        SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
        fileconf.write ("SFI_SM='%s'"%SFI_SM)
        fileconf.write('\n')
        utils.header ("(Over)wrote %s"%file_name)
        # (3) the person record
        file_name=dir_name + os.sep + 'person.xml'
        fileconf=open(file_name,'w')
        for record in sfa_spec['sfa_person_xml']:
            person_record=record
            fileconf.write(person_record)
            fileconf.write('\n')
        utils.header ("(Over)wrote %s"%file_name)
        # (4) the slice record
        file_name=dir_name + os.sep + 'slice.xml'
        fileconf=open(file_name,'w')
        for record in sfa_spec['sfa_slice_xml']:
            # NOTE(review): the assignment of slice_record (presumably
            # slice_record=record) is elided in this copy - as it stands,
            # slice_record below is undefined
            #slice_record=sfa_spec['sfa_slice_xml']
            fileconf.write(slice_record)
            fileconf.write('\n')
        utils.header ("(Over)wrote %s"%file_name)
        # (5) the slice rspec
        file_name=dir_name + os.sep + 'slice.rspec'
        fileconf=open(file_name,'w')
        for (key, value) in sfa_spec['sfa_slice_rspec'].items():
            # NOTE(review): the line building slice_rspec from (key,value)
            # is elided in this copy - as it stands, slice_rspec below is
            # undefined
            fileconf.write(slice_rspec)
            fileconf.write('\n')
        utils.header ("(Over)wrote %s"%file_name)
        # push to the remote root's .sfi
        location = "root/.sfi"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs(dir_name, remote, recursive=True)
1258 def clean_sfi (self):
1259 self.run_in_guest("rm -rf /root/.sfi")
    def add_sfa_user(self):
        "delegates to TestUserSfa.add_user()"
        return TestUserSfa(self).add_user()
    # NOTE(review): this is the doc string of a step (presumably add_sfa)
    # whose def line and body are elided in this copy of the file
    "run sfi.py add (on Registry)"
    def create_sfa(self):
        "run sfi.py create (on SM) for 1st-time creation"
        # NOTE(review): the body of this step is elided in this copy of the
        # file - confirm against the full source
    def update_sfa_user(self):
        "delegates to TestUserSfa.update_user()"
        return TestUserSfa(self).update_user()
    def update_sfa(self):
        "run sfi.py create (on SM) on existing object"
        # NOTE(review): the body of this step is elided in this copy of the
        # file - confirm against the full source
    # NOTE(review): the def line of this step, and the leading `return` of
    # the chained expression below, are elided in this copy of the file
    "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
    sfa_spec=self.plc_spec['sfa']
    auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    # all four sfi.py invocations must succeed for the step to pass
    self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
    self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
    self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
    self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
    def check_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        # NOTE(review): the body of this step is elided in this copy of the
        # file; presumably dispatched through slice_sfa_mapper - confirm
1298 def delete_sfa_user(self):
1299 "run sfi.py delete (on SM) for user"
1300 test_user_sfa=TestUserSfa(self)
1301 return test_user_sfa.delete_user()
    def delete_sfa_slices(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        # NOTE(review): the body of this step is elided in this copy of the
        # file - confirm against the full source
    # NOTE(review): the def line of this step (presumably stop_sfa) is elided
    # in this copy of the file; as it stands the ==0 comparison below is
    # computed and discarded
    self.run_in_guest('service sfa stop')==0
1313 def populate (self):
1314 "creates random entries in the PLCAPI"
1315 # install the stress-test in the plc image
1316 location = "/usr/share/plc_api/plcsh_stress_test.py"
1317 remote="/vservers/%s/%s"%(self.vservername,location)
1318 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1320 command += " -- --preserve --short-names"
1321 local = (self.run_in_guest(command) == 0);
1322 # second run with --foreign
1323 command += ' --foreign'
1324 remote = (self.run_in_guest(command) == 0);
1325 return ( local and remote)
1327 def gather_logs (self):
1328 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1329 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1330 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1331 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1332 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1333 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1335 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1336 self.gather_var_logs ()
1338 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1339 self.gather_pgsql_logs ()
1341 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1342 for site_spec in self.plc_spec['sites']:
1343 test_site = TestSite (self,site_spec)
1344 for node_spec in site_spec['nodes']:
1345 test_node=TestNode(self,test_site,node_spec)
1346 test_node.gather_qemu_logs()
1348 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1349 self.gather_nodes_var_logs()
1351 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1352 self.gather_slivers_var_logs()
1355 def gather_slivers_var_logs(self):
1356 for test_sliver in self.all_sliver_objs():
1357 remote = test_sliver.tar_var_logs()
1358 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1359 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1360 utils.system(command)
1363 def gather_var_logs (self):
1364 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1365 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1366 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1367 utils.system(command)
1368 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1369 utils.system(command)
1371 def gather_pgsql_logs (self):
1372 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1373 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1374 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1375 utils.system(command)
1377 def gather_nodes_var_logs (self):
1378 for site_spec in self.plc_spec['sites']:
1379 test_site = TestSite (self,site_spec)
1380 for node_spec in site_spec['nodes']:
1381 test_node=TestNode(self,test_site,node_spec)
1382 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1383 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1384 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1385 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1386 utils.system(command)
1389 # returns the filename to use for sql dump/restore, using options.dbname if set
1390 def dbfile (self, database):
1391 # uses options.dbname if it is found
1393 name=self.options.dbname
1394 if not isinstance(name,StringTypes):
1397 t=datetime.datetime.now()
1400 return "/root/%s-%s.sql"%(database,name)
1403 'dump the planetlab5 DB in /root in the PLC - filename has time'
1404 dump=self.dbfile("planetab5")
1405 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1406 utils.header('Dumped planetlab5 database in %s'%dump)
1409 def db_restore(self):
1410 'restore the planetlab5 DB - looks broken, but run -n might help'
1411 dump=self.dbfile("planetab5")
1412 ##stop httpd service
1413 self.run_in_guest('service httpd stop')
1414 # xxx - need another wrapper
1415 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1416 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1417 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1418 ##starting httpd service
1419 self.run_in_guest('service httpd start')
1421 utils.header('Database restored from ' + dump)
1424 def standby_1(): pass
1426 def standby_2(): pass
1428 def standby_3(): pass
1430 def standby_4(): pass
1432 def standby_5(): pass
1434 def standby_6(): pass
1436 def standby_7(): pass
1438 def standby_8(): pass
1440 def standby_9(): pass
1442 def standby_10(): pass
1444 def standby_11(): pass
1446 def standby_12(): pass
1448 def standby_13(): pass
1450 def standby_14(): pass
1452 def standby_15(): pass
1454 def standby_16(): pass
1456 def standby_17(): pass
1458 def standby_18(): pass
1460 def standby_19(): pass
1462 def standby_20(): pass