1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
# decorator-style helper: wraps the TestNode method of the same name into a
# TestPlc step that maps it over every node of every site in plc_spec
41 def node_mapper (method):
# NOTE(review): wrapper header lines are elided here (presumably
# 'def actual(self):' and an 'overall=True' initializer) -- confirm
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
# any single failing node makes the whole step fail
49 if not node_method(test_node): overall=False
# NOTE(review): 'return overall' elided (original line 50)
51 # restore the doc text
52 actual.__doc__=method.__doc__
# NOTE(review): 'return actual' presumably follows (elided)
# same idea as node_mapper, for TestSlice methods; the wrapped method
# receives (test_slice, options)
55 def slice_mapper_options (method):
# NOTE(review): wrapper header ('def actual(self):', 'overall=True') elided
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
# a slice spec names its site; resolve it to build the TestSlice
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
# NOTE(review): 'return overall' elided
65 # restore the doc text
66 actual.__doc__=method.__doc__
# NOTE(review): 'return actual' presumably follows (elided)
# SFA flavour of slice_mapper_options, iterating on the sfa slice specs
69 def slice_mapper_options_sfa (method):
# NOTE(review): wrapper header ('def actual(self):', 'overall=True') elided
73 slice_method = TestSliceSfa.__dict__[method.__name__]
74 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
75 site_spec = self.locate_site (slice_spec['sitename'])
76 test_site = TestSite(self,site_spec)
# BUG(review): 'test_plc' is not defined anywhere visible -- the two
# sibling mappers pass 'self' here; as written this looks like a
# NameError at runtime. Confirm and change to 'self'.
77 test_slice=TestSliceSfa(test_plc,test_site,slice_spec)
78 if not slice_method(test_slice,self.options): overall=False
80 # restore the doc text
81 actual.__doc__=method.__doc__
# NOTE(review): 'return actual' presumably follows (elided)
90 'display', 'resources_pre', SEP,
91 'delete_vs','create_vs','install', 'configure', 'start', SEP,
92 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
93 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
94 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
95 'kill_all_qemus', 'start_node', SEP,
96 # better use of time: do this now that the nodes are taking off
97 'plcsh_stress_test', SEP,
98 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
99 'configure_sfi', 'add_sfa', 'update_sfa', 'view_sfa', SEPSFA,
100 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEPSFA,
101 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEPSFA,
102 'check_tcp', 'check_hooks', SEP,
103 'force_gather_logs', 'force_resources_post', SEP,
106 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
107 'stop', 'vs_start', SEP,
108 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
109 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
110 'clean_leases', 'list_leases', SEP,
112 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
113 'plcclean_sfa', 'dbclean_sfa', 'uninstall_sfa', 'clean_sfi', SEP,
114 'db_dump' , 'db_restore', SEP,
115 'standby_1 through 20',SEP,
def printable_steps (steps):
    """Render a list of step names as a single space-separated string.

    The SEP / SEPSFA pseudo-steps are replaced by a backslash-newline so the
    result can be pasted back onto a command line.
    (The parameter was renamed: the original shadowed the builtin 'list'.)
    """
    single_line = " ".join(steps) + " "
    return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
def valid_step (step):
    "a step name is valid unless it is one of the separator pseudo-steps"
    return step not in (SEP, SEPSFA)
126 # turn off the sfa-related steps when build has skipped SFA
127 # this is originally for centos5 as recent SFAs won't build on this platformb
129 def check_whether_build_has_sfa (rpms_url):
130 # warning, we're now building 'sface' so let's be a bit more picky
131 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
132 # full builds are expected to return with 0 here
134 # move all steps containing 'sfa' from default_steps to other_steps
135 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
136 TestPlc.other_steps += sfa_steps
137 for step in sfa_steps: TestPlc.default_steps.remove(step)
139 def __init__ (self,plc_spec,options):
140 self.plc_spec=plc_spec
142 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
144 self.vserverip=plc_spec['vserverip']
145 self.vservername=plc_spec['vservername']
146 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
149 raise Exception,'chroot-based myplc testing is deprecated'
150 self.apiserver=TestApiserver(self.url,options.dry_run)
153 name=self.plc_spec['name']
154 return "%s.%s"%(name,self.vservername)
157 return self.plc_spec['hostname']
160 return self.test_ssh.is_local()
162 # define the API methods on this object through xmlrpc
163 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    "wrap <command> for the vserver first, then for the ssh transport"
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    "run the vserver-start command on the host box, through ssh"
    start_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(start_command))
def run_in_guest (self,command):
    "execute <command> inside the plc vserver; returns the shell exit code"
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    "execute <command> on the host box itself (not inside the vserver)"
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
# command gets run in the vserver
def host_to_guest(self,command):
    "rewrite <command> so that, run on the host box, it executes inside the vserver"
    return "vserver %s exec %s" % (self.vservername, command)
# command gets run in the vserver
def start_guest_in_host(self):
    "the host-side command line that boots the vserver"
    return "vserver %s start" % self.vservername
def run_in_guest_piped (self,local,remote):
    "run <local> on the host box and pipe its output into <remote> inside the vserver"
    remote_part = self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True)
    return utils.system(local + " | " + remote_part)
# the root credentials dict expected by the PLCAPI xmlrpc methods,
# built from the spec's PLC_ROOT_USER / PLC_ROOT_PASSWORD / role
191 def auth_root (self):
192 return {'Username':self.plc_spec['PLC_ROOT_USER'],
193 'AuthMethod':'password',
194 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
195 'Role' : self.plc_spec['role']
# NOTE(review): the closing '}' (original line 196) is elided from this view
# find a site spec by its display name or its login_base
197 def locate_site (self,sitename):
198 for site in self.plc_spec['sites']:
199 if site['site_fields']['name'] == sitename:
# NOTE(review): 'return site' (line 200) elided
201 if site['site_fields']['login_base'] == sitename:
# NOTE(review): 'return site' (line 202) elided
203 raise Exception,"Cannot locate site %s"%sitename
# find a node spec by its symbolic name; by the elided return it
# presumably yields the (site,node) spec pair
205 def locate_node (self,nodename):
206 for site in self.plc_spec['sites']:
207 for node in site['nodes']:
208 if node['name'] == nodename:
# NOTE(review): return statement (line 209) elided -- callers such as
# locate_sliver_obj unpack '(site,node)'
210 raise Exception,"Cannot locate node %s"%nodename
# same as locate_node, but keyed on the real hostname in node_fields
212 def locate_hostname (self,hostname):
213 for site in self.plc_spec['sites']:
214 for node in site['nodes']:
215 if node['node_fields']['hostname'] == hostname:
# NOTE(review): return statement (line 216) elided -- callers unpack
# '(site_spec,node_spec)'
217 raise Exception,"Cannot locate hostname %s"%hostname
# find a key spec by name in plc_spec['keys']
219 def locate_key (self,keyname):
220 for key in self.plc_spec['keys']:
221 if key['name'] == keyname:
# NOTE(review): 'return key' (line 222) elided
223 raise Exception,"Cannot locate key %s"%keyname
# find a slice spec by its slice_fields name
225 def locate_slice (self, slicename):
226 for slice in self.plc_spec['slices']:
227 if slice['slice_fields']['name'] == slicename:
# NOTE(review): 'return slice' (line 228) elided
229 raise Exception,"Cannot locate slice %s"%slicename
231 def all_sliver_objs (self):
233 for slice_spec in self.plc_spec['slices']:
234 slicename = slice_spec['slice_fields']['name']
235 for nodename in slice_spec['nodenames']:
236 result.append(self.locate_sliver_obj (nodename,slicename))
239 def locate_sliver_obj (self,nodename,slicename):
240 (site,node) = self.locate_node(nodename)
241 slice = self.locate_slice (slicename)
243 test_site = TestSite (self, site)
244 test_node = TestNode (self, test_site,node)
245 # xxx the slice site is assumed to be the node site - mhh - probably harmless
246 test_slice = TestSlice (self, test_site, slice)
247 return TestSliver (self, test_node, test_slice)
249 def locate_first_node(self):
250 nodename=self.plc_spec['slices'][0]['nodenames'][0]
251 (site,node) = self.locate_node(nodename)
252 test_site = TestSite (self, site)
253 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    "the sliver object for the first node of the first slice in the spec"
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
262 # all different hostboxes used in this plc
263 def gather_hostBoxes(self):
264 # maps on sites and nodes, return [ (host_box,test_node) ]
# NOTE(review): 'tuples=[]' initializer (line 265) elided
266 for site_spec in self.plc_spec['sites']:
267 test_site = TestSite (self,site_spec)
268 for node_spec in site_spec['nodes']:
269 test_node = TestNode (self, test_site, node_spec)
# real (non-qemu) nodes have no host box, so they are skipped
270 if not test_node.is_real():
271 tuples.append( (test_node.host_box(),test_node) )
272 # transform into a dict { 'host_box' -> [ test_node .. ] }
# NOTE(review): 'result={}' initializer (line 273) elided
274 for (box,node) in tuples:
275 if not result.has_key(box):
# NOTE(review): 'result[box]=[]' (lines 276-277) elided
278 result[box].append(node)
# NOTE(review): 'return result' (lines 279-280) elided
281 # a step for checking this stuff
282 def show_boxes (self):
283 'print summary of nodes location'
284 for (box,nodes) in self.gather_hostBoxes().iteritems():
285 print box,":"," + ".join( [ node.name() for node in nodes ] )
288 # make this a valid step
289 def kill_all_qemus(self):
290 'kill all qemu instances on the qemu boxes involved by this setup'
291 # this is the brute force version, kill all qemus on that host box
292 for (box,nodes) in self.gather_hostBoxes().iteritems():
293 # pass the first nodename, as we don't push template-qemu on testboxes
294 nodedir=nodes[0].nodedir()
295 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
298 # make this a valid step
299 def list_all_qemus(self):
300 'list all qemu instances on the qemu boxes involved by this setup'
301 for (box,nodes) in self.gather_hostBoxes().iteritems():
302 # this is the brute force version, kill all qemus on that host box
303 TestBox(box,self.options.buildname).list_all_qemus()
306 # kill only the right qemus
307 def list_qemus(self):
308 'list qemu instances for our nodes'
309 for (box,nodes) in self.gather_hostBoxes().iteritems():
310 # the fine-grain version
315 # kill only the right qemus
316 def kill_qemus(self):
317 'kill the qemu instances for our nodes'
318 for (box,nodes) in self.gather_hostBoxes().iteritems():
319 # the fine-grain version
324 #################### display config
326 "show test configuration after localization"
327 self.display_pass (1)
328 self.display_pass (2)
# spec keys that display_pass prints even when --verbose is off
332 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
333 def display_pass (self,passno):
334 for (key,val) in self.plc_spec.iteritems():
335 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
339 self.display_site_spec(site)
340 for node in site['nodes']:
341 self.display_node_spec(node)
342 elif key=='initscripts':
343 for initscript in val:
344 self.display_initscript_spec (initscript)
347 self.display_slice_spec (slice)
350 self.display_key_spec (key)
352 if key not in ['sites','initscripts','slices','keys', 'sfa']:
353 print '+ ',key,':',val
355 def display_site_spec (self,site):
356 print '+ ======== site',site['site_fields']['name']
357 for (k,v) in site.iteritems():
358 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
361 print '+ ','nodes : ',
363 print node['node_fields']['hostname'],'',
369 print user['name'],'',
371 elif k == 'site_fields':
372 print '+ login_base',':',v['login_base']
373 elif k == 'address_fields':
379 def display_initscript_spec (self,initscript):
380 print '+ ======== initscript',initscript['initscript_fields']['name']
382 def display_key_spec (self,key):
383 print '+ ======== key',key['name']
385 def display_slice_spec (self,slice):
386 print '+ ======== slice',slice['slice_fields']['name']
387 for (k,v) in slice.iteritems():
400 elif k=='slice_fields':
401 print '+ fields',':',
402 print 'max_nodes=',v['max_nodes'],
407 def display_node_spec (self,node):
408 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
409 print "hostname=",node['node_fields']['hostname'],
410 print "ip=",node['interface_fields']['ip']
411 if self.options.verbose:
412 utils.pprint("node details",node,depth=3)
414 # another entry point for just showing the boxes involved
415 def display_mapping (self):
416 TestPlc.display_mapping_plc(self.plc_spec)
420 def display_mapping_plc (plc_spec):
421 print '+ MyPLC',plc_spec['name']
422 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
423 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
424 for site_spec in plc_spec['sites']:
425 for node_spec in site_spec['nodes']:
426 TestPlc.display_mapping_node(node_spec)
429 def display_mapping_node (node_spec):
430 print '+ NODE %s'%(node_spec['name'])
431 print '+\tqemu box %s'%node_spec['host_box']
432 print '+\thostname=%s'%node_spec['node_fields']['hostname']
def resources_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    # deferred import: LocalTestResources only exists on deployment sites
    import LocalTestResources
    return LocalTestResources.local_resources.step_pre(self)
def resources_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    # deferred import: LocalTestResources only exists on deployment sites
    import LocalTestResources
    return LocalTestResources.local_resources.step_post(self)
def resources_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    # deferred import: LocalTestResources only exists on deployment sites
    import LocalTestResources
    return LocalTestResources.local_resources.step_list(self)
def resources_release (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources only exists on deployment sites
    import LocalTestResources
    return LocalTestResources.local_resources.step_release(self)
def resources_release_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources only exists on deployment sites
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_plc(self)
def resources_release_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deferred import: LocalTestResources only exists on deployment sites
    import LocalTestResources
    return LocalTestResources.local_resources.step_release_qemu(self)
465 "vserver delete the test myplc"
466 self.run_in_host("vserver --silent %s delete"%self.vservername)
470 # historically the build was being fetched by the tests
471 # now the build pushes itself as a subdir of the tests workdir
472 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
473 def create_vs (self):
474 "vserver creation (no install done)"
475 # push the local build/ dir to the testplc box
477 # a full path for the local calls
478 build_dir=os.path.dirname(sys.argv[0])
479 # sometimes this is empty - set to "." in such a case
480 if not build_dir: build_dir="."
481 build_dir += "/build"
483 # use a standard name - will be relative to remote buildname
485 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
486 self.test_ssh.rmdir(build_dir)
487 self.test_ssh.copy(build_dir,recursive=True)
488 # the repo url is taken from arch-rpms-url
489 # with the last step (i386) removed
490 repo_url = self.options.arch_rpms_url
491 for level in [ 'arch' ]:
492 repo_url = os.path.dirname(repo_url)
493 # pass the vbuild-nightly options to vtest-init-vserver
495 test_env_options += " -p %s"%self.options.personality
496 test_env_options += " -d %s"%self.options.pldistro
497 test_env_options += " -f %s"%self.options.fcdistro
498 script="vtest-init-vserver.sh"
499 vserver_name = self.vservername
500 vserver_options="--netdev eth0 --interface %s"%self.vserverip
502 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
503 vserver_options += " --hostname %s"%vserver_hostname
505 print "Cannot reverse lookup %s"%self.vserverip
506 print "This is considered fatal, as this might pollute the test results"
508 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
509 return self.run_in_host(create_vserver) == 0
513 "yum install myplc, noderepo, and the plain bootstrapfs"
515 # workaround for getting pgsql8.2 on centos5
516 if self.options.fcdistro == "centos5":
517 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
520 if self.options.personality == "linux32":
522 elif self.options.personality == "linux64":
525 raise Exception, "Unsupported personality %r"%self.options.personality
526 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
529 pkgs_list.append ("slicerepo-%s"%nodefamily)
530 pkgs_list.append ("myplc")
531 pkgs_list.append ("noderepo-%s"%nodefamily)
532 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
533 pkgs_string=" ".join(pkgs_list)
534 self.run_in_guest("yum -y install %s"%pkgs_string)
535 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
540 tmpname='%s.plc-config-tty'%(self.name())
541 fileconf=open(tmpname,'w')
542 for var in [ 'PLC_NAME',
547 'PLC_MAIL_SUPPORT_ADDRESS',
550 # Above line was added for integrating SFA Testing
556 'PLC_RESERVATION_GRANULARITY',
559 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
560 fileconf.write('w\n')
561 fileconf.write('q\n')
563 utils.system('cat %s'%tmpname)
564 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
565 utils.system('rm %s'%tmpname)
570 self.run_in_guest('service plc start')
575 self.run_in_guest('service plc stop')
579 "start the PLC vserver"
583 # stores the keys from the config for further use
584 def store_keys(self):
585 "stores test users ssh keys in keys/"
586 for key_spec in self.plc_spec['keys']:
587 TestKey(self,key_spec).store_key()
def clean_keys(self):
    "removes keys cached in keys/"
    # BUG FIX: the original called os.path(sys.argv[0]) -- os.path is a
    # module, not a callable, so this raised TypeError at runtime.
    # The intent is the directory that holds the running test script.
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
    # NOTE(review): implicitly returns None while step methods are expected
    # to return a boolean -- confirm whether a 'return True' is wanted
594 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
595 # for later direct access to the nodes
596 def fetch_keys(self):
597 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
599 if not os.path.isdir(dir):
601 vservername=self.vservername
603 prefix = 'debug_ssh_key'
604 for ext in [ 'pub', 'rsa' ] :
605 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
606 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
607 if self.test_ssh.fetch(src,dst) != 0: overall=False
611 "create sites with PLCAPI"
612 return self.do_sites()
def clean_sites (self):
    "delete sites with PLCAPI"
    # same machinery as sites(), only with the delete action
    return self.do_sites(action="delete")
618 def do_sites (self,action="add"):
619 for site_spec in self.plc_spec['sites']:
620 test_site = TestSite (self,site_spec)
621 if (action != "add"):
622 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
623 test_site.delete_site()
624 # deleted with the site
625 #test_site.delete_users()
628 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
629 test_site.create_site()
630 test_site.create_users()
633 def clean_all_sites (self):
634 "Delete all sites in PLC, and related objects"
635 print 'auth_root',self.auth_root()
636 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
637 for site_id in site_ids:
638 print 'Deleting site_id',site_id
639 self.apiserver.DeleteSite(self.auth_root(),site_id)
642 "create nodes with PLCAPI"
643 return self.do_nodes()
def clean_nodes (self):
    "delete nodes with PLCAPI"
    # same machinery as nodes(), only with the delete action
    return self.do_nodes(action="delete")
648 def do_nodes (self,action="add"):
649 for site_spec in self.plc_spec['sites']:
650 test_site = TestSite (self,site_spec)
652 utils.header("Deleting nodes in site %s"%test_site.name())
653 for node_spec in site_spec['nodes']:
654 test_node=TestNode(self,test_site,node_spec)
655 utils.header("Deleting %s"%test_node.name())
656 test_node.delete_node()
658 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
659 for node_spec in site_spec['nodes']:
660 utils.pprint('Creating node %s'%node_spec,node_spec)
661 test_node = TestNode (self,test_site,node_spec)
662 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # delegate to the add/delete-agnostic worker
    return self.do_nodegroups("add")
def clean_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # delegate to the add/delete-agnostic worker
    return self.do_nodegroups("delete")
def translate_timestamp (start,grain,timestamp):
    "values smaller than TestPlc.YEAR are relative timestamps: scale by grain and offset from start"
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    "human-readable UTC rendering of a POSIX timestamp"
    utc_fields = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', utc_fields)
683 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
685 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
686 print 'API answered grain=',grain
687 start=(now/grain)*grain
689 # find out all nodes that are reservable
690 nodes=self.all_reservable_nodenames()
692 utils.header ("No reservable node found - proceeding without leases")
695 # attach them to the leases as specified in plc_specs
696 # this is where the 'leases' field gets interpreted as relative of absolute
697 for lease_spec in self.plc_spec['leases']:
698 # skip the ones that come with a null slice id
699 if not lease_spec['slice']: continue
700 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
701 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
702 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
703 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
704 if lease_addition['errors']:
705 utils.header("Cannot create leases, %s"%lease_addition['errors'])
708 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
709 (nodes,lease_spec['slice'],
710 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
711 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
def clean_leases (self):
    "remove all leases in the myplc side"
    lease_ids = []
    for lease in self.apiserver.GetLeases(self.auth_root()):
        lease_ids.append(lease['lease_id'])
    utils.header("Cleaning leases %r"%lease_ids)
    self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
722 def list_leases (self):
723 "list all leases known to the myplc"
724 leases = self.apiserver.GetLeases(self.auth_root())
727 current=l['t_until']>=now
728 if self.options.verbose or current:
729 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
730 TestPlc.timestamp_printable(l['t_from']),
731 TestPlc.timestamp_printable(l['t_until'])))
734 # create nodegroups if needed, and populate
735 def do_nodegroups (self, action="add"):
736 # 1st pass to scan contents
738 for site_spec in self.plc_spec['sites']:
739 test_site = TestSite (self,site_spec)
740 for node_spec in site_spec['nodes']:
741 test_node=TestNode (self,test_site,node_spec)
742 if node_spec.has_key('nodegroups'):
743 nodegroupnames=node_spec['nodegroups']
744 if isinstance(nodegroupnames,StringTypes):
745 nodegroupnames = [ nodegroupnames ]
746 for nodegroupname in nodegroupnames:
747 if not groups_dict.has_key(nodegroupname):
748 groups_dict[nodegroupname]=[]
749 groups_dict[nodegroupname].append(test_node.name())
750 auth=self.auth_root()
752 for (nodegroupname,group_nodes) in groups_dict.iteritems():
754 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
755 # first, check if the nodetagtype is here
756 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
758 tag_type_id = tag_types[0]['tag_type_id']
760 tag_type_id = self.apiserver.AddTagType(auth,
761 {'tagname':nodegroupname,
762 'description': 'for nodegroup %s'%nodegroupname,
765 print 'located tag (type)',nodegroupname,'as',tag_type_id
767 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
769 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
770 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
771 # set node tag on all nodes, value='yes'
772 for nodename in group_nodes:
774 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
776 traceback.print_exc()
777 print 'node',nodename,'seems to already have tag',nodegroupname
780 expect_yes = self.apiserver.GetNodeTags(auth,
781 {'hostname':nodename,
782 'tagname':nodegroupname},
783 ['value'])[0]['value']
784 if expect_yes != "yes":
785 print 'Mismatch node tag on node',nodename,'got',expect_yes
788 if not self.options.dry_run:
789 print 'Cannot find tag',nodegroupname,'on node',nodename
793 print 'cleaning nodegroup',nodegroupname
794 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
796 traceback.print_exc()
800 # return a list of tuples (nodename,qemuname)
801 def all_node_infos (self) :
803 for site_spec in self.plc_spec['sites']:
804 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
805 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    "the hostnames of all nodes in this plc"
    return [ hostname for (hostname,qemuname) in self.all_node_infos() ]
809 def all_reservable_nodenames (self):
811 for site_spec in self.plc_spec['sites']:
812 for node_spec in site_spec['nodes']:
813 node_fields=node_spec['node_fields']
814 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
815 res.append(node_fields['hostname'])
818 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
819 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
820 if self.options.dry_run:
824 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
825 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
826 # the nodes that haven't checked yet - start with a full list and shrink over time
827 tocheck = self.all_hostnames()
828 utils.header("checking nodes %r"%tocheck)
829 # create a dict hostname -> status
830 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
833 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
835 for array in tocheck_status:
836 hostname=array['hostname']
837 boot_state=array['boot_state']
838 if boot_state == target_boot_state:
839 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
841 # if it's a real node, never mind
842 (site_spec,node_spec)=self.locate_hostname(hostname)
843 if TestNode.is_real_model(node_spec['node_fields']['model']):
844 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
846 boot_state = target_boot_state
847 elif datetime.datetime.now() > graceout:
848 utils.header ("%s still in '%s' state"%(hostname,boot_state))
849 graceout=datetime.datetime.now()+datetime.timedelta(1)
850 status[hostname] = boot_state
852 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
855 if datetime.datetime.now() > timeout:
856 for hostname in tocheck:
857 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
859 # otherwise, sleep for a while
861 # only useful in empty plcs
def nodes_booted(self):
    "shortcut: wait for all nodes to reach the 'boot' state"
    target = 'boot'
    return self.nodes_check_boot_state(target,timeout_minutes=30,silent_minutes=20)
867 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
869 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
870 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
871 vservername=self.vservername
874 local_key = "keys/%(vservername)s-debug.rsa"%locals()
877 local_key = "keys/key1.rsa"
878 node_infos = self.all_node_infos()
879 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
880 for (nodename,qemuname) in node_infos:
881 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
882 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
883 (timeout_minutes,silent_minutes,period))
885 for node_info in node_infos:
886 (hostname,qemuname) = node_info
887 # try to run 'hostname' in the node
888 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
889 # don't spam logs - show the command only after the grace period
890 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
892 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
894 node_infos.remove(node_info)
896 # we will have tried real nodes once, in case they're up - but if not, just skip
897 (site_spec,node_spec)=self.locate_hostname(hostname)
898 if TestNode.is_real_model(node_spec['node_fields']['model']):
899 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
900 node_infos.remove(node_info)
903 if datetime.datetime.now() > timeout:
904 for (hostname,qemuname) in node_infos:
905 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
907 # otherwise, sleep for a while
909 # only useful in empty plcs
def nodes_ssh_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    return self.check_nodes_ssh (timeout_minutes=10, silent_minutes=5, debug=True)
def nodes_ssh_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    return self.check_nodes_ssh (timeout_minutes=40, silent_minutes=15, debug=False)
921 def init_node (self):
922 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
926 "all nodes: invoke GetBootMedium and store result locally"
929 def configure_qemu (self):
930 "all nodes: compute qemu config qemu.conf and store it locally"
933 def reinstall_node (self):
934 "all nodes: mark PLCAPI boot_state as reinstall"
937 def export_qemu (self):
938 "all nodes: push local node-dep directory on the qemu box"
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    "run the hooks/node unit tests on the first node"
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    "run the hooks/slice unit tests on the first sliver"
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    node_ok = self.check_hooks_node()
    # short-circuits exactly like the original 'and'
    return node_ok and self.check_hooks_sliver()
952 def do_check_initscripts(self):
954 for slice_spec in self.plc_spec['slices']:
955 if not slice_spec.has_key('initscriptname'):
957 initscript=slice_spec['initscriptname']
958 for nodename in slice_spec['nodenames']:
959 (site,node) = self.locate_node (nodename)
960 # xxx - passing the wrong site - probably harmless
961 test_site = TestSite (self,site)
962 test_slice = TestSlice (self,test_site,slice_spec)
963 test_node = TestNode (self,test_site,node)
964 test_sliver = TestSliver (self, test_node, test_slice)
965 if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper around the worker
    return self.do_check_initscripts()
973 def initscripts (self):
974 "create initscripts with PLCAPI"
975 for initscript in self.plc_spec['initscripts']:
976 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
977 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
980 def clean_initscripts (self):
981 "delete initscripts with PLCAPI"
982 for initscript in self.plc_spec['initscripts']:
983 initscript_name = initscript['initscript_fields']['name']
984 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
986 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
987 print initscript_name,'deleted'
989 print 'deletion went wrong - probably did not exist'
994 "create slices with PLCAPI"
995 return self.do_slices()
def clean_slices (self):
    "delete slices with PLCAPI"
    # same machinery as slices(), only with the delete action
    return self.do_slices("delete")
1001 def do_slices (self, action="add"):
1002 for slice in self.plc_spec['slices']:
1003 site_spec = self.locate_site (slice['sitename'])
1004 test_site = TestSite(self,site_spec)
1005 test_slice=TestSlice(self,test_site,slice)
1007 utils.header("Deleting slices in site %s"%test_site.name())
1008 test_slice.delete_slice()
1010 utils.pprint("Creating slice",slice)
1011 test_slice.create_slice()
1012 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1015 @slice_mapper_options
1016 def check_slice(self):
1017 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1021 def clear_known_hosts (self):
1022 "remove test nodes entries from the local known_hosts file"
1026 def start_node (self) :
1027 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1030 def check_tcp (self):
1031 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1032 specs = self.plc_spec['tcp_test']
# NOTE(review): per-spec loop header and port extraction (lines 1033-1036) elided
1037 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1038 if not s_test_sliver.run_tcp_server(port,timeout=10):
# NOTE(review): failure handling (lines 1039-1041) elided
1042 # idem for the client side
# BUG(review): the client sliver is located with the *server* keys
# ('server_node'/'server_slice') -- given the comment above, this should
# presumably read spec['client_node'],spec['client_slice']; as written the
# test only ever exercises loopback. Confirm and fix.
1043 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1044 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# NOTE(review): failure handling and 'return overall' (lines 1045-1047) elided
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing `command` (presumably from
    # `location`) is missing in this copy
    command += " -- --check"
    if self.options.size == 1:
        command +=  " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
1061 # in particular runs with --preserve (dont cleanup) and without --check
1062 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def install_sfa(self):
    "yum install sfa, sfa-plc and sfa-client"
    # install first; the step succeeds only if rpm can query every
    # one of the four packages afterwards
    self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
    query_status = self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")
    return query_status == 0
def dbclean_sfa(self):
    "thoroughly wipes off the SFA database"
    # sfa-nuke-plc.py is run inside the guest; 0 means success
    retcod = self.run_in_guest("sfa-nuke-plc.py")
    return retcod == 0
1076 def plcclean_sfa(self):
1077 "cleans the PLC entries that were created as a side effect of running the script"
1079 sfa_spec=self.plc_spec['sfa']
1081 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1082 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1083 except: print "Slice %s already absent from PLC db"%slicename
1085 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1086 try: self.apiserver.DeletePerson(self.auth_root(),username)
1087 except: print "User %s already absent from PLC db"%username
1089 print "REMEMBER TO RUN import_sfa AGAIN"
def uninstall_sfa(self):
    "uses rpm to uninstall sfa - ignore result"
    # best effort: remove the packages, then wipe SFA's state directory
    for command in [ "rpm -e sfa sfa-sfatables sfa-client sfa-plc",
                     "rm -rf /var/lib/sfa" ]:
        self.run_in_guest(command)
def configure_sfa(self):
    "run sfa-config-tty"
    # drive sfa-config-tty non-interactively: build a script of
    # 'e <var>\n<value>\n' edit commands, then write/reload/quit
    tmpname='%s.sfa-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 # 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 # NOTE(review): several more SFA_* entries and the list's
                 # closing `]:` appear to be lost in this copy — verify
                 # against upstream
                 'SFA_PLC_DB_PASSWORD',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # show the script locally, feed it to sfa-config-tty in the guest, clean up
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
    utils.system('rm %s'%tmpname)
def aggregate_xml_line(self):
    # xml fragment describing this plc's aggregate interface,
    # used when cross-configuring several plcs
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="12346"/>' % (self.vserverip, hrn)
def registry_xml_line(self):
    # xml fragment describing this plc's registry interface,
    # used when cross-configuring several plcs
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_configure_sfa(self, other_plcs):
    # of course with a single plc, other_plcs is an empty list
    # write the aggregates/registries xml files locally, then push them
    # into the guest's /etc/sfa/; both copies must succeed
    # NOTE(review): `file(...)` is python2-only and the handles are never
    # closed explicitly; two lines after the comment above are missing in
    # this copy
    agg_fname="%s-agg.xml"%self.name()
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname="%s-reg.xml"%self.name()
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
       and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
def import_sfa(self):
    # run the sfa import script inside the guest; success == exit status 0
    # NOTE(review): `auth` is computed but unused since the key-copy below
    # was retired
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return self.run_in_guest('sfa-import-plc.py')==0
# not needed anymore
#        self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def start_sfa(self):
    # start the sfa service in the guest; success == exit status 0
    return self.run_in_guest('service sfa start')==0
def configure_sfi(self):
    sfa_spec=self.plc_spec['sfa']
    "sfi client configuration"
    # NOTE(review): the string above is a misplaced docstring — it comes
    # after the first statement, so it is a no-op statement
    # NOTE(review): the assignments of dir_name, slice_record, slice_rspec
    # and location are on lines missing from this copy; none of the
    # fileconf handles is ever closed explicitly — verify against upstream
    # rebuild the local staging directory from scratch
    if os.path.exists(dir_name):
       utils.system('rm -rf %s'%dir_name)
    utils.system('mkdir %s'%dir_name)
    # private key for the PI user
    file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
    fileconf=open(file_name,'w')
    fileconf.write (self.plc_spec['keys'][0]['private'])
    # the sfi_config file proper
    file_name=dir_name + os.sep + 'sfi_config'
    fileconf=open(file_name,'w')
    SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
    fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
    fileconf.write('\n')
    SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
    fileconf.write ("SFI_USER='%s'"%SFI_USER)
    fileconf.write('\n')
    # registry runs on port 12345, slice manager on 12347
    SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
    fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
    fileconf.write('\n')
    SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
    fileconf.write ("SFI_SM='%s'"%SFI_SM)
    fileconf.write('\n')
    # person record
    file_name=dir_name + os.sep + 'person.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_person_xml']:
        person_record=record
    fileconf.write(person_record)
    fileconf.write('\n')
    # slice record
    file_name=dir_name + os.sep + 'slice.xml'
    fileconf=open(file_name,'w')
    for record in sfa_spec['sfa_slice_xml']:
        #slice_record=sfa_spec['sfa_slice_xml']
    fileconf.write(slice_record)
    fileconf.write('\n')
    # slice rspec
    file_name=dir_name + os.sep + 'slice.rspec'
    fileconf=open(file_name,'w')
    for (key, value) in sfa_spec['sfa_slice_rspec'].items():
    fileconf.write(slice_rspec)
    fileconf.write('\n')
    # ship the whole staging directory into the guest, then clean up locally
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs(dir_name, remote, recursive=True)
    #utils.system('cat %s'%tmpname)
    utils.system('rm -rf %s'%dir_name)
def clean_sfi (self):
    # wipe the sfi client configuration installed in the guest
    command = "rm -rf /root/.sfi"
    self.run_in_guest(command)
1228 "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
1229 test_user_sfa=TestUserSfa(self)
1230 if not test_user_sfa.add_user(): return False
1232 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1233 site_spec = self.locate_site (slice_spec['sitename'])
1234 test_site = TestSite(self,site_spec)
1235 test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
1236 if not test_slice_sfa.add_slice(): return False
1237 if not test_slice_sfa.create_slice(): return False
def update_sfa(self):
    "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
    # same structure as add_sfa, with update operations instead;
    # NOTE(review): the final return statement is not visible in this copy
    test_user_sfa=TestUserSfa(self)
    if not test_user_sfa.update_user(): return False
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
        if not test_slice_sfa.update_slice(): return False
1253 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1254 sfa_spec=self.plc_spec['sfa']
1255 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1257 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1258 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1259 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1260 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
@slice_mapper_options_sfa
def check_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
    # body intentionally empty: the decorator dispatches to
    # TestSliceSfa.check_slice_sfa for each sfa slice spec
def delete_sfa(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry)"
    # delete the sfa user first, then every sfa slice
    test_user_sfa=TestUserSfa(self)
    success1=test_user_sfa.delete_user()
    # accumulate the slice results over ALL slices; initializing success2
    # here also fixes a NameError on the return line when sfa_slice_specs
    # is empty (previously success2 was only ever assigned inside the loop,
    # and only kept the result of the *last* slice)
    success2=True
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice_sfa=TestSliceSfa(self,test_site,slice_spec)
        if not test_slice_sfa.delete_slice(): success2=False
    # the step succeeds only if both phases did
    return success1 and success2
    # stop the sfa service in the guest; success == exit status 0
    # (the step's `def` header and docstring are on lines missing from this copy)
    return self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="/vservers/%s/%s"%(self.vservername,location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing `command` (presumably from
    # `location`) is missing in this copy
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    # NOTE(review): `remote` is reused below for a boolean, shadowing the
    # path computed above — confusing but harmless here
    command += ' --foreign'
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # (1.a)
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    # (1.b)
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    # (2)
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    # (3)
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    # (4)
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log as a tar stream and unpack it
    # locally under logs/sliver.var-log.<sliver>/
    for sliver in self.all_sliver_objs():
        remote_tar = sliver.tar_var_logs()
        target = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % target)
        utils.system(remote_tar + " | tar -C %s -xf -" % target)
def gather_var_logs (self):
    # mirror the plc's /var/log locally under logs/myplc.var-log.<plc>/
    target = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    tar_from_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_from_plc + "| tar -C %s -xf -" % target)
    # apache logs come out unreadable; open them up for browsing
    utils.system("chmod a+r,a+x %s/httpd" % target)
def gather_pgsql_logs (self):
    # mirror the plc's postgres logs locally under logs/myplc.pgsql-log.<plc>/
    target = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    tar_from_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_from_plc + "| tar -C %s -xf -" % target)
def gather_nodes_var_logs (self):
    # for every node of every site, pull /var/log over ssh (key1)
    # and unpack it locally under logs/node.var-log.<node>/
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            target = "logs/node.var-log.%s" % node.name()
            fetch = ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C %s -xf -" % target
            utils.system("mkdir -p %s" % target)
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): `StringTypes` comes from the python2-only `types`
    # module; the fallback branch (which apparently derives a timestamp
    # based name from `t`) is partly missing in this copy
    name=self.options.dbname
    if not isinstance(name,StringTypes):
        t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
    # (the `def db_dump(self):` header is on a line missing from this copy)
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" below looks like a typo for "planetlab5";
    # it only affects the dump's filename, and db_restore uses the same
    # spelling, so any fix must change both places at once
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
def db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # NB: the dump filename spelling must stay in sync with db_dump
    dump=self.dbfile("planetab5")
    # keep httpd away from the database while it is being rebuilt
    self.run_in_guest('service httpd stop')
    # drop, recreate, then replay the dump
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5',
                            'psql --user=pgsqluser template1')
    for command in [ 'createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5',
                     'psql -U pgsqluser planetlab5 -f '+dump,
                     'service httpd start' ]:
        self.run_in_guest(command)
    utils.header('Database restored from ' + dump)
# the standby_<n> placeholders below just wait <n> minutes; their bodies are
# empty because they are (presumably) each decorated with @standby_generic —
# the decorator lines between them are not visible in this copy — which
# replaces them with the actual waiting logic
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass