1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    # Park the test sequence for <minutes> minutes (backend of the standby_* steps).
    # NOTE(review): the dry_run handling between the header and the sleep is not
    # visible in this view.
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    # Factory for the standby_<n>_mn steps: the inner 'actual(self)' wrapper
    # (its def line is not visible here) parses the duration out of the step
    # name, e.g. standby_10_mn -> 10 minutes.
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    # Promote a TestNode method into a plc-level step that applies it to every
    # node of every site in plc_spec. The inner 'actual(self)' wrapper and its
    # overall/return plumbing are not visible in this view.
    node_method = TestNode.__dict__[method.__name__]
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self,test_site,node_spec)
            # a single failing node makes the whole step fail
            if not node_method(test_node): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_mapper_options (method):
    # Promote a TestSlice method into a plc-level step that loops over every
    # slice spec, passing self.options along. The inner 'actual(self)' wrapper
    # and its overall/return plumbing are not visible in this view.
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        # a single failing slice makes the whole step fail
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
def slice_mapper_options_sfa (method):
    # Same mapping pattern as slice_mapper_options, but for sfa slice specs.
    # The inner 'actual(self)' wrapper is not visible in this view.
    slice_method = TestSliceSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        # NOTE(review): 'test_plc' is not defined in this scope - the sibling
        # slice_mapper_options passes 'self' here; this looks like a bug, verify
        test_slice=TestSliceSfa(test_plc,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
90 'display', 'resources_pre', SEP,
91 'delete_vs','create_vs','install', 'configure', 'start', SEP,
92 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
93 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
94 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu',
95 'kill_all_qemus', 'start_node', SEP,
96 # better use of time: do this now that the nodes are taking off
97 'plcsh_stress_test', SEP,
98 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
99 'configure_sfi', 'add_sfa', 'update_sfa', 'view_sfa', SEPSFA,
100 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEPSFA,
101 'check_slice_sfa', 'delete_sfa', 'stop_sfa', SEPSFA,
102 'check_tcp', 'check_hooks', SEP,
103 'force_gather_logs', 'force_resources_post', SEP,
106 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
107 'stop', 'vs_start', SEP,
108 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
109 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
110 'clean_leases', 'list_leases', SEP,
112 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
113 'plcclean_sfa', 'dbclean_sfa', 'uninstall_sfa', 'clean_sfi', SEP,
114 'db_dump' , 'db_restore', SEP,
115 'standby_1 through 20',SEP,
def printable_steps (list):
    # Render a list of step names on one logical line, turning each separator
    # marker into a backslash-newline so the listing wraps nicely.
    joined = "%s " % " ".join(list)
    for marker in (SEP, SEPSFA):
        joined = joined.replace(" %s " % marker, " \\\n")
    return joined
def valid_step (step):
    # a step name is 'valid' when it is not one of the separator markers
    return step not in (SEP, SEPSFA)
126 # turn off the sfa-related steps when build has skipped SFA
127 # this is originally for centos5 as recent SFAs won't build on this platformb
def check_whether_build_has_sfa (rpms_url):
    # Probe the build's rpm repository for any sfa package; when none is found,
    # demote every sfa-related step from default_steps to other_steps.
    retcod=os.system ("curl --silent %s/ | grep -q sfa"%rpms_url)
    # full builds are expected to return with 0 here
    # (the early return taken when sfa is present is not visible in this view)
    # move all steps containing 'sfa' from default_steps to other_steps
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)
138 def __init__ (self,plc_spec,options):
139 self.plc_spec=plc_spec
141 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
143 self.vserverip=plc_spec['vserverip']
144 self.vservername=plc_spec['vservername']
145 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
148 raise Exception,'chroot-based myplc testing is deprecated'
149 self.apiserver=TestApiserver(self.url,options.dry_run)
152 name=self.plc_spec['name']
153 return "%s.%s"%(name,self.vservername)
156 return self.plc_spec['hostname']
159 return self.test_ssh.is_local()
161 # define the API methods on this object through xmlrpc
162 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    # full ssh command line that would run <command> inside the vserver guest
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    # fire the guest startup command on the host box, through ssh
    host_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_command))
def run_in_guest (self,command):
    # run <command> inside the vserver guest; returns the shell exit status
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    # run <command> on the plc host box itself (not inside the guest)
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
178 #command gets run in the vserver
def host_to_guest(self,command):
    #command gets run in the vserver
    pieces = ("vserver", self.vservername, "exec", command)
    return " ".join(pieces)
182 #command gets run in the vserver
def start_guest_in_host(self):
    #command gets run in the vserver
    return "vserver " + self.vservername + " start"
def run_in_guest_piped (self,local,remote):
    # pipe the output of the local command into <remote> run inside the guest
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system("%s | %s"%(local,remote_command))
def auth_root (self):
    # Root authentication struct for PLCAPI calls, built from the plc spec.
    # (the closing brace of the dict literal is not visible in this view)
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role']
def locate_site (self,sitename):
    # Find a site spec by its name or its login_base; raises when not found.
    # (the 'return site' statements under each match are not visible here)
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
        if site['site_fields']['login_base'] == sitename:
    raise Exception,"Cannot locate site %s"%sitename
def locate_node (self,nodename):
    # Find a node spec by its symbolic name; raises when not found.
    # (the return under the match - presumably (site,node) - is not visible here)
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
    raise Exception,"Cannot locate node %s"%nodename
def locate_hostname (self,hostname):
    # Find a node spec by its hostname; raises when not found.
    # (the return under the match is not visible here)
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
    raise Exception,"Cannot locate hostname %s"%hostname
def locate_key (self,keyname):
    # Find a key spec by name; raises when not found.
    # (the 'return key' under the match is not visible here)
    for key in self.plc_spec['keys']:
        if key['name'] == keyname:
    raise Exception,"Cannot locate key %s"%keyname
def locate_slice (self, slicename):
    # Find a slice spec by its slice name; raises when not found.
    # (the 'return slice' under the match is not visible here)
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
    raise Exception,"Cannot locate slice %s"%slicename
def all_sliver_objs (self):
    # Build a TestSliver for every (node,slice) pair in the spec.
    # (the 'result' accumulator init and final return are not visible here)
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    # Build the TestSliver object for (nodename,slicename) from the specs.
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    # wrap the raw specs into test objects
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)
def locate_first_node(self):
    # Convenience: TestNode for the first node of the first slice.
    # (the final 'return test_node' is not visible in this view)
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    # convenience: the sliver obj for the first node of the first slice
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
261 # all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # (the 'tuples'/'result' accumulator inits and the final return are not
    # visible in this view)
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu-based nodes live on a host box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
        result[box].append(node)
280 # a step for checking this stuff
281 def show_boxes (self):
282 'print summary of nodes location'
283 for (box,nodes) in self.gather_hostBoxes().iteritems():
284 print box,":"," + ".join( [ node.name() for node in nodes ] )
287 # make this a valid step
288 def kill_all_qemus(self):
289 'kill all qemu instances on the qemu boxes involved by this setup'
290 # this is the brute force version, kill all qemus on that host box
291 for (box,nodes) in self.gather_hostBoxes().iteritems():
292 # pass the first nodename, as we don't push template-qemu on testboxes
293 nodedir=nodes[0].nodedir()
294 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
297 # make this a valid step
298 def list_all_qemus(self):
299 'list all qemu instances on the qemu boxes involved by this setup'
300 for (box,nodes) in self.gather_hostBoxes().iteritems():
301 # this is the brute force version, kill all qemus on that host box
302 TestBox(box,self.options.buildname).list_all_qemus()
305 # kill only the right qemus
306 def list_qemus(self):
307 'list qemu instances for our nodes'
308 for (box,nodes) in self.gather_hostBoxes().iteritems():
309 # the fine-grain version
314 # kill only the right qemus
315 def kill_qemus(self):
316 'kill the qemu instances for our nodes'
317 for (box,nodes) in self.gather_hostBoxes().iteritems():
318 # the fine-grain version
323 #################### display config
325 "show test configuration after localization"
326 self.display_pass (1)
327 self.display_pass (2)
331 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
332 def display_pass (self,passno):
333 for (key,val) in self.plc_spec.iteritems():
334 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
338 self.display_site_spec(site)
339 for node in site['nodes']:
340 self.display_node_spec(node)
341 elif key=='initscripts':
342 for initscript in val:
343 self.display_initscript_spec (initscript)
346 self.display_slice_spec (slice)
349 self.display_key_spec (key)
351 if key not in ['sites','initscripts','slices','keys', 'sfa']:
352 print '+ ',key,':',val
354 def display_site_spec (self,site):
355 print '+ ======== site',site['site_fields']['name']
356 for (k,v) in site.iteritems():
357 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
360 print '+ ','nodes : ',
362 print node['node_fields']['hostname'],'',
368 print user['name'],'',
370 elif k == 'site_fields':
371 print '+ login_base',':',v['login_base']
372 elif k == 'address_fields':
378 def display_initscript_spec (self,initscript):
379 print '+ ======== initscript',initscript['initscript_fields']['name']
381 def display_key_spec (self,key):
382 print '+ ======== key',key['name']
384 def display_slice_spec (self,slice):
385 print '+ ======== slice',slice['slice_fields']['name']
386 for (k,v) in slice.iteritems():
399 elif k=='slice_fields':
400 print '+ fields',':',
401 print 'max_nodes=',v['max_nodes'],
406 def display_node_spec (self,node):
407 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
408 print "hostname=",node['node_fields']['hostname'],
409 print "ip=",node['interface_fields']['ip']
410 if self.options.verbose:
411 utils.pprint("node details",node,depth=3)
413 # another entry point for just showing the boxes involved
414 def display_mapping (self):
415 TestPlc.display_mapping_plc(self.plc_spec)
419 def display_mapping_plc (plc_spec):
420 print '+ MyPLC',plc_spec['name']
421 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
422 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
423 for site_spec in plc_spec['sites']:
424 for node_spec in site_spec['nodes']:
425 TestPlc.display_mapping_node(node_spec)
428 def display_mapping_node (node_spec):
429 print '+ NODE %s'%(node_spec['name'])
430 print '+\tqemu box %s'%node_spec['host_box']
431 print '+\thostname=%s'%node_spec['node_fields']['hostname']
def resources_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    # import deferred on purpose - LocalTestResources is provided by the site
    from LocalTestResources import local_resources
    handler = local_resources.step_pre
    return handler(self)
def resources_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    # import deferred on purpose - LocalTestResources is provided by the site
    from LocalTestResources import local_resources
    handler = local_resources.step_post
    return handler(self)
def resources_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    # import deferred on purpose - LocalTestResources is provided by the site
    from LocalTestResources import local_resources
    handler = local_resources.step_list
    return handler(self)
def resources_release (self):
    "run site-dependant release script as defined in LocalTestResources"
    # import deferred on purpose - LocalTestResources is provided by the site
    from LocalTestResources import local_resources
    handler = local_resources.step_release
    return handler(self)
def resources_release_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    # import deferred on purpose - LocalTestResources is provided by the site
    from LocalTestResources import local_resources
    handler = local_resources.step_release_plc
    return handler(self)
def resources_release_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    # import deferred on purpose - LocalTestResources is provided by the site
    from LocalTestResources import local_resources
    handler = local_resources.step_release_qemu
    return handler(self)
464 "vserver delete the test myplc"
465 self.run_in_host("vserver --silent %s delete"%self.vservername)
469 # historically the build was being fetched by the tests
470 # now the build pushes itself as a subdir of the tests workdir
471 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
472 def create_vs (self):
473 "vserver creation (no install done)"
474 # push the local build/ dir to the testplc box
476 # a full path for the local calls
477 build_dir=os.path.dirname(sys.argv[0])
478 # sometimes this is empty - set to "." in such a case
479 if not build_dir: build_dir="."
480 build_dir += "/build"
482 # use a standard name - will be relative to remote buildname
484 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
485 self.test_ssh.rmdir(build_dir)
486 self.test_ssh.copy(build_dir,recursive=True)
487 # the repo url is taken from arch-rpms-url
488 # with the last step (i386) removed
489 repo_url = self.options.arch_rpms_url
490 for level in [ 'arch' ]:
491 repo_url = os.path.dirname(repo_url)
492 # pass the vbuild-nightly options to vtest-init-vserver
494 test_env_options += " -p %s"%self.options.personality
495 test_env_options += " -d %s"%self.options.pldistro
496 test_env_options += " -f %s"%self.options.fcdistro
497 script="vtest-init-vserver.sh"
498 vserver_name = self.vservername
499 vserver_options="--netdev eth0 --interface %s"%self.vserverip
501 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
502 vserver_options += " --hostname %s"%vserver_hostname
504 print "Cannot reverse lookup %s"%self.vserverip
505 print "This is considered fatal, as this might pollute the test results"
507 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
508 return self.run_in_host(create_vserver) == 0
512 "yum install myplc, noderepo, and the plain bootstrapfs"
514 # workaround for getting pgsql8.2 on centos5
515 if self.options.fcdistro == "centos5":
516 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
518 if self.options.personality == "linux32":
520 elif self.options.personality == "linux64":
523 raise Exception, "Unsupported personality %r"%self.options.personality
525 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
527 # try to install slicerepo - not fatal yet
528 self.run_in_guest("yum -y install slicerepo-%s"%nodefamily)
531 self.run_in_guest("yum -y install myplc")==0 and \
532 self.run_in_guest("yum -y install noderepo-%s"%nodefamily)==0 and \
533 self.run_in_guest("yum -y install bootstrapfs-%s-plain"%nodefamily)==0
538 tmpname='%s.plc-config-tty'%(self.name())
539 fileconf=open(tmpname,'w')
540 for var in [ 'PLC_NAME',
545 'PLC_MAIL_SUPPORT_ADDRESS',
548 # Above line was added for integrating SFA Testing
554 'PLC_RESERVATION_GRANULARITY',
557 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
558 fileconf.write('w\n')
559 fileconf.write('q\n')
561 utils.system('cat %s'%tmpname)
562 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
563 utils.system('rm %s'%tmpname)
568 self.run_in_guest('service plc start')
573 self.run_in_guest('service plc stop')
577 "start the PLC vserver"
581 # stores the keys from the config for further use
def store_keys(self):
    "stores test users ssh keys in keys/"
    # one TestKey object per spec entry; each knows how to persist itself
    key_specs = self.plc_spec['keys']
    for spec in key_specs:
        test_key = TestKey(self,spec)
        test_key.store_key()
def clean_keys(self):
    "removes keys cached in keys/"
    # bug fix: os.path is a module and is not callable - 'os.path(sys.argv[0])'
    # raised TypeError; what is wanted here is the directory of the running
    # script, i.e. os.path.dirname
    utils.system("rm -rf %s/keys/"%os.path.dirname(sys.argv[0]))
592 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
593 # for later direct access to the nodes
594 def fetch_keys(self):
595 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
597 if not os.path.isdir(dir):
599 vservername=self.vservername
601 prefix = 'debug_ssh_key'
602 for ext in [ 'pub', 'rsa' ] :
603 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
604 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
605 if self.test_ssh.fetch(src,dst) != 0: overall=False
609 "create sites with PLCAPI"
610 return self.do_sites()
def clean_sites (self):
    "delete sites with PLCAPI"
    # same worker as the 'sites' step, with the delete action
    return self.do_sites(action="delete")
616 def do_sites (self,action="add"):
617 for site_spec in self.plc_spec['sites']:
618 test_site = TestSite (self,site_spec)
619 if (action != "add"):
620 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
621 test_site.delete_site()
622 # deleted with the site
623 #test_site.delete_users()
626 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
627 test_site.create_site()
628 test_site.create_users()
631 def clean_all_sites (self):
632 "Delete all sites in PLC, and related objects"
633 print 'auth_root',self.auth_root()
634 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
635 for site_id in site_ids:
636 print 'Deleting site_id',site_id
637 self.apiserver.DeleteSite(self.auth_root(),site_id)
640 "create nodes with PLCAPI"
641 return self.do_nodes()
def clean_nodes (self):
    "delete nodes with PLCAPI"
    # mirror image of the 'nodes' step
    kwargs = {"action": "delete"}
    return self.do_nodes(**kwargs)
646 def do_nodes (self,action="add"):
647 for site_spec in self.plc_spec['sites']:
648 test_site = TestSite (self,site_spec)
650 utils.header("Deleting nodes in site %s"%test_site.name())
651 for node_spec in site_spec['nodes']:
652 test_node=TestNode(self,test_site,node_spec)
653 utils.header("Deleting %s"%test_node.name())
654 test_node.delete_node()
656 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
657 for node_spec in site_spec['nodes']:
658 utils.pprint('Creating node %s'%node_spec,node_spec)
659 test_node = TestNode (self,test_site,node_spec)
660 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # delegate to the shared worker with the add action
    action = "add"
    return self.do_nodegroups(action)
def clean_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # delegate to the shared worker with the delete action
    action = "delete"
    return self.do_nodegroups(action)
def translate_timestamp (start,grain,timestamp):
    # values smaller than a year are relative offsets counted in 'grain' units
    # from 'start'; anything bigger is taken as an absolute timestamp already
    if timestamp >= TestPlc.YEAR: return timestamp
    else: return start+timestamp*grain
def timestamp_printable (timestamp):
    # human-friendly UTC rendering of a unix timestamp
    broken_down = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC',broken_down)
681 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
683 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
684 print 'API answered grain=',grain
685 start=(now/grain)*grain
687 # find out all nodes that are reservable
688 nodes=self.all_reservable_nodenames()
690 utils.header ("No reservable node found - proceeding without leases")
693 # attach them to the leases as specified in plc_specs
694 # this is where the 'leases' field gets interpreted as relative of absolute
695 for lease_spec in self.plc_spec['leases']:
696 # skip the ones that come with a null slice id
697 if not lease_spec['slice']: continue
698 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
699 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
700 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
701 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
702 if lease_addition['errors']:
703 utils.header("Cannot create leases, %s"%lease_addition['errors'])
706 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
707 (nodes,lease_spec['slice'],
708 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
709 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
def clean_leases (self):
    "remove all leases in the myplc side"
    # fetch every lease id known to the myplc, then drop them in a single call
    all_leases = self.apiserver.GetLeases(self.auth_root())
    lease_ids = [ lease['lease_id'] for lease in all_leases ]
    utils.header("Cleaning leases %r"%lease_ids)
    self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
720 def list_leases (self):
721 "list all leases known to the myplc"
722 leases = self.apiserver.GetLeases(self.auth_root())
725 current=l['t_until']>=now
726 if self.options.verbose or current:
727 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
728 TestPlc.timestamp_printable(l['t_from']),
729 TestPlc.timestamp_printable(l['t_until'])))
732 # create nodegroups if needed, and populate
733 def do_nodegroups (self, action="add"):
734 # 1st pass to scan contents
736 for site_spec in self.plc_spec['sites']:
737 test_site = TestSite (self,site_spec)
738 for node_spec in site_spec['nodes']:
739 test_node=TestNode (self,test_site,node_spec)
740 if node_spec.has_key('nodegroups'):
741 nodegroupnames=node_spec['nodegroups']
742 if isinstance(nodegroupnames,StringTypes):
743 nodegroupnames = [ nodegroupnames ]
744 for nodegroupname in nodegroupnames:
745 if not groups_dict.has_key(nodegroupname):
746 groups_dict[nodegroupname]=[]
747 groups_dict[nodegroupname].append(test_node.name())
748 auth=self.auth_root()
750 for (nodegroupname,group_nodes) in groups_dict.iteritems():
752 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
753 # first, check if the nodetagtype is here
754 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
756 tag_type_id = tag_types[0]['tag_type_id']
758 tag_type_id = self.apiserver.AddTagType(auth,
759 {'tagname':nodegroupname,
760 'description': 'for nodegroup %s'%nodegroupname,
763 print 'located tag (type)',nodegroupname,'as',tag_type_id
765 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
767 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
768 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
769 # set node tag on all nodes, value='yes'
770 for nodename in group_nodes:
772 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
774 traceback.print_exc()
775 print 'node',nodename,'seems to already have tag',nodegroupname
778 expect_yes = self.apiserver.GetNodeTags(auth,
779 {'hostname':nodename,
780 'tagname':nodegroupname},
781 ['value'])[0]['value']
782 if expect_yes != "yes":
783 print 'Mismatch node tag on node',nodename,'got',expect_yes
786 if not self.options.dry_run:
787 print 'Cannot find tag',nodegroupname,'on node',nodename
791 print 'cleaning nodegroup',nodegroupname
792 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
794 traceback.print_exc()
798 # return a list of tuples (nodename,qemuname)
799 def all_node_infos (self) :
801 for site_spec in self.plc_spec['sites']:
802 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
803 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # keep only the hostname part of the (hostname,qemubox) tuples
    return [ hostname for (hostname,_) in self.all_node_infos() ]
807 def all_reservable_nodenames (self):
809 for site_spec in self.plc_spec['sites']:
810 for node_spec in site_spec['nodes']:
811 node_fields=node_spec['node_fields']
812 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
813 res.append(node_fields['hostname'])
816 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
817 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
818 if self.options.dry_run:
822 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
823 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
824 # the nodes that haven't checked yet - start with a full list and shrink over time
825 tocheck = self.all_hostnames()
826 utils.header("checking nodes %r"%tocheck)
827 # create a dict hostname -> status
828 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
831 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
833 for array in tocheck_status:
834 hostname=array['hostname']
835 boot_state=array['boot_state']
836 if boot_state == target_boot_state:
837 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
839 # if it's a real node, never mind
840 (site_spec,node_spec)=self.locate_hostname(hostname)
841 if TestNode.is_real_model(node_spec['node_fields']['model']):
842 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
844 boot_state = target_boot_state
845 elif datetime.datetime.now() > graceout:
846 utils.header ("%s still in '%s' state"%(hostname,boot_state))
847 graceout=datetime.datetime.now()+datetime.timedelta(1)
848 status[hostname] = boot_state
850 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
853 if datetime.datetime.now() > timeout:
854 for hostname in tocheck:
855 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
857 # otherwise, sleep for a while
859 # only useful in empty plcs
def nodes_booted(self):
    # wait (up to 30mn, quiet for the first 20) for all nodes to reach 'boot'
    target = 'boot'
    return self.nodes_check_boot_state(target,timeout_minutes=30,silent_minutes=20)
865 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
867 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
868 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
869 vservername=self.vservername
872 local_key = "keys/%(vservername)s-debug.rsa"%locals()
875 local_key = "keys/key1.rsa"
876 node_infos = self.all_node_infos()
877 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
878 for (nodename,qemuname) in node_infos:
879 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
880 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
881 (timeout_minutes,silent_minutes,period))
883 for node_info in node_infos:
884 (hostname,qemuname) = node_info
885 # try to run 'hostname' in the node
886 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
887 # don't spam logs - show the command only after the grace period
888 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
890 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
892 node_infos.remove(node_info)
894 # we will have tried real nodes once, in case they're up - but if not, just skip
895 (site_spec,node_spec)=self.locate_hostname(hostname)
896 if TestNode.is_real_model(node_spec['node_fields']['model']):
897 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
898 node_infos.remove(node_info)
901 if datetime.datetime.now() > timeout:
902 for (hostname,qemuname) in node_infos:
903 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
905 # otherwise, sleep for a while
907 # only useful in empty plcs
def nodes_ssh_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # debug mode: shorter timeouts, debug ssh key
    options = dict(debug=True,timeout_minutes=10,silent_minutes=5)
    return self.check_nodes_ssh(**options)
def nodes_ssh_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # production mode: longer timeouts, root ssh key
    options = dict(debug=False,timeout_minutes=40,silent_minutes=15)
    return self.check_nodes_ssh(**options)
919 def init_node (self):
920 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
924 "all nodes: invoke GetBootMedium and store result locally"
927 def configure_qemu (self):
928 "all nodes: compute qemu config qemu.conf and store it locally"
931 def reinstall_node (self):
932 "all nodes: mark PLCAPI boot_state as reinstall"
935 def export_qemu (self):
936 "all nodes: push local node-dep directory on the qemu box"
939 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the hooks/node unit tests on the first node of the spec
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the hooks/slice unit tests on the first sliver of the spec
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # node hooks run first; sliver hooks are short-circuited when they failed
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
950 def do_check_initscripts(self):
952 for slice_spec in self.plc_spec['slices']:
953 if not slice_spec.has_key('initscriptname'):
955 initscript=slice_spec['initscriptname']
956 for nodename in slice_spec['nodenames']:
957 (site,node) = self.locate_node (nodename)
958 # xxx - passing the wrong site - probably harmless
959 test_site = TestSite (self,site)
960 test_slice = TestSlice (self,test_site,slice_spec)
961 test_node = TestNode (self,test_site,node)
962 test_sliver = TestSliver (self, test_node, test_slice)
963 if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # plain delegation to the worker method
    worker = self.do_check_initscripts
    return worker()
971 def initscripts (self):
972 "create initscripts with PLCAPI"
973 for initscript in self.plc_spec['initscripts']:
974 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
975 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
978 def clean_initscripts (self):
979 "delete initscripts with PLCAPI"
980 for initscript in self.plc_spec['initscripts']:
981 initscript_name = initscript['initscript_fields']['name']
982 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
984 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
985 print initscript_name,'deleted'
987 print 'deletion went wrong - probably did not exist'
992 "create slices with PLCAPI"
993 return self.do_slices()
def clean_slices (self):
    "delete slices with PLCAPI"
    # same worker as the 'slices' step, with the delete action
    action = "delete"
    return self.do_slices(action)
999 def do_slices (self, action="add"):
1000 for slice in self.plc_spec['slices']:
1001 site_spec = self.locate_site (slice['sitename'])
1002 test_site = TestSite(self,site_spec)
1003 test_slice=TestSlice(self,test_site,slice)
1005 utils.header("Deleting slices in site %s"%test_site.name())
1006 test_slice.delete_slice()
1008 utils.pprint("Creating slice",slice)
1009 test_slice.create_slice()
1010 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1013 @slice_mapper_options
1014 def check_slice(self):
1015 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1019 def clear_known_hosts (self):
1020 "remove test nodes entries from the local known_hosts file"
1024 def start_node (self) :
1025 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
    # (the loop over specs and the port extraction are not visible in this view)
    # server side
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): this locates the *server* node/slice again - it should
    # presumably use spec['client_node'],spec['client_slice']; verify
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): 'command' is first assigned on a line elided from this
        # view (presumably command = location) -- confirm
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        # success iff the in-guest run exits 0
        return ( self.run_in_guest(command) == 0)
# populate runs the same utility with slightly different options
1059 # in particular runs with --preserve (dont cleanup) and without --check
1060 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1063 def install_sfa(self):
1064 "yum install sfa, sfa-plc and sfa-client"
1065 return self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")==0
1067 def dbclean_sfa(self):
1068 "thoroughly wipes off the SFA database"
1069 return self.run_in_guest("sfa-nuke-plc.py")==0
    def plcclean_sfa(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec=self.plc_spec['sfa']
        # remove the slice the sfa run created in the plc db
        slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
        try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
        # NOTE(review): bare except swallows *all* errors, not just
        # 'slice absent' -- consider narrowing to the API fault type
        except: print "Slice %s already absent from PLC db"%slicename
        # same best-effort deletion for the regular user
        username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(),username)
        except: print "User %s already absent from PLC db"%username
        print "REMEMBER TO RUN import_sfa AGAIN"
    def uninstall_sfa(self):
        "uses rpm to uninstall sfa - ignore result"
        # deliberately best-effort: both exit statuses are ignored
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
    def configure_sfa(self):
        "run sfa-config-tty"
        # materialize the answers for sfa-config-tty in a local temp file:
        # one 'e <var>\n<value>\n' exchange per setting, values taken from
        # plc_spec['sfa']
        tmpname='%s.sfa-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # NOTE(review): several entries of this list are elided from this view
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     # 'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_PLC_DB_PASSWORD',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # w = write config, R = restart, q = quit the tty dialog
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        # NOTE(review): fileconf should be flushed/closed before the cat below
        # -- presumably done on an elided line; confirm
        utils.system('cat %s'%tmpname)
        # feed the scripted answers to sfa-config-tty inside the guest
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        utils.system('rm %s'%tmpname)
1121 def aggregate_xml_line(self):
1122 return '<aggregate addr="%s" hrn="%s" port="12346"/>' % \
1123 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1125 def registry_xml_line(self):
1126 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1127 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1130 # a cross step that takes all other plcs in argument
1131 def cross_configure_sfa(self, other_plcs):
1132 # of course with a single plc, other_plcs is an empty list
1134 filename="%s-agg.xml"%self.name()
1135 agg_file=file(filename,"w")
1136 agg_file.write("<aggregates>%s</aggregates>\n" % \
1137 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1139 if self.test_ssh.copy_abs(filename,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername) !=0 : return False
1141 filename="%s-reg.xml"%self.name()
1142 agg_file=file(filename,"w")
1143 agg_file.write("<registries>%s</registries>\n" % \
1144 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1146 if self.test_ssh.copy_abs(filename,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername) !=0 : return False
    def import_sfa(self):
        # runs the sfa import script inside the guest; docstring elided from this view
        # NOTE(review): 'auth' is unused now that the key-copying step below is
        # commented out -- kept only for that historical snippet
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import-plc.py')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1156 def start_sfa(self):
1158 return self.run_in_guest('service sfa start')==0
    def configure_sfi(self):
        sfa_spec=self.plc_spec['sfa']
        # NOTE(review): this string sits *after* a statement, so it is not the
        # method docstring -- move it to the first line if the step listing
        # should display it
        "sfi client configuration"
        # NOTE(review): dir_name (the local staging directory) is assigned on a
        # line elided from this view -- confirm
        if os.path.exists(dir_name):
            utils.system('rm -rf %s'%dir_name)
        utils.system('mkdir %s'%dir_name)
        # the PI's private key
        file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
        fileconf=open(file_name,'w')
        fileconf.write (self.plc_spec['keys'][0]['private'])
        # sfi_config proper: auth / user / registry / slice-manager URLs
        file_name=dir_name + os.sep + 'sfi_config'
        fileconf=open(file_name,'w')
        SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
        fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
        fileconf.write('\n')
        SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
        fileconf.write ("SFI_USER='%s'"%SFI_USER)
        fileconf.write('\n')
        SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
        fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
        fileconf.write('\n')
        SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
        fileconf.write ("SFI_SM='%s'"%SFI_SM)
        fileconf.write('\n')
        # person record
        file_name=dir_name + os.sep + 'person.xml'
        fileconf=open(file_name,'w')
        for record in sfa_spec['sfa_person_xml']:
            person_record=record
            fileconf.write(person_record)
            fileconf.write('\n')
        # slice record
        file_name=dir_name + os.sep + 'slice.xml'
        fileconf=open(file_name,'w')
        for record in sfa_spec['sfa_slice_xml']:
            #slice_record=sfa_spec['sfa_slice_xml']
            # NOTE(review): slice_record is never assigned in this view --
            # presumably 'slice_record=record' on an elided line; confirm
            fileconf.write(slice_record)
            fileconf.write('\n')
        # rspec
        file_name=dir_name + os.sep + 'slice.rspec'
        fileconf=open(file_name,'w')
        for (key, value) in sfa_spec['sfa_slice_rspec'].items():
            # NOTE(review): slice_rspec is built from (key,value) on an elided line
            fileconf.write(slice_rspec)
            fileconf.write('\n')
        # copy the whole staging dir into the guest
        # NOTE(review): 'location' is assigned on an elided line -- confirm
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs(dir_name, remote, recursive=True)
        #utils.system('cat %s'%tmpname)
        utils.system('rm -rf %s'%dir_name)
1221 def clean_sfi (self):
1222 self.run_in_guest("rm -rf /root/.sfi")
1226 "run sfi.py add (on Registry) and sfi.py create (on SM) to form new objects"
1228 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1229 if not test_user_sfa.add_user(): return False
1231 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1232 site_spec = self.locate_site (slice_spec['sitename'])
1233 test_site = TestSite(self,site_spec)
1234 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1235 if not test_slice_sfa.add_slice(): return False
1236 if not test_slice_sfa.create_slice(): return False
1239 def update_sfa(self):
1240 "run sfi.py update (on Registry) and sfi.py create (on SM) on existing objects"
1242 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1243 if not test_user_sfa.update_user(): return False
1245 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1246 site_spec = self.locate_site (slice_spec['sitename'])
1247 test_site = TestSite(self,site_spec)
1248 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1249 if not test_slice_sfa.update_slice(): return False
        # (check_sfa) NOTE(review): the 'def' line and the leading 'return'
        # keyword of the and-chain below are elided from this view
        "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
        sfa_spec=self.plc_spec['sfa']
        auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
        # all four sfi invocations must exit 0 for the step to pass
        self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
        self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
    @slice_mapper_options_sfa
    def check_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        # body elided from this view; slice_mapper_options_sfa iterates the
        # sfa_slice_specs and calls TestSliceSfa.check_slice_sfa on each
1267 def delete_sfa(self):
1268 "run sfi.py delete (on SM), sfi.py remove (on Registry)"
1270 test_user_sfa=TestUserSfa(test_plc,self.plc_spec['sfa'])
1271 success1=test_user_sfa.delete_user()
1272 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1273 site_spec = self.locate_site (slice_spec['sitename'])
1274 test_site = TestSite(self,site_spec)
1275 test_slice_sfa=TestSliceSfa(test_plc,test_site,slice_spec)
1276 success2=test_slice_sfa.delete_slice()
1278 return success1 and success2
        # (stop_sfa tail -- the 'def' line is elided) success iff the init script exits 0
        return self.run_in_guest('service sfa stop')==0
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="/vservers/%s/%s"%(self.vservername,location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): 'command' is first assigned on a line elided from this
        # view (presumably command = location) -- confirm
        command += " -- --preserve --short-names"
        # first run: populate while preserving existing contents
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        # NOTE(review): 'remote' is reused here, clobbering the path above --
        # harmless but confusing
        remote = (self.run_in_guest(command) == 0);
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        # (2) one qemu log per node
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
1326 def gather_slivers_var_logs(self):
1327 for test_sliver in self.all_sliver_objs():
1328 remote = test_sliver.tar_var_logs()
1329 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1330 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1331 utils.system(command)
1334 def gather_var_logs (self):
1335 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1336 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1337 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1338 utils.system(command)
1339 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1340 utils.system(command)
1342 def gather_pgsql_logs (self):
1343 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1344 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1345 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1346 utils.system(command)
1348 def gather_nodes_var_logs (self):
1349 for site_spec in self.plc_spec['sites']:
1350 test_site = TestSite (self,site_spec)
1351 for node_spec in site_spec['nodes']:
1352 test_node=TestNode(self,test_site,node_spec)
1353 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1354 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1355 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1356 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1357 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        name=self.options.dbname
        # StringTypes is the python2 (str, unicode) pair from the 'types' module
        if not isinstance(name,StringTypes):
        # NOTE(review): the body of the if above is elided from this view --
        # presumably it derives 'name' from the timestamp 't' below; confirm
        t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)
        # (db_dump tail -- the 'def db_dump(self):' line is elided from this view)
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): "planetab5" (missing an 'l') is a typo, but db_restore
        # uses the same basename so the pair still works -- fix both together
        dump=self.dbfile("planetab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
    def db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        # NOTE(review): "planetab5" is the same typo as in db_dump -- kept so
        # the dump/restore basenames keep matching
        dump=self.dbfile("planetab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        # drop + recreate the database, then replay the dump into it
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
    # fake steps for manual waiting: standby_<n> sleeps <n> minutes.
    # NOTE(review): each is presumably decorated with @standby_generic on an
    # elided line -- the decorator derives the duration from the function name
    # and supplies the real (self)-taking body (see the module header)
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass