1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBox import TestBox
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
55 def slice_mapper (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
69 def slice_sfa_mapper (method):
72 slice_method = TestSliceSfa.__dict__[method.__name__]
73 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
74 site_spec = self.locate_site (slice_spec['sitename'])
75 test_site = TestSite(self,site_spec)
76 test_slice=TestSliceSfa(self,test_site,slice_spec)
77 if not slice_method(test_slice,self.options): overall=False
79 # restore the doc text
80 actual.__doc__=method.__doc__
89 'display', 'resources_pre', SEP,
90 'delete_vs','create_vs','install', 'configure', 'start', SEP,
91 'fetch_keys', 'store_keys', 'clear_known_hosts', SEP,
92 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
93 'reinstall_node', 'init_node','bootcd', 'configure_qemu', 'export_qemu', 'kill_all_qemus', 'start_node', SEP,
94 'install_sfa', 'configure_sfa', 'cross_configure_sfa', 'import_sfa', 'start_sfa', SEPSFA,
95 'configure_sfi@1', 'add_sfa@1', 'create_sfa@1', 'update_sfa@1', 'view_sfa@1', SEPSFA,
96 # better use of time: do this now that the nodes are taking off
97 'plcsh_stress_test', SEP,
98 'nodes_ssh_debug', 'nodes_ssh_boot', 'check_slice', 'check_initscripts', SEPSFA,
99 'check_slice_sfa@1', 'delete_sfa_user@1', 'delete_sfa_slices@1', SEPSFA,
100 'check_tcp', 'check_hooks', SEP,
101 'force_gather_logs', 'force_resources_post', SEP,
104 'show_boxes', 'resources_list','resources_release','resources_release_plc','resources_release_qemu',SEP,
105 'stop', 'vs_start', SEP,
106 'clean_initscripts', 'clean_nodegroups','clean_all_sites', SEP,
107 'clean_sites', 'clean_nodes', 'clean_slices', 'clean_keys', SEP,
108 'clean_leases', 'list_leases', SEP,
110 'list_all_qemus', 'list_qemus', 'kill_qemus', SEP,
111 'plcclean_sfa', 'dbclean_sfa', 'stop_sfa','uninstall_sfa', 'clean_sfi', SEP,
112 'db_dump' , 'db_restore', SEP,
113 'standby_1 through 20',SEP,
def printable_steps (list):
    "format a step list for display - separator pseudo-steps become line continuations"
    line = " ".join(list) + " "
    for separator in (SEP, SEPSFA):
        line = line.replace(" " + separator + " ", " \\\n")
    return line
def valid_step (step):
    "a step name is valid unless it is one of the separator pseudo-steps"
    return step not in (SEP, SEPSFA)
124 # turn off the sfa-related steps when build has skipped SFA
125 # this is originally for centos5 as recent SFAs won't build on this platformb
127 def check_whether_build_has_sfa (rpms_url):
128 # warning, we're now building 'sface' so let's be a bit more picky
129 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
130 # full builds are expected to return with 0 here
132 # move all steps containing 'sfa' from default_steps to other_steps
133 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
134 TestPlc.other_steps += sfa_steps
135 for step in sfa_steps: TestPlc.default_steps.remove(step)
137 def __init__ (self,plc_spec,options):
138 self.plc_spec=plc_spec
140 self.test_ssh=TestSsh(self.plc_spec['hostname'],self.options.buildname)
142 self.vserverip=plc_spec['vserverip']
143 self.vservername=plc_spec['vservername']
144 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
147 raise Exception,'chroot-based myplc testing is deprecated'
148 self.apiserver=TestApiserver(self.url,options.dry_run)
151 name=self.plc_spec['name']
152 return "%s.%s"%(name,self.vservername)
155 return self.plc_spec['hostname']
158 return self.test_ssh.is_local()
160 # define the API methods on this object through xmlrpc
161 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    "the full ssh command line that runs 'command' inside the guest vserver"
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    "boot the guest vserver, driving the hosting box over ssh"
    host_command = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_command))
def run_in_guest (self,command):
    "run 'command' inside the guest vserver; returns the shell exit status"
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    "run 'command' on the hosting box itself, through the build ssh helper"
    ssh_helper = self.test_ssh
    return ssh_helper.run_in_buildname(command)
def host_to_guest(self,command):
    "wrap 'command' so that, when run on the host, it executes inside the vserver"
    wrapper = "vserver %s exec %s"
    return wrapper % (self.vservername, command)
def start_guest_in_host(self):
    "the host-side command that boots the guest vserver"
    template = "vserver %s start"
    return template % (self.vservername)
def run_in_guest_piped (self,local,remote):
    "pipe the stdout of 'local' (run on the build box) into 'remote' (run in the guest)"
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True)
    return utils.system(local + " | " + remote_command)
189 def auth_root (self):
190 return {'Username':self.plc_spec['PLC_ROOT_USER'],
191 'AuthMethod':'password',
192 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
193 'Role' : self.plc_spec['role']
195 def locate_site (self,sitename):
196 for site in self.plc_spec['sites']:
197 if site['site_fields']['name'] == sitename:
199 if site['site_fields']['login_base'] == sitename:
201 raise Exception,"Cannot locate site %s"%sitename
203 def locate_node (self,nodename):
204 for site in self.plc_spec['sites']:
205 for node in site['nodes']:
206 if node['name'] == nodename:
208 raise Exception,"Cannot locate node %s"%nodename
210 def locate_hostname (self,hostname):
211 for site in self.plc_spec['sites']:
212 for node in site['nodes']:
213 if node['node_fields']['hostname'] == hostname:
215 raise Exception,"Cannot locate hostname %s"%hostname
217 def locate_key (self,keyname):
218 for key in self.plc_spec['keys']:
219 if key['name'] == keyname:
221 raise Exception,"Cannot locate key %s"%keyname
223 def locate_slice (self, slicename):
224 for slice in self.plc_spec['slices']:
225 if slice['slice_fields']['name'] == slicename:
227 raise Exception,"Cannot locate slice %s"%slicename
229 def all_sliver_objs (self):
231 for slice_spec in self.plc_spec['slices']:
232 slicename = slice_spec['slice_fields']['name']
233 for nodename in slice_spec['nodenames']:
234 result.append(self.locate_sliver_obj (nodename,slicename))
def locate_sliver_obj (self,nodename,slicename):
    "build the TestSliver object for the given (nodename,slicename) pair"
    (site_spec, node_spec) = self.locate_node(nodename)
    slice_spec = self.locate_slice(slicename)
    test_site = TestSite(self, site_spec)
    test_node = TestNode(self, test_site, node_spec)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice(self, test_site, slice_spec)
    return TestSliver(self, test_node, test_slice)
247 def locate_first_node(self):
248 nodename=self.plc_spec['slices'][0]['nodenames'][0]
249 (site,node) = self.locate_node(nodename)
250 test_site = TestSite (self, site)
251 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    "convenience: the sliver object for the first node of the first declared slice"
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
260 # all different hostboxes used in this plc
261 def gather_hostBoxes(self):
262 # maps on sites and nodes, return [ (host_box,test_node) ]
264 for site_spec in self.plc_spec['sites']:
265 test_site = TestSite (self,site_spec)
266 for node_spec in site_spec['nodes']:
267 test_node = TestNode (self, test_site, node_spec)
268 if not test_node.is_real():
269 tuples.append( (test_node.host_box(),test_node) )
270 # transform into a dict { 'host_box' -> [ test_node .. ] }
272 for (box,node) in tuples:
273 if not result.has_key(box):
276 result[box].append(node)
279 # a step for checking this stuff
280 def show_boxes (self):
281 'print summary of nodes location'
282 for (box,nodes) in self.gather_hostBoxes().iteritems():
283 print box,":"," + ".join( [ node.name() for node in nodes ] )
286 # make this a valid step
287 def kill_all_qemus(self):
288 'kill all qemu instances on the qemu boxes involved by this setup'
289 # this is the brute force version, kill all qemus on that host box
290 for (box,nodes) in self.gather_hostBoxes().iteritems():
291 # pass the first nodename, as we don't push template-qemu on testboxes
292 nodedir=nodes[0].nodedir()
293 TestBox(box,self.options.buildname).kill_all_qemus(nodedir)
296 # make this a valid step
297 def list_all_qemus(self):
298 'list all qemu instances on the qemu boxes involved by this setup'
299 for (box,nodes) in self.gather_hostBoxes().iteritems():
300 # this is the brute force version, kill all qemus on that host box
301 TestBox(box,self.options.buildname).list_all_qemus()
304 # kill only the right qemus
305 def list_qemus(self):
306 'list qemu instances for our nodes'
307 for (box,nodes) in self.gather_hostBoxes().iteritems():
308 # the fine-grain version
313 # kill only the right qemus
314 def kill_qemus(self):
315 'kill the qemu instances for our nodes'
316 for (box,nodes) in self.gather_hostBoxes().iteritems():
317 # the fine-grain version
322 #################### display config
324 "show test configuration after localization"
325 self.display_pass (1)
326 self.display_pass (2)
330 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
331 def display_pass (self,passno):
332 for (key,val) in self.plc_spec.iteritems():
333 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
337 self.display_site_spec(site)
338 for node in site['nodes']:
339 self.display_node_spec(node)
340 elif key=='initscripts':
341 for initscript in val:
342 self.display_initscript_spec (initscript)
345 self.display_slice_spec (slice)
348 self.display_key_spec (key)
350 if key not in ['sites','initscripts','slices','keys', 'sfa']:
351 print '+ ',key,':',val
353 def display_site_spec (self,site):
354 print '+ ======== site',site['site_fields']['name']
355 for (k,v) in site.iteritems():
356 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
359 print '+ ','nodes : ',
361 print node['node_fields']['hostname'],'',
367 print user['name'],'',
369 elif k == 'site_fields':
370 print '+ login_base',':',v['login_base']
371 elif k == 'address_fields':
377 def display_initscript_spec (self,initscript):
378 print '+ ======== initscript',initscript['initscript_fields']['name']
380 def display_key_spec (self,key):
381 print '+ ======== key',key['name']
383 def display_slice_spec (self,slice):
384 print '+ ======== slice',slice['slice_fields']['name']
385 for (k,v) in slice.iteritems():
398 elif k=='slice_fields':
399 print '+ fields',':',
400 print 'max_nodes=',v['max_nodes'],
405 def display_node_spec (self,node):
406 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
407 print "hostname=",node['node_fields']['hostname'],
408 print "ip=",node['interface_fields']['ip']
409 if self.options.verbose:
410 utils.pprint("node details",node,depth=3)
412 # another entry point for just showing the boxes involved
413 def display_mapping (self):
414 TestPlc.display_mapping_plc(self.plc_spec)
418 def display_mapping_plc (plc_spec):
419 print '+ MyPLC',plc_spec['name']
420 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['hostname'],plc_spec['vservername'])
421 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
422 for site_spec in plc_spec['sites']:
423 for node_spec in site_spec['nodes']:
424 TestPlc.display_mapping_node(node_spec)
427 def display_mapping_node (node_spec):
428 print '+ NODE %s'%(node_spec['name'])
429 print '+\tqemu box %s'%node_spec['host_box']
430 print '+\thostname=%s'%node_spec['node_fields']['hostname']
def resources_pre (self):
    "run site-dependant pre-test script as defined in LocalTestResources"
    # deployment-specific hook, resolved at run time
    from LocalTestResources import local_resources
    hook = local_resources.step_pre
    return hook(self)
def resources_post (self):
    "run site-dependant post-test script as defined in LocalTestResources"
    # deployment-specific hook, resolved at run time
    from LocalTestResources import local_resources
    hook = local_resources.step_post
    return hook(self)
def resources_list (self):
    "run site-dependant list script as defined in LocalTestResources"
    # deployment-specific hook, resolved at run time
    from LocalTestResources import local_resources
    hook = local_resources.step_list
    return hook(self)
def resources_release (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deployment-specific hook, resolved at run time
    from LocalTestResources import local_resources
    hook = local_resources.step_release
    return hook(self)
def resources_release_plc (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deployment-specific hook, resolved at run time
    from LocalTestResources import local_resources
    hook = local_resources.step_release_plc
    return hook(self)
def resources_release_qemu (self):
    "run site-dependant release script as defined in LocalTestResources"
    # deployment-specific hook, resolved at run time
    from LocalTestResources import local_resources
    hook = local_resources.step_release_qemu
    return hook(self)
463 "vserver delete the test myplc"
464 self.run_in_host("vserver --silent %s delete"%self.vservername)
468 # historically the build was being fetched by the tests
469 # now the build pushes itself as a subdir of the tests workdir
470 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
471 def create_vs (self):
472 "vserver creation (no install done)"
473 # push the local build/ dir to the testplc box
475 # a full path for the local calls
476 build_dir=os.path.dirname(sys.argv[0])
477 # sometimes this is empty - set to "." in such a case
478 if not build_dir: build_dir="."
479 build_dir += "/build"
481 # use a standard name - will be relative to remote buildname
483 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
484 self.test_ssh.rmdir(build_dir)
485 self.test_ssh.copy(build_dir,recursive=True)
486 # the repo url is taken from arch-rpms-url
487 # with the last step (i386) removed
488 repo_url = self.options.arch_rpms_url
489 for level in [ 'arch' ]:
490 repo_url = os.path.dirname(repo_url)
491 # pass the vbuild-nightly options to vtest-init-vserver
493 test_env_options += " -p %s"%self.options.personality
494 test_env_options += " -d %s"%self.options.pldistro
495 test_env_options += " -f %s"%self.options.fcdistro
496 script="vtest-init-vserver.sh"
497 vserver_name = self.vservername
498 vserver_options="--netdev eth0 --interface %s"%self.vserverip
500 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
501 vserver_options += " --hostname %s"%vserver_hostname
503 print "Cannot reverse lookup %s"%self.vserverip
504 print "This is considered fatal, as this might pollute the test results"
506 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
507 return self.run_in_host(create_vserver) == 0
511 "yum install myplc, noderepo, and the plain bootstrapfs"
513 # workaround for getting pgsql8.2 on centos5
514 if self.options.fcdistro == "centos5":
515 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
518 if self.options.personality == "linux32":
520 elif self.options.personality == "linux64":
523 raise Exception, "Unsupported personality %r"%self.options.personality
524 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
527 pkgs_list.append ("slicerepo-%s"%nodefamily)
528 pkgs_list.append ("myplc")
529 pkgs_list.append ("noderepo-%s"%nodefamily)
530 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
531 pkgs_string=" ".join(pkgs_list)
532 self.run_in_guest("yum -y install %s"%pkgs_string)
533 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
538 tmpname='%s.plc-config-tty'%(self.name())
539 fileconf=open(tmpname,'w')
540 for var in [ 'PLC_NAME',
545 'PLC_MAIL_SUPPORT_ADDRESS',
548 # Above line was added for integrating SFA Testing
554 'PLC_RESERVATION_GRANULARITY',
557 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
558 fileconf.write('w\n')
559 fileconf.write('q\n')
561 utils.system('cat %s'%tmpname)
562 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
563 utils.system('rm %s'%tmpname)
568 self.run_in_guest('service plc start')
573 self.run_in_guest('service plc stop')
577 "start the PLC vserver"
581 # stores the keys from the config for further use
582 def store_keys(self):
583 "stores test users ssh keys in keys/"
584 for key_spec in self.plc_spec['keys']:
585 TestKey(self,key_spec).store_key()
588 def clean_keys(self):
589 "removes keys cached in keys/"
590 utils.system("rm -rf %s/keys/"%os.path(sys.argv[0]))
592 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
593 # for later direct access to the nodes
594 def fetch_keys(self):
595 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
597 if not os.path.isdir(dir):
599 vservername=self.vservername
601 prefix = 'debug_ssh_key'
602 for ext in [ 'pub', 'rsa' ] :
603 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
604 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
605 if self.test_ssh.fetch(src,dst) != 0: overall=False
609 "create sites with PLCAPI"
610 return self.do_sites()
def clean_sites (self):
    "delete sites with PLCAPI"
    # thin wrapper around the generic worker, in delete mode
    mode = "delete"
    return self.do_sites(action=mode)
616 def do_sites (self,action="add"):
617 for site_spec in self.plc_spec['sites']:
618 test_site = TestSite (self,site_spec)
619 if (action != "add"):
620 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
621 test_site.delete_site()
622 # deleted with the site
623 #test_site.delete_users()
626 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
627 test_site.create_site()
628 test_site.create_users()
631 def clean_all_sites (self):
632 "Delete all sites in PLC, and related objects"
633 print 'auth_root',self.auth_root()
634 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
635 for site_id in site_ids:
636 print 'Deleting site_id',site_id
637 self.apiserver.DeleteSite(self.auth_root(),site_id)
640 "create nodes with PLCAPI"
641 return self.do_nodes()
def clean_nodes (self):
    "delete nodes with PLCAPI"
    # thin wrapper around the generic worker, in delete mode
    mode = "delete"
    return self.do_nodes(action=mode)
646 def do_nodes (self,action="add"):
647 for site_spec in self.plc_spec['sites']:
648 test_site = TestSite (self,site_spec)
650 utils.header("Deleting nodes in site %s"%test_site.name())
651 for node_spec in site_spec['nodes']:
652 test_node=TestNode(self,test_site,node_spec)
653 utils.header("Deleting %s"%test_node.name())
654 test_node.delete_node()
656 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
657 for node_spec in site_spec['nodes']:
658 utils.pprint('Creating node %s'%node_spec,node_spec)
659 test_node = TestNode (self,test_site,node_spec)
660 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # thin wrapper around the generic worker, in add mode
    action = "add"
    return self.do_nodegroups(action)
def clean_nodegroups (self):
    "delete nodegroups with PLCAPI"
    # thin wrapper around the generic worker, in delete mode
    action = "delete"
    return self.do_nodegroups(action)
def translate_timestamp (start,grain,timestamp):
    "small timestamps (below one year) are relative grain counts from start; larger ones are absolute"
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    "render a POSIX timestamp as a short human-readable UTC string"
    utc_tuple = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', utc_tuple)
681 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
683 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
684 print 'API answered grain=',grain
685 start=(now/grain)*grain
687 # find out all nodes that are reservable
688 nodes=self.all_reservable_nodenames()
690 utils.header ("No reservable node found - proceeding without leases")
693 # attach them to the leases as specified in plc_specs
694 # this is where the 'leases' field gets interpreted as relative of absolute
695 for lease_spec in self.plc_spec['leases']:
696 # skip the ones that come with a null slice id
697 if not lease_spec['slice']: continue
698 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
699 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
700 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
701 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
702 if lease_addition['errors']:
703 utils.header("Cannot create leases, %s"%lease_addition['errors'])
706 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
707 (nodes,lease_spec['slice'],
708 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
709 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
713 def clean_leases (self):
714 "remove all leases in the myplc side"
715 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
716 utils.header("Cleaning leases %r"%lease_ids)
717 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
720 def list_leases (self):
721 "list all leases known to the myplc"
722 leases = self.apiserver.GetLeases(self.auth_root())
725 current=l['t_until']>=now
726 if self.options.verbose or current:
727 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
728 TestPlc.timestamp_printable(l['t_from']),
729 TestPlc.timestamp_printable(l['t_until'])))
732 # create nodegroups if needed, and populate
733 def do_nodegroups (self, action="add"):
734 # 1st pass to scan contents
736 for site_spec in self.plc_spec['sites']:
737 test_site = TestSite (self,site_spec)
738 for node_spec in site_spec['nodes']:
739 test_node=TestNode (self,test_site,node_spec)
740 if node_spec.has_key('nodegroups'):
741 nodegroupnames=node_spec['nodegroups']
742 if isinstance(nodegroupnames,StringTypes):
743 nodegroupnames = [ nodegroupnames ]
744 for nodegroupname in nodegroupnames:
745 if not groups_dict.has_key(nodegroupname):
746 groups_dict[nodegroupname]=[]
747 groups_dict[nodegroupname].append(test_node.name())
748 auth=self.auth_root()
750 for (nodegroupname,group_nodes) in groups_dict.iteritems():
752 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
753 # first, check if the nodetagtype is here
754 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
756 tag_type_id = tag_types[0]['tag_type_id']
758 tag_type_id = self.apiserver.AddTagType(auth,
759 {'tagname':nodegroupname,
760 'description': 'for nodegroup %s'%nodegroupname,
763 print 'located tag (type)',nodegroupname,'as',tag_type_id
765 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
767 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
768 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
769 # set node tag on all nodes, value='yes'
770 for nodename in group_nodes:
772 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
774 traceback.print_exc()
775 print 'node',nodename,'seems to already have tag',nodegroupname
778 expect_yes = self.apiserver.GetNodeTags(auth,
779 {'hostname':nodename,
780 'tagname':nodegroupname},
781 ['value'])[0]['value']
782 if expect_yes != "yes":
783 print 'Mismatch node tag on node',nodename,'got',expect_yes
786 if not self.options.dry_run:
787 print 'Cannot find tag',nodegroupname,'on node',nodename
791 print 'cleaning nodegroup',nodegroupname
792 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
794 traceback.print_exc()
798 # return a list of tuples (nodename,qemuname)
799 def all_node_infos (self) :
801 for site_spec in self.plc_spec['sites']:
802 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
803 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    # hostnames only, dropping the qemu box of each (hostname,qemubox) pair
    return [ hostname for (hostname, qemubox) in self.all_node_infos() ]
807 def all_reservable_nodenames (self):
809 for site_spec in self.plc_spec['sites']:
810 for node_spec in site_spec['nodes']:
811 node_fields=node_spec['node_fields']
812 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
813 res.append(node_fields['hostname'])
816 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
817 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
818 if self.options.dry_run:
822 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
823 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
824 # the nodes that haven't checked yet - start with a full list and shrink over time
825 tocheck = self.all_hostnames()
826 utils.header("checking nodes %r"%tocheck)
827 # create a dict hostname -> status
828 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
831 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
833 for array in tocheck_status:
834 hostname=array['hostname']
835 boot_state=array['boot_state']
836 if boot_state == target_boot_state:
837 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
839 # if it's a real node, never mind
840 (site_spec,node_spec)=self.locate_hostname(hostname)
841 if TestNode.is_real_model(node_spec['node_fields']['model']):
842 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
844 boot_state = target_boot_state
845 elif datetime.datetime.now() > graceout:
846 utils.header ("%s still in '%s' state"%(hostname,boot_state))
847 graceout=datetime.datetime.now()+datetime.timedelta(1)
848 status[hostname] = boot_state
850 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
853 if datetime.datetime.now() > timeout:
854 for hostname in tocheck:
855 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
857 # otherwise, sleep for a while
859 # only useful in empty plcs
def nodes_booted(self):
    # shorthand: wait for every node to reach the 'boot' state
    target = 'boot'
    return self.nodes_check_boot_state(target, timeout_minutes=30, silent_minutes=20)
865 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
867 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
868 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
869 vservername=self.vservername
872 local_key = "keys/%(vservername)s-debug.rsa"%locals()
875 local_key = "keys/key1.rsa"
876 node_infos = self.all_node_infos()
877 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
878 for (nodename,qemuname) in node_infos:
879 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
880 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
881 (timeout_minutes,silent_minutes,period))
883 for node_info in node_infos:
884 (hostname,qemuname) = node_info
885 # try to run 'hostname' in the node
886 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
887 # don't spam logs - show the command only after the grace period
888 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
890 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
892 node_infos.remove(node_info)
894 # we will have tried real nodes once, in case they're up - but if not, just skip
895 (site_spec,node_spec)=self.locate_hostname(hostname)
896 if TestNode.is_real_model(node_spec['node_fields']['model']):
897 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
898 node_infos.remove(node_info)
901 if datetime.datetime.now() > timeout:
902 for (hostname,qemuname) in node_infos:
903 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
905 # otherwise, sleep for a while
907 # only useful in empty plcs
def nodes_ssh_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # debug mode comes up faster, hence the shorter timeouts
    return self.check_nodes_ssh(debug=True, timeout_minutes=10, silent_minutes=5)
def nodes_ssh_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # a full boot takes longer, hence the larger timeouts
    return self.check_nodes_ssh(debug=False, timeout_minutes=40, silent_minutes=15)
919 def init_node (self):
920 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
924 "all nodes: invoke GetBootMedium and store result locally"
927 def configure_qemu (self):
928 "all nodes: compute qemu config qemu.conf and store it locally"
931 def reinstall_node (self):
932 "all nodes: mark PLCAPI boot_state as reinstall"
935 def export_qemu (self):
936 "all nodes: push local node-dep directory on the qemu box"
939 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the node-side hook scripts on the first node
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the slice-side hook scripts in the first sliver
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuits like the boolean 'and': sliver hooks only run when node hooks pass
    if not self.check_hooks_node():
        return False
    return self.check_hooks_sliver()
950 def do_check_initscripts(self):
952 for slice_spec in self.plc_spec['slices']:
953 if not slice_spec.has_key('initscriptname'):
955 initscript=slice_spec['initscriptname']
956 for nodename in slice_spec['nodenames']:
957 (site,node) = self.locate_node (nodename)
958 # xxx - passing the wrong site - probably harmless
959 test_site = TestSite (self,site)
960 test_slice = TestSlice (self,test_site,slice_spec)
961 test_node = TestNode (self,test_site,node)
962 test_sliver = TestSliver (self, test_node, test_slice)
963 if not test_sliver.check_initscript(initscript):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # delegate to the worker that scans all slices/nodes
    worker = self.do_check_initscripts
    return worker()
971 def initscripts (self):
972 "create initscripts with PLCAPI"
973 for initscript in self.plc_spec['initscripts']:
974 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
975 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
978 def clean_initscripts (self):
979 "delete initscripts with PLCAPI"
980 for initscript in self.plc_spec['initscripts']:
981 initscript_name = initscript['initscript_fields']['name']
982 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
984 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
985 print initscript_name,'deleted'
987 print 'deletion went wrong - probably did not exist'
992 "create slices with PLCAPI"
993 return self.do_slices()
def clean_slices (self):
    "delete slices with PLCAPI"
    # thin wrapper around the generic worker, in delete mode
    return self.do_slices(action="delete")
999 def do_slices (self, action="add"):
1000 for slice in self.plc_spec['slices']:
1001 site_spec = self.locate_site (slice['sitename'])
1002 test_site = TestSite(self,site_spec)
1003 test_slice=TestSlice(self,test_site,slice)
1005 utils.header("Deleting slices in site %s"%test_site.name())
1006 test_slice.delete_slice()
1008 utils.pprint("Creating slice",slice)
1009 test_slice.create_slice()
1010 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1014 def check_slice(self):
1015 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1019 def clear_known_hosts (self):
1020 "remove test nodes entries from the local known_hosts file"
1024 def start_node (self) :
1025 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1028 def check_tcp (self):
1029 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1030 specs = self.plc_spec['tcp_test']
1035 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1036 if not s_test_sliver.run_tcp_server(port,timeout=10):
1040 # idem for the client side
1041 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1042 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1046 def plcsh_stress_test (self):
1047 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1048 # install the stress-test in the plc image
1049 location = "/usr/share/plc_api/plcsh_stress_test.py"
1050 remote="/vservers/%s/%s"%(self.vservername,location)
1051 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1053 command += " -- --check"
1054 if self.options.size == 1:
1055 command += " --tiny"
1056 return ( self.run_in_guest(command) == 0)
    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
1063 def install_sfa(self):
1064 "yum install sfa, sfa-plc and sfa-client"
1066 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1067 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
1070 def dbclean_sfa(self):
1071 "thoroughly wipes off the SFA database"
1072 self.run_in_guest("sfa-nuke-plc.py")==0
1075 def plcclean_sfa(self):
1076 "cleans the PLC entries that were created as a side effect of running the script"
1078 sfa_spec=self.plc_spec['sfa']
1080 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1081 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1082 except: print "Slice %s already absent from PLC db"%slicename
1084 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1085 try: self.apiserver.DeletePerson(self.auth_root(),username)
1086 except: print "User %s already absent from PLC db"%username
1088 print "REMEMBER TO RUN import_sfa AGAIN"
1091 def uninstall_sfa(self):
1092 "uses rpm to uninstall sfa - ignore result"
1093 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1094 self.run_in_guest("rm -rf /var/lib/sfa")
1095 self.run_in_guest("rm -rf /etc/sfa")
1096 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1098 self.run_in_guest("rpm -e --noscripts sfa-plc")
1103 dirname="conf.%s"%self.plc_spec['name']
1104 if not os.path.isdir(dirname):
1105 utils.system("mkdir -p %s"%dirname)
1106 if not os.path.isdir(dirname):
1107 raise "Cannot create config dir for plc %s"%self.name()
1110 def conffile(self,filename):
1111 return "%s/%s"%(self.confdir(),filename)
1112 def confsubdir(self,dirname,clean):
1113 subdirname="%s/%s"%(self.confdir(),dirname)
1115 utils.system("rm -rf %s"%subdirname)
1116 if not os.path.isdir(subdirname):
1117 utils.system("mkdir -p %s"%subdirname)
1118 if not os.path.isdir(subdirname):
1119 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1122 def conffile_clean (self,filename):
1123 filename=self.conffile(filename)
1124 return utils.system("rm -rf %s"%filename)==0
1127 def configure_sfa(self):
1128 "run sfa-config-tty"
1129 tmpname=self.conffile("sfa-config-tty")
1130 fileconf=open(tmpname,'w')
1131 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1132 'SFA_INTERFACE_HRN',
1133 # 'SFA_REGISTRY_LEVEL1_AUTH',
1134 'SFA_REGISTRY_HOST',
1135 'SFA_AGGREGATE_HOST',
1141 'SFA_PLC_DB_PASSWORD',
1144 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1145 # the way plc_config handles booleans just sucks..
1146 for var in ['SFA_API_DEBUG']:
1148 if self.plc_spec['sfa'][var]: val='true'
1149 fileconf.write ('e %s\n%s\n'%(var,val))
1150 fileconf.write('w\n')
1151 fileconf.write('R\n')
1152 fileconf.write('q\n')
1154 utils.system('cat %s'%tmpname)
1155 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1158 def aggregate_xml_line(self):
1159 return '<aggregate addr="%s" hrn="%s" port="12346"/>' % \
1160 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1162 def registry_xml_line(self):
1163 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1164 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1167 # a cross step that takes all other plcs in argument
1168 def cross_configure_sfa(self, other_plcs):
1169 # of course with a single plc, other_plcs is an empty list
1172 agg_fname=self.conffile("agg.xml")
1173 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1174 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1175 utils.header ("(Over)wrote %s"%agg_fname)
1176 reg_fname=self.conffile("reg.xml")
1177 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1178 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1179 utils.header ("(Over)wrote %s"%reg_fname)
1180 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1181 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
1183 def import_sfa(self):
1185 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1186 return self.run_in_guest('sfa-import-plc.py')==0
1187 # not needed anymore
1188 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def start_sfa(self):
        "service sfa start"
        # step: start the sfa service in the guest; True when the init script succeeds
        return self.run_in_guest('service sfa start')==0
1194 def configure_sfi(self):
1195 sfa_spec=self.plc_spec['sfa']
1196 "sfi client configuration"
1197 dir_name=self.confsubdir("dot-sfi",clean=True)
1198 file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
1199 fileconf=open(file_name,'w')
1200 fileconf.write (self.plc_spec['keys'][0]['private'])
1202 utils.header ("(Over)wrote %s"%file_name)
1204 file_name=dir_name + os.sep + 'sfi_config'
1205 fileconf=open(file_name,'w')
1206 SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
1207 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1208 fileconf.write('\n')
1209 SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
1210 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1211 fileconf.write('\n')
1212 SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
1213 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1214 fileconf.write('\n')
1215 SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
1216 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1217 fileconf.write('\n')
1219 utils.header ("(Over)wrote %s"%file_name)
1221 file_name=dir_name + os.sep + 'person.xml'
1222 fileconf=open(file_name,'w')
1223 for record in sfa_spec['sfa_person_xml']:
1224 person_record=record
1225 fileconf.write(person_record)
1226 fileconf.write('\n')
1228 utils.header ("(Over)wrote %s"%file_name)
1230 file_name=dir_name + os.sep + 'slice.xml'
1231 fileconf=open(file_name,'w')
1232 for record in sfa_spec['sfa_slice_xml']:
1234 #slice_record=sfa_spec['sfa_slice_xml']
1235 fileconf.write(slice_record)
1236 fileconf.write('\n')
1237 utils.header ("(Over)wrote %s"%file_name)
1240 file_name=dir_name + os.sep + 'slice.rspec'
1241 fileconf=open(file_name,'w')
1243 for (key, value) in sfa_spec['sfa_slice_rspec'].items():
1245 fileconf.write(slice_rspec)
1246 fileconf.write('\n')
1248 utils.header ("(Over)wrote %s"%file_name)
1250 # push to the remote root's .sfi
1251 location = "root/.sfi"
1252 remote="/vservers/%s/%s"%(self.vservername,location)
1253 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
1257 def clean_sfi (self):
1258 self.run_in_guest("rm -rf /root/.sfi")
1263 "run sfi.py add (on Registry)"
    def create_sfa(self):
        "run sfi.py create (on SM) for 1st-time creation"
        # NOTE(review): body missing in this copy (docstring-only def);
        # presumably delegates to the SFA slice test object - TODO confirm
    def update_sfa(self):
        "run sfi.py create (on SM) on existing object"
        # NOTE(review): body missing in this copy (docstring-only def);
        # presumably delegates to the SFA slice test object - TODO confirm
1277 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1278 sfa_spec=self.plc_spec['sfa']
1279 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1281 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1282 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1283 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1284 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
    def check_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        # NOTE(review): body missing in this copy (docstring-only def); presumably
        # driven by the slice_sfa_mapper decorator from the top of the file - TODO confirm
1291 def delete_sfa_user(self):
1292 "run sfi.py delete (on SM) for user"
1293 test_user_sfa=TestUserSfa(self)
1294 return test_user_sfa.delete_user()
    def delete_sfa_slices(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        # NOTE(review): body missing in this copy (docstring-only def);
        # presumably delegates to the SFA slice test object - TODO confirm
1303 self.run_in_guest('service sfa stop')==0
1306 def populate (self):
1307 "creates random entries in the PLCAPI"
1308 # install the stress-test in the plc image
1309 location = "/usr/share/plc_api/plcsh_stress_test.py"
1310 remote="/vservers/%s/%s"%(self.vservername,location)
1311 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1313 command += " -- --preserve --short-names"
1314 local = (self.run_in_guest(command) == 0);
1315 # second run with --foreign
1316 command += ' --foreign'
1317 remote = (self.run_in_guest(command) == 0);
1318 return ( local and remote)
1320 def gather_logs (self):
1321 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1322 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1323 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1324 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1325 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1326 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1328 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1329 self.gather_var_logs ()
1331 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1332 self.gather_pgsql_logs ()
1334 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1335 for site_spec in self.plc_spec['sites']:
1336 test_site = TestSite (self,site_spec)
1337 for node_spec in site_spec['nodes']:
1338 test_node=TestNode(self,test_site,node_spec)
1339 test_node.gather_qemu_logs()
1341 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1342 self.gather_nodes_var_logs()
1344 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1345 self.gather_slivers_var_logs()
1348 def gather_slivers_var_logs(self):
1349 for test_sliver in self.all_sliver_objs():
1350 remote = test_sliver.tar_var_logs()
1351 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1352 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1353 utils.system(command)
1356 def gather_var_logs (self):
1357 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1358 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1359 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1360 utils.system(command)
1361 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1362 utils.system(command)
1364 def gather_pgsql_logs (self):
1365 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1366 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1367 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1368 utils.system(command)
1370 def gather_nodes_var_logs (self):
1371 for site_spec in self.plc_spec['sites']:
1372 test_site = TestSite (self,site_spec)
1373 for node_spec in site_spec['nodes']:
1374 test_node=TestNode(self,test_site,node_spec)
1375 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1376 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1377 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1378 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1379 utils.system(command)
1382 # returns the filename to use for sql dump/restore, using options.dbname if set
1383 def dbfile (self, database):
1384 # uses options.dbname if it is found
1386 name=self.options.dbname
1387 if not isinstance(name,StringTypes):
1390 t=datetime.datetime.now()
1393 return "/root/%s-%s.sql"%(database,name)
1396 'dump the planetlab5 DB in /root in the PLC - filename has time'
1397 dump=self.dbfile("planetab5")
1398 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1399 utils.header('Dumped planetlab5 database in %s'%dump)
1402 def db_restore(self):
1403 'restore the planetlab5 DB - looks broken, but run -n might help'
1404 dump=self.dbfile("planetab5")
1405 ##stop httpd service
1406 self.run_in_guest('service httpd stop')
1407 # xxx - need another wrapper
1408 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1409 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1410 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1411 ##starting httpd service
1412 self.run_in_guest('service httpd start')
1414 utils.header('Database restored from ' + dump)
    # NOTE(review): upstream, each of these is decorated with @standby_generic,
    # which (see the standby/standby_generic helpers at the top of the file)
    # turns standby_<N> into a step that sleeps N minutes; the decorator lines
    # appear to have been lost in this copy - TODO restore '@standby_generic'
    # above each def below.
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass