1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
23 from TestUserSfa import TestUserSfa
25 # step methods must take (self) and return a boolean (options is a member of the class)
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
41 def node_mapper (method):
44 node_method = TestNode.__dict__[method.__name__]
45 for site_spec in self.plc_spec['sites']:
46 test_site = TestSite (self,site_spec)
47 for node_spec in site_spec['nodes']:
48 test_node = TestNode (self,test_site,node_spec)
49 if not node_method(test_node): overall=False
51 # restore the doc text
52 actual.__doc__=method.__doc__
55 def slice_mapper (method):
58 slice_method = TestSlice.__dict__[method.__name__]
59 for slice_spec in self.plc_spec['slices']:
60 site_spec = self.locate_site (slice_spec['sitename'])
61 test_site = TestSite(self,site_spec)
62 test_slice=TestSlice(self,test_site,slice_spec)
63 if not slice_method(test_slice,self.options): overall=False
65 # restore the doc text
66 actual.__doc__=method.__doc__
69 def slice_sfa_mapper (method):
72 slice_method = TestSliceSfa.__dict__[method.__name__]
73 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
74 site_spec = self.locate_site (slice_spec['sitename'])
75 test_site = TestSite(self,site_spec)
76 test_slice=TestSliceSfa(self,test_site,slice_spec)
77 if not slice_method(test_slice,self.options): overall=False
79 # restore the doc text
80 actual.__doc__=method.__doc__
90 'vs_delete','vs_create','timestamp_vs', SEP,
91 'plc_install', 'plc_configure', 'plc_start', SEP,
92 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
93 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
94 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
95 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
96 'sfa_install', 'sfa_configure', 'cross_sfa_configure', 'sfa_import', 'sfa_start', SEPSFA,
97 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
98 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
99 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
100 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
101 # but as the stress test might take a while, we sometimes missed the debug mode..
102 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
103 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
104 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
106 'force_gather_logs', SEP,
111 'show_boxes', 'local_list','local_rel','local_rel_plc','local_rel_qemu',SEP,
112 'plc_stop', 'vs_start', 'vs_stop', SEP,
113 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
114 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
115 'delete_leases', 'list_leases', SEP,
117 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
118 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
119 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEP,
120 'plc_db_dump' , 'plc_db_restore', SEP,
121 'standby_1 through 20',SEP,
125 def printable_steps (list):
126 single_line=" ".join(list)+" "
127 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
129 def valid_step (step):
130 return step != SEP and step != SEPSFA
132 # turn off the sfa-related steps when build has skipped SFA
133 # this is originally for centos5 as recent SFAs won't build on this platform
135 def check_whether_build_has_sfa (rpms_url):
136 # warning, we're now building 'sface' so let's be a bit more picky
137 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
138 # full builds are expected to return with 0 here
140 # move all steps containing 'sfa' from default_steps to other_steps
141 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
142 TestPlc.other_steps += sfa_steps
143 for step in sfa_steps: TestPlc.default_steps.remove(step)
145 def __init__ (self,plc_spec,options):
146 self.plc_spec=plc_spec
148 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
150 self.vserverip=plc_spec['vserverip']
151 self.vservername=plc_spec['vservername']
152 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
155 raise Exception,'chroot-based myplc testing is deprecated'
156 self.apiserver=TestApiserver(self.url,options.dry_run)
159 name=self.plc_spec['name']
160 return "%s.%s"%(name,self.vservername)
163 return self.plc_spec['host_box']
166 return self.test_ssh.is_local()
168 # define the API methods on this object through xmlrpc
169 # would help, but not strictly necessary
173 def actual_command_in_guest (self,command):
174 return self.test_ssh.actual_command(self.host_to_guest(command))
176 def start_guest (self):
177 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
179 def stop_guest (self):
180 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
182 def run_in_guest (self,command):
183 return utils.system(self.actual_command_in_guest(command))
185 def run_in_host (self,command):
186 return self.test_ssh.run_in_buildname(command)
188 #command gets run in the vserver
189 def host_to_guest(self,command):
190 return "vserver %s exec %s"%(self.vservername,command)
192 #start/stop the vserver
193 def start_guest_in_host(self):
194 return "vserver %s start"%(self.vservername)
196 def stop_guest_in_host(self):
197 return "vserver %s stop"%(self.vservername)
200 def run_in_guest_piped (self,local,remote):
201 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
203 def auth_root (self):
204 return {'Username':self.plc_spec['PLC_ROOT_USER'],
205 'AuthMethod':'password',
206 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
207 'Role' : self.plc_spec['role']
209 def locate_site (self,sitename):
210 for site in self.plc_spec['sites']:
211 if site['site_fields']['name'] == sitename:
213 if site['site_fields']['login_base'] == sitename:
215 raise Exception,"Cannot locate site %s"%sitename
217 def locate_node (self,nodename):
218 for site in self.plc_spec['sites']:
219 for node in site['nodes']:
220 if node['name'] == nodename:
222 raise Exception,"Cannot locate node %s"%nodename
224 def locate_hostname (self,hostname):
225 for site in self.plc_spec['sites']:
226 for node in site['nodes']:
227 if node['node_fields']['hostname'] == hostname:
229 raise Exception,"Cannot locate hostname %s"%hostname
231 def locate_key (self,keyname):
232 for key in self.plc_spec['keys']:
233 if key['name'] == keyname:
235 raise Exception,"Cannot locate key %s"%keyname
237 def locate_slice (self, slicename):
238 for slice in self.plc_spec['slices']:
239 if slice['slice_fields']['name'] == slicename:
241 raise Exception,"Cannot locate slice %s"%slicename
243 def all_sliver_objs (self):
245 for slice_spec in self.plc_spec['slices']:
246 slicename = slice_spec['slice_fields']['name']
247 for nodename in slice_spec['nodenames']:
248 result.append(self.locate_sliver_obj (nodename,slicename))
251 def locate_sliver_obj (self,nodename,slicename):
252 (site,node) = self.locate_node(nodename)
253 slice = self.locate_slice (slicename)
255 test_site = TestSite (self, site)
256 test_node = TestNode (self, test_site,node)
257 # xxx the slice site is assumed to be the node site - mhh - probably harmless
258 test_slice = TestSlice (self, test_site, slice)
259 return TestSliver (self, test_node, test_slice)
261 def locate_first_node(self):
262 nodename=self.plc_spec['slices'][0]['nodenames'][0]
263 (site,node) = self.locate_node(nodename)
264 test_site = TestSite (self, site)
265 test_node = TestNode (self, test_site,node)
268 def locate_first_sliver (self):
269 slice_spec=self.plc_spec['slices'][0]
270 slicename=slice_spec['slice_fields']['name']
271 nodename=slice_spec['nodenames'][0]
272 return self.locate_sliver_obj(nodename,slicename)
274 # all different hostboxes used in this plc
275 def gather_hostBoxes(self):
276 # maps on sites and nodes, return [ (host_box,test_node) ]
278 for site_spec in self.plc_spec['sites']:
279 test_site = TestSite (self,site_spec)
280 for node_spec in site_spec['nodes']:
281 test_node = TestNode (self, test_site, node_spec)
282 if not test_node.is_real():
283 tuples.append( (test_node.host_box(),test_node) )
284 # transform into a dict { 'host_box' -> [ test_node .. ] }
286 for (box,node) in tuples:
287 if not result.has_key(box):
290 result[box].append(node)
293 # a step for checking this stuff
294 def show_boxes (self):
295 'print summary of nodes location'
296 for (box,nodes) in self.gather_hostBoxes().iteritems():
297 print box,":"," + ".join( [ node.name() for node in nodes ] )
300 # make this a valid step
301 def qemu_kill_all(self):
302 'kill all qemu instances on the qemu boxes involved by this setup'
303 # this is the brute force version, kill all qemus on that host box
304 for (box,nodes) in self.gather_hostBoxes().iteritems():
305 # pass the first nodename, as we don't push template-qemu on testboxes
306 nodedir=nodes[0].nodedir()
307 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
310 # make this a valid step
311 def qemu_list_all(self):
312 'list all qemu instances on the qemu boxes involved by this setup'
313 for (box,nodes) in self.gather_hostBoxes().iteritems():
314 # this is the brute force version, kill all qemus on that host box
315 TestBoxQemu(box,self.options.buildname).qemu_list_all()
318 # kill only the right qemus
319 def qemu_list_mine(self):
320 'list qemu instances for our nodes'
321 for (box,nodes) in self.gather_hostBoxes().iteritems():
322 # the fine-grain version
327 # kill only the right qemus
328 def qemu_kill_mine(self):
329 'kill the qemu instances for our nodes'
330 for (box,nodes) in self.gather_hostBoxes().iteritems():
331 # the fine-grain version
336 #################### display config
338 "show test configuration after localization"
339 self.display_pass (1)
340 self.display_pass (2)
344 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
345 def display_pass (self,passno):
346 for (key,val) in self.plc_spec.iteritems():
347 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
351 self.display_site_spec(site)
352 for node in site['nodes']:
353 self.display_node_spec(node)
354 elif key=='initscripts':
355 for initscript in val:
356 self.display_initscript_spec (initscript)
359 self.display_slice_spec (slice)
362 self.display_key_spec (key)
364 if key not in ['sites','initscripts','slices','keys', 'sfa']:
365 print '+ ',key,':',val
367 def display_site_spec (self,site):
368 print '+ ======== site',site['site_fields']['name']
369 for (k,v) in site.iteritems():
370 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
373 print '+ ','nodes : ',
375 print node['node_fields']['hostname'],'',
381 print user['name'],'',
383 elif k == 'site_fields':
384 print '+ login_base',':',v['login_base']
385 elif k == 'address_fields':
    def display_initscript_spec (self,initscript):
        # one-line summary: only the initscript name is interesting here
        print '+ ======== initscript',initscript['initscript_fields']['name']
    def display_key_spec (self,key):
        # one-line summary: only the key name is interesting here
        print '+ ======== key',key['name']
397 def display_slice_spec (self,slice):
398 print '+ ======== slice',slice['slice_fields']['name']
399 for (k,v) in slice.iteritems():
412 elif k=='slice_fields':
413 print '+ fields',':',
414 print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        # one-line summary; the trailing commas keep the three prints on a single line
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        # in verbose mode, additionally dump the full node spec (3 levels deep)
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        # delegate to the static per-spec renderer
        TestPlc.display_mapping_plc(self.plc_spec)
    def display_mapping_plc (plc_spec):
        # no 'self': presumably declared as a @staticmethod (decorator not visible here) - TODO confirm
        # print where the myplc vserver lives and where each node's qemu runs
        print '+ MyPLC',plc_spec['name']
        # the guest filesystem lives under /vservers/<vservername> on the host box
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        # no 'self': presumably declared as a @staticmethod (decorator not visible here) - TODO confirm
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
446 # write a timestamp in /vservers/<>/
447 def timestamp_vs (self):
449 utils.system(self.test_ssh.actual_command("mkdir -p /vservers/%s"%self.vservername))
450 return utils.system(self.test_ssh.actual_command("echo %d > /vservers/%s/timestamp"%(now,self.vservername)))==0
452 def local_pre (self):
453 "run site-dependant pre-test script as defined in LocalTestResources"
454 from LocalTestResources import local_resources
455 return local_resources.step_pre(self)
457 def local_post (self):
458 "run site-dependant post-test script as defined in LocalTestResources"
459 from LocalTestResources import local_resources
460 return local_resources.step_post(self)
462 def local_list (self):
463 "run site-dependant list script as defined in LocalTestResources"
464 from LocalTestResources import local_resources
465 return local_resources.step_list(self)
467 def local_rel (self):
468 "run site-dependant release script as defined in LocalTestResources"
469 from LocalTestResources import local_resources
470 return local_resources.step_release(self)
472 def local_rel_plc (self):
473 "run site-dependant release script as defined in LocalTestResources"
474 from LocalTestResources import local_resources
475 return local_resources.step_release_plc(self)
477 def local_rel_qemu (self):
478 "run site-dependant release script as defined in LocalTestResources"
479 from LocalTestResources import local_resources
480 return local_resources.step_release_qemu(self)
483 "vserver delete the test myplc"
484 self.run_in_host("vserver --silent %s delete"%self.vservername)
488 # historically the build was being fetched by the tests
489 # now the build pushes itself as a subdir of the tests workdir
490 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
491 def vs_create (self):
492 "vserver creation (no install done)"
493 # push the local build/ dir to the testplc box
495 # a full path for the local calls
496 build_dir=os.path.dirname(sys.argv[0])
497 # sometimes this is empty - set to "." in such a case
498 if not build_dir: build_dir="."
499 build_dir += "/build"
501 # use a standard name - will be relative to remote buildname
503 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
504 self.test_ssh.rmdir(build_dir)
505 self.test_ssh.copy(build_dir,recursive=True)
506 # the repo url is taken from arch-rpms-url
507 # with the last step (i386) removed
508 repo_url = self.options.arch_rpms_url
509 for level in [ 'arch' ]:
510 repo_url = os.path.dirname(repo_url)
511 # pass the vbuild-nightly options to vtest-init-vserver
513 test_env_options += " -p %s"%self.options.personality
514 test_env_options += " -d %s"%self.options.pldistro
515 test_env_options += " -f %s"%self.options.fcdistro
516 script="vtest-init-vserver.sh"
517 vserver_name = self.vservername
518 vserver_options="--netdev eth0 --interface %s"%self.vserverip
520 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
521 vserver_options += " --hostname %s"%vserver_hostname
523 print "Cannot reverse lookup %s"%self.vserverip
524 print "This is considered fatal, as this might pollute the test results"
526 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
527 return self.run_in_host(create_vserver) == 0
530 def plc_install(self):
531 "yum install myplc, noderepo, and the plain bootstrapfs"
533 # workaround for getting pgsql8.2 on centos5
534 if self.options.fcdistro == "centos5":
535 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
538 if self.options.personality == "linux32":
540 elif self.options.personality == "linux64":
543 raise Exception, "Unsupported personality %r"%self.options.personality
544 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
547 pkgs_list.append ("slicerepo-%s"%nodefamily)
548 pkgs_list.append ("myplc")
549 pkgs_list.append ("noderepo-%s"%nodefamily)
550 pkgs_list.append ("bootstrapfs-%s-plain"%nodefamily)
551 pkgs_string=" ".join(pkgs_list)
552 self.run_in_guest("yum -y install %s"%pkgs_string)
553 return self.run_in_guest("rpm -q %s"%pkgs_string)==0
556 def plc_configure(self):
558 tmpname='%s.plc-config-tty'%(self.name())
559 fileconf=open(tmpname,'w')
560 for var in [ 'PLC_NAME',
565 'PLC_MAIL_SUPPORT_ADDRESS',
568 # Above line was added for integrating SFA Testing
574 'PLC_RESERVATION_GRANULARITY',
576 'PLC_OMF_XMPP_SERVER',
578 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
579 fileconf.write('w\n')
580 fileconf.write('q\n')
582 utils.system('cat %s'%tmpname)
583 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
584 utils.system('rm %s'%tmpname)
589 self.run_in_guest('service plc start')
594 self.run_in_guest('service plc stop')
598 "start the PLC vserver"
603 "stop the PLC vserver"
607 # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        # one TestKey per key spec; store_key presumably writes it locally under keys/ - TODO confirm
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
    def keys_clean(self):
        "removes keys cached in keys/"
        # brute force: wipe the whole local keys/ directory
        utils.system("rm -rf ./keys")
619 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
620 # for later direct access to the nodes
621 def keys_fetch(self):
622 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
624 if not os.path.isdir(dir):
626 vservername=self.vservername
628 prefix = 'debug_ssh_key'
629 for ext in [ 'pub', 'rsa' ] :
630 src="/vservers/%(vservername)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
631 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
632 if self.test_ssh.fetch(src,dst) != 0: overall=False
636 "create sites with PLCAPI"
637 return self.do_sites()
639 def delete_sites (self):
640 "delete sites with PLCAPI"
641 return self.do_sites(action="delete")
643 def do_sites (self,action="add"):
644 for site_spec in self.plc_spec['sites']:
645 test_site = TestSite (self,site_spec)
646 if (action != "add"):
647 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
648 test_site.delete_site()
649 # deleted with the site
650 #test_site.delete_users()
653 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
654 test_site.create_site()
655 test_site.create_users()
658 def delete_all_sites (self):
659 "Delete all sites in PLC, and related objects"
660 print 'auth_root',self.auth_root()
661 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
662 for site_id in site_ids:
663 print 'Deleting site_id',site_id
664 self.apiserver.DeleteSite(self.auth_root(),site_id)
668 "create nodes with PLCAPI"
669 return self.do_nodes()
670 def delete_nodes (self):
671 "delete nodes with PLCAPI"
672 return self.do_nodes(action="delete")
674 def do_nodes (self,action="add"):
675 for site_spec in self.plc_spec['sites']:
676 test_site = TestSite (self,site_spec)
678 utils.header("Deleting nodes in site %s"%test_site.name())
679 for node_spec in site_spec['nodes']:
680 test_node=TestNode(self,test_site,node_spec)
681 utils.header("Deleting %s"%test_node.name())
682 test_node.delete_node()
684 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
685 for node_spec in site_spec['nodes']:
686 utils.pprint('Creating node %s'%node_spec,node_spec)
687 test_node = TestNode (self,test_site,node_spec)
688 test_node.create_node ()
691 def nodegroups (self):
692 "create nodegroups with PLCAPI"
693 return self.do_nodegroups("add")
694 def delete_nodegroups (self):
695 "delete nodegroups with PLCAPI"
696 return self.do_nodegroups("delete")
700 def translate_timestamp (start,grain,timestamp):
701 if timestamp < TestPlc.YEAR: return start+timestamp*grain
702 else: return timestamp
705 def timestamp_printable (timestamp):
706 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
709 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
711 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
712 print 'API answered grain=',grain
713 start=(now/grain)*grain
715 # find out all nodes that are reservable
716 nodes=self.all_reservable_nodenames()
718 utils.header ("No reservable node found - proceeding without leases")
721 # attach them to the leases as specified in plc_specs
722 # this is where the 'leases' field gets interpreted as relative of absolute
723 for lease_spec in self.plc_spec['leases']:
724 # skip the ones that come with a null slice id
725 if not lease_spec['slice']: continue
726 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
727 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
728 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
729 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
730 if lease_addition['errors']:
731 utils.header("Cannot create leases, %s"%lease_addition['errors'])
734 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
735 (nodes,lease_spec['slice'],
736 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
737 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        # fetch every lease id first, then delete them all in a single API call
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
748 def list_leases (self):
749 "list all leases known to the myplc"
750 leases = self.apiserver.GetLeases(self.auth_root())
753 current=l['t_until']>=now
754 if self.options.verbose or current:
755 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
756 TestPlc.timestamp_printable(l['t_from']),
757 TestPlc.timestamp_printable(l['t_until'])))
760 # create nodegroups if needed, and populate
761 def do_nodegroups (self, action="add"):
762 # 1st pass to scan contents
764 for site_spec in self.plc_spec['sites']:
765 test_site = TestSite (self,site_spec)
766 for node_spec in site_spec['nodes']:
767 test_node=TestNode (self,test_site,node_spec)
768 if node_spec.has_key('nodegroups'):
769 nodegroupnames=node_spec['nodegroups']
770 if isinstance(nodegroupnames,StringTypes):
771 nodegroupnames = [ nodegroupnames ]
772 for nodegroupname in nodegroupnames:
773 if not groups_dict.has_key(nodegroupname):
774 groups_dict[nodegroupname]=[]
775 groups_dict[nodegroupname].append(test_node.name())
776 auth=self.auth_root()
778 for (nodegroupname,group_nodes) in groups_dict.iteritems():
780 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
781 # first, check if the nodetagtype is here
782 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
784 tag_type_id = tag_types[0]['tag_type_id']
786 tag_type_id = self.apiserver.AddTagType(auth,
787 {'tagname':nodegroupname,
788 'description': 'for nodegroup %s'%nodegroupname,
790 print 'located tag (type)',nodegroupname,'as',tag_type_id
792 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
794 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
795 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
796 # set node tag on all nodes, value='yes'
797 for nodename in group_nodes:
799 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
801 traceback.print_exc()
802 print 'node',nodename,'seems to already have tag',nodegroupname
805 expect_yes = self.apiserver.GetNodeTags(auth,
806 {'hostname':nodename,
807 'tagname':nodegroupname},
808 ['value'])[0]['value']
809 if expect_yes != "yes":
810 print 'Mismatch node tag on node',nodename,'got',expect_yes
813 if not self.options.dry_run:
814 print 'Cannot find tag',nodegroupname,'on node',nodename
818 print 'cleaning nodegroup',nodegroupname
819 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
821 traceback.print_exc()
825 # return a list of tuples (nodename,qemuname)
826 def all_node_infos (self) :
828 for site_spec in self.plc_spec['sites']:
829 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
830 for node_spec in site_spec['nodes'] ]
833 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
834 def all_reservable_nodenames (self):
836 for site_spec in self.plc_spec['sites']:
837 for node_spec in site_spec['nodes']:
838 node_fields=node_spec['node_fields']
839 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
840 res.append(node_fields['hostname'])
843 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
844 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
845 if self.options.dry_run:
849 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
850 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
851 # the nodes that haven't checked yet - start with a full list and shrink over time
852 tocheck = self.all_hostnames()
853 utils.header("checking nodes %r"%tocheck)
854 # create a dict hostname -> status
855 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
858 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
860 for array in tocheck_status:
861 hostname=array['hostname']
862 boot_state=array['boot_state']
863 if boot_state == target_boot_state:
864 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
866 # if it's a real node, never mind
867 (site_spec,node_spec)=self.locate_hostname(hostname)
868 if TestNode.is_real_model(node_spec['node_fields']['model']):
869 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
871 boot_state = target_boot_state
872 elif datetime.datetime.now() > graceout:
873 utils.header ("%s still in '%s' state"%(hostname,boot_state))
874 graceout=datetime.datetime.now()+datetime.timedelta(1)
875 status[hostname] = boot_state
877 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
880 if datetime.datetime.now() > timeout:
881 for hostname in tocheck:
882 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
884 # otherwise, sleep for a while
886 # only useful in empty plcs
889 def nodes_booted(self):
890 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=20)
892 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
894 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
895 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
896 vservername=self.vservername
899 local_key = "keys/%(vservername)s-debug.rsa"%locals()
902 local_key = "keys/key1.rsa"
903 node_infos = self.all_node_infos()
904 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
905 for (nodename,qemuname) in node_infos:
906 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
907 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
908 (timeout_minutes,silent_minutes,period))
910 for node_info in node_infos:
911 (hostname,qemuname) = node_info
912 # try to run 'hostname' in the node
913 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
914 # don't spam logs - show the command only after the grace period
915 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
917 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
919 node_infos.remove(node_info)
921 # we will have tried real nodes once, in case they're up - but if not, just skip
922 (site_spec,node_spec)=self.locate_hostname(hostname)
923 if TestNode.is_real_model(node_spec['node_fields']['model']):
924 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
925 node_infos.remove(node_info)
928 if datetime.datetime.now() > timeout:
929 for (hostname,qemuname) in node_infos:
930 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
932 # otherwise, sleep for a while
934 # only useful in empty plcs
937 def ssh_node_debug(self):
938 "Tries to ssh into nodes in debug mode with the debug ssh key"
939 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=5)
941 def ssh_node_boot(self):
942 "Tries to ssh into nodes in production mode with the root ssh key"
943 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=15)
946 def qemu_local_init (self):
947 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
951 "all nodes: invoke GetBootMedium and store result locally"
954 def qemu_local_config (self):
955 "all nodes: compute qemu config qemu.conf and store it locally"
958 def nodestate_reinstall (self):
959 "all nodes: mark PLCAPI boot_state as reinstall"
962 def nodestate_safeboot (self):
963 "all nodes: mark PLCAPI boot_state as safeboot"
966 def nodestate_boot (self):
967 "all nodes: mark PLCAPI boot_state as boot"
970 def nodestate_show (self):
971 "all nodes: show PLCAPI boot_state"
974 def qemu_export (self):
975 "all nodes: push local node-dep directory on the qemu box"
978 ### check hooks : invoke scripts from hooks/{node,slice}
979 def check_hooks_node (self):
980 return self.locate_first_node().check_hooks()
981 def check_hooks_sliver (self) :
982 return self.locate_first_sliver().check_hooks()
984 def check_hooks (self):
985 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
986 return self.check_hooks_node() and self.check_hooks_sliver()
989 def do_check_initscripts(self):
991 for slice_spec in self.plc_spec['slices']:
992 if not slice_spec.has_key('initscriptstamp'):
994 stamp=slice_spec['initscriptstamp']
995 for nodename in slice_spec['nodenames']:
996 (site,node) = self.locate_node (nodename)
997 # xxx - passing the wrong site - probably harmless
998 test_site = TestSite (self,site)
999 test_slice = TestSlice (self,test_site,slice_spec)
1000 test_node = TestNode (self,test_site,node)
1001 test_sliver = TestSliver (self, test_node, test_slice)
1002 if not test_sliver.check_initscript_stamp(stamp):
1006 def check_initscripts(self):
1007 "check that the initscripts have triggered"
1008 return self.do_check_initscripts()
1010 def initscripts (self):
1011 "create initscripts with PLCAPI"
1012 for initscript in self.plc_spec['initscripts']:
1013 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1014 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
# Step: best-effort removal of the spec's initscripts from the PLCAPI.
# NOTE(review): the try:/except: lines wrapping the delete are elided from
# this chunk; the except path below suggests a blanket best-effort catch.
# Python-2 print statements throughout.
1017 def delete_initscripts (self):
1018 "delete initscripts with PLCAPI"
1019 for initscript in self.plc_spec['initscripts']:
1020 initscript_name = initscript['initscript_fields']['name']
1021 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1023 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1024 print initscript_name,'deleted'
1026 print 'deletion went wrong - probably did not exist'
# NOTE(review): fragment — the 'def slices (self):' line is elided from
# this chunk; this is the "add" counterpart of delete_slices below.
1031 "create slices with PLCAPI"
1032 return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same worker as slices(), just driven in delete mode
    return self.do_slices(action="delete")
# Worker for slices()/delete_slices(): create or delete every slice in the
# spec, depending on 'action'.
# NOTE(review): the branch lines selecting delete vs create (and the final
# return) are elided from this chunk. Also 'slice' shadows the builtin.
1038 def do_slices (self, action="add"):
1039 for slice in self.plc_spec['slices']:
1040 site_spec = self.locate_site (slice['sitename'])
1041 test_site = TestSite(self,site_spec)
1042 test_slice=TestSlice(self,test_site,slice)
1044 utils.header("Deleting slices in site %s"%test_site.name())
1045 test_slice.delete_slice()
1047 utils.pprint("Creating slice",slice)
1048 test_slice.create_slice()
1049 utils.header('Created Slice %s'%slice['slice_fields']['name'])
# Mapper-dispatched step stubs; bodies and decorators are elided from this
# chunk (slice_mapper / node_mapper per the helpers at the top of the file).
1053 def ssh_slice(self):
1054 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1058 def keys_clear_known_hosts (self):
1059 "remove test nodes entries from the local known_hosts file"
1063 def qemu_start (self) :
1064 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1068 def timestamp_qemu (self) :
# NOTE(review): this docstring is identical to qemu_start's above — almost
# certainly a copy-paste; it should describe timestamping the qemu instance.
1069 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
# Step: run a tcp server in one sliver and a client in another (or the
# same, in loopback) per the 'tcp_test' specs.
# NOTE(review): the loop header, port extraction and failure returns are
# elided from this chunk.
1072 def check_tcp (self):
1073 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1074 specs = self.plc_spec['tcp_test']
1079 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1080 if not s_test_sliver.run_tcp_server(port,timeout=10):
1084 # idem for the client side
# NOTE(review): probable copy-paste bug — the client sliver is located with
# 'server_node'/'server_slice'; the comment above suggests it should use the
# client-side keys of the spec. Confirm against the spec schema.
1085 c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
1086 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# Step: push plcsh_stress_test.py into the plc image and run it in
# --check mode (non-destructive).
# NOTE(review): the initialization of 'command' (presumably from
# 'location') is elided from this chunk — confirm in the full file.
1090 def plcsh_stress_test (self):
1091 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1092 # install the stress-test in the plc image
1093 location = "/usr/share/plc_api/plcsh_stress_test.py"
1094 remote="/vservers/%s/%s"%(self.vservername,location)
1095 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1097 command += " -- --check"
1098 if self.options.size == 1:
1099 command += " --tiny"
1100 return ( self.run_in_guest(command) == 0)
1102 # populate runs the same utility with slightly different options
1103 # in particular runs with --preserve (dont cleanup) and without --check
1104 # also it gets run twice, once with the --foreign option for creating fake foreign entries
# Step: install the SFA packages in the guest; success is judged by
# rpm -q (yum's own return code is ignored).
# NOTE(review): one interior line (1109) is elided — likely a comment.
1107 def sfa_install(self):
1108 "yum install sfa, sfa-plc and sfa-client"
1110 self.run_in_guest("yum -y install sfa sfa-client sfa-plc sfa-sfatables")
1111 return self.run_in_guest("rpm -q sfa sfa-client sfa-plc sfa-sfatables")==0
# Step: wipe the SFA database inside the guest.
# NOTE(review): the '==0' comparison result is discarded — looks like it
# was meant to be returned; the trailing return line is elided from this
# chunk, so the step presumably always reports success.
1114 def sfa_dbclean(self):
1115 "thoroughly wipes off the SFA database"
1116 self.run_in_guest("sfa-nuke-plc.py")==0
# Step: best-effort removal of the PLC-side slice and user that the SFA
# scripts created. Bare 'except:' deliberately swallows "already absent"
# errors — but it also hides any other failure. Python-2 print statements.
1119 def sfa_plcclean(self):
1120 "cleans the PLC entries that were created as a side effect of running the script"
1122 sfa_spec=self.plc_spec['sfa']
1124 slicename='%s_%s'%(sfa_spec['login_base'],sfa_spec['slicename'])
1125 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1126 except: print "Slice %s already absent from PLC db"%slicename
1128 username="%s@%s"%(sfa_spec['regularuser'],sfa_spec['domain'])
1129 try: self.apiserver.DeletePerson(self.auth_root(),username)
1130 except: print "User %s already absent from PLC db"%username
1132 print "REMEMBER TO RUN sfa_import AGAIN"
# Step: remove the SFA packages and their state from the guest; all calls
# are best-effort (return codes ignored per the docstring).
# NOTE(review): line 1141 is elided — likely a comment explaining the
# extra --noscripts removal of sfa-plc below.
1135 def sfa_uninstall(self):
1136 "uses rpm to uninstall sfa - ignore result"
1137 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1138 self.run_in_guest("rm -rf /var/lib/sfa")
1139 self.run_in_guest("rm -rf /etc/sfa")
1140 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1142 self.run_in_guest("rpm -e --noscripts sfa-plc")
1145 ### run unit tests for SFA
1146 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1147 # Running Transaction
1148 # Transaction couldn't start:
1149 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1150 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1151 # no matter how many Gbs are available on the testplc
1152 # could not figure out what's wrong, so...
1153 # if the yum install phase fails, consider the test is successful
1154 # other combinations will eventually run it hopefully
# Step: install sfa-tests and run the SFA unit-test suite. Per the long
# comment above this method, a failed install is treated as success.
# NOTE(review): the 'return True' for the failed-install branch (line 1161)
# is elided from this chunk.
1155 def sfa_utest(self):
1156 "yum install sfa-tests and run SFA unittests"
1157 self.run_in_guest("yum -y install sfa-tests")
1158 # failed to install - forget it
1159 if self.run_in_guest("rpm -q sfa-tests")!=0:
1160 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1162 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): fragment — the 'def confdir(self):' line and the trailing
# 'return dirname' are elided from this chunk. Creates (and returns) the
# per-plc local config directory conf.<plcname>.
1166 dirname="conf.%s"%self.plc_spec['name']
1167 if not os.path.isdir(dirname):
1168 utils.system("mkdir -p %s"%dirname)
1169 if not os.path.isdir(dirname):
# NOTE(review): raising a string is a Python-2 string exception, rejected
# since 2.6 (TypeError) — should raise an Exception subclass.
1170 raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    "path of <filename> inside this plc's local config directory"
    directory = self.confdir()
    return "%s/%s" % (directory, filename)
# Create (optionally after wiping, when 'clean' is set) a subdirectory of
# the per-plc config dir; under dry_run the existence check is skipped.
# NOTE(review): the 'if clean:' guard before the rm -rf and the trailing
# 'return subdirname' appear to be elided from this chunk.
1175 def confsubdir(self,dirname,clean,dry_run=False):
1176 subdirname="%s/%s"%(self.confdir(),dirname)
1178 utils.system("rm -rf %s"%subdirname)
1179 if not os.path.isdir(subdirname):
1180 utils.system("mkdir -p %s"%subdirname)
1181 if not dry_run and not os.path.isdir(subdirname):
# NOTE(review): string exception — invalid in modern Python (TypeError).
1182 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    "remove <filename> from the local config dir; True if rm succeeded"
    target = self.conffile(filename)
    return utils.system("rm -rf %s" % target) == 0
# Step: generate an sfa-config-tty input script locally, echo it for the
# logs, then pipe it into sfa-config-tty inside the guest.
# NOTE(review): fragmentary — several config-variable list items, the
# val='false' branch, fileconf.close(), and the final return are elided
# from this chunk; the open file handle otherwise leaks.
1190 def sfa_configure(self):
1191 "run sfa-config-tty"
1192 tmpname=self.conffile("sfa-config-tty")
1193 fileconf=open(tmpname,'w')
1194 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1195 'SFA_INTERFACE_HRN',
1196 # 'SFA_REGISTRY_LEVEL1_AUTH',
1197 'SFA_REGISTRY_HOST',
1198 'SFA_AGGREGATE_HOST',
1204 'SFA_PLC_DB_PASSWORD',
# 'e VAR\nVALUE\n' is the sfa-config-tty "edit variable" dialogue
1207 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1208 # the way plc_config handles booleans just sucks..
1209 for var in ['SFA_API_DEBUG']:
1211 if self.plc_spec['sfa'][var]: val='true'
1212 fileconf.write ('e %s\n%s\n'%(var,val))
1213 fileconf.write('w\n')
1214 fileconf.write('R\n')
1215 fileconf.write('q\n')
1217 utils.system('cat %s'%tmpname)
1218 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    "one <aggregate/> xml line advertising this plc to its peer plcs"
    sfa_spec = self.plc_spec['sfa']
    hrn = sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    port = sfa_spec['neighbours-port']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % (self.vserverip, hrn, port)
def registry_xml_line(self):
    "one <registry/> xml line advertising this plc's registry (port hard-wired to 12345)"
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
1231 # a cross step that takes all other plcs in argument
# Write aggregates.xml / registries.xml locally from the peers' xml lines,
# then push both into this plc's guest /etc/sfa.
# NOTE(review): lines 1235-1236 are elided (likely an early return for the
# single-plc case). 'file(...)' is the Python-2 builtin and the handles
# are never closed — relies on refcounting to flush.
1232 def cross_sfa_configure(self, other_plcs):
1233 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1234 # of course with a single plc, other_plcs is an empty list
1237 agg_fname=self.conffile("agg.xml")
1238 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1239 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1240 utils.header ("(Over)wrote %s"%agg_fname)
1241 reg_fname=self.conffile("reg.xml")
1242 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1243 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1244 utils.header ("(Over)wrote %s"%reg_fname)
1245 return self.test_ssh.copy_abs(agg_fname,'/vservers/%s/etc/sfa/aggregates.xml'%self.vservername)==0 \
1246 and self.test_ssh.copy_abs(reg_fname,'/vservers/%s/etc/sfa/registries.xml'%self.vservername)==0
# Step: run the PLC-to-SFA importer inside the guest.
# NOTE(review): the docstring line (1249) is elided; 'auth' is computed
# but unused since the follow-up command was commented out below.
1248 def sfa_import(self):
1250 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1251 return self.run_in_guest('sfa-import-plc.py')==0
1252 # not needed anymore
1253 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
# Step: start the sfa service in the guest; True when the init script
# exits 0. NOTE(review): the docstring line (1256) is elided.
1255 def sfa_start(self):
1257 return self.run_in_guest('service sfa start')==0
# Step: assemble the sfi client configuration (private key, sfi_config,
# person.xml, slice.xml, slice.rspec) in a local conf subdir, then push
# the whole directory to /root/.sfi in the guest.
# NOTE(review): heavily fragmentary — the fileconf.close() calls, the
# assignments of 'slice_record' (cf the commented hint below) and of
# 'slice_rspec', and the final return are elided from this chunk; as
# shown, slice_record/slice_rspec would be undefined names. Confirm in
# the full file before editing.
1259 def sfi_configure(self):
1260 "Create /root/.sfi on the plc side for sfi client configuration"
1261 sfa_spec=self.plc_spec['sfa']
1262 dir_name=self.confsubdir("dot-sfi",clean=True,dry_run=self.options.dry_run)
1263 if self.options.dry_run: return True
# the PI's private key, named <piuser>.pkey
1264 file_name=dir_name + os.sep + sfa_spec['piuser'] + '.pkey'
1265 fileconf=open(file_name,'w')
1266 fileconf.write (self.plc_spec['keys'][0]['private'])
1268 utils.header ("(Over)wrote %s"%file_name)
# sfi_config proper: auth, user, registry and SM urls
1270 file_name=dir_name + os.sep + 'sfi_config'
1271 fileconf=open(file_name,'w')
1272 SFI_AUTH="%s.%s"%(sfa_spec['SFA_REGISTRY_ROOT_AUTH'],sfa_spec['login_base'])
1273 fileconf.write ("SFI_AUTH='%s'"%SFI_AUTH)
1274 fileconf.write('\n')
1275 SFI_USER=SFI_AUTH + '.' + sfa_spec['piuser']
1276 fileconf.write ("SFI_USER='%s'"%SFI_USER)
1277 fileconf.write('\n')
1278 SFI_REGISTRY='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12345/'
1279 fileconf.write ("SFI_REGISTRY='%s'"%SFI_REGISTRY)
1280 fileconf.write('\n')
1281 SFI_SM='http://' + sfa_spec['SFA_PLC_DB_HOST'] + ':12347/'
1282 fileconf.write ("SFI_SM='%s'"%SFI_SM)
1283 fileconf.write('\n')
1285 utils.header ("(Over)wrote %s"%file_name)
1287 file_name=dir_name + os.sep + 'person.xml'
1288 fileconf=open(file_name,'w')
1289 for record in sfa_spec['sfa_person_xml']:
1290 person_record=record
1291 fileconf.write(person_record)
1292 fileconf.write('\n')
1294 utils.header ("(Over)wrote %s"%file_name)
1296 file_name=dir_name + os.sep + 'slice.xml'
1297 fileconf=open(file_name,'w')
1298 for record in sfa_spec['sfa_slice_xml']:
1300 #slice_record=sfa_spec['sfa_slice_xml']
1301 fileconf.write(slice_record)
1302 fileconf.write('\n')
1303 utils.header ("(Over)wrote %s"%file_name)
1306 file_name=dir_name + os.sep + 'slice.rspec'
1307 fileconf=open(file_name,'w')
1309 for (key, value) in sfa_spec['sfa_slice_rspec'].items():
1311 fileconf.write(slice_rspec)
1312 fileconf.write('\n')
1314 utils.header ("(Over)wrote %s"%file_name)
1316 # push to the remote root's .sfi
1317 location = "root/.sfi"
1318 remote="/vservers/%s/%s"%(self.vservername,location)
1319 self.test_ssh.copy_abs(dir_name, remote, recursive=True)
# Step: wipe the sfi client configuration from the guest.
# NOTE(review): a trailing 'return True' may be elided (line 1326) — as
# shown the step returns None.
1323 def sfi_clean (self):
1324 "clean up /root/.sfi on the plc side"
1325 self.run_in_guest("rm -rf /root/.sfi")
def sfa_add_user(self):
    "run sfi.py add using person.xml"
    sfa_user = TestUserSfa(self)
    return sfa_user.add_user()
def sfa_update_user(self):
    "run sfi.py update using person.xml"
    sfa_user = TestUserSfa(self)
    return sfa_user.update_user()
# SFA slice step stubs; bodies (presumably one-liners delegating to
# TestSliceSfa / slice_sfa_mapper) are elided from this chunk.
1337 def sfa_add_slice(self):
1338 "run sfi.py add (on Registry) from slice.xml"
1342 def sfa_discover(self):
1343 "discover resources into resources_in.rspec"
1347 def sfa_create_slice(self):
1348 "run sfi.py create (on SM) - 1st time"
1352 def sfa_check_slice_plc(self):
1353 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1357 def sfa_update_slice(self):
1358 "run sfi.py create (on SM) on existing object"
# NOTE(review): fragment — the def line (1361) and the 'return \' line
# (1365) introducing this chained and-expression are elided from this
# chunk; as shown the expression's value would be discarded.
1362 "run sfi.py list and sfi.py show (both on Registry) and sfi.py slices and sfi.py resources (both on SM)"
1363 sfa_spec=self.plc_spec['sfa']
1364 auth=sfa_spec['SFA_REGISTRY_ROOT_AUTH']
1366 self.run_in_guest("sfi.py -d /root/.sfi/ list %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1367 self.run_in_guest("sfi.py -d /root/.sfi/ show %s.%s"%(auth,sfa_spec['login_base']))==0 and \
1368 self.run_in_guest("sfi.py -d /root/.sfi/ slices")==0 and \
1369 self.run_in_guest("sfi.py -d /root/.sfi/ resources -o resources")==0
# Step stub: body (and likely a slice_sfa_mapper decorator) elided from
# this chunk.
1372 def ssh_slice_sfa(self):
1373 "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
    "run sfi.py delete (on SM) for user"
    return TestUserSfa(self).delete_user()
# Step stub: sfa_delete_slice body is elided from this chunk.
1382 def sfa_delete_slice(self):
1383 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# NOTE(review): the line below belongs to a *different* method (sfa_stop,
# whose def/docstring lines 1386-1387 are elided); its '==0' result is
# discarded, like sfa_dbclean's above.
1388 self.run_in_guest('service sfa stop')==0
# Step: run plcsh_stress_test twice inside the guest — once with
# --preserve --short-names, once more adding --foreign — and succeed only
# if both runs do. NOTE(review): the initialization of 'command'
# (presumably from 'location', line 1397) is elided from this chunk.
# 'local'/'remote' are misnomers: both runs happen in the guest.
1391 def populate (self):
1392 "creates random entries in the PLCAPI"
1393 # install the stress-test in the plc image
1394 location = "/usr/share/plc_api/plcsh_stress_test.py"
1395 remote="/vservers/%s/%s"%(self.vservername,location)
1396 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1398 command += " -- --preserve --short-names"
1399 local = (self.run_in_guest(command) == 0);
1400 # second run with --foreign
1401 command += ' --foreign'
1402 remote = (self.run_in_guest(command) == 0);
1403 return ( local and remote)
# Step: collect every log source (plc /var/log, pgsql logs, per-node qemu
# logs and /var/log, sample sliver /var/log) into the local logs/ tree.
# NOTE(review): a few blank/comment lines and the final return (likely
# 'return True') are elided from this chunk. Python-2 print statements.
1405 def gather_logs (self):
1406 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1407 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1408 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1409 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1410 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1411 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1413 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1414 self.gather_var_logs ()
1416 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1417 self.gather_pgsql_logs ()
1419 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1420 for site_spec in self.plc_spec['sites']:
1421 test_site = TestSite (self,site_spec)
1422 for node_spec in site_spec['nodes']:
1423 test_node=TestNode(self,test_site,node_spec)
1424 test_node.gather_qemu_logs()
1426 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1427 self.gather_nodes_var_logs()
1429 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1430 self.gather_slivers_var_logs()
# Fetch each sliver's /var/log into logs/sliver.var-log.<name>/ by piping
# a remote tar into a local untar.
# NOTE(review): two trailing lines (1439-1440) are elided — possibly just
# blanks, possibly a 'return True'; confirm in the full file.
1433 def gather_slivers_var_logs(self):
1434 for test_sliver in self.all_sliver_objs():
1435 remote = test_sliver.tar_var_logs()
1436 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1437 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1438 utils.system(command)
def gather_var_logs (self):
    "fetch the plc guest's /var/log into logs/myplc.var-log.<name>/"
    dest = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    # remote tar piped into a local untar
    fetch = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(fetch + "| tar -C %s -xf -" % dest)
    # make the httpd logs readable for everyone
    utils.system("chmod a+r,a+x %s/httpd" % dest)
def gather_pgsql_logs (self):
    "fetch the plc guest's postgres logs into logs/myplc.pgsql-log.<name>/"
    dest = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    fetch = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(fetch + "| tar -C %s -xf -" % dest)
# Fetch every node's /var/log over ssh (key1) into
# logs/node.var-log.<node>/, piping remote tar into a local untar.
# NOTE(review): two trailing lines (1465-1466) are elided — possibly just
# blanks; confirm in the full file.
1455 def gather_nodes_var_logs (self):
1456 for site_spec in self.plc_spec['sites']:
1457 test_site = TestSite (self,site_spec)
1458 for node_spec in site_spec['nodes']:
1459 test_node=TestNode(self,test_site,node_spec)
1460 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1461 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1462 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1463 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1464 utils.system(command)
1467 # returns the filename to use for sql dump/restore, using options.dbname if set
# Build the sql dump/restore filename: /root/<database>-<name>.sql, where
# <name> is options.dbname when it is a string.
# NOTE(review): fragmentary — the try/except around the options lookup and
# the fallback deriving <name> from the timestamp 't' are elided from this
# chunk. types.StringTypes is Python-2 only.
1468 def dbfile (self, database):
1469 # uses options.dbname if it is found
1471 name=self.options.dbname
1472 if not isinstance(name,StringTypes):
1475 t=datetime.datetime.now()
1478 return "/root/%s-%s.sql"%(database,name)
# Step: pg_dump the planetlab5 database to a timestamped file in the
# guest's /root.
# NOTE(review): "planetab5" (sic) — the dump *filename* carries a typo of
# planetlab5; plc_db_restore uses the same spelling so they stay paired,
# but the docstring says planetlab5. The trailing return (likely
# 'return True') is elided from this chunk.
1480 def plc_db_dump(self):
1481 'dump the planetlab5 DB in /root in the PLC - filename has time'
1482 dump=self.dbfile("planetab5")
1483 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1484 utils.header('Dumped planetlab5 database in %s'%dump)
# Step: recreate the planetlab5 database from the file plc_db_dump wrote,
# stopping/restarting httpd around the drop/createdb/psql sequence.
# NOTE(review): "planetab5" (sic) matches plc_db_dump's filename typo;
# one line (1498) is elided — likely blank or a 'return True'.
1487 def plc_db_restore(self):
1488 'restore the planetlab5 DB - looks broken, but run -n might help'
1489 dump=self.dbfile("planetab5")
1490 ##stop httpd service
1491 self.run_in_guest('service httpd stop')
1492 # xxx - need another wrapper
1493 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1494 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1495 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1496 ##starting httpd service
1497 self.run_in_guest('service httpd start')
1499 utils.header('Database restored from ' + dump)
# Timed no-op steps: standby_N sleeps N minutes via the standby_generic
# decorator defined at the top of the file.
# NOTE(review): the @standby_generic decorator lines between these defs
# are elided from this chunk — as shown, the bodies are bare 'pass'.
1502 def standby_1(): pass
1504 def standby_2(): pass
1506 def standby_3(): pass
1508 def standby_4(): pass
1510 def standby_5(): pass
1512 def standby_6(): pass
1514 def standby_7(): pass
1516 def standby_8(): pass
1518 def standby_9(): pass
1520 def standby_10(): pass
1522 def standby_11(): pass
1524 def standby_12(): pass
1526 def standby_13(): pass
1528 def standby_14(): pass
1530 def standby_15(): pass
1532 def standby_16(): pass
1534 def standby_17(): pass
1536 def standby_18(): pass
1538 def standby_19(): pass
1540 def standby_20(): pass