# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import os, sys
import time
import datetime
import socket
import traceback
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestSliceSfa import TestSliceSfa

# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True

def standby_generic (func):
    def actual(self):
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
    return actual
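
# illustrative sketch (mirroring the standby_* steps at the end of this file):
# the decorator derives the duration from the method name, so
#   @standby_generic
#   def standby_5(): pass
# produces a step whose body amounts to standby(5, self.options.dry_run)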

def node_mapper (method):
    def actual(self,*args, **kwds):
        overall=True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
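
# usage sketch: a TestPlc step decorated with node_mapper only needs a name and
# a doc string - the real work is the TestNode method of the same name, e.g.
# the qemu_start step below runs TestNode.qemu_start() on every node and
# reports failure as soon as one of them fails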

def slice_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual

def slice_sfa_mapper (method):
    def actual(self):
        overall=True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
        return overall
    # restore the doc text
    actual.__doc__=method.__doc__
    return actual
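
# the slice mappers follow the same pattern against TestSlice/TestSliceSfa:
# e.g. the ssh_slice step below iterates over plc_spec['slices'] and calls
# TestSlice.ssh_slice(test_slice, options) for each one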

SEP='<sep>'
SEPSFA='<sep-sfa>'

class TestPlc:

    default_steps = [
        'show', SEP,
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'check_tcp', 'check_netflow', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'populate', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20', SEP,
        ]

    @staticmethod
    def printable_steps (list):
        single_line=" ".join(list)+" "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
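
    # example: printable_steps(['vs_delete','vs_create',SEP,'plc_install'])
    # yields 'vs_delete vs_create \\\nplc_install ' - i.e. the SEP/SEPSFA
    # markers are rendered as shell-style line continuations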

    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod!=0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)

    def __init__ (self,plc_spec,options):
        self.plc_spec=plc_spec
        self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)

    def name(self):
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()

    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary

    def actual_command_in_guest (self,command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self,command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self,command):
        return self.test_ssh.run_in_buildname(command)

    # command gets run in the plc's vm
    def host_to_guest(self,command):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.host_to_guest"
        else:
            return "vserver %s exec %s"%(self.vservername,command)

    def vm_root_in_guest(self):
        if self.options.plcs_use_lxc:
            return "TODO TestPlc.vm_root_in_guest"
        else:
            return "/vservers/%s"%self.vservername

    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.start_guest_in_host"
        else:
            return "vserver %s start"%(self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            # XXX TODO-lxc how to run a command in the plc context from an lxc-based host
            return "TODO-lxc TestPlc.stop_guest_in_host"
        else:
            return "vserver %s stop"%(self.vservername)
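
    # illustration (vserver mode, with a made-up vservername 'myplc01'):
    #   host_to_guest('service plc start') == 'vserver myplc01 exec service plc start'
    #   vm_root_in_guest()                 == '/vservers/myplc01'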

    def run_in_guest_piped (self,local,remote):
        return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
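
    # e.g. run_in_guest_piped('cat foo.conf','plc-config-tty') assembles
    #   cat foo.conf | <ssh-to-host_box> 'vserver <name> exec plc-config-tty'
    # ('foo.conf' is illustrative; TestSsh builds the exact ssh invocation)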

    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            rpms=" ".join(rpms)
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.run_in_guest("rpm -q %s"%rpms)==0
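
    # e.g. yum_install(['myplc','noderepo-planetlab-f14-x86_64']) runs a single
    # 'yum -y install ...' in the guest, then checks both packages with
    # 'rpm -q' - rpm's exit status, not yum's, decides whether the step passed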

    def auth_root (self):
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }

    def locate_site (self,sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception,"Cannot locate site %s"%sitename

    def locate_node (self,nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site,node)
        raise Exception,"Cannot locate node %s"%nodename

    def locate_hostname (self,hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site,node)
        raise Exception,"Cannot locate hostname %s"%hostname

    def locate_key (self,keyname):
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception,"Cannot locate key %s"%keyname

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception,"Cannot locate slice %s"%slicename

    def all_sliver_objs (self):
        result=[]
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        return result

    def locate_sliver_obj (self,nodename,slicename):
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)

    def locate_first_node(self):
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site, node)
        return test_node

    def locate_first_sliver (self):
        slice_spec=self.plc_spec['slices'][0]
        slicename=slice_spec['slice_fields']['name']
        nodename=slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename,slicename)

    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box,node) in tuples:
            if not result.has_key(box): result[box]=[]
            result[box].append(node)
        return result
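
    # shape sketch (hostnames made up): two qemu nodes on the same test box
    # map to { 'testbox1.example.org' : [ <TestNode node1>, <TestNode node2> ] }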

    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        return True

    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True

    #################### display config
    def show (self):
        "show test configuration after localization"
        self.display_pass (1)
        self.display_pass (2)
        return True

    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        print "export PLCHOST=%s"%fqdn
        print "export GUEST=%s"%self.plc_spec['vservername']
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
        return True

    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def display_pass (self,passno):
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                elif key=='slices':
                    for slice in val:
                        self.display_slice_spec (slice)
                elif key=='keys':
                    for key in val:
                        self.display_key_spec (key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val

    def display_site_spec (self,site):
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k=='nodes':
                if v:
                    print '+ ','nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'],'',
                    print ''
            elif k=='users':
                if v:
                    print '+ ','users : ',
                    for user in v:
                        print user['name'],'',
                    print ''
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ',
                utils.pprint(k,v)

    def display_initscript_spec (self,initscript):
        print '+ ======== initscript',initscript['initscript_fields']['name']

    def display_key_spec (self,key):
        print '+ ======== key',key['name']

    def display_slice_spec (self,slice):
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            if k=='nodenames':
                if v:
                    print '+ ','nodes : ',
                    for nodename in v:
                        print nodename,'',
                    print ''
            elif k=='usernames':
                if v:
                    print '+ ','users : ',
                    for username in v:
                        print username,'',
                    print ''
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
                print ''
            else:
                print '+ ',k,v

    def display_node_spec (self,node):
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)

    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']

    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        now=int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path="%s.timestamp"%self.vm_root_in_guest()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            # TODO-lxc : how to trash a VM altogether and the related timestamp as well
            # might make sense to test that this has been done - unlike for vs
            print "TODO TestPlc.vs_delete"
            return True
        else:
            self.run_in_host("vserver --silent %s delete"%self.vservername)
            return True

    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir=os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir="."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir="build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options=""
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            # TODO-lxc : might need some tweaks
            script="vtest-init-lxc.sh"
        else:
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
        except:
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
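
    # for the record, the assembled command looks like (values illustrative):
    #   build/vtest-init-vserver.sh -p linux64 -d planetlab -f f14 \
    #       <vservername> <repo_url> -- --netdev eth0 --interface <vserverip> --hostname <fqdn>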

    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily
        if self.options.personality == "linux32":
            arch="i386"
        elif self.options.personality == "linux64":
            arch="x86_64"
        else:
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

        pkgs_list=[]
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        return self.yum_install (pkgs_list)
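
    # e.g. personality=linux64, pldistro=planetlab, fcdistro=f14 gives
    # nodefamily='planetlab-f14-x86_64', hence packages like
    # 'noderepo-planetlab-f14-x86_64' and 'nodeimage-planetlab-f14-x86_64-plain'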

    def plc_configure(self):
        "run plc-config-tty"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_ROOT_USER',
                     'PLC_ROOT_PASSWORD',
                     'PLC_SLICE_PREFIX',
                     'PLC_MAIL_ENABLED',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     'PLC_DB_HOST',
                     'PLC_DB_PASSWORD',
                     # Above line was added for integrating SFA Testing
                     'PLC_API_HOST',
                     'PLC_WWW_HOST',
                     'PLC_BOOT_HOST',
                     'PLC_NET_DNS1',
                     'PLC_NET_DNS2',
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_ENABLED',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        return True

    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True

    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir="./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername=self.vservername
        vm_root=self.vm_root_in_guest()
        overall=True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        return overall
657 "create sites with PLCAPI"
658 return self.do_sites()
660 def delete_sites (self):
661 "delete sites with PLCAPI"
662 return self.do_sites(action="delete")
664 def do_sites (self,action="add"):
665 for site_spec in self.plc_spec['sites']:
666 test_site = TestSite (self,site_spec)
667 if (action != "add"):
668 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
669 test_site.delete_site()
670 # deleted with the site
671 #test_site.delete_users()
674 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
675 test_site.create_site()
676 test_site.create_users()

    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        return True

    def nodes (self):
        "create nodes with PLCAPI"
        return self.do_nodes()

    def delete_nodes (self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")

    def do_nodes (self,action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
        return True

    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start,grain,timestamp):
        if timestamp < TestPlc.YEAR: return start+timestamp*grain
        else: return timestamp

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
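
    # example: with grain=1800 and start=1200000600 a relative 'leases' value
    # of 2 maps to 1200000600+2*1800=1200004200, while anything larger than
    # YEAR - i.e. an actual unix timestamp - is kept unchanged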
730 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
732 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
733 print 'API answered grain=',grain
734 start=(now/grain)*grain
736 # find out all nodes that are reservable
737 nodes=self.all_reservable_nodenames()
739 utils.header ("No reservable node found - proceeding without leases")
742 # attach them to the leases as specified in plc_specs
743 # this is where the 'leases' field gets interpreted as relative of absolute
744 for lease_spec in self.plc_spec['leases']:
745 # skip the ones that come with a null slice id
746 if not lease_spec['slice']: continue
747 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
748 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
749 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
750 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
751 if lease_addition['errors']:
752 utils.header("Cannot create leases, %s"%lease_addition['errors'])
755 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
756 (nodes,lease_spec['slice'],
757 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
758 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))

    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now=int(time.time())
        for l in leases:
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
        return True

    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        overall = True
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                                                             'category':'test'})
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # create the nodegroup if needed
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                            overall=False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                except:
                    traceback.print_exc()
                    overall=False
        return overall

    # a list of TestNode objs
    def all_nodes (self):
        nodes=[]
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res=[]
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        return res

    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_nodenames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get their status
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # let's pretend it has reached the target state
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)

    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # compute timeouts
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        if debug:
            message="debug"
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
        else:
            message="boot"
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                     (timeout_minutes,silent_minutes,period))
        while node_infos:
            # iterate on a copy, as we remove entries from the original list
            for node_info in node_infos[:]:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                if success==0:
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    # refresh node_infos
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)

    @node_mapper
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        pass
    @node_mapper
    def bootcd (self):
        "all nodes: invoke GetBootMedium and store result locally"
        pass
    @node_mapper
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
        pass
    @node_mapper
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
        pass
    @node_mapper
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
        pass
    @node_mapper
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
        pass
    @node_mapper
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
        pass
    @node_mapper
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
        pass

    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self) :
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()

    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()

    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
                print initscript_name,'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1061 "create slices with PLCAPI"
1062 return self.do_slices()
1064 def delete_slices (self):
1065 "delete slices with PLCAPI"
1066 return self.do_slices("delete")
1068 def do_slices (self, action="add"):
1069 for slice in self.plc_spec['slices']:
1070 site_spec = self.locate_site (slice['sitename'])
1071 test_site = TestSite(self,site_spec)
1072 test_slice=TestSlice(self,test_site,slice)
1074 utils.header("Deleting slices in site %s"%test_site.name())
1075 test_slice.delete_slice()
1077 utils.pprint("Creating slice",slice)
1078 test_slice.create_slice()
1079 utils.header('Created Slice %s'%slice['slice_fields']['name'])

    @slice_mapper
    def ssh_slice(self):
        "tries to ssh-enter the slice with the user key, to ensure slice creation"
        pass

    @node_mapper
    def keys_clear_known_hosts (self):
        "remove test nodes entries from the local known_hosts file"
        pass

    @node_mapper
    def qemu_start (self) :
        "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
        pass

    @node_mapper
    def timestamp_qemu (self) :
        "all nodes: write a timestamp on the qemu box"
        pass

    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall=True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port,timeout=10):
                overall=False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
                overall=False
        return overall

    # painfully enough, we need to allow for some time as netflow might show up last
    def check_netflow (self):
        "all nodes: check that the netflow slice is alive"
        return self.check_systemslice ('netflow')

    # we have the slices up already here, so it should not take too long
    def check_systemslice (self, slicename, timeout_minutes=5, period=15):
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        test_nodes=self.all_nodes()
        while test_nodes:
            # iterate on a copy, as we remove entries from the original list
            for test_node in test_nodes[:]:
                if test_node.check_systemslice (slicename):
                    test_nodes.remove(test_node)
            if not test_nodes:
                return True
            if datetime.datetime.now () > timeout:
                for test_node in test_nodes:
                    utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
                return False
            time.sleep(period)
        return True

    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return ( self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries

    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install ("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_client(self):
        "yum install sfa-client"
        return self.yum_install("sfa-client")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install ("sfa-sfatables")

    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        self.run_in_guest("sfa-nuke.py")==0 or \
            self.run_in_guest("sfa-nuke-plc.py")==0 or \
            self.run_in_guest("sfaadmin.py registry nuke")==0
        return True

    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # ignore result
        sfa_spec=self.plc_spec['sfa']

        for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
            slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
            try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
            except: print "Slice %s already absent from PLC db"%slicename

            username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            except: print "User %s already absent from PLC db"%username

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True

    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True

    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0

    def confdir(self):
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            raise Exception,"Cannot create config dir for plc %s"%self.name()
        return dirname

    def conffile(self,filename):
        return "%s/%s"%(self.confdir(),filename)

    def confsubdir(self,dirname,clean,dry_run=False):
        subdirname="%s/%s"%(self.confdir(),dirname)
        if clean:
            utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception,"Cannot create config subdir %s for plc %s"%(dirname,self.name())
        return subdirname

    def conffile_clean (self,filename):
        filename=self.conffile(filename)
        return utils.system("rm -rf %s"%filename)==0

    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_SM_HOST',
                     'SFA_PLC_USER',
                     'SFA_PLC_PASSWORD',
                     'SFA_PLC_DB_HOST',
                     'SFA_PLC_DB_USER',
                     'SFA_PLC_DB_PASSWORD',
                     'SFA_PLC_URL',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in ['SFA_API_DEBUG']:
            val='false'
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
        return True

    def aggregate_xml_line(self):
        port=self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
            (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
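
    # sample output (values made up): for a peer with vserverip 10.0.0.2 and
    # registry root auth 'plc2', registry_xml_line() emits
    #   <registry addr="10.0.0.2" hrn="plc2" port="12345"/>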

    # a cross step that takes all other plcs in argument
    def cross_sfa_configure (self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
            " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
            " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_guest())==0 \
           and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_guest())==0

    def sfa_import(self):
        "import the registry information into SFA (tries the known importers in turn)"
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfa-import.py')==0 or \
               self.run_in_guest('sfa-import-plc.py')==0 or \
               self.run_in_guest('sfaadmin.py registry import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start')==0

    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec=self.plc_spec['sfa']
        # cannot use sfa_slice_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_config(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_guest(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True

    @slice_sfa_mapper
    def sfa_add_user(self):
        "run sfi.py add (on Registry)"
        pass

    @slice_sfa_mapper
    def sfa_update_user(self):
        "run sfi.py update (on Registry)"
        pass

    @slice_sfa_mapper
    def sfa_add_slice(self):
        "run sfi.py add (on Registry) from slice.xml"
        pass

    @slice_sfa_mapper
    def sfa_discover(self):
        "discover resources into resources_in.rspec"
        pass

    @slice_sfa_mapper
    def sfa_create_slice(self):
        "run sfi.py create (on SM) - 1st time"
        pass

    @slice_sfa_mapper
    def sfa_check_slice_plc(self):
        "check sfa_create_slice at the plcs - all local nodes should be in slice"
        pass

    @slice_sfa_mapper
    def sfa_update_slice(self):
        "run sfi.py create (on SM) on existing object"
        pass

    @slice_sfa_mapper
    def sfa_view(self):
        "various registry-related calls"
        pass

    @slice_sfa_mapper
    def ssh_slice_sfa(self):
        "tries to ssh-enter the SFA slice"
        pass

    @slice_sfa_mapper
    def sfa_delete_user(self):
        "run sfi.py remove (on Registry)"
        pass

    @slice_sfa_mapper
    def sfa_delete_slice(self):
        "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
        pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')==0
        return True

    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_guest(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return ( local and remote)

    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()

        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()

        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()

        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()

        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True

    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
            utils.system(command)

    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
                utils.system(command)

    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name=self.options.dbname
            if not isinstance(name,StringTypes):
                raise Exception
        except:
            t=datetime.datetime.now()
            d=t.date()
            name=str(d)
        return "/root/%s-%s.sql"%(database,name)
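
    # e.g. dbfile('planetlab5') gives '/root/planetlab5-nightly.sql' when the
    # run was launched with --dbname nightly, and a date-based name like
    # '/root/planetlab5-2011-01-15.sql' with the fallback sketched above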

    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump=self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump=self.dbfile("planetlab5")
        ## stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ## restart httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True

    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass