1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes, dry_run):
    utils.header('Entering StandBy for %d minutes' % minutes)
    if not dry_run:
        time.sleep(60*minutes)
    return True
def standby_generic (func):
    def actual(self):
        minutes = int(func.__name__.split("_")[1])
        return standby(minutes, self.options.dry_run)
    return actual
def node_mapper (method):
    def actual(self, *args, **kwds):
        overall = True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds):
                overall = False
        return overall
    # restore the doc text
    actual.__doc__ = method.__doc__
    return actual
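# usage sketch: a TestPlc step decorated with @node_mapper, e.g.
#     @node_mapper
#     def qemu_start (self): pass
# simply dispatches to TestNode.qemu_start() on every node of every site in the spec,
# and reports failure if any single node fails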
def slice_mapper (method):
    def actual(self):
        overall = True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options):
                overall = False
        return overall
    # restore the doc text
    actual.__doc__ = method.__doc__
    return actual
def slice_sfa_mapper (method):
    def actual(self):
        overall = True
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSliceSfa(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options):
                overall = False
        return overall
    # restore the doc text
    actual.__doc__ = method.__doc__
    return actual
86 'vs_delete','timestamp_vs','vs_create', SEP,
87 'plc_install', 'plc_configure', 'plc_start', SEP,
88 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
89 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
90 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
91 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
92 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
93 'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
94 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
95 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
96 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
97 # but as the stress test might take a while, we sometimes missed the debug mode..
98 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
99 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
100 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
101 'check_tcp', 'check_netflow', SEP,
102 'force_gather_logs', SEP,
105 'export', 'show_boxes', SEP,
106 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
107 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
108 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
109 'delete_leases', 'list_leases', SEP,
111 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
112 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
113 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
114 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
115 'plc_db_dump' , 'plc_db_restore', SEP,
116 'standby_1_through_20',SEP,
120 def printable_steps (list):
121 single_line=" ".join(list)+" "
122 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
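    # e.g. printable_steps(['plc_install','plc_configure',SEP,'keys_fetch']) comes out roughly as
    #   "plc_install plc_configure \"  followed by  "keys_fetch "  on the next line,
    # i.e. the SEP/SEPSFA markers show up as line breaks in the displayed step list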
124 def valid_step (step):
125 return step != SEP and step != SEPSFA
127 # turn off the sfa-related steps when build has skipped SFA
128 # this is originally for centos5 as recent SFAs won't build on this platform
130 def check_whether_build_has_sfa (rpms_url):
131 # warning, we're now building 'sface' so let's be a bit more picky
132 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        if retcod != 0:
            # move all steps containing 'sfa' from default_steps to other_steps
            sfa_steps = [ step for step in TestPlc.default_steps if step.find('sfa') >= 0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
140 def __init__ (self,plc_spec,options):
        self.plc_spec = plc_spec
        self.options = options
        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
144 self.vserverip=plc_spec['vserverip']
145 self.vservername=plc_spec['vservername']
146 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
147 self.apiserver=TestApiserver(self.url,options.dry_run)
    def name(self):
        name = self.plc_spec['name']
        return "%s.%s" % (name, self.vservername)
154 return self.plc_spec['host_box']
157 return self.test_ssh.is_local()
    # defining the API methods on this object through xmlrpc would help,
    # but is not strictly necessary
164 def actual_command_in_guest (self,command):
165 return self.test_ssh.actual_command(self.host_to_guest(command))
167 def start_guest (self):
168 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
170 def stop_guest (self):
171 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
173 def run_in_guest (self,command):
174 return utils.system(self.actual_command_in_guest(command))
176 def run_in_host (self,command):
177 return self.test_ssh.run_in_buildname(command)
    # command gets run in the plc's vm
    def host_to_guest(self, command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s" % (self.hostname(), command)
        else:
            return "vserver %s exec %s" % (self.vservername, command)
    def vm_root_in_host(self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/" % (self.vservername)
        else:
            return "/vservers/%s" % (self.vservername)

    def vm_timestamp_path (self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s.timestamp" % (self.vservername)
        else:
            return "/vservers/%s.timestamp" % (self.vservername)
    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-start --name=%s" % (self.vservername)
        else:
            return "vserver %s start" % (self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s" % (self.vservername)
        else:
            return "vserver %s stop" % (self.vservername)
212 def run_in_guest_piped (self,local,remote):
213 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
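    # e.g. run_in_guest_piped('cat %s' % some_local_file, 'plc-config-tty') feeds a local file
    # into a command running inside the guest - this is how plc-config-tty and sfa-config-tty
    # get their scripted answers below (some_local_file being whatever temp file was prepared)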
215 # does a yum install in the vs, ignore yum retcod, check with rpm
216 def yum_install (self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        self.run_in_guest("yum -y install %s" % rpms)
220 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
221 self.run_in_guest("yum-complete-transaction -y")
222 return self.run_in_guest("rpm -q %s"%rpms)==0
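    # e.g. self.yum_install(['myplc']) or self.yum_install("sfa sfa-plc"); a list gets joined
    # with spaces, and success is judged on 'rpm -q' rather than on yum's exit code, which is
    # not reliable for this purpose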
224 def auth_root (self):
225 return {'Username':self.plc_spec['PLC_ROOT_USER'],
226 'AuthMethod':'password',
227 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role'],
                }
230 def locate_site (self,sitename):
231 for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception, "Cannot locate site %s" % sitename
238 def locate_node (self,nodename):
239 for site in self.plc_spec['sites']:
240 for node in site['nodes']:
                if node['name'] == nodename:
                    return (site, node)
        raise Exception, "Cannot locate node %s" % nodename
245 def locate_hostname (self,hostname):
246 for site in self.plc_spec['sites']:
247 for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site, node)
        raise Exception, "Cannot locate hostname %s" % hostname
252 def locate_key (self,keyname):
253 for key in self.plc_spec['keys']:
            if key['name'] == keyname:
                return key
        raise Exception, "Cannot locate key %s" % keyname
258 def locate_slice (self, slicename):
259 for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception, "Cannot locate slice %s" % slicename
    def all_sliver_objs (self):
        result = []
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj(nodename, slicename))
        return result
272 def locate_sliver_obj (self,nodename,slicename):
273 (site,node) = self.locate_node(nodename)
274 slice = self.locate_slice (slicename)
276 test_site = TestSite (self, site)
277 test_node = TestNode (self, test_site,node)
278 # xxx the slice site is assumed to be the node site - mhh - probably harmless
279 test_slice = TestSlice (self, test_site, slice)
280 return TestSliver (self, test_node, test_slice)
282 def locate_first_node(self):
283 nodename=self.plc_spec['slices'][0]['nodenames'][0]
284 (site,node) = self.locate_node(nodename)
285 test_site = TestSite (self, site)
        test_node = TestNode(self, test_site, node)
        return test_node
289 def locate_first_sliver (self):
290 slice_spec=self.plc_spec['slices'][0]
291 slicename=slice_spec['slice_fields']['name']
292 nodename=slice_spec['nodenames'][0]
293 return self.locate_sliver_obj(nodename,slicename)
295 # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append((test_node.host_box(), test_node))
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box, node) in tuples:
            if not result.has_key(box):
                result[box] = []
            result[box].append(node)
        return result
314 # a step for checking this stuff
315 def show_boxes (self):
316 'print summary of nodes location'
317 for (box,nodes) in self.gather_hostBoxes().iteritems():
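            # each printed line looks roughly like "<host box> : <node> + <node> ..."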
            print box, ":", " + ".join([ node.name() for node in nodes ])
        return True
321 # make this a valid step
322 def qemu_kill_all(self):
323 'kill all qemu instances on the qemu boxes involved by this setup'
324 # this is the brute force version, kill all qemus on that host box
325 for (box,nodes) in self.gather_hostBoxes().iteritems():
326 # pass the first nodename, as we don't push template-qemu on testboxes
327 nodedir=nodes[0].nodedir()
328 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
331 # make this a valid step
332 def qemu_list_all(self):
333 'list all qemu instances on the qemu boxes involved by this setup'
334 for (box,nodes) in self.gather_hostBoxes().iteritems():
335 # this is the brute force version, kill all qemus on that host box
336 TestBoxQemu(box,self.options.buildname).qemu_list_all()
339 # kill only the right qemus
340 def qemu_list_mine(self):
341 'list qemu instances for our nodes'
342 for (box,nodes) in self.gather_hostBoxes().iteritems():
343 # the fine-grain version
348 # kill only the right qemus
349 def qemu_kill_mine(self):
350 'kill the qemu instances for our nodes'
351 for (box,nodes) in self.gather_hostBoxes().iteritems():
352 # the fine-grain version
357 #################### display config
359 "show test configuration after localization"
360 self.display_pass (1)
361 self.display_pass (2)
365 "print cut'n paste-able stuff to export env variables to your shell"
366 # guess local domain from hostname
367 domain=socket.gethostname().split('.',1)[1]
368 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
369 print "export BUILD=%s"%self.options.buildname
370 print "export PLCHOST=%s"%fqdn
371 print "export GUEST=%s"%self.plc_spec['vservername']
372 # find hostname of first node
373 (hostname,qemubox) = self.all_node_infos()[0]
374 print "export KVMHOST=%s.%s"%(qemubox,domain)
375 print "export NODE=%s"%(hostname)
379 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
380 def display_pass (self,passno):
381 for (key,val) in self.plc_spec.iteritems():
382 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
386 self.display_site_spec(site)
387 for node in site['nodes']:
388 self.display_node_spec(node)
389 elif key=='initscripts':
390 for initscript in val:
391 self.display_initscript_spec (initscript)
394 self.display_slice_spec (slice)
397 self.display_key_spec (key)
399 if key not in ['sites','initscripts','slices','keys', 'sfa']:
400 print '+ ',key,':',val
402 def display_site_spec (self,site):
403 print '+ ======== site',site['site_fields']['name']
404 for (k,v) in site.iteritems():
405 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
408 print '+ ','nodes : ',
410 print node['node_fields']['hostname'],'',
416 print user['name'],'',
418 elif k == 'site_fields':
419 print '+ login_base',':',v['login_base']
420 elif k == 'address_fields':
426 def display_initscript_spec (self,initscript):
427 print '+ ======== initscript',initscript['initscript_fields']['name']
429 def display_key_spec (self,key):
430 print '+ ======== key',key['name']
432 def display_slice_spec (self,slice):
433 print '+ ======== slice',slice['slice_fields']['name']
434 for (k,v) in slice.iteritems():
447 elif k=='slice_fields':
448 print '+ fields',':',
449 print 'max_nodes=',v['max_nodes'],
454 def display_node_spec (self,node):
455 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
456 print "hostname=",node['node_fields']['hostname'],
457 print "ip=",node['interface_fields']['ip']
458 if self.options.verbose:
459 utils.pprint("node details",node,depth=3)
461 # another entry point for just showing the boxes involved
462 def display_mapping (self):
463 TestPlc.display_mapping_plc(self.plc_spec)
    @staticmethod
    def display_mapping_plc (plc_spec):
468 print '+ MyPLC',plc_spec['name']
469 # WARNING this would not be right for lxc-based PLC's - should be harmless though
470 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
471 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
472 for site_spec in plc_spec['sites']:
473 for node_spec in site_spec['nodes']:
474 TestPlc.display_mapping_node(node_spec)
    @staticmethod
    def display_mapping_node (node_spec):
478 print '+ NODE %s'%(node_spec['name'])
479 print '+\tqemu box %s'%node_spec['host_box']
480 print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # the timestamp cannot live inside the vserver, as that would make 'vserver .. build' cough
    def timestamp_vs (self):
        now = int(time.time())
486 # TODO-lxc check this one
487 # a first approx. is to store the timestamp close to the VM root like vs does
488 stamp_path=self.vm_timestamp_path ()
489 stamp_dir = os.path.dirname (stamp_path)
490 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
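        # the stamp file just holds plain epoch seconds (the %d below), stored right next to
        # the VM root as computed by vm_timestamp_path()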
491 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
497 stamp_path=self.vm_timestamp_path()
498 self.run_in_host("rm -f %s"%stamp_path)
499 if self.options.plcs_use_lxc:
500 self.run_in_host("lxc-stop --name %s"%self.vservername)
501 self.run_in_host("lxc-destroy --name %s"%self.vservername)
504 self.run_in_host("vserver --silent %s delete"%self.vservername)
508 # historically the build was being fetched by the tests
509 # now the build pushes itself as a subdir of the tests workdir
510 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
511 def vs_create (self):
512 "vserver creation (no install done)"
513 # push the local build/ dir to the testplc box
515 # a full path for the local calls
516 build_dir=os.path.dirname(sys.argv[0])
517 # sometimes this is empty - set to "." in such a case
518 if not build_dir: build_dir="."
519 build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        build_dir = "build"
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
524 self.test_ssh.rmdir(build_dir)
525 self.test_ssh.copy(build_dir,recursive=True)
526 # the repo url is taken from arch-rpms-url
527 # with the last step (i386) removed
528 repo_url = self.options.arch_rpms_url
529 for level in [ 'arch' ]:
530 repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options = ""
        test_env_options += " -p %s" % self.options.personality
534 test_env_options += " -d %s"%self.options.pldistro
535 test_env_options += " -f %s"%self.options.fcdistro
536 if self.options.plcs_use_lxc:
537 script="vtest-init-lxc.sh"
539 script="vtest-init-vserver.sh"
540 vserver_name = self.vservername
541 vserver_options="--netdev eth0 --interface %s"%self.vserverip
        try:
            vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s" % vserver_hostname
        except:
            print "Cannot reverse lookup %s" % self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
549 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
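        # at this point create_vserver looks roughly like (actual values depend on the options):
        #   build/vtest-init-lxc.sh -p <personality> -d <pldistro> -f <fcdistro> <vservername> <repo_url> \
        #       -- --netdev eth0 --interface <vserverip> [--hostname <reverse lookup of vserverip>]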
550 return self.run_in_host(create_vserver) == 0
553 def plc_install(self):
554 "yum install myplc, noderepo, and the plain bootstrapfs"
556 # workaround for getting pgsql8.2 on centos5
557 if self.options.fcdistro == "centos5":
558 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception, "Unsupported personality %r" % self.options.personality
567 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
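        # nodefamily thus looks like e.g. '<pldistro>-f14-i386' (depending on the options);
        # it selects which noderepo and plain bootstrapfs get installed along with myplc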
        pkgs_list = []
        pkgs_list.append("slicerepo-%s" % nodefamily)
571 pkgs_list.append ("myplc")
572 pkgs_list.append ("noderepo-%s"%nodefamily)
573 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
574 pkgs_string=" ".join(pkgs_list)
575 return self.yum_install (pkgs_list)
578 def plc_configure(self):
580 tmpname='%s.plc-config-tty'%(self.name())
581 fileconf=open(tmpname,'w')
582 for var in [ 'PLC_NAME',
587 'PLC_MAIL_SUPPORT_ADDRESS',
590 # Above line was added for integrating SFA Testing
596 'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
600 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
601 fileconf.write('w\n')
602 fileconf.write('q\n')
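        # the temp file now holds a scripted plc-config-tty session: an 'e <VAR>' line followed
        # by the desired value for each setting, then 'w' to write the config and 'q' to quit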
604 utils.system('cat %s'%tmpname)
605 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
606 utils.system('rm %s'%tmpname)
611 self.run_in_guest('service plc start')
616 self.run_in_guest('service plc stop')
620 "start the PLC vserver"
625 "stop the PLC vserver"
629 # stores the keys from the config for further use
630 def keys_store(self):
631 "stores test users ssh keys in keys/"
632 for key_spec in self.plc_spec['keys']:
633 TestKey(self,key_spec).store_key()
636 def keys_clean(self):
637 "removes keys cached in keys/"
638 utils.system("rm -rf ./keys")
641 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
642 # for later direct access to the nodes
643 def keys_fetch(self):
644 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir = "./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername = self.vservername
        vm_root = self.vm_root_in_host()
        overall = True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ]:
            src = "%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s" % locals()
            dst = "keys/%(vservername)s-debug.%(ext)s" % locals()
            if self.test_ssh.fetch(src, dst) != 0: overall = False
        return overall
659 "create sites with PLCAPI"
660 return self.do_sites()
662 def delete_sites (self):
663 "delete sites with PLCAPI"
664 return self.do_sites(action="delete")
666 def do_sites (self,action="add"):
667 for site_spec in self.plc_spec['sites']:
668 test_site = TestSite (self,site_spec)
669 if (action != "add"):
670 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
671 test_site.delete_site()
672 # deleted with the site
673 #test_site.delete_users()
676 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
677 test_site.create_site()
678 test_site.create_users()
681 def delete_all_sites (self):
682 "Delete all sites in PLC, and related objects"
683 print 'auth_root',self.auth_root()
684 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
685 for site_id in site_ids:
686 print 'Deleting site_id',site_id
687 self.apiserver.DeleteSite(self.auth_root(),site_id)
691 "create nodes with PLCAPI"
692 return self.do_nodes()
693 def delete_nodes (self):
694 "delete nodes with PLCAPI"
695 return self.do_nodes(action="delete")
697 def do_nodes (self,action="add"):
698 for site_spec in self.plc_spec['sites']:
699 test_site = TestSite (self,site_spec)
701 utils.header("Deleting nodes in site %s"%test_site.name())
702 for node_spec in site_spec['nodes']:
703 test_node=TestNode(self,test_site,node_spec)
704 utils.header("Deleting %s"%test_node.name())
705 test_node.delete_node()
707 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
708 for node_spec in site_spec['nodes']:
709 utils.pprint('Creating node %s'%node_spec,node_spec)
710 test_node = TestNode (self,test_site,node_spec)
711 test_node.create_node ()
714 def nodegroups (self):
715 "create nodegroups with PLCAPI"
716 return self.do_nodegroups("add")
717 def delete_nodegroups (self):
718 "delete nodegroups with PLCAPI"
719 return self.do_nodegroups("delete")
    @staticmethod
    def translate_timestamp (start, grain, timestamp):
724 if timestamp < TestPlc.YEAR: return start+timestamp*grain
725 else: return timestamp
    @staticmethod
    def timestamp_printable (timestamp):
729 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
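    # note: lease timestamps in the spec may be given either as absolute epoch values or as
    # small relative ones; anything below TestPlc.YEAR is taken as "that many grains from now",
    # e.g. t_from=0, t_until=1 roughly means "from now until one granularity period later"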
732 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
734 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
735 print 'API answered grain=',grain
736 start=(now/grain)*grain
738 # find out all nodes that are reservable
739 nodes=self.all_reservable_nodenames()
741 utils.header ("No reservable node found - proceeding without leases")
744 # attach them to the leases as specified in plc_specs
745 # this is where the 'leases' field gets interpreted as relative of absolute
746 for lease_spec in self.plc_spec['leases']:
747 # skip the ones that come with a null slice id
748 if not lease_spec['slice']: continue
749 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
750 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
751 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
752 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
753 if lease_addition['errors']:
754 utils.header("Cannot create leases, %s"%lease_addition['errors'])
757 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
758 (nodes,lease_spec['slice'],
759 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
760 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
764 def delete_leases (self):
765 "remove all leases in the myplc side"
766 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
767 utils.header("Cleaning leases %r"%lease_ids)
768 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
771 def list_leases (self):
772 "list all leases known to the myplc"
773 leases = self.apiserver.GetLeases(self.auth_root())
        now = int(time.time())
        for l in leases:
            current = l['t_until'] >= now
777 if self.options.verbose or current:
778 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
779 TestPlc.timestamp_printable(l['t_from']),
780 TestPlc.timestamp_printable(l['t_until'])))
783 # create nodegroups if needed, and populate
784 def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
787 for site_spec in self.plc_spec['sites']:
788 test_site = TestSite (self,site_spec)
789 for node_spec in site_spec['nodes']:
790 test_node=TestNode (self,test_site,node_spec)
791 if node_spec.has_key('nodegroups'):
792 nodegroupnames=node_spec['nodegroups']
793 if isinstance(nodegroupnames,StringTypes):
794 nodegroupnames = [ nodegroupnames ]
795 for nodegroupname in nodegroupnames:
796 if not groups_dict.has_key(nodegroupname):
797 groups_dict[nodegroupname]=[]
798 groups_dict[nodegroupname].append(test_node.name())
799 auth=self.auth_root()
        overall = True
        for (nodegroupname, group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:', 'dealing with nodegroup', nodegroupname, 'on nodes', group_nodes
804 # first, check if the nodetagtype is here
805 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
807 tag_type_id = tag_types[0]['tag_type_id']
809 tag_type_id = self.apiserver.AddTagType(auth,
810 {'tagname':nodegroupname,
811 'description': 'for nodegroup %s'%nodegroupname,
813 print 'located tag (type)',nodegroupname,'as',tag_type_id
815 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
817 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
818 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
819 # set node tag on all nodes, value='yes'
820 for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node', nodename, 'seems to already have tag', nodegroupname
828 expect_yes = self.apiserver.GetNodeTags(auth,
829 {'hostname':nodename,
830 'tagname':nodegroupname},
831 ['value'])[0]['value']
832 if expect_yes != "yes":
833 print 'Mismatch node tag on node',nodename,'got',expect_yes
836 if not self.options.dry_run:
837 print 'Cannot find tag',nodegroupname,'on node',nodename
            else:
                try:
                    print 'cleaning nodegroup', nodegroupname
                    self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                except:
                    traceback.print_exc()
        return overall
    # a list of TestNode objs
    def all_nodes (self):
        nodes = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode(self, test_site, node_spec))
        return nodes
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self):
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box'])
                            for node_spec in site_spec['nodes'] ]
        return node_infos
865 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        res = []
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields = node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
                    res.append(node_fields['hostname'])
        return res
875 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
876 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
881 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
882 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
883 # the nodes that haven't checked yet - start with a full list and shrink over time
884 tocheck = self.all_hostnames()
885 utils.header("checking nodes %r"%tocheck)
886 # create a dict hostname -> status
887 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        while tocheck:
            # get the current boot state of the nodes still to check
            tocheck_status = self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname', 'boot_state'])
892 for array in tocheck_status:
893 hostname=array['hostname']
894 boot_state=array['boot_state']
895 if boot_state == target_boot_state:
896 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                else:
                    # if it's a real node, never mind
899 (site_spec,node_spec)=self.locate_hostname(hostname)
900 if TestNode.is_real_model(node_spec['node_fields']['model']):
901 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
903 boot_state = target_boot_state
904 elif datetime.datetime.now() > graceout:
905 utils.header ("%s still in '%s' state"%(hostname,boot_state))
906 graceout=datetime.datetime.now()+datetime.timedelta(1)
907 status[hostname] = boot_state
            tocheck = [ hostname for (hostname, boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state" % (hostname, status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True
921 def nodes_booted(self):
922 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
924 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
926 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
927 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername = self.vservername
        if debug:
            message = "debug"
            local_key = "keys/%(vservername)s-debug.rsa" % locals()
        else:
            message = "boot"
            local_key = "keys/key1.rsa"
935 node_infos = self.all_node_infos()
936 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
937 for (nodename,qemuname) in node_infos:
938 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
939 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
940 (timeout_minutes,silent_minutes,period))
        while node_infos:
            for node_info in node_infos:
943 (hostname,qemuname) = node_info
944 # try to run 'hostname' in the node
945 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
946 # don't spam logs - show the command only after the grace period
                success = utils.system(command, silent=datetime.datetime.now() < graceout)
                if success == 0:
                    utils.header('Successfully entered root@%s (%s)' % (hostname, message))
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec, node_spec) = self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING : check ssh access into real node %s - skipped" % hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname, qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)" % (hostname, qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True
969 def ssh_node_debug(self):
970 "Tries to ssh into nodes in debug mode with the debug ssh key"
971 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
973 def ssh_node_boot(self):
974 "Tries to ssh into nodes in production mode with the root ssh key"
975 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
978 def qemu_local_init (self):
979 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
983 "all nodes: invoke GetBootMedium and store result locally"
986 def qemu_local_config (self):
987 "all nodes: compute qemu config qemu.conf and store it locally"
990 def nodestate_reinstall (self):
991 "all nodes: mark PLCAPI boot_state as reinstall"
994 def nodestate_safeboot (self):
995 "all nodes: mark PLCAPI boot_state as safeboot"
998 def nodestate_boot (self):
999 "all nodes: mark PLCAPI boot_state as boot"
1002 def nodestate_show (self):
1003 "all nodes: show PLCAPI boot_state"
1006 def qemu_export (self):
1007 "all nodes: push local node-dep directory on the qemu box"
1010 ### check hooks : invoke scripts from hooks/{node,slice}
1011 def check_hooks_node (self):
1012 return self.locate_first_node().check_hooks()
1013 def check_hooks_sliver (self) :
1014 return self.locate_first_sliver().check_hooks()
1016 def check_hooks (self):
1017 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1018 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp = slice_spec['initscriptstamp']
1027 for nodename in slice_spec['nodenames']:
1028 (site,node) = self.locate_node (nodename)
1029 # xxx - passing the wrong site - probably harmless
1030 test_site = TestSite (self,site)
1031 test_slice = TestSlice (self,test_site,slice_spec)
1032 test_node = TestNode (self,test_site,node)
1033 test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall
1038 def check_initscripts(self):
1039 "check that the initscripts have triggered"
1040 return self.do_check_initscripts()
1042 def initscripts (self):
1043 "create initscripts with PLCAPI"
1044 for initscript in self.plc_spec['initscripts']:
1045 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1046 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1049 def delete_initscripts (self):
1050 "delete initscripts with PLCAPI"
1051 for initscript in self.plc_spec['initscripts']:
1052 initscript_name = initscript['initscript_fields']['name']
1053 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
                print initscript_name, 'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1063 "create slices with PLCAPI"
1064 return self.do_slices()
1066 def delete_slices (self):
1067 "delete slices with PLCAPI"
1068 return self.do_slices("delete")
1070 def do_slices (self, action="add"):
1071 for slice in self.plc_spec['slices']:
1072 site_spec = self.locate_site (slice['sitename'])
1073 test_site = TestSite(self,site_spec)
1074 test_slice=TestSlice(self,test_site,slice)
1076 utils.header("Deleting slices in site %s"%test_site.name())
1077 test_slice.delete_slice()
1079 utils.pprint("Creating slice",slice)
1080 test_slice.create_slice()
1081 utils.header('Created Slice %s'%slice['slice_fields']['name'])
1085 def ssh_slice(self):
1086 "tries to ssh-enter the slice with the user key, to ensure slice creation"
1090 def keys_clear_known_hosts (self):
1091 "remove test nodes entries from the local known_hosts file"
1095 def qemu_start (self) :
1096 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1100 def timestamp_qemu (self) :
1101 "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
1104 def check_tcp (self):
1105 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        specs = self.plc_spec['tcp_test']
        overall = True
        for spec in specs:
            port = spec['port']
            # locate and start the server-side sliver
            s_test_sliver = self.locate_sliver_obj(spec['server_node'], spec['server_slice'])
            if not s_test_sliver.run_tcp_server(port, timeout=10):
                overall = False
                continue
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj(spec['server_node'], spec['server_slice'])
            if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(), port):
                overall = False
        return overall
1122 # painfully enough, we need to allow for some time as netflow might show up last
1123 def check_netflow (self):
1124 "all nodes: check that the netflow slice is alive"
1125 return self.check_systemslice ('netflow')
1127 # we have the slices up already here, so it should not take too long
1128 def check_systemslice (self, slicename, timeout_minutes=5, period=15):
1129 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        test_nodes = self.all_nodes()
        while test_nodes:
            for test_node in test_nodes:
                # the fine-grain version
                if test_node.check_systemslice(slicename):
                    test_nodes.remove(test_node)
            if not test_nodes:
                return True
            if datetime.datetime.now() > timeout:
                for test_node in test_nodes:
                    utils.header("can't find system slice %s in %s" % (slicename, test_node.name()))
                return False
            time.sleep(period)
        return True
1147 def plcsh_stress_test (self):
1148 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1149 # install the stress-test in the plc image
1150 location = "/usr/share/plc_api/plcsh_stress_test.py"
1151 remote="%s/%s"%(self.vm_root_in_host(),location)
1152 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1154 command += " -- --check"
1155 if self.options.size == 1:
1156 command += " --tiny"
1157 return ( self.run_in_guest(command) == 0)
    # populate runs the same utility with slightly different options
    # in particular it runs with --preserve (don't clean up) and without --check
    # it also gets run twice, once with the --foreign option, to create fake foreign entries
1163 def sfa_install_all (self):
1164 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1165 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1167 def sfa_install_core(self):
1169 return self.yum_install ("sfa")
1171 def sfa_install_plc(self):
1172 "yum install sfa-plc"
1173 return self.yum_install("sfa-plc")
1175 def sfa_install_client(self):
1176 "yum install sfa-client"
1177 return self.yum_install("sfa-client")
1179 def sfa_install_sfatables(self):
1180 "yum install sfa-sfatables"
1181 return self.yum_install ("sfa-sfatables")
1183 def sfa_dbclean(self):
1184 "thoroughly wipes off the SFA database"
1185 self.run_in_guest("sfa-nuke.py")==0 or \
1186 self.run_in_guest("sfa-nuke-plc.py") or \
1187 self.run_in_guest("sfaadmin.py registry nuke")
1190 def sfa_plcclean(self):
1191 "cleans the PLC entries that were created as a side effect of running the script"
1193 sfa_spec=self.plc_spec['sfa']
1195 for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
1196 slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
1197 try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
1198 except: print "Slice %s already absent from PLC db"%slicename
1200 username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
1201 try: self.apiserver.DeletePerson(self.auth_root(),username)
1202 except: print "User %s already absent from PLC db"%username
1204 print "REMEMBER TO RUN sfa_import AGAIN"
1207 def sfa_uninstall(self):
1208 "uses rpm to uninstall sfa - ignore result"
1209 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1210 self.run_in_guest("rm -rf /var/lib/sfa")
1211 self.run_in_guest("rm -rf /etc/sfa")
1212 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1214 self.run_in_guest("rpm -e --noscripts sfa-plc")
1217 ### run unit tests for SFA
1218 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1219 # Running Transaction
1220 # Transaction couldn't start:
1221 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1222 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1223 # no matter how many Gbs are available on the testplc
1224 # could not figure out what's wrong, so...
1225 # if the yum install phase fails, consider the test is successful
1226 # other combinations will eventually run it hopefully
1227 def sfa_utest(self):
1228 "yum install sfa-tests and run SFA unittests"
1229 self.run_in_guest("yum -y install sfa-tests")
1230 # failed to install - forget it
1231 if self.run_in_guest("rpm -q sfa-tests")!=0:
1232 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1234 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1238 dirname="conf.%s"%self.plc_spec['name']
1239 if not os.path.isdir(dirname):
1240 utils.system("mkdir -p %s"%dirname)
1241 if not os.path.isdir(dirname):
1242 raise "Cannot create config dir for plc %s"%self.name()
1245 def conffile(self,filename):
1246 return "%s/%s"%(self.confdir(),filename)
1247 def confsubdir(self,dirname,clean,dry_run=False):
        subdirname = "%s/%s" % (self.confdir(), dirname)
        if clean:
            utils.system("rm -rf %s" % subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s" % subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception, "Cannot create config subdir %s for plc %s" % (dirname, self.name())
        return subdirname
1257 def conffile_clean (self,filename):
1258 filename=self.conffile(filename)
1259 return utils.system("rm -rf %s"%filename)==0
1262 def sfa_configure(self):
1263 "run sfa-config-tty"
1264 tmpname=self.conffile("sfa-config-tty")
1265 fileconf=open(tmpname,'w')
1266 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1267 'SFA_INTERFACE_HRN',
1268 'SFA_REGISTRY_LEVEL1_AUTH',
1269 'SFA_REGISTRY_HOST',
1270 'SFA_AGGREGATE_HOST',
1281 if self.plc_spec['sfa'].has_key(var):
1282 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1283 # the way plc_config handles booleans just sucks..
1286 if self.plc_spec['sfa'][var]: val='true'
1287 fileconf.write ('e %s\n%s\n'%(var,val))
1288 fileconf.write('w\n')
1289 fileconf.write('R\n')
1290 fileconf.write('q\n')
1292 utils.system('cat %s'%tmpname)
1293 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1296 def aggregate_xml_line(self):
1297 port=self.plc_spec['sfa']['neighbours-port']
1298 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1299 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1301 def registry_xml_line(self):
1302 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1303 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
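    # these two helpers produce the one-line XML fragments that cross_sfa_configure (below)
    # glues into aggregates.xml and registries.xml, e.g. roughly
    #   <registry addr="<vserverip>" hrn="<SFA_REGISTRY_ROOT_AUTH>" port="12345"/>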
1306 # a cross step that takes all other plcs in argument
1307 def cross_sfa_configure(self, other_plcs):
1308 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1309 # of course with a single plc, other_plcs is an empty list
1312 agg_fname=self.conffile("agg.xml")
1313 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1314 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1315 utils.header ("(Over)wrote %s"%agg_fname)
1316 reg_fname=self.conffile("reg.xml")
1317 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1318 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1319 utils.header ("(Over)wrote %s"%reg_fname)
1320 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1321 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1323 def sfa_import(self):
1325 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1326 return self.run_in_guest('sfa-import.py')==0 or \
1327 self.run_in_guest('sfa-import-plc.py')==0 or \
1328 self.run_in_guest('sfaadmin.py registry import_registry')==0
1329 # not needed anymore
1330 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1332 def sfa_start(self):
1334 return self.run_in_guest('service sfa start')==0
1336 def sfi_configure(self):
1337 "Create /root/sfi on the plc side for sfi client configuration"
1338 if self.options.dry_run:
1339 utils.header("DRY RUN - skipping step")
1341 sfa_spec=self.plc_spec['sfa']
1342 # cannot use sfa_slice_mapper to pass dir_name
1343 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
1344 site_spec = self.locate_site (slice_spec['sitename'])
1345 test_site = TestSite(self,site_spec)
1346 test_slice=TestSliceSfa(self,test_site,slice_spec)
1347 dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
1348 test_slice.sfi_config(dir_name)
1349 # push into the remote /root/sfi area
1350 location = test_slice.sfi_path()
1351 remote="%s/%s"%(self.vm_root_in_host(),location)
1352 self.test_ssh.mkdir(remote,abs=True)
            # need to strip the last level of 'remote', otherwise we get an extra dir level
1354 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1358 def sfi_clean (self):
1359 "clean up /root/sfi on the plc side"
1360 self.run_in_guest("rm -rf /root/sfi")
1364 def sfa_add_user(self):
1369 def sfa_update_user(self):
1373 def sfa_add_slice(self):
1374 "run sfi.py add (on Registry) from slice.xml"
1378 def sfa_discover(self):
1379 "discover resources into resouces_in.rspec"
1383 def sfa_create_slice(self):
1384 "run sfi.py create (on SM) - 1st time"
1388 def sfa_check_slice_plc(self):
1389 "check sfa_create_slice at the plcs - all local nodes should be in slice"
1393 def sfa_update_slice(self):
1394 "run sfi.py create (on SM) on existing object"
1399 "various registry-related calls"
1403 def ssh_slice_sfa(self):
1404 "tries to ssh-enter the SFA slice"
1408 def sfa_delete_user(self):
1413 def sfa_delete_slice(self):
1414 "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    def sfa_stop(self):
        self.run_in_guest('service sfa stop')
        return True
1422 def populate (self):
1423 "creates random entries in the PLCAPI"
1424 # install the stress-test in the plc image
1425 location = "/usr/share/plc_api/plcsh_stress_test.py"
1426 remote="%s/%s"%(self.vm_root_in_host(),location)
1427 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1429 command += " -- --preserve --short-names"
1430 local = (self.run_in_guest(command) == 0);
1431 # second run with --foreign
1432 command += ' --foreign'
1433 remote = (self.run_in_guest(command) == 0);
1434 return ( local and remote)
1436 def gather_logs (self):
1437 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1438 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1439 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1440 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1441 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1442 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1444 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1445 self.gather_var_logs ()
1447 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1448 self.gather_pgsql_logs ()
1450 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1451 for site_spec in self.plc_spec['sites']:
1452 test_site = TestSite (self,site_spec)
1453 for node_spec in site_spec['nodes']:
1454 test_node=TestNode(self,test_site,node_spec)
1455 test_node.gather_qemu_logs()
1457 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1458 self.gather_nodes_var_logs()
1460 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1461 self.gather_slivers_var_logs()
1464 def gather_slivers_var_logs(self):
1465 for test_sliver in self.all_sliver_objs():
1466 remote = test_sliver.tar_var_logs()
1467 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1468 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1469 utils.system(command)
1472 def gather_var_logs (self):
1473 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1474 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1475 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1476 utils.system(command)
1477 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1478 utils.system(command)
1480 def gather_pgsql_logs (self):
1481 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1482 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1483 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1484 utils.system(command)
1486 def gather_nodes_var_logs (self):
1487 for site_spec in self.plc_spec['sites']:
1488 test_site = TestSite (self,site_spec)
1489 for node_spec in site_spec['nodes']:
1490 test_node=TestNode(self,test_site,node_spec)
1491 test_ssh = TestSsh (test_node.name(),key="keys/key1.rsa")
1492 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1493 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1494 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1495 utils.system(command)
1498 # returns the filename to use for sql dump/restore, using options.dbname if set
1499 def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name = self.options.dbname
            if not isinstance(name, StringTypes):
                raise Exception
        except:
            t = datetime.datetime.now()
            name = str(t.date())
        return "/root/%s-%s.sql" % (database, name)
1511 def plc_db_dump(self):
1512 'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in %s' % dump)
        return True
1518 def plc_db_restore(self):
1519 'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
1521 ##stop httpd service
1522 self.run_in_guest('service httpd stop')
1523 # xxx - need another wrapper
1524 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1525 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1526 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1527 ##starting httpd service
1528 self.run_in_guest('service httpd start')
1530 utils.header('Database restored from ' + dump)
1532 def standby_1_through_20(self):
1533 """convenience function to wait for a specified number of minutes"""
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass