1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# Sleep for the given number of minutes; used by the standby_* pseudo-steps.
# dry_run is accepted so callers can pass the option through.
def standby(minutes,dry_run):
    utils.header('Entering StandBy for %d mn'%minutes)
    # NOTE(review): lines between the header and the sleep are not visible in
    # this view - presumably an early return when dry_run is set; confirm.
    time.sleep(60*minutes)
# Decorator-style helper: derives the standby duration from the decorated
# function's name (e.g. standby_5_xxx -> 5 minutes).
def standby_generic (func):
    # NOTE(review): the inner wrapper def (which provides 'self') is not
    # visible in this view - the two lines below belong to it
        minutes=int(func.__name__.split("_")[1])
        return standby(minutes,self.options.dry_run)
# Decorator: lift a TestNode method into a TestPlc step that runs it on
# every node and reports overall success.
def node_mapper (method):
    def actual(self,*args, **kwds):
        # look up the method of the same name on TestNode
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Decorator: lift a TestSlice method into a TestPlc step that runs it on
# every slice of the spec and reports overall success.
def slice_mapper (method):
        # look up the method of the same name on TestSlice
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
# Same as slice_mapper, but over the SFA slice specs and TestSliceSfa.
def slice_sfa_mapper (method):
        slice_method = TestSliceSfa.__dict__[method.__name__]
        for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
            site_spec = self.locate_site (slice_spec['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSliceSfa(self,test_site,slice_spec)
            if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=method.__doc__
        # (continuation of the default step list - its opening bracket is not
        # visible in this view; SEP/SEPSFA are line-break markers for display)
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
        'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
        'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'check_tcp', 'check_sys_slice', SEP,
        'force_gather_logs', SEP,
        # NOTE(review): between the previous and next entries the default list
        # closes and the on-demand ('other') step list opens - not visible here
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'standby_1_through_20',SEP,
120 def printable_steps (list):
121 single_line=" ".join(list)+" "
122 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
124 def valid_step (step):
125 return step != SEP and step != SEPSFA
127 # turn off the sfa-related steps when build has skipped SFA
128 # this is originally for centos5 as recent SFAs won't build on this platform
    # turn off the sfa-related steps when build has skipped SFA
    # this is originally for centos5 as recent SFAs won't build on this platform
    def check_whether_build_has_sfa (rpms_url):
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        # NOTE(review): the early-return for retcod==0 is not visible in this view
        # move all steps containing 'sfa' from default_steps to other_steps
        sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
        TestPlc.other_steps += sfa_steps
        for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        "store the plc spec and options; set up the ssh and API helpers"
        self.plc_spec=plc_spec
        # NOTE(review): self.options is read below but its assignment is not
        # visible in this view - presumably self.options=options
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        # the XMLRPC endpoint of the myplc under test
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
149 def has_addresses_api (self):
150 return hasattr(self.apiserver,'AddIpAddress')
        # body of name(): "<plc name>.<vservername>" (def line not visible here)
        name=self.plc_spec['name']
        return "%s.%s"%(name,self.vservername)
        # body of host_box() (def line not visible here)
        return self.plc_spec['host_box']
        # body of is_local(): delegates to the TestSsh helper (def line not visible)
        return self.test_ssh.is_local()
162 # define the API methods on this object through xmlrpc
163 # would help, but not strictly necessary
167 def actual_command_in_guest (self,command):
168 return self.test_ssh.actual_command(self.host_to_guest(command))
170 def start_guest (self):
171 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
173 def stop_guest (self):
174 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
176 def run_in_guest (self,command):
177 return utils.system(self.actual_command_in_guest(command))
179 def run_in_host (self,command):
180 return self.test_ssh.run_in_buildname(command)
182 #command gets run in the plc's vm
    #command gets run in the plc's vm
    def host_to_guest(self,command):
        # lxc guests are entered over plain ssh by ip ...
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
        # ... vserver guests through 'vserver exec'
        # NOTE(review): the 'else:' line is not visible in this view
            return "vserver %s exec %s"%(self.vservername,command)
    def vm_root_in_host(self):
        "path, on the host box, of the guest's root filesystem"
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
        # NOTE(review): the 'else:' line is not visible in this view
            return "/vservers/%s"%(self.vservername)
    def vm_timestamp_path (self):
        "path, on the host box, of the timestamp file for this guest"
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
        # NOTE(review): the 'else:' line is not visible in this view
            return "/vservers/%s.timestamp"%(self.vservername)
201 #start/stop the vserver
    def start_guest_in_host(self):
        "the host-side command that starts the guest"
        if self.options.plcs_use_lxc:
            return "lxc-start --daemon --name=%s"%(self.vservername)
        # NOTE(review): the 'else:' line is not visible in this view
            return "vserver %s start"%(self.vservername)
    def stop_guest_in_host(self):
        "the host-side command that stops the guest"
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s"%(self.vservername)
        # NOTE(review): the 'else:' line is not visible in this view
            return "vserver %s stop"%(self.vservername)
215 def run_in_guest_piped (self,local,remote):
216 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance (rpms, list):
            # NOTE(review): the line joining the list into a string is not
            # visible in this view
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        # success is decided by rpm -q, not by yum's exit code
        return self.run_in_guest("rpm -q %s"%rpms)==0
    def auth_root (self):
        "the PLCAPI authentication struct for the root account"
        # NOTE(review): the dict's closing brace is not visible in this view
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
    def locate_site (self,sitename):
        "find a site spec by name or login_base; raises when not found"
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
            # (the return statements under each match are not visible here)
            if site['site_fields']['login_base'] == sitename:
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        "find a node spec by name; raises when not found"
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                # (the return under the match is not visible here)
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        "find a node spec by hostname; raises when not found"
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                # (the return under the match is not visible here)
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,keyname):
        "find a key spec by name; raises when not found"
        for key in self.plc_spec['keys']:
            if key['name'] == keyname:
            # (the return under the match is not visible here)
        raise Exception,"Cannot locate key %s"%keyname
    def locate_slice (self, slicename):
        "find a slice spec by name; raises when not found"
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
            # (the return under the match is not visible here)
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        "a TestSliver object for every (slice,node) pair of the spec"
        # NOTE(review): 'result' init and the trailing return are not visible here
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
    def locate_sliver_obj (self,nodename,slicename):
        "build the TestSliver object for a given (nodename,slicename) pair"
        (site,node) = self.locate_node(nodename)
        slice = self.locate_slice (slicename)
        # wrap the specs in their Test* helpers
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice (self, test_site, slice)
        return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        "the TestNode for the first node of the first slice in the spec"
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # NOTE(review): the trailing return is not visible in this view
292 def locate_first_sliver (self):
293 slice_spec=self.plc_spec['slices'][0]
294 slicename=slice_spec['slice_fields']['name']
295 nodename=slice_spec['nodenames'][0]
296 return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): 'tuples' and 'result' inits, plus the trailing return,
        # are not visible in this view
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        for (box,node) in tuples:
            if not result.has_key(box):
            result[box].append(node)
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        # NOTE(review): the trailing return is not visible in this view
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        # NOTE(review): the trailing return is not visible in this view
    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        # NOTE(review): the trailing return is not visible in this view
    # kill only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the per-node body is not visible in this view
    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the per-node body is not visible in this view
    #################### display config
        # body of show() - its def line is not visible in this view
        "show test configuration after localization"
        # body of export() - its def line is not visible in this view
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        domain=socket.gethostname().split('.',1)[1]
        fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
        print "export BUILD=%s"%self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s"%fqdn
        # NOTE(review): the 'else:' line is not visible in this view
            print "export PLCHOSTVS=%s"%fqdn
        print "export GUESTNAME=%s"%self.plc_spec['vservername']
        vplcname=self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
        # find hostname of first node
        (hostname,qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s"%(qemubox,domain)
        print "export NODE=%s"%(hostname)
    # spec keys shown even when not in verbose mode
    always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def show_pass (self,passno):
        "one display pass over the plc spec; verbosity controls the key set"
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            # NOTE(review): the dispatch structure on 'key' is only partly
            # visible in this view - indentation below is approximate
                    self.display_site_spec(site)
                    for node in site['nodes']:
                        self.display_node_spec(node)
                elif key=='initscripts':
                    for initscript in val:
                        self.display_initscript_spec (initscript)
                        self.display_slice_spec (slice)
                        self.display_key_spec (key)
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ',key,':',val
    def display_site_spec (self,site):
        "dump one site spec; verbosity controls the key set"
        print '+ ======== site',site['site_fields']['name']
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            # NOTE(review): the dispatch structure on 'k' is only partly
            # visible in this view - indentation below is approximate
                print '+ ','nodes : ',
                    print node['node_fields']['hostname'],'',
                    print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
434 def display_initscript_spec (self,initscript):
435 print '+ ======== initscript',initscript['initscript_fields']['name']
437 def display_key_spec (self,key):
438 print '+ ======== key',key['name']
    def display_slice_spec (self,slice):
        "dump one slice spec"
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            # NOTE(review): earlier branches of this dispatch are not visible here
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
462 def display_node_spec (self,node):
463 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
464 print "hostname=",node['node_fields']['hostname'],
465 print "ip=",node['interface_fields']['ip']
466 if self.options.verbose:
467 utils.pprint("node details",node,depth=3)
469 # another entry point for just showing the boxes involved
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        "display the plc- and node-level host mapping"
        TestPlc.display_mapping_plc(self.plc_spec)
        # NOTE(review): the trailing return is not visible in this view
475 def display_mapping_plc (plc_spec):
476 print '+ MyPLC',plc_spec['name']
477 # WARNING this would not be right for lxc-based PLC's - should be harmless though
478 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
479 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
480 for site_spec in plc_spec['sites']:
481 for node_spec in site_spec['nodes']:
482 TestPlc.display_mapping_node(node_spec)
485 def display_mapping_node (node_spec):
486 print '+ NODE %s'%(node_spec['name'])
487 print '+\tqemu box %s'%node_spec['host_box']
488 print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        "store the current time in the guest's timestamp file on the host"
        # NOTE(review): the line defining 'now' is not visible in this view -
        # presumably the current epoch time
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path=self.vm_timestamp_path ()
        stamp_dir = os.path.dirname (stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
    # this is called inconditionnally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
        # (def line of vs_delete not visible in this view)
        "vserver delete the test myplc"
        stamp_path=self.vm_timestamp_path()
        self.run_in_host("rm -f %s"%stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("lxc-stop --name %s"%self.vservername)
            self.run_in_host("lxc-destroy --name %s"%self.vservername)
        # NOTE(review): the 'else:' branch header is not visible in this view
            self.run_in_host("vserver --silent %s delete"%self.vservername)
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # use a standard name - will be relative to remote buildname
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        # NOTE(review): the initialization of test_env_options is not visible here
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        if self.options.plcs_use_lxc:
            script="vtest-init-lxc.sh"
        # NOTE(review): the 'else:' line is not visible in this view
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # reverse-lookup the guest ip to pass a hostname to the init script
        # NOTE(review): the surrounding try/except lines are not visible here
            vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s"%vserver_hostname
            print "Cannot reverse lookup %s"%self.vserverip
            print "This is considered fatal, as this might pollute the test results"
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"
        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        # NOTE(review): the 'arch' assignments under these branches are not
        # visible in this view
        if self.options.personality == "linux32":
        elif self.options.personality == "linux64":
            raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        # NOTE(review): pkgs_list initialization is not visible in this view
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        # NOTE(review): pkgs_string appears unused - yum_install receives the list
        pkgs_string=" ".join(pkgs_list)
        return self.yum_install (pkgs_list)
    def plc_configure(self):
        "generate a plc-config-tty script from the spec and feed it to the guest"
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        # NOTE(review): many entries of this variable list, and its closing
        # bracket, are not visible in this view
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        # 'w' writes the config, 'q' quits plc-config-tty
        fileconf.write('w\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        # body of plc_start() - def line not visible in this view
        self.run_in_guest('service plc start')
        # body of plc_stop() - def line not visible in this view
        self.run_in_guest('service plc stop')
        # docstrings of vs_start()/vs_stop() - def lines not visible in this view
        "start the PLC vserver"
        "stop the PLC vserver"
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        # NOTE(review): the trailing return is not visible in this view
    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        # NOTE(review): the trailing return is not visible in this view
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): 'dir'/'overall' setup and the trailing return are not
        # visible in this view
        if not os.path.isdir(dir):
        vservername=self.vservername
        vm_root=self.vm_root_in_host()
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        # body of sites() - def line not visible in this view
        "create sites with PLCAPI"
        return self.do_sites()
670 def delete_sites (self):
671 "delete sites with PLCAPI"
672 return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        "create or delete the sites (and their users) described in the spec"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            # NOTE(review): the 'else:' line is not visible in this view
                utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
                test_site.create_site()
                test_site.create_users()
        # NOTE(review): the trailing return is not visible in this view
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
        for site_id in site_ids:
            print 'Deleting site_id',site_id
            self.apiserver.DeleteSite(self.auth_root(),site_id)
        # NOTE(review): the trailing return is not visible in this view
        # body of nodes() - def line not visible in this view
        "create nodes with PLCAPI"
        return self.do_nodes()
701 def delete_nodes (self):
702 "delete nodes with PLCAPI"
703 return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        "create or delete the nodes described in the spec"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            # NOTE(review): the branch header on 'action' is not visible here
                utils.header("Deleting nodes in site %s"%test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node=TestNode(self,test_site,node_spec)
                    utils.header("Deleting %s"%test_node.name())
                    test_node.delete_node()
                utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s'%node_spec,node_spec)
                    test_node = TestNode (self,test_site,node_spec)
                    test_node.create_node ()
722 def nodegroups (self):
723 "create nodegroups with PLCAPI"
724 return self.do_nodegroups("add")
725 def delete_nodegroups (self):
726 "delete nodegroups with PLCAPI"
727 return self.do_nodegroups("delete")
731 def translate_timestamp (start,grain,timestamp):
732 if timestamp < TestPlc.YEAR: return start+timestamp*grain
733 else: return timestamp
736 def timestamp_printable (timestamp):
737 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
        # body of leases() - def line not visible in this view
        "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
        # NOTE(review): the line defining 'now' is not visible in this view
        grain=self.apiserver.GetLeaseGranularity(self.auth_root())
        print 'API answered grain=',grain
        # round current time down to a grain boundary
        start=(now/grain)*grain
        # find out all nodes that are reservable
        nodes=self.all_reservable_nodenames()
        # NOTE(review): the guard before this early-out message is not visible
            utils.header ("No reservable node found - proceeding without leases")
        # attach them to the leases as specified in plc_specs
        # this is where the 'leases' field gets interpreted as relative of absolute
        for lease_spec in self.plc_spec['leases']:
            # skip the ones that come with a null slice id
            if not lease_spec['slice']: continue
            lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
            lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
            lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
                                                    lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
            if lease_addition['errors']:
                utils.header("Cannot create leases, %s"%lease_addition['errors'])
            # NOTE(review): the 'else:' line is not visible in this view
                utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
                             (nodes,lease_spec['slice'],
                              lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
                              lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        # NOTE(review): the trailing return is not visible in this view
    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        # NOTE(review): the loop header and 'now' definition are not visible here
            # only past leases are skipped unless verbose
            current=l['t_until']>=now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                       TestPlc.timestamp_printable(l['t_from']),
                                                       TestPlc.timestamp_printable(l['t_until'])))
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        "create (or clean) the nodegroups declared on the node specs"
        # 1st pass to scan contents
        # NOTE(review): groups_dict initialization is not visible in this view
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # accept a single name as well as a list of names
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            # NOTE(review): the branch header on 'action' is not visible here
                print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
                # reuse the existing tag type, or create one
                tag_type_id = tag_types[0]['tag_type_id']
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname':nodegroupname,
                                                             'description': 'for nodegroup %s'%nodegroupname,
                print 'located tag (type)',nodegroupname,'as',tag_type_id
                # create the nodegroup itself when missing
                nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    # NOTE(review): the try line is not visible in this view
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                        traceback.print_exc()
                        print 'node',nodename,'seems to already have tag',nodegroupname
                    # check that the tag reads back as 'yes'
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname':nodename,
                                                                 'tagname':nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node',nodename,'got',expect_yes
                        if not self.options.dry_run:
                            print 'Cannot find tag',nodegroupname,'on node',nodename
                # deletion path: drop the whole nodegroup
                    print 'cleaning nodegroup',nodegroupname
                    self.apiserver.DeleteNodeGroup(auth,nodegroupname)
                    traceback.print_exc()
    # a list of TestNode objs
    def all_nodes (self):
        "one TestNode object per node of the spec"
        # NOTE(review): 'nodes' init and the trailing return are not visible here
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        # NOTE(review): 'node_infos' init and the trailing return are not
        # visible in this view
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
873 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        "hostnames of the nodes whose node_type is 'reservable'"
        # NOTE(review): 'res' init and the trailing return are not visible here
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        "poll the API until all nodes reach target_boot_state, or time out"
        if self.options.dry_run:
            # NOTE(review): the dry_run early-return body is not visible here
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        # NOTE(review): the polling-loop header is not visible in this view
            tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
            # update status
            for array in tocheck_status:
                hostname=array['hostname']
                boot_state=array['boot_state']
                if boot_state == target_boot_state:
                    utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
                # NOTE(review): the 'else:' line is not visible in this view
                    # if it's a real node, never mind
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                        # pretend it reached the target so it is dropped from tocheck
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header ("%s still in '%s' state"%(hostname,boot_state))
                        # only warn once a day past the grace period
                        graceout=datetime.datetime.now()+datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
            # otherwise, sleep for a while
        # only useful in empty plcs
926 # only useful in empty plcs
929 def nodes_booted(self):
930 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        "try to ssh into every node (debug or production key) until success or timeout"
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        # NOTE(review): the branch selecting debug vs production mode (and the
        # 'message' variable) is only partly visible in this view
            local_key = "keys/%(vservername)s-debug.rsa"%locals()
            local_key = "keys/key1.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
        # NOTE(review): the retry-loop header is not visible in this view
            for node_info in node_infos:
                (hostname,qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system ( command, silent=datetime.datetime.now() < graceout)
                    utils.header('Successfully entered root@%s (%s)'%(hostname,message))
                    # don't probe this node again
                    node_infos.remove(node_info)
                # NOTE(review): the 'else:' line is not visible in this view
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec,node_spec)=self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                        node_infos.remove(node_info)
            if datetime.datetime.now() > timeout:
                for (hostname,qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
            # otherwise, sleep for a while
        # only useful in empty plcs
977 def ssh_node_debug(self):
978 "Tries to ssh into nodes in debug mode with the debug ssh key"
979 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
981 def ssh_node_boot(self):
982 "Tries to ssh into nodes in production mode with the root ssh key"
983 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
    # the following are node-mapped steps; several def lines and their
    # @node_mapper decorators are not visible in this view
    def qemu_local_init (self):
        "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
        "all nodes: invoke GetBootMedium and store result locally"
    def qemu_local_config (self):
        "all nodes: compute qemu config qemu.conf and store it locally"
    def nodestate_reinstall (self):
        "all nodes: mark PLCAPI boot_state as reinstall"
    def nodestate_safeboot (self):
        "all nodes: mark PLCAPI boot_state as safeboot"
    def nodestate_boot (self):
        "all nodes: mark PLCAPI boot_state as boot"
    def nodestate_show (self):
        "all nodes: show PLCAPI boot_state"
    def qemu_export (self):
        "all nodes: push local node-dep directory on the qemu box"
1018 ### check hooks : invoke scripts from hooks/{node,slice}
1019 def check_hooks_node (self):
1020 return self.locate_first_node().check_hooks()
1021 def check_hooks_sliver (self) :
1022 return self.locate_first_sliver().check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # node-side checks first; the sliver-side ones only run if those pass
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
def do_check_initscripts(self):
    # For every slice that declares an 'initscriptstamp', verify on each of
    # its nodes that the corresponding sliver carries the stamp, i.e. that
    # the initscript actually ran.
    for slice_spec in self.plc_spec['slices']:
        # slices without a stamp are skipped (the guard's body -- presumably
        # a 'continue' -- is not visible in this excerpt)
        if not slice_spec.has_key('initscriptstamp'):
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            # failure handling / overall return value elided in this excerpt
            if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # plain delegation - kept as a separate name so it shows up as a step
    result = self.do_check_initscripts()
    return result
def initscripts (self):
    "create initscripts with PLCAPI"
    # push every initscript declared in the plc spec into the PLCAPI;
    # the step's return statement is not visible in this excerpt
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
def delete_initscripts (self):
    "delete initscripts with PLCAPI"
    # best-effort deletion of every initscript declared in the spec
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        # NOTE(review): a try/except presumably wraps the two statements
        # below -- the 'deletion went wrong' print reads like its except
        # branch; the guard lines are not visible in this excerpt
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
# body of the 'slices' step (its 'def' line is not visible in this
# excerpt): creates all slices by delegating to do_slices() in its
# default "add" mode
    "create slices with PLCAPI"
    return self.do_slices()
def delete_slices (self):
    "delete slices with PLCAPI"
    # same machinery as the 'slices' step, run in delete mode
    return self.do_slices (action="delete")
def do_slices (self, action="add"):
    # create or delete (per 'action') every slice declared in the plc spec;
    # note: loop variable 'slice' shadows the builtin of the same name
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        # NOTE(review): the if/else guards selecting between the delete and
        # create branches below (keyed on 'action') are not visible in this
        # excerpt, nor is the step's return statement
            utils.header("Deleting slices in site %s"%test_site.name())
            test_slice.delete_slice()
            utils.pprint("Creating slice",slice)
            test_slice.create_slice()
            utils.header('Created Slice %s'%slice['slice_fields']['name'])
# Step stubs: bodies ('pass') and the mapper decorators that dispatch them
# are not visible in this excerpt.
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"
def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"
def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def timestamp_qemu (self) :
    # NOTE(review): the docstring below is identical to qemu_start's and
    # looks copy-pasted; the method name suggests this step *timestamps*
    # the qemu instance -- confirm against TestNode and reword.
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # run one server/client pair per entry in the spec's 'tcp_test' list;
    # the loop header, port extraction and failure handling are not
    # visible in this excerpt
    specs = self.plc_spec['tcp_test']
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
        # idem for the client side
        # NOTE(review): likely bug -- the *client* sliver is located with
        # the server spec keys; should presumably use spec['client_node']
        # and spec['client_slice']
        c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1130 # painfully enough, we need to allow for some time as netflow might show up last
def check_sys_slice (self):
    "all nodes: check that a system slice is alive"
    # 'netflow' would be the natural candidate but it is currently broken
    # in the lxc distro, so probe the 'drl' system slice instead
    system_slicename = 'drl'
    return self.check_systemslice (system_slicename)
# we have the slices up already here, so it should not take too long
def check_systemslice (self, slicename, timeout_minutes=5, period=15):
    # poll every node until the given system slice shows up, or give up
    # after timeout_minutes; the enclosing retry loop and the success path
    # are not visible in this excerpt
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    test_nodes=self.all_nodes()
    for test_node in test_nodes:
        if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
            # NOTE(review): removing from 'test_nodes' while iterating it
            # can skip the element after the removed one -- iterate a copy
            test_nodes.remove(test_node)
    if datetime.datetime.now () > timeout:
        for test_node in test_nodes:
            utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the initialization of 'command' (presumably from
    # 'location') is not visible in this excerpt
    command += " -- --check"
    if self.options.size == 1:
        command += " --tiny"
    return ( self.run_in_guest(command) == 0)
1170 # populate runs the same utility without slightly different options
1171 # in particular runs with --preserve (dont cleanup) and without --check
1172 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # the full set of sfa packages in one shot
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install (packages)
def sfa_install_core(self):
    # yum install the core 'sfa' package only (its docstring line is not
    # visible in this excerpt)
    return self.yum_install ("sfa")
def sfa_install_plc(self):
    "yum install sfa-plc"
    # the plc-side sfa package
    package = "sfa-plc"
    return self.yum_install(package)
def sfa_install_client(self):
    "yum install sfa-client"
    # the client-side (sfi) tools
    package = "sfa-client"
    return self.yum_install(package)
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # the sfatables filtering engine
    package = "sfa-sfatables"
    return self.yum_install(package)
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # try the successive nuke utilities; the or-chain stops at the first
    # call that reports success (exit status 0)
    # NOTE(review): the second call lacks '==0' -- a *failing* (non-zero)
    # run of sfa-nuke-plc.py is truthy and short-circuits the chain as if
    # it had succeeded; the third call's result is likewise untested
    self.run_in_guest("sfa-nuke.py")==0 or \
    self.run_in_guest("sfa-nuke-plc.py") or \
    self.run_in_guest("sfaadmin.py registry nuke")
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # best-effort removal of the slice and user that the sfa steps create;
    # bare excepts deliberately treat 'already absent' like success
    sfa_spec=self.plc_spec['sfa']
    for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
        slicename='%s_%s'%(sfa_slice_spec['login_base'],sfa_slice_spec['slicename'])
        try: self.apiserver.DeleteSlice(self.auth_root(),slicename)
        except: print "Slice %s already absent from PLC db"%slicename
        username="%s@%s"%(sfa_slice_spec['regularuser'],sfa_slice_spec['domain'])
        try: self.apiserver.DeletePerson(self.auth_root(),username)
        except: print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # best-effort teardown: return codes are deliberately ignored (see
    # docstring); wipes packages, state, config and logs
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # second pass with --noscripts in case the package scriptlets fail
    self.run_in_guest("rpm -e --noscripts sfa-plc")
1228 ### run unit tests for SFA
1229 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1230 # Running Transaction
1231 # Transaction couldn't start:
1232 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1233 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1234 # no matter how many Gbs are available on the testplc
1235 # could not figure out what's wrong, so...
1236 # if the yum install phase fails, consider the test is successful
1237 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    # (per the rationale in the comment block above this method, an install
    # failure is treated as success; the early return inside this guard is
    # not visible in this excerpt)
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# body of confdir(self) -- the 'def' line is not visible in this excerpt.
# Ensures a local per-plc config directory conf.<plcname> exists;
# presumably ends by returning dirname (conffile() builds on it).
    dirname="conf.%s"%self.plc_spec['name']
    if not os.path.isdir(dirname):
        utils.system("mkdir -p %s"%dirname)
    if not os.path.isdir(dirname):
        # NOTE(review): raising a plain string is not legal on Python 2.6+
        # (TypeError at raise time); should be e.g. Exception(...)
        raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # path of <filename> inside this plc's local config directory
    return self.confdir() + "/" + filename
def confsubdir(self,dirname,clean,dry_run=False):
    # ensure <confdir>/<dirname> exists, optionally wiping it first;
    # callers (e.g. sfi_configure) use the returned path -- the 'return
    # subdirname' is presumably on a line not visible in this excerpt
    subdirname="%s/%s"%(self.confdir(),dirname)
        # the wipe below is presumably guarded by 'if clean:' -- guard line
        # not visible in this excerpt
        utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # NOTE(review): raising a plain string is not legal on Python 2.6+
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    # remove the named file from the local config dir; True when rm succeeded
    path = self.conffile(filename)
    return utils.system("rm -rf %s"%path)==0
def sfa_configure(self):
    "run sfa-config-tty"
    # Build an input script for the interactive sfa-config-tty tool
    # ('e VAR' followed by the value, then w=write / R / q=quit) from the
    # plc_spec['sfa'] settings, then pipe it into the guest.
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
        # NOTE(review): the tail of this variable list (and its closing
        # bracket) is not visible in this excerpt
        if self.plc_spec['sfa'].has_key(var):
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
        # the two lines below read like the body of a second loop over
        # boolean variables -- its header is not visible in this excerpt
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # fileconf.close() is presumably on a line not visible here -- the cat
    # below needs the data flushed to disk
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # One <aggregate/> entry describing this plc, used when building
    # aggregates.xml for the other plcs (see cross_sfa_configure).
    port=self.plc_spec['sfa']['neighbours-port']
    # use %s rather than %r for the port: %r would inject repr quotes into
    # the XML attribute if the configured port happened to be a string
    # (identical output for the usual integer case)
    return '<aggregate addr="%s" hrn="%s" port="%s"/>' % \
        (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
def registry_xml_line(self):
    # one <registry/> entry for this plc - the registry port is fixed at 12345
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # (one or two lines between here and the writes are not visible in this
    # excerpt)
    agg_fname=self.conffile("agg.xml")
    # py2 'file()' handles are never explicitly closed; flushing relies on
    # CPython refcounting before the copy below
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # both copies into the guest's /etc/sfa must succeed
    return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
        and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
def sfa_import(self):
    # run whichever import utility this sfa version ships, newest name
    # first; success of any one of them makes the step pass
    # NOTE(review): 'auth' is now only referenced by the commented-out line
    # at the bottom -- candidate for removal
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return self.run_in_guest('sfa-import.py')==0 or \
        self.run_in_guest('sfa-import-plc.py')==0 or \
        self.run_in_guest('sfaadmin.py registry import_registry')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    # start the sfa service inside the guest; True when the init script
    # reports success (this step's docstring line is not visible here)
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # the early return for the dry-run case is presumably on a line
        # not visible in this excerpt
    sfa_spec=self.plc_spec['sfa']
    # cannot use sfa_slice_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSliceSfa(self,test_site,slice_spec)
        # build the per-slice dot-sfi config locally, then push it over
        dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_config(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # wipe what sfi_configure pushed; result code ignored (best-effort)
    self.run_in_guest("rm -rf /root/sfi")
# SFA step stubs: bodies and the slice_sfa_mapper decorators that
# presumably dispatch them per sfa-slice (see the file header) are not
# visible in this excerpt; some docstrings are likewise elided.
def sfa_add_user(self):
def sfa_update_user(self):
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
def sfa_discover(self):
    "discover resources into resouces_in.rspec"
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
    # NOTE(review): the docstring below belongs to a neighbouring step
    # whose 'def' line is not visible in this excerpt
    "various registry-related calls"
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
    # the statement below reads like the body of an sfa_stop step whose
    # 'def' line is not visible in this excerpt
    self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the initialization of 'command' (presumably from
    # 'location', cf. plcsh_stress_test) is not visible in this excerpt
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # NOTE(review): 'remote' is reused here for a boolean, shadowing the
    # path string above -- works, but confusing
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # each phase prints a banner, then delegates to a dedicated gatherer
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
    self.gather_root_sfi ()
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch each sliver's /var/log into logs/sliver.var-log.<slivername>/
    for sliver in self.all_sliver_objs():
        tar_out = sliver.tar_var_logs()
        dest = "logs/sliver.var-log.%s"%sliver.name()
        utils.system("mkdir -p %s"%dest)
        utils.system(tar_out + " | tar -C %s -xf -"%dest)
def gather_var_logs (self):
    # pull the plc's /var/log into logs/myplc.var-log.<plcname>/ and make
    # the httpd subdir browsable from the reports
    dest = "logs/myplc.var-log.%s"%self.name()
    utils.system("mkdir -p %s"%dest)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -"%dest)
    utils.system("chmod a+r,a+x %s/httpd"%dest)
def gather_pgsql_logs (self):
    # pull the plc's postgres server logs into logs/myplc.pgsql-log.<plcname>/
    dest = "logs/myplc.pgsql-log.%s"%self.name()
    utils.system("mkdir -p %s"%dest)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -"%dest)
def gather_root_sfi (self):
    # pull the plc's /root/sfi (sfi client configs) into logs/sfi.<plcname>/
    dest = "logs/sfi.%s"%self.name()
    utils.system("mkdir -p %s"%dest)
    tar_out = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -"%dest)
def gather_nodes_var_logs (self):
    # fetch /var/log from every node (over ssh with the debug key) into
    # logs/node.var-log.<nodename>/
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self,site,node_spec)
            ssh = TestSsh (node.name(),key="keys/key1.rsa")
            dest = "logs/node.var-log.%s"%node.name()
            fetch = ssh.actual_command("tar -C /var/log -cf - .") + "| tar -C %s -xf -"%dest
            utils.system("mkdir -p %s"%dest)
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    name=self.options.dbname
    # when dbname is unset / not a string, fall back to a timestamp-based
    # name -- the strftime lines of that branch are not visible in this
    # excerpt
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" looks like a typo for "planetlab5"; it only
    # affects the dump *filename* (plc_db_restore uses the same spelling,
    # so the pair stays consistent) -- fix both together if renaming
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # NOTE(review): same "planetab5" filename spelling as plc_db_dump --
    # keep the two in sync if ever renamed
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    # drop and recreate the database, then replay the dump
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
def standby_1_through_20(self):
    """convenience function to wait for a specified number of minutes"""
# Sleep-step stubs: each standby_<n> is presumably wrapped by the
# standby_generic decorator defined at the top of this file (decorator
# lines are not visible here), which parses <n> out of the function name
# and sleeps that many minutes -- hence the empty bodies.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass