1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestSliceSfa import TestSliceSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
40 def node_mapper (method):
41 def actual(self,*args, **kwds):
43 node_method = TestNode.__dict__[method.__name__]
44 for test_node in self.all_nodes():
45 if not node_method(test_node, *args, **kwds): overall=False
47 # restore the doc text
48 actual.__doc__=method.__doc__
51 def slice_mapper (method):
54 slice_method = TestSlice.__dict__[method.__name__]
55 for slice_spec in self.plc_spec['slices']:
56 site_spec = self.locate_site (slice_spec['sitename'])
57 test_site = TestSite(self,site_spec)
58 test_slice=TestSlice(self,test_site,slice_spec)
59 if not slice_method(test_slice,self.options): overall=False
61 # restore the doc text
62 actual.__doc__=method.__doc__
65 def slice_sfa_mapper (method):
68 slice_method = TestSliceSfa.__dict__[method.__name__]
69 for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
70 test_slice=TestSliceSfa(self,slice_spec)
71 if not slice_method(test_slice,self.options): overall=False
73 # restore the doc text
74 actual.__doc__=method.__doc__
84 'vs_delete','timestamp_vs','vs_create', SEP,
85 'plc_install', 'plc_configure', 'plc_start', SEP,
86 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
87 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
88 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
89 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
90 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
91 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
92 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
93 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
94 'sfa_update_user@1', 'sfa_update_slice@1', 'sfa_view@1', 'sfa_utest@1',SEPSFA,
95 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
96 # but as the stress test might take a while, we sometimes missed the debug mode..
97 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
98 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
99 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
100 'check_tcp', 'check_sys_slice', SEP,
101 'force_gather_logs', SEP,
104 'export', 'show_boxes', SEP,
105 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
106 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
107 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
108 'delete_leases', 'list_leases', SEP,
110 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
111 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
112 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
113 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
114 'plc_db_dump' , 'plc_db_restore', SEP,
115 'standby_1_through_20',SEP,
119 def printable_steps (list):
120 single_line=" ".join(list)+" "
121 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
123 def valid_step (step):
124 return step != SEP and step != SEPSFA
126 # turn off the sfa-related steps when build has skipped SFA
127 # this is originally for centos5 as recent SFAs won't build on this platform
129 def check_whether_build_has_sfa (rpms_url):
130 # warning, we're now building 'sface' so let's be a bit more picky
131 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
132 # full builds are expected to return with 0 here
134 # move all steps containing 'sfa' from default_steps to other_steps
135 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 ]
136 TestPlc.other_steps += sfa_steps
137 for step in sfa_steps: TestPlc.default_steps.remove(step)
139 def __init__ (self,plc_spec,options):
140 self.plc_spec=plc_spec
142 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
143 self.vserverip=plc_spec['vserverip']
144 self.vservername=plc_spec['vservername']
145 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
146 self.apiserver=TestApiserver(self.url,options.dry_run)
148 def has_addresses_api (self):
149 return self.apiserver.has_method('AddIpAddress')
152 name=self.plc_spec['name']
153 return "%s.%s"%(name,self.vservername)
156 return self.plc_spec['host_box']
159 return self.test_ssh.is_local()
161 # define the API methods on this object through xmlrpc
162 # would help, but not strictly necessary
166 def actual_command_in_guest (self,command):
167 return self.test_ssh.actual_command(self.host_to_guest(command))
169 def start_guest (self):
170 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
172 def stop_guest (self):
173 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
175 def run_in_guest (self,command):
176 return utils.system(self.actual_command_in_guest(command))
178 def run_in_host (self,command):
179 return self.test_ssh.run_in_buildname(command)
181 #command gets run in the plc's vm
182 def host_to_guest(self,command):
183 if self.options.plcs_use_lxc:
184 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
186 return "vserver %s exec %s"%(self.vservername,command)
188 def vm_root_in_host(self):
189 if self.options.plcs_use_lxc:
190 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
192 return "/vservers/%s"%(self.vservername)
194 def vm_timestamp_path (self):
195 if self.options.plcs_use_lxc:
196 return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
198 return "/vservers/%s.timestamp"%(self.vservername)
200 #start/stop the vserver
201 def start_guest_in_host(self):
202 if self.options.plcs_use_lxc:
203 return "lxc-start --daemon --name=%s"%(self.vservername)
205 return "vserver %s start"%(self.vservername)
207 def stop_guest_in_host(self):
208 if self.options.plcs_use_lxc:
209 return "lxc-stop --name=%s"%(self.vservername)
211 return "vserver %s stop"%(self.vservername)
214 def run_in_guest_piped (self,local,remote):
215 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
217 # does a yum install in the vs, ignore yum retcod, check with rpm
218 def yum_install (self, rpms):
219 if isinstance (rpms, list):
221 self.run_in_guest("yum -y install %s"%rpms)
222 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
223 self.run_in_guest("yum-complete-transaction -y")
224 return self.run_in_guest("rpm -q %s"%rpms)==0
226 def auth_root (self):
227 return {'Username':self.plc_spec['PLC_ROOT_USER'],
228 'AuthMethod':'password',
229 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
230 'Role' : self.plc_spec['role']
232 def locate_site (self,sitename):
233 for site in self.plc_spec['sites']:
234 if site['site_fields']['name'] == sitename:
236 if site['site_fields']['login_base'] == sitename:
238 raise Exception,"Cannot locate site %s"%sitename
240 def locate_node (self,nodename):
241 for site in self.plc_spec['sites']:
242 for node in site['nodes']:
243 if node['name'] == nodename:
245 raise Exception,"Cannot locate node %s"%nodename
247 def locate_hostname (self,hostname):
248 for site in self.plc_spec['sites']:
249 for node in site['nodes']:
250 if node['node_fields']['hostname'] == hostname:
252 raise Exception,"Cannot locate hostname %s"%hostname
254 def locate_key (self,keyname):
255 for key in self.plc_spec['keys']:
256 if key['name'] == keyname:
258 raise Exception,"Cannot locate key %s"%keyname
260 def locate_slice (self, slicename):
261 for slice in self.plc_spec['slices']:
262 if slice['slice_fields']['name'] == slicename:
264 raise Exception,"Cannot locate slice %s"%slicename
266 def all_sliver_objs (self):
268 for slice_spec in self.plc_spec['slices']:
269 slicename = slice_spec['slice_fields']['name']
270 for nodename in slice_spec['nodenames']:
271 result.append(self.locate_sliver_obj (nodename,slicename))
274 def locate_sliver_obj (self,nodename,slicename):
275 (site,node) = self.locate_node(nodename)
276 slice = self.locate_slice (slicename)
278 test_site = TestSite (self, site)
279 test_node = TestNode (self, test_site,node)
280 # xxx the slice site is assumed to be the node site - mhh - probably harmless
281 test_slice = TestSlice (self, test_site, slice)
282 return TestSliver (self, test_node, test_slice)
284 def locate_first_node(self):
285 nodename=self.plc_spec['slices'][0]['nodenames'][0]
286 (site,node) = self.locate_node(nodename)
287 test_site = TestSite (self, site)
288 test_node = TestNode (self, test_site,node)
291 def locate_first_sliver (self):
292 slice_spec=self.plc_spec['slices'][0]
293 slicename=slice_spec['slice_fields']['name']
294 nodename=slice_spec['nodenames'][0]
295 return self.locate_sliver_obj(nodename,slicename)
297 # all different hostboxes used in this plc
298 def gather_hostBoxes(self):
299 # maps on sites and nodes, return [ (host_box,test_node) ]
301 for site_spec in self.plc_spec['sites']:
302 test_site = TestSite (self,site_spec)
303 for node_spec in site_spec['nodes']:
304 test_node = TestNode (self, test_site, node_spec)
305 if not test_node.is_real():
306 tuples.append( (test_node.host_box(),test_node) )
307 # transform into a dict { 'host_box' -> [ test_node .. ] }
309 for (box,node) in tuples:
310 if not result.has_key(box):
313 result[box].append(node)
316 # a step for checking this stuff
317 def show_boxes (self):
318 'print summary of nodes location'
319 for (box,nodes) in self.gather_hostBoxes().iteritems():
320 print box,":"," + ".join( [ node.name() for node in nodes ] )
323 # make this a valid step
324 def qemu_kill_all(self):
325 'kill all qemu instances on the qemu boxes involved by this setup'
326 # this is the brute force version, kill all qemus on that host box
327 for (box,nodes) in self.gather_hostBoxes().iteritems():
328 # pass the first nodename, as we don't push template-qemu on testboxes
329 nodedir=nodes[0].nodedir()
330 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
333 # make this a valid step
334 def qemu_list_all(self):
335 'list all qemu instances on the qemu boxes involved by this setup'
336 for (box,nodes) in self.gather_hostBoxes().iteritems():
337 # this is the brute force version, kill all qemus on that host box
338 TestBoxQemu(box,self.options.buildname).qemu_list_all()
341 # kill only the right qemus
342 def qemu_list_mine(self):
343 'list qemu instances for our nodes'
344 for (box,nodes) in self.gather_hostBoxes().iteritems():
345 # the fine-grain version
350 # kill only the right qemus
351 def qemu_kill_mine(self):
352 'kill the qemu instances for our nodes'
353 for (box,nodes) in self.gather_hostBoxes().iteritems():
354 # the fine-grain version
359 #################### display config
361 "show test configuration after localization"
367 "print cut'n paste-able stuff to export env variables to your shell"
368 # guess local domain from hostname
369 domain=socket.gethostname().split('.',1)[1]
370 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
371 print "export BUILD=%s"%self.options.buildname
372 if self.options.plcs_use_lxc:
373 print "export PLCHOSTLXC=%s"%fqdn
375 print "export PLCHOSTVS=%s"%fqdn
376 print "export GUESTNAME=%s"%self.plc_spec['vservername']
377 vplcname=self.plc_spec['vservername'].split('-')[-1]
378 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
379 # find hostname of first node
380 (hostname,qemubox) = self.all_node_infos()[0]
381 print "export KVMHOST=%s.%s"%(qemubox,domain)
382 print "export NODE=%s"%(hostname)
386 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
387 def show_pass (self,passno):
388 for (key,val) in self.plc_spec.iteritems():
389 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
393 self.display_site_spec(site)
394 for node in site['nodes']:
395 self.display_node_spec(node)
396 elif key=='initscripts':
397 for initscript in val:
398 self.display_initscript_spec (initscript)
401 self.display_slice_spec (slice)
404 self.display_key_spec (key)
406 if key not in ['sites','initscripts','slices','keys', 'sfa']:
407 print '+ ',key,':',val
409 def display_site_spec (self,site):
410 print '+ ======== site',site['site_fields']['name']
411 for (k,v) in site.iteritems():
412 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
415 print '+ ','nodes : ',
417 print node['node_fields']['hostname'],'',
423 print user['name'],'',
425 elif k == 'site_fields':
426 print '+ login_base',':',v['login_base']
427 elif k == 'address_fields':
    def display_initscript_spec (self,initscript):
        "one-line display of an initscript spec (name only)"
        print '+ ======== initscript',initscript['initscript_fields']['name']
    def display_key_spec (self,key):
        "one-line display of a key spec (name only)"
        print '+ ======== key',key['name']
439 def display_slice_spec (self,slice):
440 print '+ ======== slice',slice['slice_fields']['name']
441 for (k,v) in slice.iteritems():
454 elif k=='slice_fields':
455 print '+ fields',':',
456 print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        "summary line for a node spec; full dump in verbose mode"
        # trailing commas keep all three prints on a single output line (python2)
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        # depth-limited so the dump stays readable
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
468 # another entry point for just showing the boxes involved
469 def display_mapping (self):
470 TestPlc.display_mapping_plc(self.plc_spec)
    def display_mapping_plc (plc_spec):
        "show where a plc spec gets deployed: host box, vserver path, IPs, and its nodes"
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        # recurse into every node of every site in the spec
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    def display_mapping_node (node_spec):
        "show where a node spec gets deployed: its qemu box and hostname"
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
489 # write a timestamp in /vservers/<>.timestamp
490 # cannot be inside the vserver, that causes vserver .. build to cough
491 def timestamp_vs (self):
493 # TODO-lxc check this one
494 # a first approx. is to store the timestamp close to the VM root like vs does
495 stamp_path=self.vm_timestamp_path ()
496 stamp_dir = os.path.dirname (stamp_path)
497 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
498 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
500 # this is called inconditionnally at the beginning of the test sequence
501 # just in case this is a rerun, so if the vm is not running it's fine
503 "vserver delete the test myplc"
504 stamp_path=self.vm_timestamp_path()
505 self.run_in_host("rm -f %s"%stamp_path)
506 if self.options.plcs_use_lxc:
507 self.run_in_host("lxc-stop --name %s"%self.vservername)
508 self.run_in_host("lxc-destroy --name %s"%self.vservername)
511 self.run_in_host("vserver --silent %s delete"%self.vservername)
515 # historically the build was being fetched by the tests
516 # now the build pushes itself as a subdir of the tests workdir
517 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
518 def vs_create (self):
519 "vserver creation (no install done)"
520 # push the local build/ dir to the testplc box
522 # a full path for the local calls
523 build_dir=os.path.dirname(sys.argv[0])
524 # sometimes this is empty - set to "." in such a case
525 if not build_dir: build_dir="."
526 build_dir += "/build"
528 # use a standard name - will be relative to remote buildname
530 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
531 self.test_ssh.rmdir(build_dir)
532 self.test_ssh.copy(build_dir,recursive=True)
533 # the repo url is taken from arch-rpms-url
534 # with the last step (i386) removed
535 repo_url = self.options.arch_rpms_url
536 for level in [ 'arch' ]:
537 repo_url = os.path.dirname(repo_url)
538 # pass the vbuild-nightly options to vtest-init-vserver
540 test_env_options += " -p %s"%self.options.personality
541 test_env_options += " -d %s"%self.options.pldistro
542 test_env_options += " -f %s"%self.options.fcdistro
543 if self.options.plcs_use_lxc:
544 script="vtest-init-lxc.sh"
546 script="vtest-init-vserver.sh"
547 vserver_name = self.vservername
548 vserver_options="--netdev eth0 --interface %s"%self.vserverip
550 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
551 vserver_options += " --hostname %s"%vserver_hostname
553 print "Cannot reverse lookup %s"%self.vserverip
554 print "This is considered fatal, as this might pollute the test results"
556 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
557 return self.run_in_host(create_vserver) == 0
560 def plc_install(self):
561 "yum install myplc, noderepo, and the plain bootstrapfs"
563 # workaround for getting pgsql8.2 on centos5
564 if self.options.fcdistro == "centos5":
565 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
568 if self.options.personality == "linux32":
570 elif self.options.personality == "linux64":
573 raise Exception, "Unsupported personality %r"%self.options.personality
574 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
577 pkgs_list.append ("slicerepo-%s"%nodefamily)
578 pkgs_list.append ("myplc")
579 pkgs_list.append ("noderepo-%s"%nodefamily)
580 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
581 pkgs_string=" ".join(pkgs_list)
582 return self.yum_install (pkgs_list)
585 def plc_configure(self):
587 tmpname='%s.plc-config-tty'%(self.name())
588 fileconf=open(tmpname,'w')
589 for var in [ 'PLC_NAME',
594 'PLC_MAIL_SUPPORT_ADDRESS',
597 # Above line was added for integrating SFA Testing
603 'PLC_RESERVATION_GRANULARITY',
605 'PLC_OMF_XMPP_SERVER',
607 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
608 fileconf.write('w\n')
609 fileconf.write('q\n')
611 utils.system('cat %s'%tmpname)
612 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
613 utils.system('rm %s'%tmpname)
618 self.run_in_guest('service plc start')
623 self.run_in_guest('service plc stop')
627 "start the PLC vserver"
632 "stop the PLC vserver"
636 # stores the keys from the config for further use
637 def keys_store(self):
638 "stores test users ssh keys in keys/"
639 for key_spec in self.plc_spec['keys']:
640 TestKey(self,key_spec).store_key()
643 def keys_clean(self):
644 "removes keys cached in keys/"
645 utils.system("rm -rf ./keys")
648 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
649 # for later direct access to the nodes
650 def keys_fetch(self):
651 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
653 if not os.path.isdir(dir):
655 vservername=self.vservername
656 vm_root=self.vm_root_in_host()
658 prefix = 'debug_ssh_key'
659 for ext in [ 'pub', 'rsa' ] :
660 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
661 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
662 if self.test_ssh.fetch(src,dst) != 0: overall=False
666 "create sites with PLCAPI"
667 return self.do_sites()
    def delete_sites (self):
        "delete sites with PLCAPI"
        # delegate to do_sites in delete mode
        return self.do_sites(action="delete")
673 def do_sites (self,action="add"):
674 for site_spec in self.plc_spec['sites']:
675 test_site = TestSite (self,site_spec)
676 if (action != "add"):
677 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
678 test_site.delete_site()
679 # deleted with the site
680 #test_site.delete_users()
683 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
684 test_site.create_site()
685 test_site.create_users()
688 def delete_all_sites (self):
689 "Delete all sites in PLC, and related objects"
690 print 'auth_root',self.auth_root()
691 site_ids = [s['site_id'] for s in self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])]
692 for site_id in site_ids:
693 print 'Deleting site_id',site_id
694 self.apiserver.DeleteSite(self.auth_root(),site_id)
698 "create nodes with PLCAPI"
699 return self.do_nodes()
    def delete_nodes (self):
        "delete nodes with PLCAPI"
        # delegate to do_nodes in delete mode
        return self.do_nodes(action="delete")
704 def do_nodes (self,action="add"):
705 for site_spec in self.plc_spec['sites']:
706 test_site = TestSite (self,site_spec)
708 utils.header("Deleting nodes in site %s"%test_site.name())
709 for node_spec in site_spec['nodes']:
710 test_node=TestNode(self,test_site,node_spec)
711 utils.header("Deleting %s"%test_node.name())
712 test_node.delete_node()
714 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
715 for node_spec in site_spec['nodes']:
716 utils.pprint('Creating node %s'%node_spec,node_spec)
717 test_node = TestNode (self,test_site,node_spec)
718 test_node.create_node ()
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        # delegate to do_nodegroups in add mode
        return self.do_nodegroups("add")
    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        # delegate to do_nodegroups in delete mode
        return self.do_nodegroups("delete")
730 def translate_timestamp (start,grain,timestamp):
731 if timestamp < TestPlc.YEAR: return start+timestamp*grain
732 else: return timestamp
735 def timestamp_printable (timestamp):
736 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
739 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
741 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
742 print 'API answered grain=',grain
743 start=(now/grain)*grain
745 # find out all nodes that are reservable
746 nodes=self.all_reservable_nodenames()
748 utils.header ("No reservable node found - proceeding without leases")
751 # attach them to the leases as specified in plc_specs
752 # this is where the 'leases' field gets interpreted as relative of absolute
753 for lease_spec in self.plc_spec['leases']:
754 # skip the ones that come with a null slice id
755 if not lease_spec['slice']: continue
756 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
757 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
758 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
759 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
760 if lease_addition['errors']:
761 utils.header("Cannot create leases, %s"%lease_addition['errors'])
764 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
765 (nodes,lease_spec['slice'],
766 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
767 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
771 def delete_leases (self):
772 "remove all leases in the myplc side"
773 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
774 utils.header("Cleaning leases %r"%lease_ids)
775 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
778 def list_leases (self):
779 "list all leases known to the myplc"
780 leases = self.apiserver.GetLeases(self.auth_root())
783 current=l['t_until']>=now
784 if self.options.verbose or current:
785 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
786 TestPlc.timestamp_printable(l['t_from']),
787 TestPlc.timestamp_printable(l['t_until'])))
790 # create nodegroups if needed, and populate
791 def do_nodegroups (self, action="add"):
792 # 1st pass to scan contents
794 for site_spec in self.plc_spec['sites']:
795 test_site = TestSite (self,site_spec)
796 for node_spec in site_spec['nodes']:
797 test_node=TestNode (self,test_site,node_spec)
798 if node_spec.has_key('nodegroups'):
799 nodegroupnames=node_spec['nodegroups']
800 if isinstance(nodegroupnames,StringTypes):
801 nodegroupnames = [ nodegroupnames ]
802 for nodegroupname in nodegroupnames:
803 if not groups_dict.has_key(nodegroupname):
804 groups_dict[nodegroupname]=[]
805 groups_dict[nodegroupname].append(test_node.name())
806 auth=self.auth_root()
808 for (nodegroupname,group_nodes) in groups_dict.iteritems():
810 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
811 # first, check if the nodetagtype is here
812 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
814 tag_type_id = tag_types[0]['tag_type_id']
816 tag_type_id = self.apiserver.AddTagType(auth,
817 {'tagname':nodegroupname,
818 'description': 'for nodegroup %s'%nodegroupname,
820 print 'located tag (type)',nodegroupname,'as',tag_type_id
822 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
824 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
825 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
826 # set node tag on all nodes, value='yes'
827 for nodename in group_nodes:
829 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
831 traceback.print_exc()
832 print 'node',nodename,'seems to already have tag',nodegroupname
835 expect_yes = self.apiserver.GetNodeTags(auth,
836 {'hostname':nodename,
837 'tagname':nodegroupname},
838 ['value'])[0]['value']
839 if expect_yes != "yes":
840 print 'Mismatch node tag on node',nodename,'got',expect_yes
843 if not self.options.dry_run:
844 print 'Cannot find tag',nodegroupname,'on node',nodename
848 print 'cleaning nodegroup',nodegroupname
849 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
851 traceback.print_exc()
855 # a list of TestNode objs
856 def all_nodes (self):
858 for site_spec in self.plc_spec['sites']:
859 test_site = TestSite (self,site_spec)
860 for node_spec in site_spec['nodes']:
861 nodes.append(TestNode (self,test_site,node_spec))
864 # return a list of tuples (nodename,qemuname)
865 def all_node_infos (self) :
867 for site_spec in self.plc_spec['sites']:
868 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
869 for node_spec in site_spec['nodes'] ]
872 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
873 def all_reservable_nodenames (self):
875 for site_spec in self.plc_spec['sites']:
876 for node_spec in site_spec['nodes']:
877 node_fields=node_spec['node_fields']
878 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
879 res.append(node_fields['hostname'])
882 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
883 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
884 if self.options.dry_run:
888 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
889 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
890 # the nodes that haven't checked yet - start with a full list and shrink over time
891 tocheck = self.all_hostnames()
892 utils.header("checking nodes %r"%tocheck)
893 # create a dict hostname -> status
894 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
897 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
899 for array in tocheck_status:
900 hostname=array['hostname']
901 boot_state=array['boot_state']
902 if boot_state == target_boot_state:
903 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
905 # if it's a real node, never mind
906 (site_spec,node_spec)=self.locate_hostname(hostname)
907 if TestNode.is_real_model(node_spec['node_fields']['model']):
908 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
910 boot_state = target_boot_state
911 elif datetime.datetime.now() > graceout:
912 utils.header ("%s still in '%s' state"%(hostname,boot_state))
913 graceout=datetime.datetime.now()+datetime.timedelta(1)
914 status[hostname] = boot_state
916 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
919 if datetime.datetime.now() > timeout:
920 for hostname in tocheck:
921 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
923 # otherwise, sleep for a while
925 # only useful in empty plcs
    def nodes_booted(self):
        # wait for all nodes to reach 'boot'; stay silent for the first 28 of 30 minutes
        return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
931 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
933 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
934 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
935 vservername=self.vservername
938 local_key = "keys/%(vservername)s-debug.rsa"%locals()
941 local_key = "keys/key1.rsa"
942 node_infos = self.all_node_infos()
943 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
944 for (nodename,qemuname) in node_infos:
945 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
946 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
947 (timeout_minutes,silent_minutes,period))
949 for node_info in node_infos:
950 (hostname,qemuname) = node_info
951 # try to run 'hostname' in the node
952 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
953 # don't spam logs - show the command only after the grace period
954 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
956 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
958 node_infos.remove(node_info)
960 # we will have tried real nodes once, in case they're up - but if not, just skip
961 (site_spec,node_spec)=self.locate_hostname(hostname)
962 if TestNode.is_real_model(node_spec['node_fields']['model']):
963 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
964 node_infos.remove(node_info)
967 if datetime.datetime.now() > timeout:
968 for (hostname,qemuname) in node_infos:
969 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
971 # otherwise, sleep for a while
973 # only useful in empty plcs
    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        # shorter timeout than boot mode: debug is expected early in the boot sequence
        return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        # longer timeout than debug mode: nodes first go through a full (re)install
        return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
985 def qemu_local_init (self):
986 "all nodes : init a clean local directory for holding node-dep stuff like iso image..."
990 "all nodes: invoke GetBootMedium and store result locally"
993 def qemu_local_config (self):
994 "all nodes: compute qemu config qemu.conf and store it locally"
997 def nodestate_reinstall (self):
998 "all nodes: mark PLCAPI boot_state as reinstall"
1001 def nodestate_safeboot (self):
1002 "all nodes: mark PLCAPI boot_state as safeboot"
1005 def nodestate_boot (self):
1006 "all nodes: mark PLCAPI boot_state as boot"
1009 def nodestate_show (self):
1010 "all nodes: show PLCAPI boot_state"
1013 def qemu_export (self):
1014 "all nodes: push local node-dep directory on the qemu box"
1017 ### check hooks : invoke scripts from hooks/{node,slice}
1018 def check_hooks_node (self):
1019 return self.locate_first_node().check_hooks()
1020 def check_hooks_sliver (self) :
1021 return self.locate_first_sliver().check_hooks()
def check_hooks (self):
    """runs unit tests in the node and slice contexts - see hooks/{node,slice}"""
    # short-circuits: the sliver checks only run when the node checks pass
    node_outcome = self.check_hooks_node()
    return node_outcome and self.check_hooks_sliver()
def do_check_initscripts(self):
    # Verify that every slice carrying an 'initscriptstamp' has left that
    # stamp on each node it spans, sliver by sliver.
    # NOTE(review): this listing elides several lines of this method (an
    # accumulator initialization, the statement under the has_key guard -
    # presumably a `continue` - and the failure bookkeeping / return tail).
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptstamp'):
            # NOTE(review): line elided by the listing here
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
                # NOTE(review): failure handling and return elided by the listing
def check_initscripts(self):
    """check that the initscripts have triggered"""
    # thin named step around the actual checker
    outcome = self.do_check_initscripts()
    return outcome
def initscripts (self):
    "create initscripts with PLCAPI"
    # NOTE(review): the method's return statement is elided by the listing
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])

def delete_initscripts (self):
    "delete initscripts with PLCAPI"
    # best-effort: deletion failures are reported but not fatal
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        # NOTE(review): the try: line is elided by the listing
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        # NOTE(review): the except: line is elided by the listing
        print 'deletion went wrong - probably did not exist'
        # NOTE(review): the method's return is elided by the listing

# NOTE(review): the def line for the `slices` step is elided by the listing
    "create slices with PLCAPI"
    return self.do_slices()
def delete_slices (self):
    """delete slices with PLCAPI"""
    # same machinery as the create step, driven with the delete action
    return self.do_slices(action="delete")
def do_slices (self, action="add"):
    # Create or delete every slice listed in the plc spec, depending on
    # `action` ("add" creates, anything else is treated as delete).
    # NOTE(review): the listing elides the if/else around the delete vs
    # create branches below, and the method's return statement.
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        utils.header("Deleting slices in site %s"%test_site.name())
        test_slice.delete_slice()
        utils.pprint("Creating slice",slice)
        test_slice.create_slice()
        utils.header('Created Slice %s'%slice['slice_fields']['name'])
# Declarative steps: docstrings are the step descriptions; the bodies and
# mapper decorators are elided by this listing.
def ssh_slice(self):
    "tries to ssh-enter the slice with the user key, to ensure slice creation"

def keys_clear_known_hosts (self):
    "remove test nodes entries from the local known_hosts file"

def qemu_start (self) :
    "all nodes: start the qemu instance (also runs qemu-bridge-init start)"

def timestamp_qemu (self) :
    # the previous docstring was a copy-paste of qemu_start's; judging from
    # the name this step is dispatched to TestNode.timestamp_qemu - confirm
    "all nodes: record a timestamp for the qemu instance"
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    specs = self.plc_spec['tcp_test']
    # NOTE(review): the loop header over `specs` and the port extraction are
    # elided by this listing.
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): suspected BUG - the client sliver is located with the
    # *server* keys (spec['server_node'], spec['server_slice']); expected
    # spec['client_node'] / spec['client_slice'] - confirm against the specs
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
# painfully enough, we need to allow for some time as netflow might show up last
def check_sys_slice (self):
    """all nodes: check that a system slice is alive"""
    # netflow would be the natural candidate, but it is currently not
    # working in the lxc distro, so we probe the drl system slice instead
    return self.check_systemslice ('drl')
# we have the slices up already here, so it should not take too long
def check_systemslice (self, slicename, timeout_minutes=5, period=15):
    # Poll every node until the given system slice shows up on all of them,
    # or the timeout expires.
    # NOTE(review): the listing elides the polling loop header, the success
    # bookkeeping, the sleep(period), and the return statements.
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    test_nodes=self.all_nodes()
    for test_node in test_nodes:
        if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
            # NOTE(review): removing from test_nodes while iterating over it
            # can skip elements; iterating over a copy (test_nodes[:]) is safer
            test_nodes.remove(test_node)
    if datetime.datetime.now () > timeout:
        for test_node in test_nodes:
            utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))

def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing `command` (presumably
    # command = location) is elided by the listing
    command += " -- --check"
    if self.options.size == 1:
        command +=  " --tiny"
    return ( self.run_in_guest(command) == 0)

# populate runs the same utility with slightly different options
# in particular runs with --preserve (dont cleanup) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    """yum install sfa sfa-plc sfa-sfatables sfa-client"""
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install (packages)
def sfa_install_core(self):
    # NOTE(review): the docstring line is elided by the listing; by analogy
    # with its siblings it reads "yum install sfa"
    return self.yum_install ("sfa")
def sfa_install_plc(self):
    """yum install sfa-plc"""
    package = "sfa-plc"
    return self.yum_install(package)
def sfa_install_client(self):
    """yum install sfa-client"""
    package = "sfa-client"
    return self.yum_install(package)
def sfa_install_sfatables(self):
    """yum install sfa-sfatables"""
    package = "sfa-sfatables"
    return self.yum_install (package)
def sfa_dbclean(self):
    """thoroughly wipes off the SFA database

    Tries the successive historical names of the nuke utility and stops at
    the first one that exits with status 0.
    """
    for nuke_command in ("sfaadmin.py registry nuke",
                         "sfa-nuke.py",
                         "sfa-nuke-plc.py"):
        if self.run_in_guest(nuke_command) == 0:
            return True
    return False
def sfa_plcclean(self):
    """cleans the PLC entries that were created as a side effect of running the script

    Best-effort: each deletion is attempted and failures (entry already
    absent) are only reported.
    """
    sfa_spec=self.plc_spec['sfa']
    for sfa_slice_spec in sfa_spec['sfa_slice_specs']:
        login_base=sfa_slice_spec['login_base']
        # BUGFIX: was self.auth.root() - an AttributeError silently swallowed
        # by the bare except below, so the site was never actually deleted;
        # the helper is auth_root() as used everywhere else in this file
        try: self.apiserver.DeleteSite (self.auth_root(),login_base)
        except: print("Site %s already absent from PLC db"%login_base)
        for key in ['piuser','regularuser']:
            username="%s@%s"%(sfa_slice_spec[key],sfa_slice_spec['domain'])
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            except: print("User %s already absent from PLC db"%username)
    print("REMEMBER TO RUN sfa_import AGAIN")
    return True
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # NOTE(review): a line is elided by the listing here
    self.run_in_guest("rpm -e --noscripts sfa-plc")
    # NOTE(review): the method's return is elided by the listing

### run unit tests for SFA
# NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
# Running Transaction
# Transaction couldn't start:
# installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
# [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
# no matter how many Gbs are available on the testplc
# could not figure out what's wrong, so...
# if the yum install phase fails, consider the test is successful
# other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # NOTE(review): the early `return True` under this branch is elided
        # by the listing
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): the enclosing def line (confdir) is elided by the listing
    # per-plc local configuration directory, created on demand
    dirname="conf.%s"%self.plc_spec['name']
    if not os.path.isdir(dirname):
        utils.system("mkdir -p %s"%dirname)
    if not os.path.isdir(dirname):
        # NOTE(review): raising a string is not a valid exception (TypeError
        # at raise time on python >= 2.6); should be e.g.
        # raise Exception("Cannot create config dir for plc %s"%self.name())
        raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    """Return the path of `filename` inside this plc's local config dir."""
    directory = self.confdir()
    return directory + "/" + filename
def confsubdir(self,dirname,clean,dry_run=False):
    # subdir of the config dir, created on demand; `clean` wipes it first
    subdirname="%s/%s"%(self.confdir(),dirname)
    # NOTE(review): the `if clean:` guard line is elided by the listing
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # NOTE(review): string exceptions are invalid (TypeError at raise
        # time on python >= 2.6); use raise Exception(...) instead
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
    # NOTE(review): the return (presumably subdirname) is elided by the listing

def conffile_clean (self,filename):
    # remove one local conf file; True when the rm succeeds
    filename=self.conffile(filename)
    return utils.system("rm -rf %s"%filename)==0

def sfa_configure(self):
    "run sfa-config-tty"
    # drive sfa-config-tty by generating its stdin script locally and then
    # piping it into the guest
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
    # NOTE(review): the rest of the variable list and the loop body opening
    # are elided by the listing
        if self.plc_spec['sfa'].has_key(var):
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    # NOTE(review): the boolean-variable loop header and the val default are
    # elided by the listing
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # NOTE(review): no fileconf.close() is visible before the file is re-read
    # below - elided by the listing, or a leaked/unflushed handle; confirm
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
    # NOTE(review): the method's return is elided by the listing
def aggregate_xml_line(self):
    """Return the <aggregate/> element describing this plc, for aggregates.xml."""
    sfa_spec = self.plc_spec['sfa']
    # %r is kept on purpose so the rendering matches the historic format
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % (
        self.vserverip, sfa_spec['SFA_REGISTRY_ROOT_AUTH'], sfa_spec['neighbours-port'])
def registry_xml_line(self):
    """Return the <registry/> element describing this plc, for registries.xml."""
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # the registry port is hard-wired to 12345
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    """writes aggregates.xml and registries.xml that point to all other PLCs in the test

    Files are generated locally in the conf dir, then pushed into the
    guest's /etc/sfa/; returns True when both copies succeed.
    """
    # of course with a single plc, other_plcs is an empty list
    agg_fname=self.conffile("agg.xml")
    # FIX: file(...).write(...) leaked the handle; use a context manager so
    # the file is flushed and closed before it is copied below
    with open(agg_fname,"w") as agg_file:
        agg_file.write("<aggregates>%s</aggregates>\n" % \
                       " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    with open(reg_fname,"w") as reg_file:
        reg_file.write("<registries>%s</registries>\n" % \
                       " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
       and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
def sfa_import(self):
    # NOTE(review): the docstring line is elided by the listing
    # NOTE(review): `auth` is computed but never used in the visible body
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # try the successive names of the import utility until one succeeds
    return self.run_in_guest('sfa-import.py')==0 or \
           self.run_in_guest('sfa-import-plc.py')==0 or \
           self.run_in_guest('sfaadmin.py registry import_registry')==0
# not needed anymore
# self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

def sfa_start(self):
    # NOTE(review): the docstring line is elided by the listing
    return self.run_in_guest('service sfa start')==0

def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # NOTE(review): the early return under this guard is elided
    sfa_spec=self.plc_spec['sfa']
    # cannot use sfa_slice_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['sfa_slice_specs']:
        test_slice=TestSliceSfa(self,slice_spec)
        dir_name=self.confsubdir("dot-sfi/%s"%slice_spec['slicename'],clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_config(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
    # NOTE(review): the method's return is elided by the listing

def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    self.run_in_guest("rm -rf /root/sfi")
    # NOTE(review): the method's return is elided by the listing

# The SFA steps below are declarative stubs: the docstrings describe each
# step, the actual work is dispatched through slice_sfa_mapper / helpers.
# NOTE(review): decorators, bodies and some docstrings are elided by the
# listing throughout this section.
def sfa_add_site (self):
    "bootstrap a site using sfaadmin"
def sfa_add_pi (self):
    "bootstrap a PI user for that site"
def sfa_add_user(self):
def sfa_update_user(self):
def sfa_add_slice(self):
    "run sfi.py add (on Registry) from slice.xml"
def sfa_discover(self):
    "discover resources into resouces_in.rspec"
def sfa_create_slice(self):
    "run sfi.py create (on SM) - 1st time"
def sfa_check_slice_plc(self):
    "check sfa_create_slice at the plcs - all local nodes should be in slice"
def sfa_update_slice(self):
    "run sfi.py create (on SM) on existing object"
# NOTE(review): the def line owning this docstring is elided by the listing
"various registry-related calls"
def ssh_slice_sfa(self):
    "tries to ssh-enter the SFA slice"
def sfa_delete_user(self):
def sfa_delete_slice(self):
    "run sfi.py delete (on SM), sfi.py remove (on Registry) to clean slices"
# NOTE(review): orphan line - its enclosing def (presumably sfa_stop) is
# elided by the listing
self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing `command` (presumably
    # command = location) is elided by the listing
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # NOTE(review): `remote` is reused here, clobbering the path computed
    # above - works, but a distinct name would be clearer
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)

def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
    self.gather_root_sfi ()
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
    # NOTE(review): the method's return is elided by the listing
def gather_slivers_var_logs(self):
    """Fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/ locally."""
    for sliver in self.all_sliver_objs():
        # remote side: a command that emits a tar stream of the sliver's logs
        remote_tar = sliver.tar_var_logs()
        local_dir = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % local_dir)
        # pipe the remote tar stream into a local extraction
        utils.system(remote_tar + " | tar -C %s -xf -" % local_dir)
def gather_var_logs (self):
    """Retrieve the plc's /var/log into logs/myplc.var-log.<plcname>/ locally."""
    local_dir = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    # pipe a tar stream out of the guest and unpack it locally
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % local_dir)
    # make the httpd logs readable by non-root users
    utils.system("chmod a+r,a+x %s/httpd" % local_dir)
def gather_pgsql_logs (self):
    """Retrieve the plc's postgresql logs into logs/myplc.pgsql-log.<plcname>/."""
    local_dir = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    # pipe a tar stream out of the guest and unpack it locally
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % local_dir)
def gather_root_sfi (self):
    """Retrieve the plc's /root/sfi into logs/sfi.<plcname>/ locally."""
    local_dir = "logs/sfi.%s" % self.name()
    utils.system("mkdir -p %s" % local_dir)
    # pipe a tar stream out of the guest and unpack it locally
    tar_out = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % local_dir)
def gather_nodes_var_logs (self):
    """Retrieve /var/log from every node into logs/node.var-log.<node>/ locally."""
    for site_spec in self.plc_spec['sites']:
        site = TestSite (self, site_spec)
        for node_spec in site_spec['nodes']:
            node = TestNode(self, site, node_spec)
            node_dir = "logs/node.var-log.%s" % node.name()
            # ssh into the node with the test key and stream its logs out
            ssh = TestSsh (node.name(), key="keys/key1.rsa")
            pull = ssh.actual_command("tar -C /var/log -cf - .") \
                   + "| tar -C %s -xf -" % node_dir
            utils.system("mkdir -p %s" % node_dir)
            utils.system(pull)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): the try/except wrapper and the date-based fallback for
    # `name` are elided by the listing
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)

def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" looks like a typo for "planetlab5"; it only
    # affects the dump file name, and plc_db_restore uses the same spelling,
    # so fixing it requires changing both methods in lockstep
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # NOTE(review): the method's return is elided by the listing

def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # NOTE(review): same "planetab5" spelling as plc_db_dump - keep in sync
    dump=self.dbfile("planetab5")
    # stop httpd while the database is dropped and recreated
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    # restart httpd
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)

def standby_1_through_20(self):
    """convenience function to wait for a specified number of minutes"""
# The standby_<n> steps below wait <n> minutes each; their real bodies come
# from the standby_generic decorator defined at the top of this file.
# NOTE(review): the decorator lines are elided by the listing.
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass