1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
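# a minimal sketch of such a step (hypothetical name, for illustration only):
#     def my_step (self):
#         "one-line description of what the step does"
#         return self.run_in_host("true") == 0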
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d minutes'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
40 def node_mapper (method):
41 def actual(self,*args, **kwds):
43 node_method = TestNode.__dict__[method.__name__]
44 for test_node in self.all_nodes():
45 if not node_method(test_node, *args, **kwds): overall=False
47 # restore the doc text
48 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
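# usage sketch (illustrative): decorating a placeholder method with @node_mapper, e.g.
#     @node_mapper
#     def qemu_start (self) : pass
# turns it into a loop over self.all_nodes() that calls TestNode.qemu_start on each node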
51 def slice_mapper (method):
54 slice_method = TestSlice.__dict__[method.__name__]
55 for slice_spec in self.plc_spec['slices']:
56 site_spec = self.locate_site (slice_spec['sitename'])
57 test_site = TestSite(self,site_spec)
58 test_slice=TestSlice(self,test_site,slice_spec)
59 if not slice_method(test_slice,self.options): overall=False
61 # restore the doc text
62 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
65 def auth_sfa_mapper (method):
68 slice_method = TestAuthSfa.__dict__[method.__name__]
69 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
70 test_slice=TestAuthSfa(self,slice_spec)
71 if not slice_method(test_slice,self.options): overall=False
73 # restore the doc text
74 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
84 'vs_delete','timestamp_vs','vs_create', SEP,
85 'plc_install', 'plc_configure', 'plc_start', SEP,
86 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
87 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
88 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
89 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
90 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
91 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
92 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
93 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
94 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
95 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
96 # but as the stress test might take a while, we sometimes missed the debug mode..
97 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
98 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
99 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
100 'check_tcp', 'check_system_slice', SEP,
101 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
102 'force_gather_logs', SEP,
105 'export', 'show_boxes', SEP,
106 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
107 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
108 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
109 'delete_leases', 'list_leases', SEP,
111 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
112 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
113 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
114 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
115 'plc_db_dump' , 'plc_db_restore', SEP,
116 'check_netflow','check_drl', SEP,
117 'standby_1_through_20',SEP,
121 def printable_steps (list):
122 single_line=" ".join(list)+" "
123 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
125 def valid_step (step):
126 return step != SEP and step != SEPSFA
128 # turn off the sfa-related steps when build has skipped SFA
129 # this was originally for centos5 but is still valid
130 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
132 def check_whether_build_has_sfa (rpms_url):
133 utils.header ("Checking if build provides SFA package...")
134 # warning, we're now building 'sface' so let's be a bit more picky
135 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
136 # full builds are expected to return with 0 here
138 utils.header("build does provide SFA")
140 # move all steps containing 'sfa' from default_steps to other_steps
141 utils.header("SFA package not found - removing steps with sfa or sfi")
142 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
143 TestPlc.other_steps += sfa_steps
144 for step in sfa_steps: TestPlc.default_steps.remove(step)
146 def __init__ (self,plc_spec,options):
147 self.plc_spec=plc_spec
149 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
150 self.vserverip=plc_spec['vserverip']
151 self.vservername=plc_spec['vservername']
152 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
153 self.apiserver=TestApiserver(self.url,options.dry_run)
155 def has_addresses_api (self):
156 return self.apiserver.has_method('AddIpAddress')
159 name=self.plc_spec['name']
160 return "%s.%s"%(name,self.vservername)
163 return self.plc_spec['host_box']
166 return self.test_ssh.is_local()
168 # define the API methods on this object through xmlrpc
169 # would help, but not strictly necessary
173 def actual_command_in_guest (self,command):
174 return self.test_ssh.actual_command(self.host_to_guest(command))
176 def start_guest (self):
177 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
179 def stop_guest (self):
180 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
182 def run_in_guest (self,command):
183 return utils.system(self.actual_command_in_guest(command))
185 def run_in_host (self,command):
186 return self.test_ssh.run_in_buildname(command)
188 #command gets run in the plc's vm
189 def host_to_guest(self,command):
190 if self.options.plcs_use_lxc:
191 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
193 return "vserver %s exec %s"%(self.vservername,command)
195 def vm_root_in_host(self):
196 if self.options.plcs_use_lxc:
197 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
199 return "/vservers/%s"%(self.vservername)
201 def vm_timestamp_path (self):
202 if self.options.plcs_use_lxc:
203 return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
205 return "/vservers/%s.timestamp"%(self.vservername)
207 #start/stop the vserver
208 def start_guest_in_host(self):
209 if self.options.plcs_use_lxc:
210 return "lxc-start --daemon --name=%s"%(self.vservername)
212 return "vserver %s start"%(self.vservername)
214 def stop_guest_in_host(self):
215 if self.options.plcs_use_lxc:
216 return "lxc-stop --name=%s"%(self.vservername)
218 return "vserver %s stop"%(self.vservername)
221 def run_in_guest_piped (self,local,remote):
222 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
224 def yum_check_installed (self, rpms):
225 if isinstance (rpms, list):
227 return self.run_in_guest("rpm -q %s"%rpms)==0
229 # does a yum install in the vs, ignore yum retcod, check with rpm
230 def yum_install (self, rpms):
231 if isinstance (rpms, list):
233 self.run_in_guest("yum -y install %s"%rpms)
234 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
235 self.run_in_guest("yum-complete-transaction -y")
236 return self.yum_check_installed (rpms)
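# usage note (illustrative): yum_install accepts either a list or a plain string,
# e.g. self.yum_install(["myplc"]) or self.yum_install("sfa-plc"),
# and reports success based on 'rpm -q' rather than on yum's exit code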
238 def auth_root (self):
239 return {'Username':self.plc_spec['PLC_ROOT_USER'],
240 'AuthMethod':'password',
241 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
242 'Role' : self.plc_spec['role']
244 def locate_site (self,sitename):
245 for site in self.plc_spec['sites']:
246 if site['site_fields']['name'] == sitename:
248 if site['site_fields']['login_base'] == sitename:
250 raise Exception,"Cannot locate site %s"%sitename
252 def locate_node (self,nodename):
253 for site in self.plc_spec['sites']:
254 for node in site['nodes']:
255 if node['name'] == nodename:
257 raise Exception,"Cannot locate node %s"%nodename
259 def locate_hostname (self,hostname):
260 for site in self.plc_spec['sites']:
261 for node in site['nodes']:
262 if node['node_fields']['hostname'] == hostname:
264 raise Exception,"Cannot locate hostname %s"%hostname
266 def locate_key (self,key_name):
267 for key in self.plc_spec['keys']:
268 if key['key_name'] == key_name:
270 raise Exception,"Cannot locate key %s"%key_name
272 def locate_private_key_from_key_names (self, key_names):
273 # locate the first avail. key
275 for key_name in key_names:
276 key_spec=self.locate_key(key_name)
277 test_key=TestKey(self,key_spec)
278 publickey=test_key.publicpath()
279 privatekey=test_key.privatepath()
280 if os.path.isfile(publickey) and os.path.isfile(privatekey):
282 if found: return privatekey
285 def locate_slice (self, slicename):
286 for slice in self.plc_spec['slices']:
287 if slice['slice_fields']['name'] == slicename:
289 raise Exception,"Cannot locate slice %s"%slicename
291 def all_sliver_objs (self):
293 for slice_spec in self.plc_spec['slices']:
294 slicename = slice_spec['slice_fields']['name']
295 for nodename in slice_spec['nodenames']:
296 result.append(self.locate_sliver_obj (nodename,slicename))
299 def locate_sliver_obj (self,nodename,slicename):
300 (site,node) = self.locate_node(nodename)
301 slice = self.locate_slice (slicename)
303 test_site = TestSite (self, site)
304 test_node = TestNode (self, test_site,node)
305 # xxx the slice site is assumed to be the node site - mhh - probably harmless
306 test_slice = TestSlice (self, test_site, slice)
307 return TestSliver (self, test_node, test_slice)
309 def locate_first_node(self):
310 nodename=self.plc_spec['slices'][0]['nodenames'][0]
311 (site,node) = self.locate_node(nodename)
312 test_site = TestSite (self, site)
313 test_node = TestNode (self, test_site,node)
316 def locate_first_sliver (self):
317 slice_spec=self.plc_spec['slices'][0]
318 slicename=slice_spec['slice_fields']['name']
319 nodename=slice_spec['nodenames'][0]
320 return self.locate_sliver_obj(nodename,slicename)
322 # all different hostboxes used in this plc
323 def gather_hostBoxes(self):
324 # maps on sites and nodes, return [ (host_box,test_node) ]
326 for site_spec in self.plc_spec['sites']:
327 test_site = TestSite (self,site_spec)
328 for node_spec in site_spec['nodes']:
329 test_node = TestNode (self, test_site, node_spec)
330 if not test_node.is_real():
331 tuples.append( (test_node.host_box(),test_node) )
332 # transform into a dict { 'host_box' -> [ test_node .. ] }
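# e.g. (values illustrative): { 'testbox1.example.org' : [ <TestNode> , <TestNode> ] }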
334 for (box,node) in tuples:
335 if not result.has_key(box):
338 result[box].append(node)
341 # a step for checking this stuff
342 def show_boxes (self):
343 'print summary of nodes location'
344 for (box,nodes) in self.gather_hostBoxes().iteritems():
345 print box,":"," + ".join( [ node.name() for node in nodes ] )
348 # make this a valid step
349 def qemu_kill_all(self):
350 'kill all qemu instances on the qemu boxes involved in this setup'
351 # this is the brute force version, kill all qemus on that host box
352 for (box,nodes) in self.gather_hostBoxes().iteritems():
353 # pass the first nodename, as we don't push template-qemu on testboxes
354 nodedir=nodes[0].nodedir()
355 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
358 # make this a valid step
359 def qemu_list_all(self):
360 'list all qemu instances on the qemu boxes involved in this setup'
361 for (box,nodes) in self.gather_hostBoxes().iteritems():
362 # this is the brute force version, list all qemus on that host box
363 TestBoxQemu(box,self.options.buildname).qemu_list_all()
366 # list only the right qemus
367 def qemu_list_mine(self):
368 'list qemu instances for our nodes'
369 for (box,nodes) in self.gather_hostBoxes().iteritems():
370 # the fine-grain version
375 # kill only the right qemus
376 def qemu_kill_mine(self):
377 'kill the qemu instances for our nodes'
378 for (box,nodes) in self.gather_hostBoxes().iteritems():
379 # the fine-grain version
384 #################### display config
386 "show test configuration after localization"
392 "print cut'n paste-able stuff to export env variables to your shell"
393 # guess local domain from hostname
394 domain=socket.gethostname().split('.',1)[1]
395 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
396 print "export BUILD=%s"%self.options.buildname
397 if self.options.plcs_use_lxc:
398 print "export PLCHOSTLXC=%s"%fqdn
400 print "export PLCHOSTVS=%s"%fqdn
401 print "export GUESTNAME=%s"%self.plc_spec['vservername']
402 vplcname=self.plc_spec['vservername'].split('-')[-1]
403 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
404 # find hostname of first node
405 (hostname,qemubox) = self.all_node_infos()[0]
406 print "export KVMHOST=%s.%s"%(qemubox,domain)
407 print "export NODE=%s"%(hostname)
411 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
412 def show_pass (self,passno):
413 for (key,val) in self.plc_spec.iteritems():
414 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
418 self.display_site_spec(site)
419 for node in site['nodes']:
420 self.display_node_spec(node)
421 elif key=='initscripts':
422 for initscript in val:
423 self.display_initscript_spec (initscript)
426 self.display_slice_spec (slice)
429 self.display_key_spec (key)
431 if key not in ['sites','initscripts','slices','keys', 'sfa']:
432 print '+ ',key,':',val
434 def display_site_spec (self,site):
435 print '+ ======== site',site['site_fields']['name']
436 for (k,v) in site.iteritems():
437 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
440 print '+ ','nodes : ',
442 print node['node_fields']['hostname'],'',
448 print user['name'],'',
450 elif k == 'site_fields':
451 print '+ login_base',':',v['login_base']
452 elif k == 'address_fields':
458 def display_initscript_spec (self,initscript):
459 print '+ ======== initscript',initscript['initscript_fields']['name']
461 def display_key_spec (self,key):
462 print '+ ======== key',key['key_name']
464 def display_slice_spec (self,slice):
465 print '+ ======== slice',slice['slice_fields']['name']
466 for (k,v) in slice.iteritems():
479 elif k=='slice_fields':
480 print '+ fields',':',
481 print 'max_nodes=',v['max_nodes'],
486 def display_node_spec (self,node):
487 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
488 print "hostname=",node['node_fields']['hostname'],
489 print "ip=",node['interface_fields']['ip']
490 if self.options.verbose:
491 utils.pprint("node details",node,depth=3)
493 # another entry point for just showing the boxes involved
494 def display_mapping (self):
495 TestPlc.display_mapping_plc(self.plc_spec)
499 def display_mapping_plc (plc_spec):
500 print '+ MyPLC',plc_spec['name']
501 # WARNING this would not be right for lxc-based PLC's - should be harmless though
502 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
503 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
504 for site_spec in plc_spec['sites']:
505 for node_spec in site_spec['nodes']:
506 TestPlc.display_mapping_node(node_spec)
509 def display_mapping_node (node_spec):
510 print '+ NODE %s'%(node_spec['name'])
511 print '+\tqemu box %s'%node_spec['host_box']
512 print '+\thostname=%s'%node_spec['node_fields']['hostname']
514 # write a timestamp in /vservers/<>.timestamp
515 # cannot be inside the vserver, that causes vserver .. build to cough
516 def timestamp_vs (self):
517 "Create a timestamp to remember creation date for this plc"
519 # TODO-lxc check this one
520 # a first approx. is to store the timestamp close to the VM root like vs does
521 stamp_path=self.vm_timestamp_path ()
522 stamp_dir = os.path.dirname (stamp_path)
523 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
524 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
526 # this is called unconditionally at the beginning of the test sequence
527 # just in case this is a rerun, so if the vm is not running it's fine
529 "vserver delete the test myplc"
530 stamp_path=self.vm_timestamp_path()
531 self.run_in_host("rm -f %s"%stamp_path)
532 if self.options.plcs_use_lxc:
533 self.run_in_host("lxc-stop --name %s"%self.vservername)
534 self.run_in_host("lxc-destroy --name %s"%self.vservername)
537 self.run_in_host("vserver --silent %s delete"%self.vservername)
541 # historically the build was being fetched by the tests
542 # now the build pushes itself as a subdir of the tests workdir
543 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
544 def vs_create (self):
545 "vserver creation (no install done)"
546 # push the local build/ dir to the testplc box
548 # a full path for the local calls
549 build_dir=os.path.dirname(sys.argv[0])
550 # sometimes this is empty - set to "." in such a case
551 if not build_dir: build_dir="."
552 build_dir += "/build"
554 # use a standard name - will be relative to remote buildname
556 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
557 self.test_ssh.rmdir(build_dir)
558 self.test_ssh.copy(build_dir,recursive=True)
559 # the repo url is taken from arch-rpms-url
560 # with the last step (i386) removed
561 repo_url = self.options.arch_rpms_url
562 for level in [ 'arch' ]:
563 repo_url = os.path.dirname(repo_url)
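# e.g. (illustrative): an arch_rpms_url like http://build.example.com/onelab/f14/x86_64
# becomes http://build.example.com/onelab/f14 once the 'arch' level is stripped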
564 # pass the vbuild-nightly options to vtest-init-vserver
566 test_env_options += " -p %s"%self.options.personality
567 test_env_options += " -d %s"%self.options.pldistro
568 test_env_options += " -f %s"%self.options.fcdistro
569 if self.options.plcs_use_lxc:
570 script="vtest-init-lxc.sh"
572 script="vtest-init-vserver.sh"
573 vserver_name = self.vservername
574 vserver_options="--netdev eth0 --interface %s"%self.vserverip
576 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
577 vserver_options += " --hostname %s"%vserver_hostname
579 print "Cannot reverse lookup %s"%self.vserverip
580 print "This is considered fatal, as this might pollute the test results"
582 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
583 return self.run_in_host(create_vserver) == 0
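# the assembled command looks roughly like this (all values illustrative):
#   ./build/vtest-init-lxc.sh -p linux64 -d onelab -f f14 vplc01 http://build.example.com/onelab/f14 \
#       -- --netdev eth0 --interface 192.168.1.10 --hostname vplc01.example.org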
586 def plc_install(self):
587 "yum install myplc, noderepo, and the plain bootstrapfs"
589 # workaround for getting pgsql8.2 on centos5
590 if self.options.fcdistro == "centos5":
591 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
594 if self.options.personality == "linux32":
596 elif self.options.personality == "linux64":
599 raise Exception, "Unsupported personality %r"%self.options.personality
600 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
603 pkgs_list.append ("slicerepo-%s"%nodefamily)
604 pkgs_list.append ("myplc")
605 pkgs_list.append ("noderepo-%s"%nodefamily)
606 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
607 pkgs_string=" ".join(pkgs_list)
608 return self.yum_install (pkgs_list)
611 def plc_configure(self):
613 tmpname='%s.plc-config-tty'%(self.name())
614 fileconf=open(tmpname,'w')
615 for var in [ 'PLC_NAME',
620 'PLC_MAIL_SUPPORT_ADDRESS',
623 # Above line was added for integrating SFA Testing
629 'PLC_RESERVATION_GRANULARITY',
631 'PLC_OMF_XMPP_SERVER',
633 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
634 fileconf.write('w\n')
635 fileconf.write('q\n')
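# the resulting file drives plc-config-tty non-interactively:
# each 'e VAR' line enters edit mode for VAR, the following line supplies the value,
# then 'w' writes the configuration and 'q' quits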
637 utils.system('cat %s'%tmpname)
638 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
639 utils.system('rm %s'%tmpname)
644 self.run_in_guest('service plc start')
649 self.run_in_guest('service plc stop')
653 "start the PLC vserver"
658 "stop the PLC vserver"
662 # stores the keys from the config for further use
663 def keys_store(self):
664 "stores test users ssh keys in keys/"
665 for key_spec in self.plc_spec['keys']:
666 TestKey(self,key_spec).store_key()
669 def keys_clean(self):
670 "removes keys cached in keys/"
671 utils.system("rm -rf ./keys")
674 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
675 # for later direct access to the nodes
676 def keys_fetch(self):
677 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
679 if not os.path.isdir(dir):
681 vservername=self.vservername
682 vm_root=self.vm_root_in_host()
684 prefix = 'debug_ssh_key'
685 for ext in [ 'pub', 'rsa' ] :
686 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
687 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
688 if self.test_ssh.fetch(src,dst) != 0: overall=False
692 "create sites with PLCAPI"
693 return self.do_sites()
695 def delete_sites (self):
696 "delete sites with PLCAPI"
697 return self.do_sites(action="delete")
699 def do_sites (self,action="add"):
700 for site_spec in self.plc_spec['sites']:
701 test_site = TestSite (self,site_spec)
702 if (action != "add"):
703 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
704 test_site.delete_site()
705 # deleted with the site
706 #test_site.delete_users()
709 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
710 test_site.create_site()
711 test_site.create_users()
714 def delete_all_sites (self):
715 "Delete all sites in PLC, and related objects"
716 print 'auth_root',self.auth_root()
717 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])
719 # keep the automatic site - otherwise we shoot ourselves in the foot, as root_auth would no longer be valid
720 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
721 site_id=site['site_id']
722 print 'Deleting site_id',site_id
723 self.apiserver.DeleteSite(self.auth_root(),site_id)
727 "create nodes with PLCAPI"
728 return self.do_nodes()
729 def delete_nodes (self):
730 "delete nodes with PLCAPI"
731 return self.do_nodes(action="delete")
733 def do_nodes (self,action="add"):
734 for site_spec in self.plc_spec['sites']:
735 test_site = TestSite (self,site_spec)
737 utils.header("Deleting nodes in site %s"%test_site.name())
738 for node_spec in site_spec['nodes']:
739 test_node=TestNode(self,test_site,node_spec)
740 utils.header("Deleting %s"%test_node.name())
741 test_node.delete_node()
743 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
744 for node_spec in site_spec['nodes']:
745 utils.pprint('Creating node %s'%node_spec,node_spec)
746 test_node = TestNode (self,test_site,node_spec)
747 test_node.create_node ()
750 def nodegroups (self):
751 "create nodegroups with PLCAPI"
752 return self.do_nodegroups("add")
753 def delete_nodegroups (self):
754 "delete nodegroups with PLCAPI"
755 return self.do_nodegroups("delete")
759 def translate_timestamp (start,grain,timestamp):
760 if timestamp < TestPlc.YEAR: return start+timestamp*grain
761 else: return timestamp
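# example (illustrative): with start=1000000 and grain=1800, a relative value of 2
# maps to 1000000 + 2*1800 = 1003600, while a full epoch timestamp (>= TestPlc.YEAR)
# is returned unchanged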
764 def timestamp_printable (timestamp):
765 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
768 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
770 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
771 print 'API answered grain=',grain
772 start=(now/grain)*grain
774 # find out all nodes that are reservable
775 nodes=self.all_reservable_nodenames()
777 utils.header ("No reservable node found - proceeding without leases")
780 # attach them to the leases as specified in plc_specs
781 # this is where the 'leases' field gets interpreted as relative or absolute
782 for lease_spec in self.plc_spec['leases']:
783 # skip the ones that come with a null slice id
784 if not lease_spec['slice']: continue
785 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
786 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
787 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
788 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
789 if lease_addition['errors']:
790 utils.header("Cannot create leases, %s"%lease_addition['errors'])
793 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
794 (nodes,lease_spec['slice'],
795 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
796 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
800 def delete_leases (self):
801 "remove all leases in the myplc side"
802 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
803 utils.header("Cleaning leases %r"%lease_ids)
804 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
807 def list_leases (self):
808 "list all leases known to the myplc"
809 leases = self.apiserver.GetLeases(self.auth_root())
812 current=l['t_until']>=now
813 if self.options.verbose or current:
814 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
815 TestPlc.timestamp_printable(l['t_from']),
816 TestPlc.timestamp_printable(l['t_until'])))
819 # create nodegroups if needed, and populate
820 def do_nodegroups (self, action="add"):
821 # 1st pass to scan contents
823 for site_spec in self.plc_spec['sites']:
824 test_site = TestSite (self,site_spec)
825 for node_spec in site_spec['nodes']:
826 test_node=TestNode (self,test_site,node_spec)
827 if node_spec.has_key('nodegroups'):
828 nodegroupnames=node_spec['nodegroups']
829 if isinstance(nodegroupnames,StringTypes):
830 nodegroupnames = [ nodegroupnames ]
831 for nodegroupname in nodegroupnames:
832 if not groups_dict.has_key(nodegroupname):
833 groups_dict[nodegroupname]=[]
834 groups_dict[nodegroupname].append(test_node.name())
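# at this point groups_dict maps each nodegroup name to its node names, e.g. (illustrative)
# { 'mynodegroup' : [ 'node1' , 'node2' ] }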
835 auth=self.auth_root()
837 for (nodegroupname,group_nodes) in groups_dict.iteritems():
839 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
840 # first, check if the nodetagtype is here
841 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
843 tag_type_id = tag_types[0]['tag_type_id']
845 tag_type_id = self.apiserver.AddTagType(auth,
846 {'tagname':nodegroupname,
847 'description': 'for nodegroup %s'%nodegroupname,
849 print 'located tag (type)',nodegroupname,'as',tag_type_id
851 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
853 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
854 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
855 # set node tag on all nodes, value='yes'
856 for nodename in group_nodes:
858 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
860 traceback.print_exc()
861 print 'node',nodename,'seems to already have tag',nodegroupname
864 expect_yes = self.apiserver.GetNodeTags(auth,
865 {'hostname':nodename,
866 'tagname':nodegroupname},
867 ['value'])[0]['value']
868 if expect_yes != "yes":
869 print 'Mismatch node tag on node',nodename,'got',expect_yes
872 if not self.options.dry_run:
873 print 'Cannot find tag',nodegroupname,'on node',nodename
877 print 'cleaning nodegroup',nodegroupname
878 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
880 traceback.print_exc()
884 # a list of TestNode objs
885 def all_nodes (self):
887 for site_spec in self.plc_spec['sites']:
888 test_site = TestSite (self,site_spec)
889 for node_spec in site_spec['nodes']:
890 nodes.append(TestNode (self,test_site,node_spec))
893 # return a list of tuples (nodename,qemuname)
894 def all_node_infos (self) :
896 for site_spec in self.plc_spec['sites']:
897 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
898 for node_spec in site_spec['nodes'] ]
901 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
902 def all_reservable_nodenames (self):
904 for site_spec in self.plc_spec['sites']:
905 for node_spec in site_spec['nodes']:
906 node_fields=node_spec['node_fields']
907 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
908 res.append(node_fields['hostname'])
911 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
912 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
913 if self.options.dry_run:
917 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
918 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
919 # the nodes that haven't checked yet - start with a full list and shrink over time
920 tocheck = self.all_hostnames()
921 utils.header("checking nodes %r"%tocheck)
922 # create a dict hostname -> status
923 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
926 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
928 for array in tocheck_status:
929 hostname=array['hostname']
930 boot_state=array['boot_state']
931 if boot_state == target_boot_state:
932 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
934 # if it's a real node, never mind
935 (site_spec,node_spec)=self.locate_hostname(hostname)
936 if TestNode.is_real_model(node_spec['node_fields']['model']):
937 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
939 boot_state = target_boot_state
940 elif datetime.datetime.now() > graceout:
941 utils.header ("%s still in '%s' state"%(hostname,boot_state))
942 graceout=datetime.datetime.now()+datetime.timedelta(1)
943 status[hostname] = boot_state
945 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
948 if datetime.datetime.now() > timeout:
949 for hostname in tocheck:
950 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
952 # otherwise, sleep for a while
954 # only useful in empty plcs
957 def nodes_booted(self):
958 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
960 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
962 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
963 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
964 vservername=self.vservername
967 local_key = "keys/%(vservername)s-debug.rsa"%locals()
970 local_key = "keys/key_admin.rsa"
971 node_infos = self.all_node_infos()
972 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
973 for (nodename,qemuname) in node_infos:
974 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
975 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
976 (timeout_minutes,silent_minutes,period))
978 for node_info in node_infos:
979 (hostname,qemuname) = node_info
980 # try to run 'hostname' in the node
981 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
982 # don't spam logs - show the command only after the grace period
983 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
985 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
987 node_infos.remove(node_info)
989 # we will have tried real nodes once, in case they're up - but if not, just skip
990 (site_spec,node_spec)=self.locate_hostname(hostname)
991 if TestNode.is_real_model(node_spec['node_fields']['model']):
992 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
993 node_infos.remove(node_info)
996 if datetime.datetime.now() > timeout:
997 for (hostname,qemuname) in node_infos:
998 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
1000 # otherwise, sleep for a while
1002 # only useful in empty plcs
1005 def ssh_node_debug(self):
1006 "Tries to ssh into nodes in debug mode with the debug ssh key"
1007 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
1009 def ssh_node_boot(self):
1010 "Tries to ssh into nodes in production mode with the root ssh key"
1011 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
1014 def qemu_local_init (self): pass
1016 def bootcd (self): pass
1018 def qemu_local_config (self): pass
1020 def nodestate_reinstall (self): pass
1022 def nodestate_safeboot (self): pass
1024 def nodestate_boot (self): pass
1026 def nodestate_show (self): pass
1028 def qemu_export (self): pass
1030 ### check hooks : invoke scripts from hooks/{node,slice}
1031 def check_hooks_node (self):
1032 return self.locate_first_node().check_hooks()
1033 def check_hooks_sliver (self) :
1034 return self.locate_first_sliver().check_hooks()
1036 def check_hooks (self):
1037 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1038 return self.check_hooks_node() and self.check_hooks_sliver()
1041 def do_check_initscripts(self):
1043 for slice_spec in self.plc_spec['slices']:
1044 if not slice_spec.has_key('initscriptstamp'):
1046 stamp=slice_spec['initscriptstamp']
1047 for nodename in slice_spec['nodenames']:
1048 (site,node) = self.locate_node (nodename)
1049 # xxx - passing the wrong site - probably harmless
1050 test_site = TestSite (self,site)
1051 test_slice = TestSlice (self,test_site,slice_spec)
1052 test_node = TestNode (self,test_site,node)
1053 test_sliver = TestSliver (self, test_node, test_slice)
1054 if not test_sliver.check_initscript_stamp(stamp):
1058 def check_initscripts(self):
1059 "check that the initscripts have triggered"
1060 return self.do_check_initscripts()
1062 def initscripts (self):
1063 "create initscripts with PLCAPI"
1064 for initscript in self.plc_spec['initscripts']:
1065 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1066 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1069 def delete_initscripts (self):
1070 "delete initscripts with PLCAPI"
1071 for initscript in self.plc_spec['initscripts']:
1072 initscript_name = initscript['initscript_fields']['name']
1073 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1075 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1076 print initscript_name,'deleted'
1078 print 'deletion went wrong - probably did not exist'
1083 "create slices with PLCAPI"
1084 return self.do_slices(action="add")
1086 def delete_slices (self):
1087 "delete slices with PLCAPI"
1088 return self.do_slices(action="delete")
1090 def fill_slices (self):
1091 "add nodes in slices with PLCAPI"
1092 return self.do_slices(action="fill")
1094 def empty_slices (self):
1095 "remove nodes from slices with PLCAPI"
1096 return self.do_slices(action="empty")
1098 def do_slices (self, action="add"):
1099 for slice in self.plc_spec['slices']:
1100 site_spec = self.locate_site (slice['sitename'])
1101 test_site = TestSite(self,site_spec)
1102 test_slice=TestSlice(self,test_site,slice)
1103 if action == "delete":
1104 test_slice.delete_slice()
1105 elif action=="fill":
1106 test_slice.add_nodes()
1107 elif action=="empty":
1108 test_slice.delete_nodes()
1110 test_slice.create_slice()
1114 def ssh_slice(self): pass
1116 def ssh_slice_off (self): pass
1119 def keys_clear_known_hosts (self): pass
1121 def speed_up_slices (self):
1122 "tweak nodemanager settings on all nodes using a conf file"
1123 # create the template on the server-side
1124 template="%s.nodemanager"%self.name()
1125 template_file = open (template,"w")
1126 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1127 template_file.close()
1128 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1129 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1130 self.test_ssh.copy_abs(template,remote)
1132 self.apiserver.AddConfFile (self.auth_root(),
1133 {'dest':'/etc/sysconfig/nodemanager',
1134 'source':'PlanetLabConf/nodemanager',
1135 'postinstall_cmd':'service nm restart',})
1139 def qemu_start (self) : pass
1142 def timestamp_qemu (self) : pass
1144 def check_tcp (self):
1145 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1146 specs = self.plc_spec['tcp_test']
1151 s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
1152 if not s_test_sliver.run_tcp_server(port,timeout=10):
1156 # idem for the client side
1157 c_test_sliver = self.locate_sliver_obj(spec['client_node'],spec['client_slice'])
1158 if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1162 # painfully enough, we need to allow for some time as netflow might show up last
1163 def check_system_slice (self):
1164 "all nodes: check that a system slice is alive"
1165 # netflow currently not working in the lxc distro
1166 # drl not built at all in the wtx distro
1167 # if we find either of them we're happy
1168 return self.check_netflow() or self.check_drl()
1171 def check_netflow (self): return self._check_system_slice ('netflow')
1172 def check_drl (self): return self._check_system_slice ('drl')
1174 # we have the slices up already here, so it should not take too long
1175 def _check_system_slice (self, slicename, timeout_minutes=5, period=15):
1176 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
1177 test_nodes=self.all_nodes()
1179 for test_node in test_nodes:
1180 if test_node._check_system_slice (slicename,dry_run=self.options.dry_run):
1182 test_nodes.remove(test_node)
1187 if datetime.datetime.now () > timeout:
1188 for test_node in test_nodes:
1189 utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
1194 def plcsh_stress_test (self):
1195 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1196 # install the stress-test in the plc image
1197 location = "/usr/share/plc_api/plcsh_stress_test.py"
1198 remote="%s/%s"%(self.vm_root_in_host(),location)
1199 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1201 command += " -- --check"
1202 if self.options.size == 1:
1203 command += " --tiny"
1204 return ( self.run_in_guest(command) == 0)
1206 # populate runs the same utility with slightly different options
1207 # in particular it runs with --preserve (don't clean up) and without --check
1208 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1210 def sfa_install_all (self):
1211 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1212 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1214 def sfa_install_core(self):
1216 return self.yum_install ("sfa")
1218 def sfa_install_plc(self):
1219 "yum install sfa-plc"
1220 return self.yum_install("sfa-plc")
1222 def sfa_install_sfatables(self):
1223 "yum install sfa-sfatables"
1224 return self.yum_install ("sfa-sfatables")
1226 # for some very odd reason, this sometimes fails with the following symptom
1227 # # yum install sfa-client
1228 # Setting up Install Process
1230 # Downloading Packages:
1231 # Running rpm_check_debug
1232 # Running Transaction Test
1233 # Transaction Test Succeeded
1234 # Running Transaction
1235 # Transaction couldn't start:
1236 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1237 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1238 # even though in the same context I have
1239 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1240 # Filesystem Size Used Avail Use% Mounted on
1241 # /dev/hdv1 806G 264G 501G 35% /
1242 # none 16M 36K 16M 1% /tmp
1244 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1245 def sfa_install_client(self):
1246 "yum install sfa-client"
1247 first_try=self.yum_install("sfa-client")
1248 if first_try: return True
1249 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1250 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1251 utils.header("cached_rpm_path=<<%s>>"%cached_rpm_path)
1253 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1254 return self.yum_check_installed ("sfa-client")
1256 def sfa_dbclean(self):
1257 "thoroughly wipes off the SFA database"
1258 return self.run_in_guest("sfaadmin.py registry nuke")==0 or \
1259 self.run_in_guest("sfa-nuke.py")==0 or \
1260 self.run_in_guest("sfa-nuke-plc.py")==0
1262 def sfa_plcclean(self):
1263 "cleans the PLC entries that were created as a side effect of running the script"
1265 sfa_spec=self.plc_spec['sfa']
1267 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1268 login_base=auth_sfa_spec['login_base']
1269 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1270 except: print "Site %s already absent from PLC db"%login_base
1272 for spec_name in ['pi_spec','user_spec']:
1273 user_spec=auth_sfa_spec[spec_name]
1274 username=user_spec['email']
1275 try: self.apiserver.DeletePerson(self.auth_root(),username)
1277 # this in fact is expected as sites delete their members
1278 #print "User %s already absent from PLC db"%username
1281 print "REMEMBER TO RUN sfa_import AGAIN"
1284 def sfa_uninstall(self):
1285 "uses rpm to uninstall sfa - ignore result"
1286 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1287 self.run_in_guest("rm -rf /var/lib/sfa")
1288 self.run_in_guest("rm -rf /etc/sfa")
1289 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1291 self.run_in_guest("rpm -e --noscripts sfa-plc")
1294 ### run unit tests for SFA
1295 # NOTE: on f14/i386, yum install sfa-tests fails for no apparent reason
1296 # Running Transaction
1297 # Transaction couldn't start:
1298 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1299 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1300 # no matter how many Gbs are available on the testplc
1301 # could not figure out what's wrong, so...
1302 # if the yum install phase fails, consider the test is successful
1303 # other combinations will eventually run it hopefully
1304 def sfa_utest(self):
1305 "yum install sfa-tests and run SFA unittests"
1306 self.run_in_guest("yum -y install sfa-tests")
1307 # failed to install - forget it
1308 if self.run_in_guest("rpm -q sfa-tests")!=0:
1309 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1311 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1315 dirname="conf.%s"%self.plc_spec['name']
1316 if not os.path.isdir(dirname):
1317 utils.system("mkdir -p %s"%dirname)
1318 if not os.path.isdir(dirname):
1319 raise "Cannot create config dir for plc %s"%self.name()
1322 def conffile(self,filename):
1323 return "%s/%s"%(self.confdir(),filename)
1324 def confsubdir(self,dirname,clean,dry_run=False):
1325 subdirname="%s/%s"%(self.confdir(),dirname)
1327 utils.system("rm -rf %s"%subdirname)
1328 if not os.path.isdir(subdirname):
1329 utils.system("mkdir -p %s"%subdirname)
1330 if not dry_run and not os.path.isdir(subdirname):
1331 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1334 def conffile_clean (self,filename):
1335 filename=self.conffile(filename)
1336 return utils.system("rm -rf %s"%filename)==0
1339 def sfa_configure(self):
1340 "run sfa-config-tty"
1341 tmpname=self.conffile("sfa-config-tty")
1342 fileconf=open(tmpname,'w')
1343 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1344 'SFA_INTERFACE_HRN',
1345 'SFA_REGISTRY_LEVEL1_AUTH',
1346 'SFA_REGISTRY_HOST',
1347 'SFA_AGGREGATE_HOST',
1357 'SFA_GENERIC_FLAVOUR',
1358 'SFA_AGGREGATE_ENABLED',
1360 if self.plc_spec['sfa'].has_key(var):
1361 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1362 # the way plc_config handles booleans just sucks..
1365 if self.plc_spec['sfa'][var]: val='true'
1366 fileconf.write ('e %s\n%s\n'%(var,val))
1367 fileconf.write('w\n')
1368 fileconf.write('R\n')
1369 fileconf.write('q\n')
1371 utils.system('cat %s'%tmpname)
1372 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1375 def aggregate_xml_line(self):
1376 port=self.plc_spec['sfa']['neighbours-port']
1377 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1378 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1380 def registry_xml_line(self):
1381 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1382 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
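# sample output (values illustrative), as later aggregated by cross_sfa_configure:
#   <aggregate addr="192.168.1.5" hrn="plctest" port="12346"/>
#   <registry addr="192.168.1.5" hrn="plctest" port="12345"/>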
1385 # a cross step that takes all other plcs as argument
1386 def cross_sfa_configure(self, other_plcs):
1387 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1388 # of course with a single plc, other_plcs is an empty list
1391 agg_fname=self.conffile("agg.xml")
1392 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1393 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1394 utils.header ("(Over)wrote %s"%agg_fname)
1395 reg_fname=self.conffile("reg.xml")
1396 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1397 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1398 utils.header ("(Over)wrote %s"%reg_fname)
1399 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1400 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1402 def sfa_import(self):
1404 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1406 self.run_in_guest('sfaadmin.py reg import_registry')==0
1407 # not needed anymore
1408 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1410 def sfa_start(self):
1412 return self.run_in_guest('service sfa start')==0
1414 def sfi_configure(self):
1415 "Create /root/sfi on the plc side for sfi client configuration"
1416 if self.options.dry_run:
1417 utils.header("DRY RUN - skipping step")
1419 sfa_spec=self.plc_spec['sfa']
1420 # cannot use auth_sfa_mapper to pass dir_name
1421 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1422 test_slice=TestAuthSfa(self,slice_spec)
1423 dir_basename=os.path.basename(test_slice.sfi_path())
1424 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1425 test_slice.sfi_configure(dir_name)
1426 # push into the remote /root/sfi area
1427 location = test_slice.sfi_path()
1428 remote="%s/%s"%(self.vm_root_in_host(),location)
1429 self.test_ssh.mkdir(remote,abs=True)
1430 # need to strip the last level of remote, otherwise we get an extra dir level
1431 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1435 def sfi_clean (self):
1436 "clean up /root/sfi on the plc side"
1437 self.run_in_guest("rm -rf /root/sfi")
1441 def sfa_add_site (self): pass
1443 def sfa_add_pi (self): pass
1445 def sfa_add_user(self): pass
1447 def sfa_update_user(self): pass
1449 def sfa_add_slice(self): pass
1451 def sfa_discover(self): pass
1453 def sfa_create_slice(self): pass
1455 def sfa_check_slice_plc(self): pass
1457 def sfa_update_slice(self): pass
1459 def sfi_list(self): pass
1461 def sfi_show(self): pass
1463 def sfi_slices(self): pass
1465 def ssh_slice_sfa(self): pass
1467 def sfa_delete_user(self): pass
1469 def sfa_delete_slice(self): pass
1473 self.run_in_guest('service sfa stop')==0
1476 def populate (self):
1477 "creates random entries in the PLCAPI"
1478 # install the stress-test in the plc image
1479 location = "/usr/share/plc_api/plcsh_stress_test.py"
1480 remote="%s/%s"%(self.vm_root_in_host(),location)
1481 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1483 command += " -- --preserve --short-names"
1484 local = (self.run_in_guest(command) == 0);
1485 # second run with --foreign
1486 command += ' --foreign'
1487 remote = (self.run_in_guest(command) == 0);
1488 return ( local and remote)
1490 def gather_logs (self):
1491 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1492 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1493 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1494 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1495 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1496 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1497 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1499 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1500 self.gather_var_logs ()
1502 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1503 self.gather_pgsql_logs ()
1505 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1506 self.gather_root_sfi ()
1508 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1509 for site_spec in self.plc_spec['sites']:
1510 test_site = TestSite (self,site_spec)
1511 for node_spec in site_spec['nodes']:
1512 test_node=TestNode(self,test_site,node_spec)
1513 test_node.gather_qemu_logs()
1515 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1516 self.gather_nodes_var_logs()
1518 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1519 self.gather_slivers_var_logs()
1522 def gather_slivers_var_logs(self):
1523 for test_sliver in self.all_sliver_objs():
1524 remote = test_sliver.tar_var_logs()
1525 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1526 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1527 utils.system(command)
1530 def gather_var_logs (self):
1531 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1532 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1533 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1534 utils.system(command)
1535 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1536 utils.system(command)
1538 def gather_pgsql_logs (self):
1539 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1540 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1541 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1542 utils.system(command)
1544 def gather_root_sfi (self):
1545 utils.system("mkdir -p logs/sfi.%s"%self.name())
1546 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1547 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1548 utils.system(command)
1550 def gather_nodes_var_logs (self):
1551 for site_spec in self.plc_spec['sites']:
1552 test_site = TestSite (self,site_spec)
1553 for node_spec in site_spec['nodes']:
1554 test_node=TestNode(self,test_site,node_spec)
1555 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1556 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1557 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1558 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1559 utils.system(command)
1562 # returns the filename to use for sql dump/restore, using options.dbname if set
1563 def dbfile (self, database):
1564 # uses options.dbname if it is found
1566 name=self.options.dbname
1567 if not isinstance(name,StringTypes):
1570 t=datetime.datetime.now()
1573 return "/root/%s-%s.sql"%(database,name)
1575 def plc_db_dump(self):
1576 'dump the planetlab5 DB in /root in the PLC - filename has time'
1577 dump=self.dbfile("planetlab5")
1578 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1579 utils.header('Dumped planetlab5 database in %s'%dump)
1582 def plc_db_restore(self):
1583 'restore the planetlab5 DB - looks broken, but run -n might help'
1584 dump=self.dbfile("planetlab5")
1585 ##stop httpd service
1586 self.run_in_guest('service httpd stop')
1587 # xxx - need another wrapper
1588 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1589 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1590 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1591 ##starting httpd service
1592 self.run_in_guest('service httpd start')
1594 utils.header('Database restored from ' + dump)
1596 def standby_1_through_20(self):
1597 """convenience function to wait for a specified number of minutes"""
1600 def standby_1(): pass
1602 def standby_2(): pass
1604 def standby_3(): pass
1606 def standby_4(): pass
1608 def standby_5(): pass
1610 def standby_6(): pass
1612 def standby_7(): pass
1614 def standby_8(): pass
1616 def standby_9(): pass
1618 def standby_10(): pass
1620 def standby_11(): pass
1622 def standby_12(): pass
1624 def standby_13(): pass
1626 def standby_14(): pass
1628 def standby_15(): pass
1630 def standby_16(): pass
1632 def standby_17(): pass
1634 def standby_18(): pass
1636 def standby_19(): pass
1638 def standby_20(): pass