1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
# helper for the standby_* steps: just wait
def standby(minutes,dry_run):
    # minutes: how long to wait; dry_run: presumably short-circuits the sleep
    # NOTE(review): the dry_run handling lines are elided in this view
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)
def standby_generic (func):
    # decorator factory: derives the wait duration from the step name,
    # e.g. standby_3_through_20 -> 3 minutes
    # NOTE(review): the inner 'def actual(self):' wrapper line is elided in this
    # view; the body below runs inside that wrapper, hence the 'self' reference
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)
def node_mapper (method):
    # decorator: turn a TestNode method into a TestPlc step that maps it
    # over all nodes of the plc; the step succeeds only if every node does
    def actual(self,*args, **kwds):
        # NOTE(review): 'overall=True' initialization elided in this view
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
        # NOTE(review): 'return overall' elided in this view
    # restore the doc text
    actual.__doc__=TestNode.__dict__[method.__name__].__doc__
    # NOTE(review): 'return actual' elided in this view
def slice_mapper (method):
    # decorator: turn a TestSlice method into a TestPlc step mapped over
    # all slices declared in plc_spec
    # NOTE(review): the inner 'def actual(self):' wrapper line and the
    # 'overall=True' init are elided in this view; the loop runs inside it
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
    # NOTE(review): 'return actual' elided in this view
def auth_sfa_mapper (method):
    # decorator: turn a TestAuthSfa method into a TestPlc step mapped over
    # all sfa auth specs declared under plc_spec['sfa']
    # NOTE(review): the inner 'def actual(self):' wrapper line and the
    # 'overall=True' init are elided in this view
    slice_method = TestAuthSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
        test_slice=TestAuthSfa(self,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
    # NOTE(review): 'return actual' elided in this view
84 'vs_delete','timestamp_vs','vs_create', SEP,
85 'plc_install', 'plc_configure', 'plc_start', SEP,
86 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
87 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
88 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
89 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
90 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
91 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
92 'sfa_add_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
93 'sfa_create_slice@1', 'sfa_check_slice_plc@1', SEPSFA,
94 'sfa_update_user@1', 'sfa_update_slice@1', SEPSFA,
95 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
96 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
97 # but as the stress test might take a while, we sometimes missed the debug mode..
98 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
99 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
100 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
101 'check_tcp', 'check_sys_slice', SEP,
102 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
103 'force_gather_logs', SEP,
106 'export', 'show_boxes', SEP,
107 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
108 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
109 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
110 'delete_leases', 'list_leases', SEP,
112 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
113 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
114 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
115 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
116 'plc_db_dump' , 'plc_db_restore', SEP,
117 'standby_1_through_20',SEP,
121 def printable_steps (list):
122 single_line=" ".join(list)+" "
123 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
125 def valid_step (step):
126 return step != SEP and step != SEPSFA
128 # turn off the sfa-related steps when build has skipped SFA
129 # this was originally for centos5 but is still valid
130 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    def check_whether_build_has_sfa (rpms_url):
        # probe the build's rpm repository for an sfa- package; when absent,
        # move every sfa/sfi step out of default_steps so they don't run
        utils.header ("Checking if build provides SFA package...")
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
        # full builds are expected to return with 0 here
        # NOTE(review): the 'if retcod==0:' / 'else:' branch lines are elided
        # in this view; the two code paths below belong to those branches
        utils.header("build does provide SFA")
        # move all steps containing 'sfa' from default_steps to other_steps
        utils.header("SFA package not found - removing steps with sfa or sfi")
        sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
        TestPlc.other_steps += sfa_steps
        for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self,plc_spec,options):
        # plc_spec: dict describing the myplc under test (host box, vserver
        # name/ip, sites, slices, ...); options: parsed command-line options
        self.plc_spec=plc_spec
        # NOTE(review): 'self.options=options' elided in this view -- the
        # attribute is clearly set, as self.options is used on the next line
        self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
        self.vserverip=plc_spec['vserverip']
        self.vservername=plc_spec['vservername']
        # the XMLRPC entry point of the myplc under test
        self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
        self.apiserver=TestApiserver(self.url,options.dry_run)
155 def has_addresses_api (self):
156 return self.apiserver.has_method('AddIpAddress')
159 name=self.plc_spec['name']
160 return "%s.%s"%(name,self.vservername)
163 return self.plc_spec['host_box']
166 return self.test_ssh.is_local()
168 # define the API methods on this object through xmlrpc
169 # would help, but not strictly necessary
173 def actual_command_in_guest (self,command):
174 return self.test_ssh.actual_command(self.host_to_guest(command))
176 def start_guest (self):
177 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
179 def stop_guest (self):
180 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
182 def run_in_guest (self,command):
183 return utils.system(self.actual_command_in_guest(command))
185 def run_in_host (self,command):
186 return self.test_ssh.run_in_buildname(command)
    #command gets run in the plc's vm
    def host_to_guest(self,command):
        # map a command into the guest context: plain ssh for lxc guests,
        # 'vserver ... exec' for vserver guests
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
        # vserver flavour (original 'else:' line elided in this view)
        return "vserver %s exec %s"%(self.vservername,command)
    def vm_root_in_host(self):
        # path, on the host box, of the guest's root filesystem
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
        # vserver flavour (original 'else:' line elided in this view)
        return "/vservers/%s"%(self.vservername)
    def vm_timestamp_path (self):
        # path, on the host box, of the file holding the VM creation timestamp
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
        # vserver flavour (original 'else:' line elided in this view)
        return "/vservers/%s.timestamp"%(self.vservername)
    #start/stop the vserver
    def start_guest_in_host(self):
        # host-side shell command that boots the guest
        if self.options.plcs_use_lxc:
            return "lxc-start --daemon --name=%s"%(self.vservername)
        # vserver flavour (original 'else:' line elided in this view)
        return "vserver %s start"%(self.vservername)
    def stop_guest_in_host(self):
        # host-side shell command that stops the guest
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s"%(self.vservername)
        # vserver flavour (original 'else:' line elided in this view)
        return "vserver %s stop"%(self.vservername)
221 def run_in_guest_piped (self,local,remote):
222 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
    def yum_check_installed (self, rpms):
        # check through rpm -q that the given packages are installed in the
        # guest; rpms may be a list or a space-separated string
        if isinstance (rpms, list):
            # NOTE(review): the join of the list into a string is elided in this view
        return self.run_in_guest("rpm -q %s"%rpms)==0
    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        # rpms may be a list or a space-separated string; success is decided
        # by yum_check_installed, not by yum's own exit status
        if isinstance (rpms, list):
            # NOTE(review): the join of the list into a string is elided in this view
        self.run_in_guest("yum -y install %s"%rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed (rpms)
    def auth_root (self):
        # the PLCAPI auth struct for the PLC root (admin) account,
        # built from the plc_spec
        return {'Username':self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod':'password',
                'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role' : self.plc_spec['role']
                # NOTE(review): closing brace line elided in this view
                }
    def locate_site (self,sitename):
        # find a site spec in plc_spec, matching either its name or login_base
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                # NOTE(review): the matching return line is elided in this view
            if site['site_fields']['login_base'] == sitename:
                # NOTE(review): the matching return line is elided in this view
        raise Exception,"Cannot locate site %s"%sitename
    def locate_node (self,nodename):
        # find a node spec by its symbolic name; callers unpack the result
        # as a (site_spec, node_spec) pair
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    # NOTE(review): the matching return line is elided in this view
        raise Exception,"Cannot locate node %s"%nodename
    def locate_hostname (self,hostname):
        # like locate_node, but keyed on the node's hostname field;
        # callers unpack the result as a (site_spec, node_spec) pair
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    # NOTE(review): the matching return line is elided in this view
        raise Exception,"Cannot locate hostname %s"%hostname
    def locate_key (self,key_name):
        # find a key spec in plc_spec by its symbolic name
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                # NOTE(review): the matching return line is elided in this view
        raise Exception,"Cannot locate key %s"%key_name
    def locate_private_key_from_key_names (self, key_names):
        # locate the first avail. key
        # returns the path of the private key of the first key in key_names
        # whose public and private files both exist locally
        # NOTE(review): 'found' initialization elided in this view
        for key_name in key_names:
            key_spec=self.locate_key(key_name)
            test_key=TestKey(self,key_spec)
            publickey=test_key.publicpath()
            privatekey=test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                # NOTE(review): the line setting 'found' is elided in this view
            if found: return privatekey
        # NOTE(review): the fall-through (not-found) return is elided in this view
    def locate_slice (self, slicename):
        # find a slice spec in plc_spec by its slice name
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                # NOTE(review): the matching return line is elided in this view
        raise Exception,"Cannot locate slice %s"%slicename
    def all_sliver_objs (self):
        # build the list of TestSliver objects, one per (slice, node) pair
        # NOTE(review): 'result' list initialization elided in this view
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj (nodename,slicename))
        # NOTE(review): 'return result' elided in this view
299 def locate_sliver_obj (self,nodename,slicename):
300 (site,node) = self.locate_node(nodename)
301 slice = self.locate_slice (slicename)
303 test_site = TestSite (self, site)
304 test_node = TestNode (self, test_site,node)
305 # xxx the slice site is assumed to be the node site - mhh - probably harmless
306 test_slice = TestSlice (self, test_site, slice)
307 return TestSliver (self, test_node, test_slice)
    def locate_first_node(self):
        # TestNode for the first node of the first slice in the spec
        nodename=self.plc_spec['slices'][0]['nodenames'][0]
        (site,node) = self.locate_node(nodename)
        test_site = TestSite (self, site)
        test_node = TestNode (self, test_site,node)
        # NOTE(review): 'return test_node' elided in this view
316 def locate_first_sliver (self):
317 slice_spec=self.plc_spec['slices'][0]
318 slicename=slice_spec['slice_fields']['name']
319 nodename=slice_spec['nodenames'][0]
320 return self.locate_sliver_obj(nodename,slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        # NOTE(review): 'tuples' list initialization elided in this view
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode (self, test_site, node_spec)
                # only virtual (qemu) nodes live on a host box
                if not test_node.is_real():
                    tuples.append( (test_node.host_box(),test_node) )
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        # NOTE(review): 'result' dict initialization elided in this view
        for (box,node) in tuples:
            if not result.has_key(box):
                # NOTE(review): creation of the empty list entry elided in this view
            result[box].append(node)
        # NOTE(review): 'return result' elided in this view
    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        # one line per host box, listing the nodes it hosts
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            print box,":"," + ".join( [ node.name() for node in nodes ] )
        # NOTE(review): trailing 'return True' elided in this view
    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir=nodes[0].nodedir()
            TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
        # NOTE(review): trailing 'return True' elided in this view
    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBoxQemu(box,self.options.buildname).qemu_list_all()
        # NOTE(review): trailing 'return True' elided in this view
    # kill only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the per-node listing call lines are elided in this view
    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box,nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            # NOTE(review): the per-node kill call lines are elided in this view
384 #################### display config
386 "show test configuration after localization"
392 "print cut'n paste-able stuff to export env variables to your shell"
393 # guess local domain from hostname
394 domain=socket.gethostname().split('.',1)[1]
395 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
396 print "export BUILD=%s"%self.options.buildname
397 if self.options.plcs_use_lxc:
398 print "export PLCHOSTLXC=%s"%fqdn
400 print "export PLCHOSTVS=%s"%fqdn
401 print "export GUESTNAME=%s"%self.plc_spec['vservername']
402 vplcname=self.plc_spec['vservername'].split('-')[-1]
403 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
404 # find hostname of first node
405 (hostname,qemubox) = self.all_node_infos()[0]
406 print "export KVMHOST=%s.%s"%(qemubox,domain)
407 print "export NODE=%s"%(hostname)
411 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
    def show_pass (self,passno):
        # one display pass over plc_spec; passno selects what gets shown
        # NOTE(review): several dispatch lines (the passno tests and the
        # 'sites'/'slices'/'keys' branch headers) are elided in this view;
        # the calls below each belong to one of those elided branches
        for (key,val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            self.display_site_spec(site)
            for node in site['nodes']:
                self.display_node_spec(node)
            elif key=='initscripts':
                for initscript in val:
                    self.display_initscript_spec (initscript)
            self.display_slice_spec (slice)
            self.display_key_spec (key)
            # scalar entries get a plain one-line dump
            if key not in ['sites','initscripts','slices','keys', 'sfa']:
                print '+ ',key,':',val
    def display_site_spec (self,site):
        # pretty-print one site spec: header, then per-key summaries
        print '+ ======== site',site['site_fields']['name']
        # NOTE(review): the 'k==nodes' / 'k==users' branch headers and inner
        # loops are elided in this view; the prints below belong to them
        for (k,v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            print '+ ','nodes : ',
            print node['node_fields']['hostname'],'',
            print user['name'],'',
            elif k == 'site_fields':
                print '+ login_base',':',v['login_base']
            elif k == 'address_fields':
                # NOTE(review): the address branch body is elided in this view
458 def display_initscript_spec (self,initscript):
459 print '+ ======== initscript',initscript['initscript_fields']['name']
461 def display_key_spec (self,key):
462 print '+ ======== key',key['key_name']
    def display_slice_spec (self,slice):
        # pretty-print one slice spec: header, then per-key summaries
        print '+ ======== slice',slice['slice_fields']['name']
        for (k,v) in slice.iteritems():
            # NOTE(review): several branches of this dispatch are elided
            # in this view
            elif k=='slice_fields':
                print '+ fields',':',
                print 'max_nodes=',v['max_nodes'],
    def display_node_spec (self,node):
        # one-line summary of a node spec; --verbose adds a full dump
        print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
        print "hostname=",node['node_fields']['hostname'],
        print "ip=",node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details",node,depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        # show where the plc and its nodes are hosted
        TestPlc.display_mapping_plc(self.plc_spec)
        # NOTE(review): trailing line(s) (presumably 'return True') elided in this view
    # NOTE(review): a @staticmethod decorator line precedes this def but is
    # elided in this view (no 'self' parameter)
    def display_mapping_plc (plc_spec):
        # where the plc guest lives, and where each of its nodes lives
        print '+ MyPLC',plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
        print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)
    # NOTE(review): a @staticmethod decorator line precedes this def but is
    # elided in this view (no 'self' parameter)
    def display_mapping_node (node_spec):
        # three-line summary of where a (qemu) node is hosted
        print '+ NODE %s'%(node_spec['name'])
        print '+\tqemu box %s'%node_spec['host_box']
        print '+\thostname=%s'%node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        "Create a timestamp to remember creation date for this plc"
        # NOTE(review): the line computing 'now' (presumably int(time.time()))
        # is elided in this view
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path=self.vm_timestamp_path ()
        stamp_dir = os.path.dirname (stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
526 # this is called inconditionnally at the beginning of the test sequence
527 # just in case this is a rerun, so if the vm is not running it's fine
529 "vserver delete the test myplc"
530 stamp_path=self.vm_timestamp_path()
531 self.run_in_host("rm -f %s"%stamp_path)
532 if self.options.plcs_use_lxc:
533 self.run_in_host("lxc-stop --name %s"%self.vservername)
534 self.run_in_host("lxc-destroy --name %s"%self.vservername)
537 self.run_in_host("vserver --silent %s delete"%self.vservername)
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        # NOTE(review): a branch line (local-vs-remote handling) is elided here
        # a full path for the local calls
        build_dir=os.path.dirname(sys.argv[0])
        # sometimes this is empty - set to "." in such a case
        if not build_dir: build_dir="."
        build_dir += "/build"
        # NOTE(review): one or two lines elided here in this view
        # use a standard name - will be relative to remote buildname
        # NOTE(review): reassignment of build_dir to the remote-relative name elided
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir,recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        # NOTE(review): initialization of test_env_options (empty string) elided
        test_env_options += " -p %s"%self.options.personality
        test_env_options += " -d %s"%self.options.pldistro
        test_env_options += " -f %s"%self.options.fcdistro
        # pick the lxc or vserver flavour of the init script
        if self.options.plcs_use_lxc:
            script="vtest-init-lxc.sh"
        # NOTE(review): 'else:' line elided in this view
            script="vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options="--netdev eth0 --interface %s"%self.vserverip
        # NOTE(review): 'try:' line elided in this view
        vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
        vserver_options += " --hostname %s"%vserver_hostname
        # NOTE(review): 'except:' line elided in this view
        print "Cannot reverse lookup %s"%self.vserverip
        print "This is considered fatal, as this might pollute the test results"
        # NOTE(review): the fatal 'return False' elided in this view
        create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
        return self.run_in_host(create_vserver) == 0
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"
        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
        # NOTE(review): lines elided here (presumably pkgs_list init and/or blank)
        # map the test personality onto an rpm architecture
        if self.options.personality == "linux32":
            # NOTE(review): arch assignment ("i386") elided in this view
        elif self.options.personality == "linux64":
            # NOTE(review): arch assignment ("x86_64") and 'else:' elided in this view
        raise Exception, "Unsupported personality %r"%self.options.personality
        nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
        # NOTE(review): pkgs_list initialization elided in this view
        pkgs_list.append ("slicerepo-%s"%nodefamily)
        pkgs_list.append ("myplc")
        pkgs_list.append ("noderepo-%s"%nodefamily)
        pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
        # NOTE(review): pkgs_string appears unused - yum_install gets the list
        pkgs_string=" ".join(pkgs_list)
        return self.yum_install (pkgs_list)
    def plc_configure(self):
        # NOTE(review): the step docstring line is elided in this view
        # drive plc-config-tty through a generated script: one 'e VAR / VALUE'
        # pair per variable, then 'w' (write) and 'q' (quit)
        tmpname='%s.plc-config-tty'%(self.name())
        fileconf=open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     # NOTE(review): several variable names elided in this view
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        # NOTE(review): fileconf.close() elided in this view
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
        utils.system('rm %s'%tmpname)
        # NOTE(review): trailing 'return True' elided in this view
644 self.run_in_guest('service plc start')
649 self.run_in_guest('service plc stop')
653 "start the PLC vserver"
658 "stop the PLC vserver"
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self,key_spec).store_key()
        # NOTE(review): trailing 'return True' elided in this view
    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        # NOTE(review): trailing 'return True' elided in this view
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        # NOTE(review): the line setting 'dir' (presumably "keys") is elided
        if not os.path.isdir(dir):
            # NOTE(review): the mkdir call is elided in this view
        vservername=self.vservername
        vm_root=self.vm_root_in_host()
        # NOTE(review): 'overall=True' initialization elided in this view
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ] :
            src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
            dst="keys/%(vservername)s-debug.%(ext)s"%locals()
            if self.test_ssh.fetch(src,dst) != 0: overall=False
        # NOTE(review): 'return overall' elided in this view
692 "create sites with PLCAPI"
693 return self.do_sites()
    def delete_sites (self):
        "delete sites with PLCAPI"
        # thin wrapper: do_sites walks the spec, 'delete' selects the branch
        return self.do_sites(action="delete")
    def do_sites (self,action="add"):
        # walk all site specs and either create or delete them through PLCAPI
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            if (action != "add"):
                utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
                test_site.delete_site()
                # deleted with the site
                #test_site.delete_users()
            # NOTE(review): 'else:' line elided in this view; the three lines
            # below are the creation branch
            utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
            test_site.create_site()
            test_site.create_users()
        # NOTE(review): trailing 'return True' elided in this view
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root',self.auth_root()
        # NOTE(review): GetSites appears to request only ['site_id'] here while
        # 'login_base' is read below - the field list may be truncated in this view
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])
        # NOTE(review): 'for site in sites:' loop header elided in this view
        # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
        if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
        site_id=site['site_id']
        print 'Deleting site_id',site_id
        self.apiserver.DeleteSite(self.auth_root(),site_id)
        # NOTE(review): trailing 'return True' elided in this view
727 "create nodes with PLCAPI"
728 return self.do_nodes()
    def delete_nodes (self):
        "delete nodes with PLCAPI"
        # thin wrapper: do_nodes walks the spec, 'delete' selects the branch
        return self.do_nodes(action="delete")
    def do_nodes (self,action="add"):
        # walk all node specs and either create or delete them through PLCAPI
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            # NOTE(review): the 'if action != "add":' line is elided in this view
            utils.header("Deleting nodes in site %s"%test_site.name())
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                utils.header("Deleting %s"%test_node.name())
                test_node.delete_node()
            # NOTE(review): 'else:' line elided in this view; below is the
            # creation branch
            utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
            for node_spec in site_spec['nodes']:
                utils.pprint('Creating node %s'%node_spec,node_spec)
                test_node = TestNode (self,test_site,node_spec)
                test_node.create_node ()
        # NOTE(review): trailing 'return True' elided in this view
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        # thin wrapper over the do_nodegroups walker, in 'add' mode
        return self.do_nodegroups("add")
    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        # thin wrapper over the do_nodegroups walker, in 'delete' mode
        return self.do_nodegroups("delete")
759 def translate_timestamp (start,grain,timestamp):
760 if timestamp < TestPlc.YEAR: return start+timestamp*grain
761 else: return timestamp
764 def timestamp_printable (timestamp):
765 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
768 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
770 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
771 print 'API answered grain=',grain
772 start=(now/grain)*grain
774 # find out all nodes that are reservable
775 nodes=self.all_reservable_nodenames()
777 utils.header ("No reservable node found - proceeding without leases")
780 # attach them to the leases as specified in plc_specs
781 # this is where the 'leases' field gets interpreted as relative of absolute
782 for lease_spec in self.plc_spec['leases']:
783 # skip the ones that come with a null slice id
784 if not lease_spec['slice']: continue
785 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
786 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
787 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
788 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
789 if lease_addition['errors']:
790 utils.header("Cannot create leases, %s"%lease_addition['errors'])
793 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
794 (nodes,lease_spec['slice'],
795 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
796 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
        utils.header("Cleaning leases %r"%lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
        # NOTE(review): trailing 'return True' elided in this view
    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        # NOTE(review): the lines computing 'now' and the 'for l in leases:'
        # loop header are elided in this view
        # by default only show leases that are still current
        current=l['t_until']>=now
        if self.options.verbose or current:
            utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
                                                   TestPlc.timestamp_printable(l['t_from']),
                                                   TestPlc.timestamp_printable(l['t_until'])))
        # NOTE(review): trailing 'return True' elided in this view
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        # NOTE(review): 'groups_dict' initialization ({}) elided in this view
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode (self,test_site,node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames=node_spec['nodegroups']
                    # accept a single name or a list of names
                    if isinstance(nodegroupnames,StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname]=[]
                        groups_dict[nodegroupname].append(test_node.name())
        auth=self.auth_root()
        # NOTE(review): 'overall=True' initialization elided in this view
        # 2nd pass: create/populate (or clean) each nodegroup through the API
        for (nodegroupname,group_nodes) in groups_dict.iteritems():
            # NOTE(review): the 'if action=="add":' branch header is elided;
            # the block below up to the cleaning part is the 'add' branch
            print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
            # first, check if the nodetagtype is here
            tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
            # NOTE(review): the 'if tag_types:'/'else:' lines are elided; the
            # first assignment reuses an existing tag type, the second creates it
            tag_type_id = tag_types[0]['tag_type_id']
            tag_type_id = self.apiserver.AddTagType(auth,
                                                    {'tagname':nodegroupname,
                                                     'description': 'for nodegroup %s'%nodegroupname,
                                                     # NOTE(review): remaining fields and closer elided
            print 'located tag (type)',nodegroupname,'as',tag_type_id
            # NOTE(review): a line elided here in this view
            nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
            # NOTE(review): the 'if not nodegroups:' guard is elided in this view
            self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
            print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
            # set node tag on all nodes, value='yes'
            for nodename in group_nodes:
                # NOTE(review): 'try:' line elided in this view
                self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                # NOTE(review): 'except:' line elided in this view
                traceback.print_exc()
                print 'node',nodename,'seems to already have tag',nodegroupname
                # check anyway
                # NOTE(review): 'try:' line elided in this view
                expect_yes = self.apiserver.GetNodeTags(auth,
                                                        {'hostname':nodename,
                                                         'tagname':nodegroupname},
                                                        ['value'])[0]['value']
                if expect_yes != "yes":
                    print 'Mismatch node tag on node',nodename,'got',expect_yes
                    # NOTE(review): 'overall=False' elided in this view
                # NOTE(review): 'except:' line elided in this view
                if not self.options.dry_run:
                    print 'Cannot find tag',nodegroupname,'on node',nodename
                    # NOTE(review): 'overall=False' elided in this view
            # NOTE(review): the 'else:' (delete/cleanup) branch header and its
            # 'try:' are elided in this view
            print 'cleaning nodegroup',nodegroupname
            self.apiserver.DeleteNodeGroup(auth,nodegroupname)
            # NOTE(review): 'except:' line elided in this view
            traceback.print_exc()
        # NOTE(review): 'return overall' elided in this view
    # a list of TestNode objs
    def all_nodes (self):
        # NOTE(review): 'nodes' list initialization elided in this view
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode (self,test_site,node_spec))
        # NOTE(review): 'return nodes' elided in this view
    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self) :
        # NOTE(review): 'node_infos' list initialization elided in this view
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        # NOTE(review): 'return node_infos' elided in this view
901 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
    def all_reservable_nodenames (self):
        # hostnames of nodes declared with node_type 'reservable'
        # NOTE(review): 'res' list initialization elided in this view
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields=node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                    res.append(node_fields['hostname'])
        # NOTE(review): 'return res' elided in this view
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
        # poll PLCAPI until every node reaches target_boot_state, or until
        # timeout_minutes have elapsed; period is the polling interval (s)
        if self.options.dry_run:
            # NOTE(review): the dry_run short-circuit body is elided in this view
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r"%tocheck)
        # create a dict hostname -> status
        status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
        # NOTE(review): the outer polling loop header ('while tocheck:') is
        # elided in this view; the code below runs inside it
        tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
        # update status
        for array in tocheck_status:
            hostname=array['hostname']
            boot_state=array['boot_state']
            if boot_state == target_boot_state:
                utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
            # NOTE(review): 'else:' line elided in this view
            # if it's a real node, never mind
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
                # pretend the node reached the target state
                boot_state = target_boot_state
            elif datetime.datetime.now() > graceout:
                utils.header ("%s still in '%s' state"%(hostname,boot_state))
                # push graceout far away so this only gets printed once
                graceout=datetime.datetime.now()+datetime.timedelta(1)
            status[hostname] = boot_state
        # refresh tocheck
        tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
        # NOTE(review): the success exit ('if not tocheck: return True') is
        # elided in this view
        if datetime.datetime.now() > timeout:
            for hostname in tocheck:
                utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
            # NOTE(review): 'return False' elided in this view
        # otherwise, sleep for a while
        # NOTE(review): the time.sleep(period) call is elided in this view
    # only useful in empty plcs
957 def nodes_booted(self):
958 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
    def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
        # repeatedly try to ssh into every node until all succeed or timeout;
        # debug selects the debug ssh key, otherwise the admin key is used
        # NOTE(review): a line is elided before the timestamps in this view
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
        vservername=self.vservername
        # NOTE(review): the 'if debug:' branch lines (setting 'message') are
        # elided in this view; the two local_key assignments below belong to
        # the debug and boot branches respectively
        local_key = "keys/%(vservername)s-debug.rsa"%locals()
        local_key = "keys/key_admin.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
        for (nodename,qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                         (timeout_minutes,silent_minutes,period))
        # NOTE(review): the outer polling loop header ('while node_infos:') is
        # elided in this view; the code below runs inside it
        for node_info in node_infos:
            (hostname,qemuname) = node_info
            # try to run 'hostname' in the node
            command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
            # don't spam logs - show the command only after the grace period
            success = utils.system ( command, silent=datetime.datetime.now() < graceout)
            # NOTE(review): the success test ('if success==0:') is elided
            utils.header('Successfully entered root@%s (%s)'%(hostname,message))
            # this is an admin key, but as the node is in boot mode anyway
            node_infos.remove(node_info)
            # NOTE(review): an 'elif/else' line is elided here in this view
            # we will have tried real nodes once, in case they're up - but if not, just skip
            (site_spec,node_spec)=self.locate_hostname(hostname)
            if TestNode.is_real_model(node_spec['node_fields']['model']):
                utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
                node_infos.remove(node_info)
        # NOTE(review): the success exit ('if not node_infos: return True') is
        # elided in this view
        if datetime.datetime.now() > timeout:
            for (hostname,qemuname) in node_infos:
                utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
            # NOTE(review): 'return False' elided in this view
        # otherwise, sleep for a while
        # NOTE(review): the time.sleep(period) call is elided in this view
    # only useful in empty plcs
1005 def ssh_node_debug(self):
1006 "Tries to ssh into nodes in debug mode with the debug ssh key"
1007 return self.check_nodes_ssh(debug=True,timeout_minutes=10,silent_minutes=8)
1009 def ssh_node_boot(self):
1010 "Tries to ssh into nodes in production mode with the root ssh key"
1011 return self.check_nodes_ssh(debug=False,timeout_minutes=40,silent_minutes=38)
    # Placeholder bodies: the real per-node implementations live in TestNode;
    # mapper decorators (elided from this chunk, presumably @node_mapper -
    # confirm against the full file) substitute them at definition time.
    def qemu_local_init (self): pass
    def bootcd (self): pass
    def qemu_local_config (self): pass
    def nodestate_reinstall (self): pass
    def nodestate_safeboot (self): pass
    def nodestate_boot (self): pass
    def nodestate_show (self): pass
    def qemu_export (self): pass
1030 ### check hooks : invoke scripts from hooks/{node,slice}
1031 def check_hooks_node (self):
1032 return self.locate_first_node().check_hooks()
1033 def check_hooks_sliver (self) :
1034 return self.locate_first_sliver().check_hooks()
1036 def check_hooks (self):
1037 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1038 return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        # For every slice that declares an 'initscriptstamp', verify that the
        # stamp shows up in each of the slice's slivers.
        # NOTE(review): a few lines are elided from this chunk (the overall
        # result initialization, the 'continue' under the has_key test, the
        # failure handling and the final return).
        for slice_spec in self.plc_spec['slices']:
            # slices without an initscript stamp are skipped (elided 'continue')
            if not slice_spec.has_key('initscriptstamp'):
            stamp=slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site,node) = self.locate_node (nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite (self,site)
                test_slice = TestSlice (self,test_site,slice_spec)
                test_node = TestNode (self,test_site,node)
                test_sliver = TestSliver (self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
1058 def check_initscripts(self):
1059 "check that the initscripts have triggered"
1060 return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        # register every initscript spec through AddInitScript
        # NOTE(review): the trailing return is elided from this chunk
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
            self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        # best-effort deletion: the try:/except: lines wrapping the API call
        # are elided from this chunk
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
            self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
            print initscript_name,'deleted'
            print 'deletion went wrong - probably did not exist'
        # tail of the 'slices' step (its def line is elided from this chunk)
        "create slices with PLCAPI"
        return self.do_slices(action="add")
1086 def delete_slices (self):
1087 "delete slices with PLCAPI"
1088 return self.do_slices(action="delete")
1090 def fill_slices (self):
1091 "add nodes in slices with PLCAPI"
1092 return self.do_slices(action="fill")
1094 def empty_slices (self):
1095 "remove nodes from slices with PLCAPI"
1096 return self.do_slices(action="empty")
    def do_slices (self, action="add"):
        # Generic dispatcher behind the slice steps; action is one of
        # "add" (default), "delete", "fill" or "empty".
        # NOTE(review): the 'else:' line (covering the default "add" case)
        # just before create_slice, and the trailing return, are elided from
        # this chunk.
        for slice in self.plc_spec['slices']:
            site_spec = self.locate_site (slice['sitename'])
            test_site = TestSite(self,site_spec)
            test_slice=TestSlice(self,test_site,slice)
            if action == "delete":
                test_slice.delete_slice()
            elif action=="fill":
                test_slice.add_nodes()
            elif action=="empty":
                test_slice.delete_nodes()
                # (elided 'else:' belongs above this line)
                test_slice.create_slice()
    # Placeholder bodies: the real implementations are substituted by mapper
    # decorators whose lines are elided from this chunk (presumably
    # @slice_mapper / @node_mapper - confirm against the full file).
    def ssh_slice(self): pass
    def ssh_slice_off (self): pass
    def keys_clear_known_hosts (self): pass
    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template="%s.nodemanager"%self.name()
        template_file = open (template,"w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        # push it into the plc's PlanetLabConf area
        in_vm="/var/www/html/PlanetLabConf/nodemanager"
        remote="%s/%s"%(self.vm_root_in_host(),in_vm)
        self.test_ssh.copy_abs(template,remote)
        # declare the conf file so nodes fetch it and restart their nodemanager
        # NOTE(review): the trailing return is elided from this chunk
        self.apiserver.AddConfFile (self.auth_root(),
                                    {'dest':'/etc/sysconfig/nodemanager',
                                     'source':'PlanetLabConf/nodemanager',
                                     'postinstall_cmd':'service nm restart',})
    # Placeholder bodies: real per-node implementations substituted by mapper
    # decorators elided from this chunk (presumably @node_mapper).
    def qemu_start (self) : pass
    def timestamp_qemu (self) : pass
    def check_tcp (self):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        # NOTE(review): the loop over specs, the port extraction and the
        # returns are elided from this chunk.
        specs = self.plc_spec['tcp_test']
        # locate the server-side sliver and start a tcp server in it
        s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
        if not s_test_sliver.run_tcp_server(port,timeout=10):
        # idem for the client side
        # NOTE(review): this looks like a copy/paste bug - the client sliver is
        # located with spec['server_node']/spec['server_slice']; presumably it
        # should use spec['client_node']/spec['client_slice'] - confirm against
        # the tcp_test spec format before fixing.
        c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
        if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1162 # painfully enough, we need to allow for some time as netflow might show up last
1163 def check_sys_slice (self):
1164 "all nodes: check that a system slice is alive"
1165 # would probably make more sense to check for netflow,
1166 # but that one is currently not working in the lxc distro
1167 # return self.check_systemslice ('netflow')
1168 return self.check_systemslice ('drl')
    # we have the slices up already here, so it should not take too long
    def check_systemslice (self, slicename, timeout_minutes=5, period=15):
        # Poll every node until the given system slice is seen alive on each,
        # or timeout_minutes have elapsed; period is the polling interval.
        # NOTE(review): the enclosing polling loop, the success return and the
        # sleep are elided from this chunk.
        timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
        test_nodes=self.all_nodes()
        for test_node in test_nodes:
            if test_node.check_systemslice (slicename,dry_run=self.options.dry_run):
                # this node is done - stop polling it
                # NOTE(review): removes from the list being iterated - confirm
                # the full file iterates over a copy
                test_nodes.remove(test_node)
        if datetime.datetime.now () > timeout:
            for test_node in test_nodes:
                utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): the line initializing 'command' (presumably from
        # 'location') is elided from this chunk
        command += " -- --check"
        if self.options.size == 1:
            command +=  " --tiny"
        # the test passed iff the guest-side script exits 0
        return ( self.run_in_guest(command) == 0)
    # populate runs the same utility with slightly different options
1203 # in particular runs with --preserve (dont cleanup) and without --check
1204 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1206 def sfa_install_all (self):
1207 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1208 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
    def sfa_install_core(self):
        # yum install of the core 'sfa' package only
        # (the docstring line is elided from this chunk)
        return self.yum_install ("sfa")
1214 def sfa_install_plc(self):
1215 "yum install sfa-plc"
1216 return self.yum_install("sfa-plc")
1218 def sfa_install_sfatables(self):
1219 "yum install sfa-sfatables"
1220 return self.yum_install ("sfa-sfatables")
1222 # for some very odd reason, this sometimes fails with the following symptom
1223 # # yum install sfa-client
1224 # Setting up Install Process
1226 # Downloading Packages:
1227 # Running rpm_check_debug
1228 # Running Transaction Test
1229 # Transaction Test Succeeded
1230 # Running Transaction
1231 # Transaction couldn't start:
1232 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1233 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1234 # even though in the same context I have
1235 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1236 # Filesystem Size Used Avail Use% Mounted on
1237 # /dev/hdv1 806G 264G 501G 35% /
1238 # none 16M 36K 16M 1% /tmp
1240 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1241 def sfa_install_client(self):
1242 "yum install sfa-client"
1243 first_try=self.yum_install("sfa-client")
1244 if first_try: return True
1245 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1246 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1247 utils.header("rpm_path=<<%s>>"%rpm_path)
1249 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1250 return self.yum_check_installed ("sfa-client")
1252 def sfa_dbclean(self):
1253 "thoroughly wipes off the SFA database"
1254 return self.run_in_guest("sfaadmin.py registry nuke")==0 or \
1255 self.run_in_guest("sfa-nuke.py")==0 or \
1256 self.run_in_guest("sfa-nuke-plc.py")==0
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        # NOTE(review): a few lines are elided from this chunk (an opening
        # header, the 'except:' around DeletePerson, and the return)
        sfa_spec=self.plc_spec['sfa']
        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            # deleting the site is expected to take its members down with it
            login_base=auth_sfa_spec['login_base']
            try: self.apiserver.DeleteSite (self.auth_root(),login_base)
            except: print "Site %s already absent from PLC db"%login_base
            for spec_name in ['pi_spec','user_spec']:
                user_spec=auth_sfa_spec[spec_name]
                username=user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(),username)
                # this in fact is expected as sites delete their members
                #print "User %s already absent from PLC db"%username
        print "REMEMBER TO RUN sfa_import AGAIN"
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        # best effort: each run_in_guest result is deliberately ignored
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        # second pass on sfa-plc, skipping rpm scriptlets
        # (an explanatory comment line is elided from this chunk)
        self.run_in_guest("rpm -e --noscripts sfa-plc")
1290 ### run unit tests for SFA
1291 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1292 # Running Transaction
1293 # Transaction couldn't start:
1294 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1295 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1296 # no matter how many Gbs are available on the testplc
1297 # could not figure out what's wrong, so...
1298 # if the yum install phase fails, consider the test is successful
1299 # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests")!=0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            # NOTE(review): the early return under this branch is elided from
            # this chunk
        # run the full unit-test suite inside the guest
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
        # tail of confdir() (its def line is elided from this chunk):
        # ensure the local per-plc config dir exists; the trailing
        # 'return dirname' is also elided
        dirname="conf.%s"%self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s"%dirname)
        if not os.path.isdir(dirname):
            # NOTE(review): raising a plain string is a TypeError on
            # python >= 2.6 - should raise an Exception instance instead
            raise "Cannot create config dir for plc %s"%self.name()
1318 def conffile(self,filename):
1319 return "%s/%s"%(self.confdir(),filename)
    def confsubdir(self,dirname,clean,dry_run=False):
        # Create a subdir of the config dir, wiping it first when 'clean' is
        # set (the 'if clean:' line guarding the rm is elided from this chunk).
        # The trailing 'return subdirname' is elided as well.
        subdirname="%s/%s"%(self.confdir(),dirname)
        utils.system("rm -rf %s"%subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s"%subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            # NOTE(review): raising a plain string is a TypeError on
            # python >= 2.6 - should raise an Exception instance instead
            raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1330 def conffile_clean (self,filename):
1331 filename=self.conffile(filename)
1332 return utils.system("rm -rf %s"%filename)==0
    def sfa_configure(self):
        "run sfa-config-tty"
        # Build a local answer file for sfa-config-tty and pipe it into the
        # guest. NOTE(review): many lines are elided from this chunk (part of
        # the variable list, the boolean-variable loop header, the file close
        # and the return).
        tmpname=self.conffile("sfa-config-tty")
        fileconf=open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
            # only emit variables actually present in the spec
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
            if self.plc_spec['sfa'][var]: val='true'
            fileconf.write ('e %s\n%s\n'%(var,val))
        # w=write, R=reload, q=quit - drive sfa-config-tty non-interactively
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        utils.system('cat %s'%tmpname)
        self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1371 def aggregate_xml_line(self):
1372 port=self.plc_spec['sfa']['neighbours-port']
1373 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1374 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1376 def registry_xml_line(self):
1377 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1378 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        # NOTE(review): the early-out for an empty other_plcs list appears to
        # be elided from this chunk
        agg_fname=self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                      " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%agg_fname)
        reg_fname=self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                      " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header ("(Over)wrote %s"%reg_fname)
        # push both files into the guest's /etc/sfa; both copies must succeed
        return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
            and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
    def sfa_import(self):
        # run the sfa registry importer inside the guest
        # (the docstring line and one intervening line are elided from this chunk)
        auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        # NOTE(review): the result of this comparison is discarded as written;
        # an elided line may carry the actual 'return', otherwise a return
        # looks missing here - confirm against the full file
        self.run_in_guest('sfaadmin.py reg import_registry')==0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
    def sfa_start(self):
        # start the sfa service in the guest; True on success
        # (the docstring line is elided from this chunk)
        return self.run_in_guest('service sfa start')==0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        # NOTE(review): the early return under the dry_run branch and the
        # final return are elided from this chunk
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
        sfa_spec=self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice=TestAuthSfa(self,slice_spec)
            # build the local dot-sfi config dir for this auth
            dir_basename=os.path.basename(test_slice.sfi_path())
            dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote="%s/%s"%(self.vm_root_in_host(),location)
            self.test_ssh.mkdir(remote,abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        # best effort: the run_in_guest result is not checked here
        # (a possible trailing line is elided from this chunk)
        self.run_in_guest("rm -rf /root/sfi")
    # Placeholder bodies for the sfa steps: the real implementations live in
    # TestAuthSfa/TestSlice and are substituted by mapper decorators whose
    # lines are elided from this chunk (presumably @auth_sfa_mapper and
    # friends - confirm against the full file).
    def sfa_add_site (self): pass
    def sfa_add_pi (self): pass
    def sfa_add_user(self): pass
    def sfa_update_user(self): pass
    def sfa_add_slice(self): pass
    def sfa_discover(self): pass
    def sfa_create_slice(self): pass
    def sfa_check_slice_plc(self): pass
    def sfa_update_slice(self): pass
    def sfi_list(self): pass
    def sfi_show(self): pass
    def sfi_slices(self): pass
    def ssh_slice_sfa(self): pass
    def sfa_delete_user(self): pass
    def sfa_delete_slice(self): pass
        # tail of sfa_stop (its def line is elided from this chunk); the
        # comparison result is discarded - NOTE(review): an elided line may
        # carry the 'return', otherwise one looks missing
        self.run_in_guest('service sfa stop')==0
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
        # NOTE(review): the line initializing 'command' (presumably from
        # 'location') is elided from this chunk
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0);
        # second run with --foreign
        command += ' --foreign'
        # note: 'remote' is reused here, clobbering the path computed above
        remote = (self.run_in_guest(command) == 0);
        # both runs must succeed
        return ( local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # NOTE(review): the trailing return is elided from this chunk
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs ()
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi ()
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite (self,site_spec)
            for node_spec in site_spec['nodes']:
                test_node=TestNode(self,test_site,node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
1518 def gather_slivers_var_logs(self):
1519 for test_sliver in self.all_sliver_objs():
1520 remote = test_sliver.tar_var_logs()
1521 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1522 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1523 utils.system(command)
1526 def gather_var_logs (self):
1527 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1528 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1529 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1530 utils.system(command)
1531 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1532 utils.system(command)
1534 def gather_pgsql_logs (self):
1535 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1536 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1537 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1538 utils.system(command)
1540 def gather_root_sfi (self):
1541 utils.system("mkdir -p logs/sfi.%s"%self.name())
1542 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1543 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1544 utils.system(command)
1546 def gather_nodes_var_logs (self):
1547 for site_spec in self.plc_spec['sites']:
1548 test_site = TestSite (self,site_spec)
1549 for node_spec in site_spec['nodes']:
1550 test_node=TestNode(self,test_site,node_spec)
1551 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1552 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1553 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1554 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1555 utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        # NOTE(review): several lines are elided from this chunk (a try:, the
        # branch body under the isinstance test, and the strftime-based
        # formatting of the timestamp into 'name')
        name=self.options.dbname
        if not isinstance(name,StringTypes):
        # fall back on a timestamp-based name (formatting lines elided)
        t=datetime.datetime.now()
        return "/root/%s-%s.sql"%(database,name)
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        # NOTE(review): "planetab5" looks like a typo for "planetlab5"; it only
        # affects the dump filename, and plc_db_restore uses the same spelling,
        # so fix both together if at all
        dump=self.dbfile("planetab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
        utils.header('Dumped planetlab5 database in %s'%dump)
        # NOTE(review): the trailing return is elided from this chunk
    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        # NOTE(review): "planetab5" looks like a typo for "planetlab5" (same
        # spelling as in plc_db_dump, so dump/restore stay consistent)
        dump=self.dbfile("planetab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        # drop and recreate the database, then replay the dump into it
        self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')
        utils.header('Database restored from ' + dump)
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        # NOTE(review): the body ('pass') is elided from this chunk; the
        # standby_N stubs below presumably get their real behavior from the
        # @standby_generic decorator (decorator lines elided) which derives
        # the wait duration from the function name - confirm against the
        # full file
    def standby_1(): pass
    def standby_2(): pass
    def standby_3(): pass
    def standby_4(): pass
    def standby_5(): pass
    def standby_6(): pass
    def standby_7(): pass
    def standby_8(): pass
    def standby_9(): pass
    def standby_10(): pass
    def standby_11(): pass
    def standby_12(): pass
    def standby_13(): pass
    def standby_14(): pass
    def standby_15(): pass
    def standby_16(): pass
    def standby_17(): pass
    def standby_18(): pass
    def standby_19(): pass
    def standby_20(): pass