1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
def standby(minutes,dry_run):
    """Pause the test sequence for <minutes> minutes.
    NOTE(review): the dry_run short-circuit sits on elided lines - confirm."""
    utils.header('Entering StandBy for %d mn'%minutes)
    time.sleep(60*minutes)

def standby_generic (func):
    # factory for the standby_<n>_through_<m> steps: the duration is parsed
    # out of the step name and delegated to standby()
    # NOTE(review): the wrapping 'def actual(self):' line is elided in this view
    minutes=int(func.__name__.split("_")[1])
    return standby(minutes,self.options.dry_run)

def node_mapper (method):
    # decorator: promote a TestNode method into a TestPlc step that runs
    # over all nodes, and fails if any single node fails
    def actual(self,*args, **kwds):
        # look the method up on TestNode by name so the wrapper stays generic
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall=False
    # restore the doc text
    actual.__doc__=TestNode.__dict__[method.__name__].__doc__

def slice_mapper (method):
    # decorator: promote a TestSlice method into a TestPlc step over all slices
    # NOTE(review): the wrapping 'def actual(self):' line is elided in this view
    slice_method = TestSlice.__dict__[method.__name__]
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=TestSlice.__dict__[method.__name__].__doc__

def auth_sfa_mapper (method):
    # decorator: promote a TestAuthSfa method into a step over all sfa auth specs
    # NOTE(review): the wrapping 'def actual(self):' line is elided in this view
    slice_method = TestAuthSfa.__dict__[method.__name__]
    for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
        test_slice=TestAuthSfa(self,slice_spec)
        if not slice_method(test_slice,self.options): overall=False
    # restore the doc text
    actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
84 'vs_delete','timestamp_vs','vs_create', SEP,
85 'plc_install', 'plc_configure', 'plc_start', SEP,
86 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
87 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
88 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
89 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
90 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
91 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
92 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_discover@1', SEPSFA,
93 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
94 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
95 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
96 # but as the stress test might take a while, we sometimes missed the debug mode..
97 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
98 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
99 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
100 'check_tcp', 'check_system_slice', SEP,
101 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
102 'force_gather_logs', SEP,
105 'export', 'show_boxes', SEP,
106 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
107 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
108 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
109 'delete_leases', 'list_leases', SEP,
111 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
112 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
113 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
114 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
115 'plc_db_dump' , 'plc_db_restore', SEP,
116 'check_netflow','check_drl', SEP,
117 'debug_nodemanager', SEP,
118 'standby_1_through_20',SEP,
def printable_steps (list):
    "render a step list on one backslash-continued line per separator"
    joined = " ".join(list) + " "
    for separator in (SEP, SEPSFA):
        joined = joined.replace(" "+separator+" "," \\\n")
    return joined
def valid_step (step):
    "a step is valid unless it is one of the separator pseudo-steps"
    return step not in (SEP, SEPSFA)
129 # turn off the sfa-related steps when build has skipped SFA
130 # this was originally for centos5 but is still valid
131 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
def check_whether_build_has_sfa (rpms_url):
    # probe the build's package repo over http; when no sfa- rpm is published,
    # move every sfa/sfi-related step out of the default step list
    utils.header ("Checking if build provides SFA package...")
    # warning, we're now building 'sface' so let's be a bit more picky
    retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
    # full builds are expected to return with 0 here
    # NOTE(review): the 'if retcod==0: ... else:' branching lines are elided here
    utils.header("build does provide SFA")
    # move all steps containing 'sfa' from default_steps to other_steps
    utils.header("SFA package not found - removing steps with sfa or sfi")
    sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
    TestPlc.other_steps += sfa_steps
    for step in sfa_steps: TestPlc.default_steps.remove(step)

def __init__ (self,plc_spec,options):
    # keep spec and options around; precompute the ssh helper, the vserver
    # coordinates, the API url and its xmlrpc proxy
    self.plc_spec=plc_spec
    # NOTE(review): 'self.options=options' sits on an elided line - it is
    # needed before the next statement runs
    self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
    self.vserverip=plc_spec['vserverip']
    self.vservername=plc_spec['vservername']
    self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
    self.apiserver=TestApiserver(self.url,options.dry_run)
def has_addresses_api (self):
    "whether the attached PLCAPI server exposes the AddIpAddress method"
    method_name = 'AddIpAddress'
    return self.apiserver.has_method(method_name)
160 name=self.plc_spec['name']
161 return "%s.%s"%(name,self.vservername)
164 return self.plc_spec['host_box']
167 return self.test_ssh.is_local()
169 # define the API methods on this object through xmlrpc
170 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    "full ssh command line that will run <command> inside the plc guest"
    in_guest = self.host_to_guest(command)
    return self.test_ssh.actual_command(in_guest)
def start_guest (self):
    "boot the guest, driving the host box through the test ssh handle"
    host_side = self.start_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_side))
def stop_guest (self):
    "shut the guest down, driving the host box through the test ssh handle"
    host_side = self.stop_guest_in_host()
    return utils.system(self.test_ssh.actual_command(host_side))
def run_in_guest (self,command):
    "run <command> inside the guest and return the exit status"
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    "run <command> on the host box, from within the build directory"
    ssh_handle = self.test_ssh
    return ssh_handle.run_in_buildname(command)
#command gets run in the plc's vm
def host_to_guest(self,command):
    # lxc guests are entered over plain ssh, vserver guests via 'vserver exec'
    # NOTE(review): the 'else:' line between the two returns is elided; the
    # early return makes the flat layout behave the same
    if self.options.plcs_use_lxc:
        return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
    return "vserver %s exec %s"%(self.vservername,command)

def vm_root_in_host(self):
    # path, on the host box, of the guest's root filesystem
    if self.options.plcs_use_lxc:
        return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
    return "/vservers/%s"%(self.vservername)

def vm_timestamp_path (self):
    # path, on the host box, of the guest's creation-date stamp file
    if self.options.plcs_use_lxc:
        return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
    return "/vservers/%s.timestamp"%(self.vservername)

#start/stop the vserver
def start_guest_in_host(self):
    # host-side command line that boots the guest
    if self.options.plcs_use_lxc:
        return "lxc-start --daemon --name=%s"%(self.vservername)
    return "vserver %s start"%(self.vservername)

def stop_guest_in_host(self):
    # host-side command line that shuts the guest down
    if self.options.plcs_use_lxc:
        return "lxc-stop --name=%s"%(self.vservername)
    return "vserver %s stop"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    "run <local> on the host and pipe its output into <remote> inside the guest"
    remote_command = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+remote_command)
def yum_check_installed (self, rpms):
    """True iff every rpm in <rpms> is installed in the guest.

    rpms may be a list of package names or a ready-made space-separated
    string; 'rpm -q' exits 0 only when all queried packages are present."""
    if isinstance (rpms, list):
        rpms=" ".join(rpms)
    return self.run_in_guest("rpm -q %s"%rpms)==0
# does a yum install in the vs, ignore yum retcod, check with rpm
def yum_install (self, rpms):
    """yum-install <rpms> (list or space-separated string) in the guest.

    yum's own exit status is deliberately ignored; the real check is the
    final rpm -q performed by yum_check_installed."""
    if isinstance (rpms, list):
        rpms=" ".join(rpms)
    self.run_in_guest("yum -y install %s"%rpms)
    # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
    self.run_in_guest("yum-complete-transaction -y")
    return self.yum_check_installed (rpms)
def auth_root (self):
    "root authentication structure, as expected by every PLCAPI call"
    return {'Username':self.plc_spec['PLC_ROOT_USER'],
            'AuthMethod':'password',
            'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
            'Role' : self.plc_spec['role'],
            }
def locate_site (self,sitename):
    """Return the site spec matching <sitename>, tried against both the
    site name and the login_base; raises when not found."""
    for site in self.plc_spec['sites']:
        if site['site_fields']['name'] == sitename:
            return site
        if site['site_fields']['login_base'] == sitename:
            return site
    raise Exception("Cannot locate site %s"%sitename)
def locate_node (self,nodename):
    """Return the (site_spec, node_spec) pair for <nodename>; raises when
    not found. Callers unpack the tuple - see locate_sliver_obj."""
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['name'] == nodename:
                return (site,node)
    raise Exception("Cannot locate node %s"%nodename)
def locate_hostname (self,hostname):
    """Return the (site_spec, node_spec) pair whose node_fields hostname
    matches <hostname>; raises when not found."""
    for site in self.plc_spec['sites']:
        for node in site['nodes']:
            if node['node_fields']['hostname'] == hostname:
                return (site,node)
    raise Exception("Cannot locate hostname %s"%hostname)
def locate_key (self,key_name):
    "return the key spec named <key_name>; raises when not found"
    for key in self.plc_spec['keys']:
        if key['key_name'] == key_name:
            return key
    raise Exception("Cannot locate key %s"%key_name)
def locate_private_key_from_key_names (self, key_names):
    """Return the path of the first key in <key_names> whose public and
    private parts both exist as local files, or None when none does."""
    # locate the first avail. key
    found=False
    for key_name in key_names:
        key_spec=self.locate_key(key_name)
        test_key=TestKey(self,key_spec)
        publickey=test_key.publicpath()
        privatekey=test_key.privatepath()
        if os.path.isfile(publickey) and os.path.isfile(privatekey):
            found=True
        if found: return privatekey
    return None
def locate_slice (self, slicename):
    "return the slice spec named <slicename>; raises when not found"
    for slice in self.plc_spec['slices']:
        if slice['slice_fields']['name'] == slicename:
            return slice
    raise Exception("Cannot locate slice %s"%slicename)
def all_sliver_objs (self):
    "one TestSliver object per (slice, node) pair declared in the spec"
    result=[]
    for slice_spec in self.plc_spec['slices']:
        slicename = slice_spec['slice_fields']['name']
        for nodename in slice_spec['nodenames']:
            result.append(self.locate_sliver_obj (nodename,slicename))
    return result
def locate_sliver_obj (self,nodename,slicename):
    "build the TestSliver object for <slicename> as running on <nodename>"
    (site,node) = self.locate_node(nodename)
    slice = self.locate_slice (slicename)
    # wrap the raw specs into Test* objects
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
    # xxx the slice site is assumed to be the node site - mhh - probably harmless
    test_slice = TestSlice (self, test_site, slice)
    return TestSliver (self, test_node, test_slice)

def locate_first_node(self):
    # TestNode for the first node of the first slice in the spec
    # NOTE(review): the trailing 'return test_node' line is elided in this view
    nodename=self.plc_spec['slices'][0]['nodenames'][0]
    (site,node) = self.locate_node(nodename)
    test_site = TestSite (self, site)
    test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    "sliver object for the first node of the first slice in the spec"
    first_slice = self.plc_spec['slices'][0]
    first_nodename = first_slice['nodenames'][0]
    return self.locate_sliver_obj(first_nodename, first_slice['slice_fields']['name'])
# all different hostboxes used in this plc
def gather_hostBoxes(self):
    # maps on sites and nodes, return [ (host_box,test_node) ]
    # NOTE(review): the 'tuples=[]' / 'result={}' initializations, the
    # 'result[box]=[]' branch body, and the trailing 'return result'
    # all sit on elided lines in this view
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode (self, test_site, node_spec)
            # only qemu nodes live on a hosting box
            if not test_node.is_real():
                tuples.append( (test_node.host_box(),test_node) )
    # transform into a dict { 'host_box' -> [ test_node .. ] }
    for (box,node) in tuples:
        if not result.has_key(box):
            result[box].append(node)

# a step for checking this stuff
def show_boxes (self):
    'print summary of nodes location'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        print box,":"," + ".join( [ node.name() for node in nodes ] )

# make this a valid step
def qemu_kill_all(self):
    'kill all qemu instances on the qemu boxes involved by this setup'
    # this is the brute force version, kill all qemus on that host box
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # pass the first nodename, as we don't push template-qemu on testboxes
        nodedir=nodes[0].nodedir()
        TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)

# make this a valid step
def qemu_list_all(self):
    'list all qemu instances on the qemu boxes involved by this setup'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # this is the brute force version, kill all qemus on that host box
        TestBoxQemu(box,self.options.buildname).qemu_list_all()

# kill only the right qemus
def qemu_list_mine(self):
    'list qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
        # NOTE(review): the per-box TestBoxQemu call sits on elided lines

# kill only the right qemus
def qemu_kill_mine(self):
    'kill the qemu instances for our nodes'
    for (box,nodes) in self.gather_hostBoxes().iteritems():
        # the fine-grain version
        # NOTE(review): the per-box kill call sits on elided lines
#################### display config
# NOTE(review): the 'def show(self)' and 'def export(self)' header lines are
# elided in this view; the two docstrings below belong to those step methods
    "show test configuration after localization"
    "print cut'n paste-able stuff to export env variables to your shell"
    # guess local domain from hostname
    domain=socket.gethostname().split('.',1)[1]
    fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
    print "export BUILD=%s"%self.options.buildname
    # NOTE(review): the 'else:' before the PLCHOSTVS line is elided
    if self.options.plcs_use_lxc:
        print "export PLCHOSTLXC=%s"%fqdn
    print "export PLCHOSTVS=%s"%fqdn
    print "export GUESTNAME=%s"%self.plc_spec['vservername']
    vplcname=self.plc_spec['vservername'].split('-')[-1]
    print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
    # find hostname of first node
    (hostname,qemubox) = self.all_node_infos()[0]
    print "export KVMHOST=%s.%s"%(qemubox,domain)
    print "export NODE=%s"%(hostname)
# keys always shown by the 'show' steps, even when not in verbose mode
always_display_keys=['PLC_WWW_HOST','nodes','sites',]

def show_pass (self,passno):
    # one dump pass over the plc spec; verbose shows everything, otherwise
    # only always_display_keys
    # NOTE(review): several key-dispatch lines ('if key==...'/'elif'/'else')
    # are elided in this view, which is why the branches below look orphaned
    for (key,val) in self.plc_spec.iteritems():
        if not self.options.verbose and key not in TestPlc.always_display_keys: continue
        self.display_site_spec(site)
        for node in site['nodes']:
            self.display_node_spec(node)
        elif key=='initscripts':
            for initscript in val:
                self.display_initscript_spec (initscript)
        self.display_slice_spec (slice)
        self.display_key_spec (key)
        if key not in ['sites','initscripts','slices','keys', 'sfa']:
            print '+ ',key,':',val

def display_site_spec (self,site):
    # verbose-aware dump of one site spec
    # NOTE(review): the 'if k==...' dispatch lines are partially elided here
    print '+ ======== site',site['site_fields']['name']
    for (k,v) in site.iteritems():
        if not self.options.verbose and k not in TestPlc.always_display_keys: continue
        print '+ ','nodes : ',
        print node['node_fields']['hostname'],'',
        print user['name'],'',
        elif k == 'site_fields':
            print '+ login_base',':',v['login_base']
        elif k == 'address_fields':
459 def display_initscript_spec (self,initscript):
460 print '+ ======== initscript',initscript['initscript_fields']['name']
462 def display_key_spec (self,key):
463 print '+ ======== key',key['key_name']
def display_slice_spec (self,slice):
    # verbose dump of one slice spec
    # NOTE(review): most of the per-key dispatch is elided in this view
    print '+ ======== slice',slice['slice_fields']['name']
    for (k,v) in slice.iteritems():
        elif k=='slice_fields':
            print '+ fields',':',
            print 'max_nodes=',v['max_nodes'],

def display_node_spec (self,node):
    # one-line node summary, plus a full pprint in verbose mode
    print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
    print "hostname=",node['node_fields']['hostname'],
    print "ip=",node['interface_fields']['ip']
    if self.options.verbose:
        utils.pprint("node details",node,depth=3)

# another entry point for just showing the boxes involved
def display_mapping (self):
    TestPlc.display_mapping_plc(self.plc_spec)

# NOTE(review): the @staticmethod decorator lines above the next two
# helpers are elided in this view
def display_mapping_plc (plc_spec):
    # where the plc and its nodes are hosted
    print '+ MyPLC',plc_spec['name']
    # WARNING this would not be right for lxc-based PLC's - should be harmless though
    print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
    print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
    for site_spec in plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            TestPlc.display_mapping_node(node_spec)

def display_mapping_node (node_spec):
    # qemu box and hostname for one node
    print '+ NODE %s'%(node_spec['name'])
    print '+\tqemu box %s'%node_spec['host_box']
    print '+\thostname=%s'%node_spec['node_fields']['hostname']
# write a timestamp in /vservers/<>.timestamp
# cannot be inside the vserver, that causes vserver .. build to cough
def timestamp_vs (self):
    "Create a timestamp to remember creation date for this plc"
    # NOTE(review): 'now=int(time.time())' sits on an elided line
    # TODO-lxc check this one
    # a first approx. is to store the timestamp close to the VM root like vs does
    stamp_path=self.vm_timestamp_path ()
    stamp_dir = os.path.dirname (stamp_path)
    utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
    return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0

# this is called inconditionnally at the beginning of the test sequence
# just in case this is a rerun, so if the vm is not running it's fine
# NOTE(review): the 'def vs_delete(self)' header line is elided in this view;
# the statements below form its body - note the 'else:' between the lxc and
# vserver teardown paths is also elided
    "vserver delete the test myplc"
    stamp_path=self.vm_timestamp_path()
    self.run_in_host("rm -f %s"%stamp_path)
    if self.options.plcs_use_lxc:
        self.run_in_host("lxc-stop --name %s"%self.vservername)
        self.run_in_host("lxc-destroy --name %s"%self.vservername)
    self.run_in_host("vserver --silent %s delete"%self.vservername)
# historically the build was being fetched by the tests
# now the build pushes itself as a subdir of the tests workdir
# so that the tests do not have to worry about extracting the build (svn, git, or whatever)
def vs_create (self):
    "vserver creation (no install done)"
    # push the local build/ dir to the testplc box
    # NOTE(review): an 'if self.is_local(): ... else:' guard around the push
    # appears to be on elided lines
    # a full path for the local calls
    build_dir=os.path.dirname(sys.argv[0])
    # sometimes this is empty - set to "." in such a case
    if not build_dir: build_dir="."
    build_dir += "/build"
    # use a standard name - will be relative to remote buildname
    # NOTE(review): the remote-name assignment is on an elided line
    # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
    self.test_ssh.rmdir(build_dir)
    self.test_ssh.copy(build_dir,recursive=True)
    # the repo url is taken from arch-rpms-url
    # with the last step (i386) removed
    repo_url = self.options.arch_rpms_url
    for level in [ 'arch' ]:
        repo_url = os.path.dirname(repo_url)
    # pass the vbuild-nightly options to vtest-init-vserver
    # NOTE(review): 'test_env_options=""' initialization is on an elided line
    test_env_options += " -p %s"%self.options.personality
    test_env_options += " -d %s"%self.options.pldistro
    test_env_options += " -f %s"%self.options.fcdistro
    # NOTE(review): the 'else:' between the two script assignments is elided
    if self.options.plcs_use_lxc:
        script="vtest-init-lxc.sh"
    script="vtest-init-vserver.sh"
    vserver_name = self.vservername
    vserver_options="--netdev eth0 --interface %s"%self.vserverip
    # NOTE(review): the 'try:'/'except:' around the reverse lookup is elided;
    # the two prints below belong to the failure branch
    vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
    vserver_options += " --hostname %s"%vserver_hostname
    print "Cannot reverse lookup %s"%self.vserverip
    print "This is considered fatal, as this might pollute the test results"
    create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
    return self.run_in_host(create_vserver) == 0
def plc_install(self):
    "yum install myplc, noderepo, and the plain bootstrapfs"

    # workaround for getting pgsql8.2 on centos5
    if self.options.fcdistro == "centos5":
        self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

    # map the build personality onto the rpm architecture suffix
    if self.options.personality == "linux32":
        arch = "i386"
    elif self.options.personality == "linux64":
        arch = "x86_64"
    else:
        raise Exception("Unsupported personality %r"%self.options.personality)
    nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)

    pkgs_list=[]
    pkgs_list.append ("slicerepo-%s"%nodefamily)
    pkgs_list.append ("myplc")
    pkgs_list.append ("noderepo-%s"%nodefamily)
    pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
    # the former 'pkgs_string' intermediate was computed but never used - dropped
    return self.yum_install (pkgs_list)
def plc_configure(self):
    # drive plc-config-tty non-interactively: write one 'e <var>\n<value>'
    # pair per configuration variable, then 'w' (write) and 'q' (quit)
    tmpname='%s.plc-config-tty'%(self.name())
    fileconf=open(tmpname,'w')
    # NOTE(review): most members of this variable list, and the closing
    # ']:' of the for statement, are on elided lines
    for var in [ 'PLC_NAME',
                 'PLC_MAIL_SUPPORT_ADDRESS',
                 # Above line was added for integrating SFA Testing
                 'PLC_RESERVATION_GRANULARITY',
                 'PLC_OMF_XMPP_SERVER',
        fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
    fileconf.write('w\n')
    fileconf.write('q\n')
    # NOTE(review): 'fileconf.close()' appears to be on an elided line
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
    utils.system('rm %s'%tmpname)
645 self.run_in_guest('service plc start')
650 self.run_in_guest('service plc stop')
654 "start the PLC vserver"
659 "stop the PLC vserver"
# stores the keys from the config for further use
def keys_store(self):
    "stores test users ssh keys in keys/"
    for key_spec in self.plc_spec['keys']:
        TestKey(self,key_spec).store_key()

def keys_clean(self):
    "removes keys cached in keys/"
    utils.system("rm -rf ./keys")
    # NOTE(review): the 'return True' step result is on an elided line

# fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
# for later direct access to the nodes
def keys_fetch(self):
    "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
    # NOTE(review): the 'dir="./keys"' assignment, the os.mkdir branch body,
    # 'overall=True' and 'return overall' are all on elided lines; the two
    # assignments right after the 'if' really sit at method level
    if not os.path.isdir(dir):
        vservername=self.vservername
    vm_root=self.vm_root_in_host()
    # fetch the debug key pair pushed by the build into the guest
    prefix = 'debug_ssh_key'
    for ext in [ 'pub', 'rsa' ] :
        src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
        dst="keys/%(vservername)s-debug.%(ext)s"%locals()
        if self.test_ssh.fetch(src,dst) != 0: overall=False
693 "create sites with PLCAPI"
694 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    action = "delete"
    return self.do_sites(action=action)
def do_sites (self,action="add"):
    # create (default) or delete every site of the spec, with its users
    # NOTE(review): the 'else:' before the create branch and the trailing
    # 'return True' are on elided lines
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        if (action != "add"):
            utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
            test_site.delete_site()
            # deleted with the site
            #test_site.delete_users()
        utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
        test_site.create_site()
        test_site.create_users()

def delete_all_sites (self):
    "Delete all sites in PLC, and related objects"
    print 'auth_root',self.auth_root()
    sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id'])
    # NOTE(review): the 'for site in sites:' line and 'return True' are elided
    # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
    if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
    site_id=site['site_id']
    print 'Deleting site_id',site_id
    self.apiserver.DeleteSite(self.auth_root(),site_id)
728 "create nodes with PLCAPI"
729 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    action = "delete"
    return self.do_nodes(action=action)
def do_nodes (self,action="add"):
    # create or delete every node declared in the spec
    # NOTE(review): the 'if action != "add": ... else:' dispatch and the
    # trailing 'return True' are on elided lines
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        utils.header("Deleting nodes in site %s"%test_site.name())
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            utils.header("Deleting %s"%test_node.name())
            test_node.delete_node()
        utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
        for node_spec in site_spec['nodes']:
            utils.pprint('Creating node %s'%node_spec,node_spec)
            test_node = TestNode (self,test_site,node_spec)
            test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    action = "add"
    return self.do_nodegroups(action)
def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    action = "delete"
    return self.do_nodegroups(action)
def translate_timestamp (start,grain,timestamp):
    # timestamps below one year are relative offsets counted in <grain>
    # units from <start>; anything larger is taken as an absolute time
    if timestamp < TestPlc.YEAR:
        return start + timestamp*grain
    return timestamp
def timestamp_printable (timestamp):
    "render a unix timestamp as a short human-readable UTC string"
    broken_down = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', broken_down)
769 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
771 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
772 print 'API answered grain=',grain
773 start=(now/grain)*grain
775 # find out all nodes that are reservable
776 nodes=self.all_reservable_nodenames()
778 utils.header ("No reservable node found - proceeding without leases")
781 # attach them to the leases as specified in plc_specs
782 # this is where the 'leases' field gets interpreted as relative of absolute
783 for lease_spec in self.plc_spec['leases']:
784 # skip the ones that come with a null slice id
785 if not lease_spec['slice']: continue
786 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
787 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
788 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
789 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
790 if lease_addition['errors']:
791 utils.header("Cannot create leases, %s"%lease_addition['errors'])
794 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
795 (nodes,lease_spec['slice'],
796 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
797 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
801 def delete_leases (self):
802 "remove all leases in the myplc side"
803 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
804 utils.header("Cleaning leases %r"%lease_ids)
805 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
808 def list_leases (self):
809 "list all leases known to the myplc"
810 leases = self.apiserver.GetLeases(self.auth_root())
813 current=l['t_until']>=now
814 if self.options.verbose or current:
815 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
816 TestPlc.timestamp_printable(l['t_from']),
817 TestPlc.timestamp_printable(l['t_until'])))
# create nodegroups if needed, and populate
def do_nodegroups (self, action="add"):
    # two passes: first collect { nodegroupname -> [nodename..] } from the
    # spec, then create/delete tag types, nodegroups and node tags via the API
    # NOTE(review): this method is heavily elided in this view - the
    # 'groups_dict={}' / 'overall=True' initializations, the add/delete
    # dispatch, the try/except lines and the AddTagType closing arguments
    # are all missing; comments below flag the gaps
    # 1st pass to scan contents
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode (self,test_site,node_spec)
            if node_spec.has_key('nodegroups'):
                nodegroupnames=node_spec['nodegroups']
                # a single name is allowed as a shorthand for a one-element list
                if isinstance(nodegroupnames,StringTypes):
                    nodegroupnames = [ nodegroupnames ]
                for nodegroupname in nodegroupnames:
                    if not groups_dict.has_key(nodegroupname):
                        groups_dict[nodegroupname]=[]
                    groups_dict[nodegroupname].append(test_node.name())
    auth=self.auth_root()
    for (nodegroupname,group_nodes) in groups_dict.iteritems():
        # NOTE(review): 'if action == "add":' dispatch line elided here
        print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
        # first, check if the nodetagtype is here
        tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
        # NOTE(review): 'if tag_types: ... else:' branch lines elided
        tag_type_id = tag_types[0]['tag_type_id']
        tag_type_id = self.apiserver.AddTagType(auth,
                                                {'tagname':nodegroupname,
                                                 'description': 'for nodegroup %s'%nodegroupname,
        print 'located tag (type)',nodegroupname,'as',tag_type_id
        # NOTE(review): the 'if not nodegroups:' guard around AddNodeGroup is elided
        nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
        self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
        print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
        # set node tag on all nodes, value='yes'
        for nodename in group_nodes:
            # NOTE(review): 'try:'/'except:' lines elided around the two API calls
            self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
            traceback.print_exc()
            print 'node',nodename,'seems to already have tag',nodegroupname
            # check anyway
            expect_yes = self.apiserver.GetNodeTags(auth,
                                                    {'hostname':nodename,
                                                     'tagname':nodegroupname},
                                                    ['value'])[0]['value']
            if expect_yes != "yes":
                print 'Mismatch node tag on node',nodename,'got',expect_yes
            # NOTE(review): 'except:' branch lines elided
            if not self.options.dry_run:
                print 'Cannot find tag',nodegroupname,'on node',nodename
        # NOTE(review): the delete branch header and its try/except are elided
        print 'cleaning nodegroup',nodegroupname
        self.apiserver.DeleteNodeGroup(auth,nodegroupname)
        traceback.print_exc()
# a list of TestNode objs
def all_nodes (self):
    # NOTE(review): 'nodes=[]' and 'return nodes' sit on elided lines
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            nodes.append(TestNode (self,test_site,node_spec))

# return a list of tuples (nodename,qemuname)
def all_node_infos (self) :
    # NOTE(review): 'node_infos = []' and the return sit on elided lines
    for site_spec in self.plc_spec['sites']:
        node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
                        for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    "the hostname column of all_node_infos"
    return [ nodename for (nodename, qemuname) in self.all_node_infos() ]
def all_reservable_nodenames (self):
    # hostnames of the nodes whose node_type is 'reservable'
    # NOTE(review): 'res=[]' and 'return res' sit on elided lines
    for site_spec in self.plc_spec['sites']:
        for node_spec in site_spec['nodes']:
            node_fields=node_spec['node_fields']
            if 'node_type' in node_fields and node_fields['node_type']=='reservable':
                res.append(node_fields['hostname'])

# silent_minutes : during the first <silent_minutes> minutes nothing gets printed
def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
    # poll GetNodes until every node reaches target_boot_state, give up
    # after timeout_minutes; real (non-qemu) nodes are counted as done
    if self.options.dry_run:
        # NOTE(review): the dry-run body (print + early return) is elided
    # compute timeouts
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    # the nodes that haven't checked yet - start with a full list and shrink over time
    tocheck = self.all_hostnames()
    utils.header("checking nodes %r"%tocheck)
    # create a dict hostname -> status
    status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
    # NOTE(review): the enclosing 'while tocheck:' loop line is elided here
    tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
    # refresh status
    for array in tocheck_status:
        hostname=array['hostname']
        boot_state=array['boot_state']
        # NOTE(review): the 'else:' between this branch and the real-node
        # check is elided
        if boot_state == target_boot_state:
            utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
        # if it's a real node, never mind
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
            # pretend the node has reached the target state
            boot_state = target_boot_state
        elif datetime.datetime.now() > graceout:
            utils.header ("%s still in '%s' state"%(hostname,boot_state))
            graceout=datetime.datetime.now()+datetime.timedelta(1)
        status[hostname] = boot_state
    # shrink the list to the nodes still short of the target state
    tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
    # NOTE(review): 'if not tocheck: break' appears to be on elided lines
    if datetime.datetime.now() > timeout:
        for hostname in tocheck:
            utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
        # NOTE(review): 'return False' is on an elided line
    # otherwise, sleep for a while
955 # only useful in empty plcs
def nodes_booted(self):
    # wait (30 minutes max, first 28 silent) for all nodes to reach 'boot'
    return self.nodes_check_boot_state('boot',
                                       timeout_minutes=30,
                                       silent_minutes=28)
def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
    # repeatedly try to ssh into every node until they all answer or the
    # timeout expires; in debug mode the build's debug key is used
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
    vservername=self.vservername
    # NOTE(review): the 'if debug: message="debug" ... else: message="boot"'
    # branching lines around the two key assignments are elided here
    local_key = "keys/%(vservername)s-debug.rsa"%locals()
    local_key = "keys/key_admin.rsa"
    node_infos = self.all_node_infos()
    utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
    for (nodename,qemuname) in node_infos:
        utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
    utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
                 (timeout_minutes,silent_minutes,period))
    # NOTE(review): the enclosing 'while node_infos:' loop line is elided
    for node_info in node_infos:
        (hostname,qemuname) = node_info
        # try to run 'hostname' in the node
        command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
        # don't spam logs - show the command only after the grace period
        success = utils.system ( command, silent=datetime.datetime.now() < graceout)
        # NOTE(review): 'if success==0:' / 'else:' dispatch lines are elided
        utils.header('Successfully entered root@%s (%s)'%(hostname,message))
        node_infos.remove(node_info)
        # we will have tried real nodes once, in case they're up - but if not, just skip
        (site_spec,node_spec)=self.locate_hostname(hostname)
        if TestNode.is_real_model(node_spec['node_fields']['model']):
            utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
            node_infos.remove(node_info)
    if datetime.datetime.now() > timeout:
        for (hostname,qemuname) in node_infos:
            utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
        # NOTE(review): 'return False' is on an elided line
    # otherwise, sleep for a while
1003 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    check_args = dict(debug=True, timeout_minutes=10, silent_minutes=8)
    return self.check_nodes_ssh(**check_args)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # longer deadlines: a full boot takes much more time than debug mode
    options = dict(debug=False, timeout_minutes=40, silent_minutes=38)
    return self.check_nodes_ssh(**options)
# node steps: the real implementations live in TestNode; these empty
# bodies are presumably filled in by the @node_mapper decorator
# (decorator lines are not visible in this copy - confirm)
def qemu_local_init (self): pass
def bootcd (self): pass
def qemu_local_config (self): pass
def nodestate_reinstall (self): pass
def nodestate_safeboot (self): pass
def nodestate_boot (self): pass
def nodestate_show (self): pass
def qemu_export (self): pass
1031 ### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the hook scripts on one representative node
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the hook scripts on one representative sliver
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # short-circuit: the sliver hooks only run when the node hooks passed
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
def do_check_initscripts(self):
    # verify that each slice's initscript stamp shows up on all its slivers
    # NOTE(review): truncated copy - the 'overall' accumulator, the
    # 'continue' under the guard, the failure branch and the final return
    # are not visible here
    for slice_spec in self.plc_spec['slices']:
        # slices without an initscript stamp are skipped
        if not slice_spec.has_key('initscriptstamp'):
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper around the actual check
    result = self.do_check_initscripts()
    return result
def initscripts (self):
    "create initscripts with PLCAPI"
    # register every initscript declared in the spec
    # NOTE(review): the 'return True' expected at the end is not visible in this copy
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
def delete_initscripts (self):
    "delete initscripts with PLCAPI"
    # best-effort deletion: an already-absent initscript is not an error
    # NOTE(review): truncated copy - the try/except lines wrapping
    # DeleteInitScript and the final return are not visible here
    for initscript in self.plc_spec['initscripts']:
        initscript_name = initscript['initscript_fields']['name']
        print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
        self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
        print initscript_name,'deleted'
        print 'deletion went wrong - probably did not exist'
# body of the 'slices' step - its 'def' line is not visible in this copy
"create slices with PLCAPI"
return self.do_slices(action="add")
def delete_slices (self):
    "delete slices with PLCAPI"
    # dispatch to the generic worker with the 'delete' action
    return self.do_slices("delete")
def fill_slices (self):
    "add nodes in slices with PLCAPI"
    # dispatch to the generic worker with the 'fill' action
    return self.do_slices("fill")
def empty_slices (self):
    "remove nodes from slices with PLCAPI"
    # dispatch to the generic worker with the 'empty' action
    return self.do_slices("empty")
def do_slices (self, action="add"):
    # generic worker behind the slices/delete_slices/fill_slices/empty_slices
    # steps: apply 'action' to every slice declared in the spec
    # NOTE(review): truncated copy - the 'else:' expected before
    # create_slice and the final return are not visible here
    for slice in self.plc_spec['slices']:
        site_spec = self.locate_site (slice['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice)
        if action == "delete":
            test_slice.delete_slice()
        elif action=="fill":
            test_slice.add_nodes()
        elif action=="empty":
            test_slice.delete_nodes()
        test_slice.create_slice()
# slice steps: presumably filled in by the @slice_mapper decorator, with the
# real implementations in TestSlice (decorator lines not visible in this copy)
def ssh_slice(self): pass
def ssh_slice_off (self): pass
def keys_clear_known_hosts (self): pass
def speed_up_slices (self):
    "tweak nodemanager settings on all nodes using a conf file"
    # create the template on the server-side
    template="%s.nodemanager"%self.name()
    template_file = open (template,"w")
    template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
    template_file.close()
    # push it under the PLC's PlanetLabConf area so nodes can fetch it
    in_vm="/var/www/html/PlanetLabConf/nodemanager"
    remote="%s/%s"%(self.vm_root_in_host(),in_vm)
    self.test_ssh.copy_abs(template,remote)
    # register the conf file with the API; nm gets restarted after install
    self.apiserver.AddConfFile (self.auth_root(),
                                {'dest':'/etc/sysconfig/nodemanager',
                                 'source':'PlanetLabConf/nodemanager',
                                 'postinstall_cmd':'service nm restart',})
    # NOTE(review): the final 'return True' is not visible in this copy
def debug_nodemanager (self):
    "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
    # write the conf template locally ...
    template="%s.nodemanager"%self.name()
    template_file = open (template,"w")
    template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
    template_file.close()
    # ... and push it under the PLC's PlanetLabConf area
    in_vm="/var/www/html/PlanetLabConf/nodemanager"
    remote="%s/%s"%(self.vm_root_in_host(),in_vm)
    self.test_ssh.copy_abs(template,remote)
    # NOTE(review): the final 'return True' is not visible in this copy
# qemu/timestamp steps: presumably filled in by the @node_mapper decorator
# (decorator lines not visible in this copy)
def qemu_start (self) : pass
def timestamp_qemu (self) : pass
def check_tcp (self):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # NOTE(review): truncated copy - the loop over specs, the 'port'
    # assignment and the success/failure bookkeeping are not visible here
    if 'tcp_test' not in self.plc_spec:
        utils.header ("check_tcp: no config found")
    specs = self.plc_spec['tcp_test']
    # start a tcp server inside the server-side sliver
    s_test_sliver = self.locate_sliver_obj (spec['server_node'],spec['server_slice'])
    if not s_test_sliver.run_tcp_server(port,timeout=10):
    # idem for the client side
    # NOTE(review): suspected copy/paste bug - the client sliver is located
    # with the *server* spec keys; expected spec['client_node'] /
    # spec['client_slice'] - confirm against the tcp_test spec format
    c_test_sliver = self.locate_sliver_obj(spec['server_node'],spec['server_slice'])
    if not c_test_sliver.run_tcp_client(s_test_sliver.test_node.name(),port):
1177 # painfully enough, we need to allow for some time as netflow might show up last
def check_system_slice (self):
    "all nodes: check that a system slice is alive"
    # netflow currently not working in the lxc distro, and drl not built at
    # all in the wtx distro - so finding either one counts as success;
    # drl is only probed when netflow came up empty (short-circuit)
    netflow_found = self.check_netflow()
    return netflow_found or self.check_drl()
def check_netflow (self):
    # probe the 'netflow' system slice on every node
    return self._check_system_slice('netflow')
def check_drl (self):
    # probe the 'drl' system slice on every node
    return self._check_system_slice('drl')
1189 # we have the slices up already here, so it should not take too long
def _check_system_slice (self, slicename, timeout_minutes=5, period=15):
    # poll every node until the given system slice shows up, or give up
    # after timeout_minutes; 'period' is the polling interval in seconds
    # NOTE(review): truncated copy - the polling loop header, the success
    # message, the sleep and the return statements are not visible here
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    test_nodes=self.all_nodes()
    for test_node in test_nodes:
        if test_node._check_system_slice (slicename,dry_run=self.options.dry_run):
            # node is fine: stop polling it
            # NOTE(review): mutating test_nodes while iterating it skips entries
            test_nodes.remove(test_node)
    # past the deadline: report every node still missing the slice
    if datetime.datetime.now () > timeout:
        for test_node in test_nodes:
            utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably from
    # 'location') is not visible in this copy
    command += " -- --check"
    # keep the run short on single-node configurations
    if self.options.size == 1:
        command +=  " --tiny"
    return ( self.run_in_guest(command) == 0)
1221 # populate runs the same utility without slightly different options
1222 # in particular runs with --preserve (dont cleanup) and without --check
1223 # also it gets run twice, once with the --foreign option for creating fake foreign entries
def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # install the whole sfa stack in one shot
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install(packages)
def sfa_install_core(self):
    "yum install sfa"
    return self.yum_install ("sfa")
def sfa_install_plc(self):
    "yum install sfa-plc"
    # the plc-side sfa driver package
    return self.yum_install("sfa-plc")
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # the sfatables filtering package
    return self.yum_install("sfa-sfatables")
1241 # for some very odd reason, this sometimes fails with the following symptom
1242 # # yum install sfa-client
1243 # Setting up Install Process
1245 # Downloading Packages:
1246 # Running rpm_check_debug
1247 # Running Transaction Test
1248 # Transaction Test Succeeded
1249 # Running Transaction
1250 # Transaction couldn't start:
1251 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1252 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1253 # even though in the same context I have
1254 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1255 # Filesystem Size Used Avail Use% Mounted on
1256 # /dev/hdv1 806G 264G 501G 35% /
1257 # none 16M 36K 16M 1% /tmp
1259 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
def sfa_install_client(self):
    "yum install sfa-client"
    # yum sometimes fails bogusly on this package (claims missing disk space,
    # see comment block above); as a workaround, retry by installing the
    # rpm straight from the yum cache
    first_try=self.yum_install("sfa-client")
    if first_try: return True
    utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
    (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
    # BUG FIX: this used to reference the undefined name 'rpm_path',
    # raising NameError whenever the workaround path was taken
    utils.header("rpm_path=<<%s>>"%cached_rpm_path)
    self.run_in_guest("rpm -i %s"%cached_rpm_path)
    # final verdict: is the package installed now?
    return self.yum_check_installed ("sfa-client")
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # try the newest tool first, then the older fallbacks;
    # stop (and succeed) as soon as one of them works
    for nuke_command in ("sfaadmin.py registry nuke",
                         "sfa-nuke.py",
                         "sfa-nuke-plc.py"):
        if self.run_in_guest(nuke_command) == 0:
            return True
    return False
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # NOTE(review): truncated copy - the except clause for DeletePerson and
    # the final return are not visible here
    sfa_spec=self.plc_spec['sfa']
    for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
        # delete the site; absence is expected on re-runs
        login_base=auth_sfa_spec['login_base']
        try: self.apiserver.DeleteSite (self.auth_root(),login_base)
        except: print "Site %s already absent from PLC db"%login_base
        # delete both the PI and the regular user attached to this auth
        for spec_name in ['pi_spec','user_spec']:
            user_spec=auth_sfa_spec[spec_name]
            username=user_spec['email']
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            # this in fact is expected as sites delete their members
            #print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    # remove the packages, then the leftover state and logs
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # force sfa-plc out without running its scriptlets
    # NOTE(review): one line (presumably a comment) is not visible just above this
    self.run_in_guest("rpm -e --noscripts sfa-plc")
1309 ### run unit tests for SFA
1310 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1311 # Running Transaction
1312 # Transaction couldn't start:
1313 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1314 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1315 # no matter how many Gbs are available on the testplc
1316 # could not figure out what's wrong, so...
1317 # if the yum install phase fails, consider the test is successful
1318 # other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # NOTE(review): the 'return True' expected under this branch
        # (install failures are deliberately ignored, per the comment block
        # above) is not visible in this copy
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# body of confdir() - its 'def' line is not visible in this copy; creates
# (when needed) and returns conf.<plcname>, the local configuration dir
dirname="conf.%s"%self.plc_spec['name']
if not os.path.isdir(dirname):
    utils.system("mkdir -p %s"%dirname)
if not os.path.isdir(dirname):
    # NOTE(review): raising a plain string is a TypeError on modern
    # Python - should be e.g. raise Exception("Cannot create ...")
    raise "Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    """Return the path of 'filename' inside this plc's local config dir."""
    config_dir = self.confdir()
    return config_dir + "/" + filename
def confsubdir(self,dirname,clean,dry_run=False):
    # create (and presumably return) a subdir of confdir(); when 'clean' is
    # set the subdir is wiped first
    # NOTE(review): truncated copy - the 'if clean:' guard around the rm and
    # the final 'return subdirname' are not visible here
    subdirname="%s/%s"%(self.confdir(),dirname)
    utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # NOTE(review): raising a plain string is a TypeError on modern Python
        raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
def conffile_clean (self,filename):
    """Remove 'filename' from the local config dir; True when rm succeeds."""
    target = self.conffile(filename)
    status = utils.system("rm -rf %s" % target)
    return status == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # build an sfa-config-tty input script locally, then pipe it into the
    # guest; 'e VAR\nVALUE\n' sets a variable, 'w' writes, 'R' restarts, 'q' quits
    # NOTE(review): truncated copy - part of the variable list, the loop
    # header over boolean variables, the 'val' default and the final return
    # are not visible here
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                 'SFA_INTERFACE_HRN',
                 'SFA_REGISTRY_LEVEL1_AUTH',
                 'SFA_REGISTRY_HOST',
                 'SFA_AGGREGATE_HOST',
                 'SFA_GENERIC_FLAVOUR',
                 'SFA_AGGREGATE_ENABLED',
        if self.plc_spec['sfa'].has_key(var):
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    if self.plc_spec['sfa'][var]: val='true'
    fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # echo the script locally for the logs, then feed it to sfa-config-tty
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    """One <aggregate/> element describing this plc, for aggregates.xml."""
    sfa_spec = self.plc_spec['sfa']
    port = sfa_spec['neighbours-port']
    hrn = sfa_spec['SFA_REGISTRY_ROOT_AUTH']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % (self.vserverip, hrn, port)
def registry_xml_line(self):
    """One <registry/> element describing this plc, for registries.xml."""
    # the registry port is hard-wired to 12345
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
1400 # a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # NOTE(review): two lines are not visible in this copy right here
    # (presumably an early return for the empty case)
    agg_fname=self.conffile("agg.xml")
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # push both files into the guest; both copies must succeed
    return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
       and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
def sfa_import(self):
    # imports the PLC db into the SFA registry
    # NOTE(review): truncated copy - the step docstring and one more line
    # are not visible here
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # NOTE(review): the ==0 result is discarded - looks like a missing
    # 'return' (or the tail of an and-chain not visible in this copy)
    self.run_in_guest('sfaadmin.py reg import_registry')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    "service sfa start"
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
        # NOTE(review): the 'return True' expected under this guard is not
        # visible in this copy
    sfa_spec=self.plc_spec['sfa']
    # cannot use auth_sfa_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
        test_slice=TestAuthSfa(self,slice_spec)
        # generate the sfi config locally, under a subdir of confdir
        dir_basename=os.path.basename(test_slice.sfi_path())
        dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_configure(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
    # NOTE(review): the final return is not visible in this copy
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    self.run_in_guest("rm -rf /root/sfi")
    # NOTE(review): the final return is not visible in this copy
# sfa steps: presumably filled in by the @auth_sfa_mapper decorator, with
# the real implementations in TestAuthSfa (decorator lines not visible in
# this copy)
def sfa_add_site (self): pass
def sfa_add_pi (self): pass
def sfa_add_user(self): pass
def sfa_update_user(self): pass
def sfa_add_slice(self): pass
def sfa_discover(self): pass
def sfa_create_slice(self): pass
def sfa_check_slice_plc(self): pass
def sfa_update_slice(self): pass
def sfi_list(self): pass
def sfi_show(self): pass
def sfi_slices(self): pass
def ssh_slice_sfa(self): pass
def sfa_delete_user(self): pass
def sfa_delete_slice(self): pass
# body of the sfa_stop step - its 'def' and docstring are not visible in
# this copy; NOTE(review): the ==0 result is discarded, likely a missing 'return'
self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # NOTE(review): the line initializing 'command' (presumably from
    # 'location') is not visible in this copy
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    # NOTE(review): 'remote' is re-bound to a boolean here, shadowing the
    # path computed above
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
def gather_logs (self):
    "gets all possible logs from plc's/qemu node's/slice's for future reference"
    # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
    # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
    # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
    # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
    # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
    # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
    # NOTE(review): the final return is not visible in this copy
    print "-------------------- TestPlc.gather_logs : PLC's /var/log"
    self.gather_var_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
    self.gather_pgsql_logs ()
    print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
    self.gather_root_sfi ()
    print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite (self,site_spec)
        for node_spec in site_spec['nodes']:
            test_node=TestNode(self,test_site,node_spec)
            test_node.gather_qemu_logs()
    print "-------------------- TestPlc.gather_logs : nodes's /var/log"
    self.gather_nodes_var_logs()
    print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
    self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    """Fetch each sliver's /var/log into logs/sliver.var-log.<sliver>/."""
    for sliver in self.all_sliver_objs():
        fetch_command = sliver.tar_var_logs()
        dest = "logs/sliver.var-log.%s" % sliver.name()
        utils.system("mkdir -p %s" % dest)
        # pipe the remote tar stream into a local extraction
        utils.system(fetch_command + " | tar -C %s -xf -" % dest)
def gather_var_logs (self):
    """Mirror the PLC's /var/log under logs/myplc.var-log.<plcname>/."""
    dest = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    # stream a tar of the guest's /var/log into a local extraction
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % dest)
    # make the httpd logs readable by everyone
    utils.system("chmod a+r,a+x %s/httpd" % dest)
def gather_pgsql_logs (self):
    """Mirror the PLC's postgres logs under logs/myplc.pgsql-log.<plcname>/."""
    dest = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    # stream a tar of the guest's pg_log dir into a local extraction
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % dest)
def gather_root_sfi (self):
    """Mirror the PLC's /root/sfi under logs/sfi.<plcname>/."""
    dest = "logs/sfi.%s" % self.name()
    utils.system("mkdir -p %s" % dest)
    # stream a tar of the guest's /root/sfi into a local extraction
    tar_out = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % dest)
def gather_nodes_var_logs (self):
    """Pull /var/log from every node (over ssh with the admin key)."""
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode(self, test_site, node_spec)
            ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
            # remote tar piped into a local per-node extraction dir
            fetch = ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
            utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
            utils.system(fetch)
1577 # returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): truncated copy - the try/except around options.dbname
    # and the timestamp-based fallback name are not fully visible here
    name=self.options.dbname
    if not isinstance(name,StringTypes):
        # fall back on a timestamp-based name
        t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE(review): "planetab5" looks like a typo for "planetlab5", but
    # plc_db_restore uses the same spelling so dump/restore filenames match;
    # left as-is to preserve compatibility with existing dumps
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # NOTE(review): the final 'return True' is not visible in this copy
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    # same "planetab5" spelling as plc_db_dump, so the filenames match
    dump=self.dbfile("planetab5")
    ##stop httpd service
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    # drop and recreate the database, then replay the dump
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    ##starting httpd service
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
def standby_1_through_20(self):
    """convenience function to wait for a specified number of minutes"""
    # NOTE(review): the body of this convenience step is not visible in this copy
# the standby_<n> steps below each wait n minutes; presumably filled in by
# the @standby_generic decorator defined at the top of the file, which
# derives the duration from the method name (decorator lines not visible here)
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass