# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import os, os.path
import sys
import time
import datetime
import traceback
import socket
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes, dry_run):
    utils.header('Entering StandBy for %d minutes' % minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True
def standby_generic(func):
    def actual(self):
        minutes = int(func.__name__.split("_")[1])
        return standby(minutes, self.options.dry_run)
    return actual
def node_mapper(method):
    def actual(self, *args, **kwds):
        overall = True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestNode.__dict__[method.__name__].__doc__
    return actual
def slice_mapper(method):
    def actual(self):
        overall = True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestSlice.__dict__[method.__name__].__doc__
    return actual
def auth_sfa_mapper(method):
    def actual(self):
        overall = True
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth = TestAuthSfa(self, auth_spec)
            if not auth_method(test_auth, self.options): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
    return actual
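# a sketch of how these mappers get used further down: the step body is an
# empty placeholder, and the decorator fans the call out to every TestNode
# (resp. TestSlice, TestAuthSfa) object, ANDing the individual results:
#   @node_mapper
#   def qemu_start (self): pass
# so 'qemu_start' becomes a regular step that runs TestNode.qemu_start on each node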
SEP = '<sep>'          # step-list separator, filtered out by valid_step below
SEPSFA = '<sep_sfa>'   # ditto, for the SFA-related steps

class TestPlc:

    default_steps = [
        'vs_delete','timestamp_vs','vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
        'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
        'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'check_vsys_defaults', 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'cross_check_tcp@1', 'check_system_slice', SEP,
        'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump' , 'plc_db_restore', SEP,
        'check_netflow','check_drl', SEP,
        'debug_nodemanager', SEP,
        'standby_1_through_20',SEP,
        ]
    @staticmethod
    def printable_steps (step_list):
        single_line = " ".join(step_list) + " "
        return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")

    @staticmethod
    def valid_step (step):
        return step != SEP and step != SEPSFA
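    # for instance (a sketch):
    #   TestPlc.printable_steps(['vs_delete','timestamp_vs',SEP,'plc_install'])
    # renders the separator as a backslash-newline continuation, so the step
    # list prints as a multi-line shell-style command; valid_step(SEP) is False,
    # which is how the separators get skipped when the steps are actually run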
    # turn off the sfa-related steps when build has skipped SFA
    # this was originally for centos5 but is still valid
    # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def check_whether_build_has_sfa (rpms_url):
        utils.header("Checking if build provides SFA package...")
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod = os.system("curl --silent %s/ | grep -q sfa-" % rpms_url)
        # full builds are expected to return with 0 here
        if retcod == 0:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps = [ step for step in TestPlc.default_steps if step.find('sfa') >= 0 or step.find("sfi") >= 0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__ (self, plc_spec, options):
        self.plc_spec = plc_spec
        self.options = options
        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
        self.vserverip = plc_spec['vserverip']
        self.vservername = plc_spec['vservername']
        self.url = "https://%s:443/PLCAPI/" % plc_spec['vserverip']
        self.apiserver = TestApiserver(self.url, options.dry_run)
        (self.ssh_node_boot_timeout, self.ssh_node_boot_silent) = plc_spec['ssh_node_boot_timers']
        (self.ssh_node_debug_timeout, self.ssh_node_debug_silent) = plc_spec['ssh_node_debug_timers']
    def has_addresses_api (self):
        return self.apiserver.has_method('AddIpAddress')

    def name (self):
        name = self.plc_spec['name']
        return "%s.%s" % (name, self.vservername)

    # the box hosting this plc (accessor name assumed)
    def host_box (self):
        return self.plc_spec['host_box']

    def is_local (self):
        return self.test_ssh.is_local()
    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary

    def actual_command_in_guest (self, command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest (self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest (self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest (self, command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host (self, command):
        return self.test_ssh.run_in_buildname(command)
    # command gets run in the plc's vm
    def host_to_guest (self, command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s" % (self.vserverip, command)
        else:
            return "vserver %s exec %s" % (self.vservername, command)

    def vm_root_in_host (self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/" % (self.vservername)
        else:
            return "/vservers/%s" % (self.vservername)

    def vm_timestamp_path (self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/%s.timestamp" % (self.vservername, self.vservername)
        else:
            return "/vservers/%s.timestamp" % (self.vservername)

    # start/stop the vserver
    def start_guest_in_host (self):
        if self.options.plcs_use_lxc:
            return "lxc-start --daemon --name=%s" % (self.vservername)
        else:
            return "vserver %s start" % (self.vservername)

    def stop_guest_in_host (self):
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s" % (self.vservername)
        else:
            return "vserver %s stop" % (self.vservername)
    def run_in_guest_piped (self, local, remote):
        return utils.system(local + " | " + self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True))
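    # to illustrate (a sketch, assuming an lxc-based plc whose host box is 'testbox1'):
    #   self.run_in_guest("service plc start")
    # goes through test_ssh and expands to something like
    #   ssh root@testbox1 ssh -o StrictHostKeyChecking=no <vserverip> service plc start
    # while run_in_guest_piped additionally feeds the guest command from a local one's stdout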
    def yum_check_installed (self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        return self.run_in_guest("rpm -q %s" % rpms) == 0

    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install (self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        self.run_in_guest("yum -y install %s" % rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed(rpms)
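    # both forms are accepted, e.g. (sketch):
    #   self.yum_install("myplc")
    #   self.yum_install(["myplc", "noderepo-%s" % nodefamily])
    # note that yum's own exit code is deliberately ignored - only the
    # final 'rpm -q' query decides whether the step succeeded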
    def auth_root (self):
        return {'Username': self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod': 'password',
                'AuthString': self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role': self.plc_spec['role'],
                }
    def locate_site (self, sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception, "Cannot locate site %s" % sitename

    def locate_node (self, nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site, node)
        raise Exception, "Cannot locate node %s" % nodename

    def locate_hostname (self, hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site, node)
        raise Exception, "Cannot locate hostname %s" % hostname

    def locate_key (self, key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception, "Cannot locate key %s" % key_name

    def locate_private_key_from_key_names (self, key_names):
        # locate the first avail. key
        found = False
        for key_name in key_names:
            key_spec = self.locate_key(key_name)
            test_key = TestKey(self, key_spec)
            publickey = test_key.publicpath()
            privatekey = test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                found = True
            if found: return privatekey
        return None

    def locate_slice (self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception, "Cannot locate slice %s" % slicename
    def all_sliver_objs (self):
        result = []
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj(nodename, slicename))
        return result
    def locate_sliver_obj (self, nodename, slicename):
        (site, node) = self.locate_node(nodename)
        slice = self.locate_slice(slicename)
        # build objects
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice(self, test_site, slice)
        return TestSliver(self, test_node, test_slice)

    def locate_first_node (self):
        nodename = self.plc_spec['slices'][0]['nodenames'][0]
        (site, node) = self.locate_node(nodename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        return test_node

    def locate_first_sliver (self):
        slice_spec = self.plc_spec['slices'][0]
        slicename = slice_spec['slice_fields']['name']
        nodename = slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename, slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes (self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append((test_node.host_box(), test_node))
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box, node) in tuples:
            if not result.has_key(box):
                result[box] = []
            result[box].append(node)
        return result

    # a step for checking this stuff
    def show_boxes (self):
        'print summary of nodes location'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            print box, ":", " + ".join([ node.name() for node in nodes ])
        return True
    # make this a valid step
    def qemu_kill_all (self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir = nodes[0].nodedir()
            TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
        return True

    # make this a valid step
    def qemu_list_all (self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, kill all qemus on that host box
            TestBoxQemu(box, self.options.buildname).qemu_list_all()
        return True

    # kill only the right qemus
    def qemu_list_mine (self):
        'list qemu instances for our nodes'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version (assumes TestNode exposes list_qemu)
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine (self):
        'kill the qemu instances for our nodes'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version (assumes TestNode exposes kill_qemu)
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show (self):
        "show test configuration after localization"
        self.show_pass(1)
        self.show_pass(2)
        return True

    # ugly hack to make sure 'run export' only reports about the 1st plc
    # to avoid confusion - also we use 'inri_slice1' in various aliases..
    exported_id = 1
    def export (self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        if TestPlc.exported_id > 1:
            print "export GUESTHOSTNAME%d=%s" % (TestPlc.exported_id, self.plc_spec['vservername'])
            return True
        TestPlc.exported_id += 1
        domain = socket.gethostname().split('.',1)[1]
        fqdn = "%s.%s" % (self.plc_spec['host_box'], domain)
        print "export BUILD=%s" % self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s" % fqdn
        else:
            print "export PLCHOSTVS=%s" % fqdn
        print "export GUESTNAME=%s" % self.plc_spec['vservername']
        vplcname = self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s" % (vplcname, domain)
        # find hostname of first node
        (hostname, qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s" % (qemubox, domain)
        print "export NODE=%s" % (hostname)
        return True
    always_display_keys = ['PLC_WWW_HOST','nodes','sites',]
    def show_pass (self, passno):
        for (key, val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key == 'initscripts':
                    for initscript in val:
                        self.display_initscript_spec(initscript)
                elif key == 'slices':
                    for slice in val:
                        self.display_slice_spec(slice)
                elif key == 'keys':
                    for key in val:
                        self.display_key_spec(key)
            elif passno == 1:
                if key not in ['sites','initscripts','slices','keys', 'sfa']:
                    print '+ ', key, ':', val
    def display_site_spec (self, site):
        print '+ ======== site', site['site_fields']['name']
        for (k, v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k == 'nodes':
                if v:
                    print '+ ', 'nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'], '',
                    print ''
            elif k == 'users':
                if v:
                    print '+ users : ',
                    for user in v:
                        print user['name'], '',
                    print ''
            elif k == 'site_fields':
                print '+ login_base', ':', v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ', k, ':', v
    def display_initscript_spec (self, initscript):
        print '+ ======== initscript', initscript['initscript_fields']['name']

    def display_key_spec (self, key):
        print '+ ======== key', key['key_name']

    def display_slice_spec (self, slice):
        print '+ ======== slice', slice['slice_fields']['name']
        for (k, v) in slice.iteritems():
            if k == 'nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename, '',
                    print ''
            elif k == 'usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username, '',
                    print ''
            elif k == 'slice_fields':
                print '+ fields', ':',
                print 'max_nodes=', v['max_nodes'],
                print ''
            else:
                print '+ ', k, ':', v

    def display_node_spec (self, node):
        print "+ node=%s host_box=%s" % (node['name'], node['host_box']),
        print "hostname=", node['node_fields']['hostname'],
        print "ip=", node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details", node, depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping (self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc (plc_spec):
        print '+ MyPLC', plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s' % (plc_spec['host_box'], plc_spec['vservername'])
        print '+\tIP = %s/%s' % (plc_spec['PLC_API_HOST'], plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node (node_spec):
        print '+ NODE %s' % (node_spec['name'])
        print '+\tqemu box %s' % node_spec['host_box']
        print '+\thostname=%s' % node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs (self):
        "Create a timestamp to remember creation date for this plc"
        now = int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path = self.vm_timestamp_path()
        stamp_dir = os.path.dirname(stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s" % stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s" % (now, stamp_path))) == 0
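    # e.g. (a sketch) for a vserver-based plc named 'vplc01' this leaves
    #   /vservers/vplc01.timestamp
    # on the host box, containing the creation time as seconds since the epoch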
    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete (self):
        "vserver delete the test myplc"
        stamp_path = self.vm_timestamp_path()
        self.run_in_host("rm -f %s" % stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("lxc-stop --name %s" % self.vservername)
            self.run_in_host("lxc-destroy --name %s" % self.vservername)
            return True
        else:
            self.run_in_host("vserver --silent %s delete" % self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create (self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir = os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir = "."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir = "build"
        # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
        self.test_ssh.rmdir(build_dir)
        self.test_ssh.copy(build_dir, recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options = ""
        test_env_options += " -p %s" % self.options.personality
        test_env_options += " -d %s" % self.options.pldistro
        test_env_options += " -f %s" % self.options.fcdistro
        if self.options.plcs_use_lxc:
            script = "vtest-init-lxc.sh"
        else:
            script = "vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options = "--netdev eth0 --interface %s" % self.vserverip
        try:
            vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s" % vserver_hostname
        except:
            print "Cannot reverse lookup %s" % self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver = "%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s" % locals()
        return self.run_in_host(create_vserver) == 0
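    # the resulting command looks something like (a sketch - values depend on the options):
    #   build/vtest-init-lxc.sh -p linux64 -d onelab -f f14 vplc01 <repo_url> \
    #       -- --netdev eth0 --interface <vserverip> --hostname <reverse-lookup>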
    def plc_install (self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily
        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception, "Unsupported personality %r" % self.options.personality
        nodefamily = "%s-%s-%s" % (self.options.pldistro, self.options.fcdistro, arch)

        pkgs_list = []
        pkgs_list.append("slicerepo-%s" % nodefamily)
        pkgs_list.append("myplc")
        pkgs_list.append("noderepo-%s" % nodefamily)
        pkgs_list.append("nodeimage-%s-plain" % nodefamily)
        return self.yum_install(pkgs_list)
    def plc_configure (self):
        "run plc-config-tty"
        tmpname = '%s.plc-config-tty' % (self.name())
        fileconf = open(tmpname,'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write('e %s\n%s\n' % (var, self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s' % tmpname)
        self.run_in_guest_piped('cat %s' % tmpname, 'plc-config-tty')
        utils.system('rm %s' % tmpname)
        return True
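    # the generated file drives plc-config-tty's interactive prompt; it reads like
    # (a sketch, with made-up values):
    #   e PLC_NAME
    #   TestLab
    #   e PLC_MAIL_SUPPORT_ADDRESS
    #   support@example.org
    #   w
    #   q
    # i.e. 'e <var>' edits one variable (the next line being its value),
    # 'w' writes the configuration, and 'q' quits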
    def plc_start (self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop (self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start (self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop (self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store (self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self, key_spec).store_key()
        return True

    def keys_clean (self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True
    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch (self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir = "./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername = self.vservername
        vm_root = self.vm_root_in_host()
        overall = True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ]:
            src = "%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s" % locals()
            dst = "keys/%(vservername)s-debug.%(ext)s" % locals()
            if self.test_ssh.fetch(src, dst) != 0: overall = False
        return overall
703 "create sites with PLCAPI"
704 return self.do_sites()
706 def delete_sites (self):
707 "delete sites with PLCAPI"
708 return self.do_sites(action="delete")
710 def do_sites (self,action="add"):
711 for site_spec in self.plc_spec['sites']:
712 test_site = TestSite (self,site_spec)
713 if (action != "add"):
714 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
715 test_site.delete_site()
716 # deleted with the site
717 #test_site.delete_users()
720 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
721 test_site.create_site()
722 test_site.create_users()
    def delete_all_sites (self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root', self.auth_root()
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
        for site in sites:
            # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
            if site['login_base'] == self.plc_spec['PLC_SLICE_PREFIX']: continue
            site_id = site['site_id']
            print 'Deleting site_id', site_id
            self.apiserver.DeleteSite(self.auth_root(), site_id)
        return True
738 "create nodes with PLCAPI"
739 return self.do_nodes()
740 def delete_nodes (self):
741 "delete nodes with PLCAPI"
742 return self.do_nodes(action="delete")
744 def do_nodes (self,action="add"):
745 for site_spec in self.plc_spec['sites']:
746 test_site = TestSite (self,site_spec)
748 utils.header("Deleting nodes in site %s"%test_site.name())
749 for node_spec in site_spec['nodes']:
750 test_node=TestNode(self,test_site,node_spec)
751 utils.header("Deleting %s"%test_node.name())
752 test_node.delete_node()
754 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
755 for node_spec in site_spec['nodes']:
756 utils.pprint('Creating node %s'%node_spec,node_spec)
757 test_node = TestNode (self,test_site,node_spec)
758 test_node.create_node ()
    def nodegroups (self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups (self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    # spec timestamps below one year are interpreted as relative to 'start'
    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp (start, grain, timestamp):
        if timestamp < TestPlc.YEAR: return start + timestamp*grain
        else: return timestamp

    @staticmethod
    def timestamp_printable (timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
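    # a worked example (sketch): with grain=1800 as returned by GetLeaseGranularity
    # and start=(now/grain)*grain, a spec timestamp of 2 - being far below YEAR -
    # translates into start+2*1800, i.e. 'two grains from now'; a full epoch
    # timestamp like 1300000000 is above YEAR and is kept as-is (absolute)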
779 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
781 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
782 print 'API answered grain=',grain
783 start=(now/grain)*grain
785 # find out all nodes that are reservable
786 nodes=self.all_reservable_nodenames()
788 utils.header ("No reservable node found - proceeding without leases")
791 # attach them to the leases as specified in plc_specs
792 # this is where the 'leases' field gets interpreted as relative of absolute
793 for lease_spec in self.plc_spec['leases']:
794 # skip the ones that come with a null slice id
795 if not lease_spec['slice']: continue
796 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
797 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
798 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
799 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
800 if lease_addition['errors']:
801 utils.header("Cannot create leases, %s"%lease_addition['errors'])
804 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
805 (nodes,lease_spec['slice'],
806 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
807 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases (self):
        "remove all leases in the myplc side"
        lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root()) ]
        utils.header("Cleaning leases %r" % lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
        return True

    def list_leases (self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now = int(time.time())
        for l in leases:
            current = l['t_until'] >= now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s" % (l['hostname'], l['name'],
                                                         TestPlc.timestamp_printable(l['t_from']),
                                                         TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups (self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames = node_spec['nodegroups']
                    if isinstance(nodegroupnames, StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname] = []
                        groups_dict[nodegroupname].append(test_node.name())
        auth = self.auth_root()
        overall = True
        for (nodegroupname, group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:', 'dealing with nodegroup', nodegroupname, 'on nodes', group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth, {'tagname': nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname': nodegroupname,
                                                             'description': 'for nodegroup %s' % nodegroupname,
                                                             'category': 'test'})
                print 'located tag (type)', nodegroupname, 'as', tag_type_id
                # create nodegroup if needed
                nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname': nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup', nodegroupname, 'from tagname', nodegroupname, 'and value', 'yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node', nodename, 'seems to already have tag', nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname': nodename,
                                                                 'tagname': nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node', nodename, 'got', expect_yes
                            overall = False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag', nodegroupname, 'on node', nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup', nodegroupname
                    self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                except:
                    traceback.print_exc()
                    overall = False
        return overall
    # a list of TestNode objs
    def all_nodes (self):
        nodes = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode(self, test_site, node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos (self):
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box']) \
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames (self):
        res = []
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields = node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes, period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now() + datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r" % tocheck)
        # create a dict hostname -> status
        status = dict([ (hostname,'undef') for hostname in tocheck ])
        while tocheck:
            # get their status
            tocheck_status = self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state'])
            # update status
            for array in tocheck_status:
                hostname = array['hostname']
                boot_state = array['boot_state']
                if boot_state == target_boot_state:
                    utils.header("%s has reached the %s state" % (hostname, target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec, node_spec) = self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored" % (hostname, boot_state))
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header("%s still in '%s' state" % (hostname, boot_state))
                        graceout = datetime.datetime.now() + datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh the list of nodes still to check
            tocheck = [ hostname for (hostname, boot_state) in status.iteritems() if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state" % (hostname, status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted (self):
        return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
    def check_nodes_ssh (self, debug, timeout_minutes, silent_minutes, period=15):
        # compute timeouts
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now() + datetime.timedelta(minutes=silent_minutes)
        vservername = self.vservername
        if debug:
            message = "debug"
            local_key = "keys/%(vservername)s-debug.rsa" % locals()
        else:
            message = "boot"
            local_key = "keys/key_admin.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:" % message)
        for (nodename, qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s" % (nodename, qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)" % \
                     (timeout_minutes, silent_minutes, period))
        while node_infos:
            # iterate over a copy - we remove entries from node_infos inside the loop
            for node_info in node_infos[:]:
                (hostname, qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh(hostname, key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system(command, silent=datetime.datetime.now() < graceout)
                if success == 0:
                    utils.header('Successfully entered root@%s (%s)' % (hostname, message))
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec, node_spec) = self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING : check ssh access into real node %s - skipped" % hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname, qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)" % (hostname, qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug (self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True,
                                    timeout_minutes=self.ssh_node_debug_timeout,
                                    silent_minutes=self.ssh_node_debug_silent)

    def ssh_node_boot (self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False,
                                    timeout_minutes=self.ssh_node_boot_timeout,
                                    silent_minutes=self.ssh_node_boot_silent)
    @node_mapper
    def qemu_local_init (self): pass
    @node_mapper
    def bootcd (self): pass
    @node_mapper
    def qemu_local_config (self): pass
    @node_mapper
    def nodestate_reinstall (self): pass
    @node_mapper
    def nodestate_safeboot (self): pass
    @node_mapper
    def nodestate_boot (self): pass
    @node_mapper
    def nodestate_show (self): pass
    @node_mapper
    def qemu_export (self): pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node (self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver (self):
        return self.locate_first_sliver().check_hooks()

    def check_hooks (self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts (self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp = slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site, node) = self.locate_node(nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite(self, site)
                test_slice = TestSlice(self, test_site, slice_spec)
                test_node = TestNode(self, test_site, node)
                test_sliver = TestSliver(self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts (self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()
    def initscripts (self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s' % self.plc_spec['name'], initscript)
            self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
        return True

    def delete_initscripts (self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s' % (initscript_name, self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
                print initscript_name, 'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1098 "create slices with PLCAPI"
1099 return self.do_slices(action="add")
1101 def delete_slices (self):
1102 "delete slices with PLCAPI"
1103 return self.do_slices(action="delete")
1105 def fill_slices (self):
1106 "add nodes in slices with PLCAPI"
1107 return self.do_slices(action="fill")
1109 def empty_slices (self):
1110 "remove nodes from slices with PLCAPI"
1111 return self.do_slices(action="empty")
1113 def do_slices (self, action="add"):
1114 for slice in self.plc_spec['slices']:
1115 site_spec = self.locate_site (slice['sitename'])
1116 test_site = TestSite(self,site_spec)
1117 test_slice=TestSlice(self,test_site,slice)
1118 if action == "delete":
1119 test_slice.delete_slice()
1120 elif action=="fill":
1121 test_slice.add_nodes()
1122 elif action=="empty":
1123 test_slice.delete_nodes()
1125 test_slice.create_slice()
    @slice_mapper
    def ssh_slice (self): pass
    @slice_mapper
    def ssh_slice_off (self): pass

    @slice_mapper
    def check_vsys_defaults (self): pass

    # assumed to map on TestNode (clears the node's entry in known_hosts)
    @node_mapper
    def keys_clear_known_hosts (self): pass
    def speed_up_slices (self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template = "%s.nodemanager" % self.name()
        template_file = open(template, "w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        # declare the conf file to the API so nodes pick it up
        self.apiserver.AddConfFile(self.auth_root(),
                                   {'dest': '/etc/sysconfig/nodemanager',
                                    'source': 'PlanetLabConf/nodemanager',
                                    'postinstall_cmd': 'service nm restart',})
        return True
    def debug_nodemanager (self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        template = "%s.nodemanager" % self.name()
        template_file = open(template, "w")
        template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        template_file.close()
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        return True

    @node_mapper
    def qemu_start (self): pass

    @node_mapper
    def timestamp_qemu (self): pass
    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
        for plc in [ self ] + other_plcs:
            try:
                return plc.locate_sliver_obj(nodename, slicename)
            except:
                pass
        raise Exception, "Cannot locate sliver %s@%s among all PLCs" % (nodename, slicename)
    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp (self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header("check_tcp: no/empty config found")
            return True
        specs = self.plc_spec['tcp_specs']
        overall = True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
            if not s_test_sliver.run_tcp_server(port, timeout=20):
                overall = False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
            # use nodename from located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
            else:
                destination = s_test_sliver.test_node.name()
            if not c_test_sliver.run_tcp_client(destination, port):
                overall = False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_system_slice (self):
        "all nodes: check that a system slice is alive"
        # netflow currently not working in the lxc distro
        # drl not built at all in the wtx distro
        # if we find either of them we're happy
        return self.check_netflow() or self.check_drl()

    def check_netflow (self): return self._check_system_slice('netflow')
    def check_drl (self): return self._check_system_slice('drl')

    # we have the slices up already here, so it should not take too long
    def _check_system_slice (self, slicename, timeout_minutes=5, period=15):
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        test_nodes = self.all_nodes()
        while test_nodes:
            # iterate over a copy - we remove entries from test_nodes inside the loop
            for test_node in test_nodes[:]:
                if test_node._check_system_slice(slicename, dry_run=self.options.dry_run):
                    test_nodes.remove(test_node)
            if not test_nodes:
                return True
            if datetime.datetime.now() > timeout:
                for test_node in test_nodes:
                    utils.header("can't find system slice %s in %s" % (slicename, test_node.name()))
                return False
            time.sleep(period)
        return True
    def plcsh_stress_test (self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "%s/%s" % (self.vm_root_in_host(), location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return (self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install_all (self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core (self):
        "yum install sfa"
        return self.yum_install("sfa")

    def sfa_install_plc (self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_sfatables (self):
        "yum install sfa-sfatables"
        return self.yum_install("sfa-sfatables")
    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client (self):
        "yum install sfa-client"
        first_try = self.yum_install("sfa-client")
        if first_try: return True
        utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
        (code, cached_rpm_path) = utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
        utils.header("rpm_path=<<%s>>" % cached_rpm_path)
        self.run_in_guest("rpm -i %s" % cached_rpm_path)
        return self.yum_check_installed("sfa-client")
    def sfa_dbclean (self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin reg nuke") == 0 or \
               self.run_in_guest("sfa-nuke.py") == 0 or \
               self.run_in_guest("sfa-nuke-plc.py") == 0

    def sfa_fsclean (self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
        return True
    def sfa_plcclean (self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec = self.plc_spec['sfa']

        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base = auth_sfa_spec['login_base']
            try: self.apiserver.DeleteSite(self.auth_root(), login_base)
            except: print "Site %s already absent from PLC db" % login_base

            for spec_name in ['pi_spec','user_spec']:
                user_spec = auth_sfa_spec[spec_name]
                username = user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(), username)
                except:
                    # this in fact is expected as sites delete their members
                    #print "User %s already absent from PLC db"%username
                    pass

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True
    def sfa_uninstall (self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest (self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests") != 0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0
    def confdir (self):
        dirname = "conf.%s" % self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s" % dirname)
        if not os.path.isdir(dirname):
            raise Exception, "Cannot create config dir for plc %s" % self.name()
        return dirname

    def conffile (self, filename):
        return "%s/%s" % (self.confdir(), filename)

    def confsubdir (self, dirname, clean, dry_run=False):
        subdirname = "%s/%s" % (self.confdir(), dirname)
        if clean:
            utils.system("rm -rf %s" % subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s" % subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception, "Cannot create config subdir %s for plc %s" % (dirname, self.name())
        return subdirname

    def conffile_clean (self, filename):
        filename = self.conffile(filename)
        return utils.system("rm -rf %s" % filename) == 0
    def sfa_configure (self):
        "run sfa-config-tty"
        tmpname = self.conffile("sfa-config-tty")
        fileconf = open(tmpname,'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write('e %s\n%s\n' % (var, self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in []:
            val = 'false'
            if self.plc_spec['sfa'][var]: val = 'true'
            fileconf.write('e %s\n%s\n' % (var, val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s' % tmpname)
        self.run_in_guest_piped('cat %s' % tmpname, 'sfa-config-tty')
        return True
    def aggregate_xml_line (self):
        port = self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
               (self.vserverip, self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'], port)

    def registry_xml_line (self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
               (self.vserverip, self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
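    # each peer plc contributes one such line; the files assembled below end up
    # looking like (a sketch, with made-up values):
    #   <aggregates><aggregate addr="10.0.0.2" hrn="onelab" port="12346"/></aggregates>
    #   <registries><registry addr="10.0.0.2" hrn="onelab" port="12345"/></registries>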
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure (self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname = self.conffile("agg.xml")
        file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
                                  " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header("(Over)wrote %s" % agg_fname)
        reg_fname = self.conffile("reg.xml")
        file(reg_fname,"w").write("<registries>%s</registries>\n" % \
                                  " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header("(Over)wrote %s" % reg_fname)
        return self.test_ssh.copy_abs(agg_fname, '/%s/etc/sfa/aggregates.xml' % self.vm_root_in_host()) == 0 \
           and self.test_ssh.copy_abs(reg_fname, '/%s/etc/sfa/registries.xml' % self.vm_root_in_host()) == 0
    def sfa_import (self):
        "use sfaadmin to import from plc"
        auth = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return self.run_in_guest('sfaadmin reg import_registry') == 0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start (self):
        "service sfa start"
        return self.run_in_guest('service sfa start') == 0
    def sfi_configure (self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec = self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            dir_basename = os.path.basename(test_slice.sfi_path())
            dir_name = self.confsubdir("dot-sfi/%s" % dir_basename, clean=True, dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote = "%s/%s" % (self.vm_root_in_host(), location)
            self.test_ssh.mkdir(remote, abs=True)
            # need to strip last level or remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean (self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @auth_sfa_mapper
    def sfa_add_site (self): pass
    @auth_sfa_mapper
    def sfa_add_pi (self): pass
    @auth_sfa_mapper
    def sfa_add_user (self): pass
    @auth_sfa_mapper
    def sfa_update_user (self): pass
    @auth_sfa_mapper
    def sfa_add_slice (self): pass
    @auth_sfa_mapper
    def sfa_renew_slice (self): pass
    @auth_sfa_mapper
    def sfa_discover (self): pass
    @auth_sfa_mapper
    def sfa_create_slice (self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc (self): pass
    @auth_sfa_mapper
    def sfa_update_slice (self): pass
    @auth_sfa_mapper
    def sfi_list (self): pass
    @auth_sfa_mapper
    def sfi_show (self): pass
    @auth_sfa_mapper
    def sfi_slices (self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa (self): pass
    @auth_sfa_mapper
    def sfa_delete_user (self): pass
    @auth_sfa_mapper
    def sfa_delete_slice (self): pass
    def sfa_stop (self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate (self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "%s/%s" % (self.vm_root_in_host(), location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return (local and remote)
    def gather_logs (self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        # (1.a)
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs()
        # (1.b)
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs()
        # (1.c)
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi()
        # (2)
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_node.gather_qemu_logs()
        # (3)
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        # (4)
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs (self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s" % test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -" % test_sliver.name()
            utils.system(command)
        return True

    def gather_var_logs (self):
        utils.system("mkdir -p logs/myplc.var-log.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -" % self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd" % self.name()
        utils.system(command)

    def gather_pgsql_logs (self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -" % self.name()
        utils.system(command)

    def gather_root_sfi (self):
        utils.system("mkdir -p logs/sfi.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.%s -xf -" % self.name()
        utils.system(command)

    def gather_nodes_var_logs (self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile (self, database):
        # uses options.dbname if it is found
        try:
            name = self.options.dbname
            if not isinstance(name, StringTypes):
                raise Exception
        except:
            # otherwise fall back on today's date
            t = datetime.datetime.now()
            name = str(t.date())
        return "/root/%s-%s.sql" % (database, name)
    def plc_db_dump (self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in %s' % dump)
        return True

    def plc_db_restore (self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
        ## stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        ## restart httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20 (self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass