1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from datetime import datetime, timedelta
10 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
24 from Completer import Completer, CompleterTask
26 # step methods must take (self) and return a boolean (options is a member of the class)
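# (illustration, not from the original code) a minimal step obeying that
# convention could look like this - 'my_step' is a made-up name:
#     def my_step (self):
#         "one-line doc, like the other steps"
#         if self.options.dry_run: return True
#         return self.run_in_guest("true") == 0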
28 def standby(minutes,dry_run):
29 utils.header('Entering StandBy for %d mn'%minutes)
33 time.sleep(60*minutes)
36 def standby_generic (func):
38 minutes=int(func.__name__.split("_")[1])
39 return standby(minutes,self.options.dry_run)
42 def node_mapper (method):
43 def actual(self,*args, **kwds):
45 node_method = TestNode.__dict__[method.__name__]
46 for test_node in self.all_nodes():
47 if not node_method(test_node, *args, **kwds): overall=False
49 # restore the doc text
50 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
53 def slice_mapper (method):
56 slice_method = TestSlice.__dict__[method.__name__]
57 for slice_spec in self.plc_spec['slices']:
58 site_spec = self.locate_site (slice_spec['sitename'])
59 test_site = TestSite(self,site_spec)
60 test_slice=TestSlice(self,test_site,slice_spec)
61 if not slice_method(test_slice,self.options): overall=False
63 # restore the doc text
64 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
67 def auth_sfa_mapper (method):
70 auth_method = TestAuthSfa.__dict__[method.__name__]
71 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
72 test_auth=TestAuthSfa(self,auth_spec)
73 if not auth_method(test_auth,self.options): overall=False
75 # restore the doc text
76 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
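# (illustration, not from the original code) these mappers are intended to be
# used as decorators on empty step stubs, so that a node-level step delegates
# to the same-named TestNode method on every node, e.g.:
#     @node_mapper
#     def qemu_local_init (self): pass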
86 'vs_delete','timestamp_vs','vs_create', SEP,
87 'plc_install', 'plc_configure', 'plc_start', SEP,
88 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
89 'plcapi_urls','speed_up_slices', SEP,
90 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
91 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
92 # keep this out of the way for now
93 # 'check_vsys_defaults', SEP,
94 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
95 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
96 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
97 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
98 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
99 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
100 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
101 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
102 # but as the stress test might take a while, we sometimes missed the debug mode..
103 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
104 'ssh_node_boot@1', 'node_bmlogs@1', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts', SEP,
105 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
106 'cross_check_tcp@1', 'check_system_slice', SEP,
107 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
108 'force_gather_logs', SEP,
111 'export', 'show_boxes', SEP,
112 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
113 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
114 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
115 'delete_leases', 'list_leases', SEP,
117 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
118 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
119 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
120 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
121 'plc_db_dump' , 'plc_db_restore', SEP,
122 'check_netflow','check_drl', SEP,
123 'debug_nodemanager', SEP,
124 'standby_1_through_20',SEP,
128 def printable_steps (list):
129 single_line=" ".join(list)+" "
130 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
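# (illustration) printable_steps turns each separator into a backslash-newline,
# so e.g. printable_steps(['plc_install','plc_configure',SEP,'plc_start'])
# prints roughly as:
#     plc_install plc_configure \
#     plc_start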
132 def valid_step (step):
133 return step != SEP and step != SEPSFA
135 # turn off the sfa-related steps when build has skipped SFA
136 # this was originally for centos5 but is still valid
137 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
139 def check_whether_build_has_sfa (rpms_url):
140 utils.header ("Checking if build provides SFA package...")
141 # warning, we're now building 'sface' so let's be a bit more picky
142 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
143 # full builds are expected to return with 0 here
145 utils.header("build does provide SFA")
147 # move all steps containing 'sfa' from default_steps to other_steps
148 utils.header("SFA package not found - removing steps with sfa or sfi")
149 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
150 TestPlc.other_steps += sfa_steps
151 for step in sfa_steps: TestPlc.default_steps.remove(step)
153 def __init__ (self,plc_spec,options):
154 self.plc_spec=plc_spec
156 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
157 self.vserverip=plc_spec['vserverip']
158 self.vservername=plc_spec['vservername']
159 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
160 self.apiserver=TestApiserver(self.url,options.dry_run)
161 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
162 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
164 def has_addresses_api (self):
165 return self.apiserver.has_method('AddIpAddress')
168 name=self.plc_spec['name']
169 return "%s.%s"%(name,self.vservername)
172 return self.plc_spec['host_box']
175 return self.test_ssh.is_local()
177 # define the API methods on this object through xmlrpc
178 # would help, but not strictly necessary
182 def actual_command_in_guest (self,command):
183 return self.test_ssh.actual_command(self.host_to_guest(command))
185 def start_guest (self):
186 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
188 def stop_guest (self):
189 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
191 def run_in_guest (self,command):
192 return utils.system(self.actual_command_in_guest(command))
194 def run_in_host (self,command):
195 return self.test_ssh.run_in_buildname(command)
197 #command gets run in the plc's vm
198 def host_to_guest(self,command):
199 if self.options.plcs_use_lxc:
200 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
202 return "vserver %s exec %s"%(self.vservername,command)
204 def vm_root_in_host(self):
205 if self.options.plcs_use_lxc:
206 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
208 return "/vservers/%s"%(self.vservername)
210 def vm_timestamp_path (self):
211 if self.options.plcs_use_lxc:
212 return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
214 return "/vservers/%s.timestamp"%(self.vservername)
216 #start/stop the vserver
217 def start_guest_in_host(self):
218 if self.options.plcs_use_lxc:
219 return "lxc-start --daemon --name=%s"%(self.vservername)
221 return "vserver %s start"%(self.vservername)
223 def stop_guest_in_host(self):
224 if self.options.plcs_use_lxc:
225 return "lxc-stop --name=%s"%(self.vservername)
227 return "vserver %s stop"%(self.vservername)
230 def run_in_guest_piped (self,local,remote):
231 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
233 def yum_check_installed (self, rpms):
234 if isinstance (rpms, list):
236 return self.run_in_guest("rpm -q %s"%rpms)==0
238 # does a yum install in the vs, ignore yum retcod, check with rpm
239 def yum_install (self, rpms):
240 if isinstance (rpms, list):
242 self.run_in_guest("yum -y install %s"%rpms)
243 # yum-complete-transaction comes with yum-utils, which is in vtest.pkgs
244 self.run_in_guest("yum-complete-transaction -y")
245 return self.yum_check_installed (rpms)
247 def auth_root (self):
248 return {'Username':self.plc_spec['PLC_ROOT_USER'],
249 'AuthMethod':'password',
250 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
251 'Role' : self.plc_spec['role']
253 def locate_site (self,sitename):
254 for site in self.plc_spec['sites']:
255 if site['site_fields']['name'] == sitename:
257 if site['site_fields']['login_base'] == sitename:
259 raise Exception,"Cannot locate site %s"%sitename
261 def locate_node (self,nodename):
262 for site in self.plc_spec['sites']:
263 for node in site['nodes']:
264 if node['name'] == nodename:
266 raise Exception,"Cannot locate node %s"%nodename
268 def locate_hostname (self,hostname):
269 for site in self.plc_spec['sites']:
270 for node in site['nodes']:
271 if node['node_fields']['hostname'] == hostname:
273 raise Exception,"Cannot locate hostname %s"%hostname
275 def locate_key (self,key_name):
276 for key in self.plc_spec['keys']:
277 if key['key_name'] == key_name:
279 raise Exception,"Cannot locate key %s"%key_name
281 def locate_private_key_from_key_names (self, key_names):
282 # locate the first avail. key
284 for key_name in key_names:
285 key_spec=self.locate_key(key_name)
286 test_key=TestKey(self,key_spec)
287 publickey=test_key.publicpath()
288 privatekey=test_key.privatepath()
289 if os.path.isfile(publickey) and os.path.isfile(privatekey):
291 if found: return privatekey
294 def locate_slice (self, slicename):
295 for slice in self.plc_spec['slices']:
296 if slice['slice_fields']['name'] == slicename:
298 raise Exception,"Cannot locate slice %s"%slicename
300 def all_sliver_objs (self):
302 for slice_spec in self.plc_spec['slices']:
303 slicename = slice_spec['slice_fields']['name']
304 for nodename in slice_spec['nodenames']:
305 result.append(self.locate_sliver_obj (nodename,slicename))
308 def locate_sliver_obj (self,nodename,slicename):
309 (site,node) = self.locate_node(nodename)
310 slice = self.locate_slice (slicename)
312 test_site = TestSite (self, site)
313 test_node = TestNode (self, test_site,node)
314 # xxx the slice site is assumed to be the node site - mhh - probably harmless
315 test_slice = TestSlice (self, test_site, slice)
316 return TestSliver (self, test_node, test_slice)
318 def locate_first_node(self):
319 nodename=self.plc_spec['slices'][0]['nodenames'][0]
320 (site,node) = self.locate_node(nodename)
321 test_site = TestSite (self, site)
322 test_node = TestNode (self, test_site,node)
325 def locate_first_sliver (self):
326 slice_spec=self.plc_spec['slices'][0]
327 slicename=slice_spec['slice_fields']['name']
328 nodename=slice_spec['nodenames'][0]
329 return self.locate_sliver_obj(nodename,slicename)
331 # all different hostboxes used in this plc
332 def gather_hostBoxes(self):
333 # maps on sites and nodes, return [ (host_box,test_node) ]
335 for site_spec in self.plc_spec['sites']:
336 test_site = TestSite (self,site_spec)
337 for node_spec in site_spec['nodes']:
338 test_node = TestNode (self, test_site, node_spec)
339 if not test_node.is_real():
340 tuples.append( (test_node.host_box(),test_node) )
341 # transform into a dict { 'host_box' -> [ test_node .. ] }
343 for (box,node) in tuples:
344 if not result.has_key(box):
347 result[box].append(node)
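# (illustration, hostnames made up) the resulting dict maps each qemu host box
# to its test nodes, e.g.
#     { 'testbox1.example.org' : [ <TestNode vnode1>, <TestNode vnode2> ] }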
350 # a step for checking this stuff
351 def show_boxes (self):
352 'print summary of nodes location'
353 for (box,nodes) in self.gather_hostBoxes().iteritems():
354 print box,":"," + ".join( [ node.name() for node in nodes ] )
357 # make this a valid step
358 def qemu_kill_all(self):
359 'kill all qemu instances on the qemu boxes involved by this setup'
360 # this is the brute force version, kill all qemus on that host box
361 for (box,nodes) in self.gather_hostBoxes().iteritems():
362 # pass the first nodename, as we don't push template-qemu on testboxes
363 nodedir=nodes[0].nodedir()
364 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
367 # make this a valid step
368 def qemu_list_all(self):
369 'list all qemu instances on the qemu boxes involved by this setup'
370 for (box,nodes) in self.gather_hostBoxes().iteritems():
371 # this is the brute force version, list all qemus on that host box
372 TestBoxQemu(box,self.options.buildname).qemu_list_all()
375 # kill only the right qemus
376 def qemu_list_mine(self):
377 'list qemu instances for our nodes'
378 for (box,nodes) in self.gather_hostBoxes().iteritems():
379 # the fine-grain version
384 # kill only the right qemus
385 def qemu_kill_mine(self):
386 'kill the qemu instances for our nodes'
387 for (box,nodes) in self.gather_hostBoxes().iteritems():
388 # the fine-grain version
393 #################### display config
395 "show test configuration after localization"
400 # ugly hack to make sure 'run export' only reports about the 1st plc
401 # to avoid confusion - also we use 'inri_slice1' in various aliases..
404 "print cut'n paste-able stuff to export env variables to your shell"
405 # guess local domain from hostname
406 if TestPlc.exported_id>1:
407 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
409 TestPlc.exported_id+=1
410 domain=socket.gethostname().split('.',1)[1]
411 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
412 print "export BUILD=%s"%self.options.buildname
413 if self.options.plcs_use_lxc:
414 print "export PLCHOSTLXC=%s"%fqdn
416 print "export PLCHOSTVS=%s"%fqdn
417 print "export GUESTNAME=%s"%self.plc_spec['vservername']
418 vplcname=self.plc_spec['vservername'].split('-')[-1]
419 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
420 # find hostname of first node
421 (hostname,qemubox) = self.all_node_infos()[0]
422 print "export KVMHOST=%s.%s"%(qemubox,domain)
423 print "export NODE=%s"%(hostname)
427 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
428 def show_pass (self,passno):
429 for (key,val) in self.plc_spec.iteritems():
430 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
434 self.display_site_spec(site)
435 for node in site['nodes']:
436 self.display_node_spec(node)
437 elif key=='initscripts':
438 for initscript in val:
439 self.display_initscript_spec (initscript)
442 self.display_slice_spec (slice)
445 self.display_key_spec (key)
447 if key not in ['sites','initscripts','slices','keys', 'sfa']:
448 print '+ ',key,':',val
450 def display_site_spec (self,site):
451 print '+ ======== site',site['site_fields']['name']
452 for (k,v) in site.iteritems():
453 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
456 print '+ ','nodes : ',
458 print node['node_fields']['hostname'],'',
464 print user['name'],'',
466 elif k == 'site_fields':
467 print '+ login_base',':',v['login_base']
468 elif k == 'address_fields':
474 def display_initscript_spec (self,initscript):
475 print '+ ======== initscript',initscript['initscript_fields']['name']
477 def display_key_spec (self,key):
478 print '+ ======== key',key['key_name']
480 def display_slice_spec (self,slice):
481 print '+ ======== slice',slice['slice_fields']['name']
482 for (k,v) in slice.iteritems():
495 elif k=='slice_fields':
496 print '+ fields',':',
497 print 'max_nodes=',v['max_nodes'],
502 def display_node_spec (self,node):
503 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
504 print "hostname=",node['node_fields']['hostname'],
505 print "ip=",node['interface_fields']['ip']
506 if self.options.verbose:
507 utils.pprint("node details",node,depth=3)
509 # another entry point for just showing the boxes involved
510 def display_mapping (self):
511 TestPlc.display_mapping_plc(self.plc_spec)
515 def display_mapping_plc (plc_spec):
516 print '+ MyPLC',plc_spec['name']
517 # WARNING this would not be right for lxc-based PLC's - should be harmless though
518 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
519 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
520 for site_spec in plc_spec['sites']:
521 for node_spec in site_spec['nodes']:
522 TestPlc.display_mapping_node(node_spec)
525 def display_mapping_node (node_spec):
526 print '+ NODE %s'%(node_spec['name'])
527 print '+\tqemu box %s'%node_spec['host_box']
528 print '+\thostname=%s'%node_spec['node_fields']['hostname']
530 # write a timestamp in /vservers/<>.timestamp
531 # cannot be inside the vserver, that causes vserver .. build to cough
532 def timestamp_vs (self):
533 "Create a timestamp to remember creation date for this plc"
535 # TODO-lxc check this one
536 # a first approx. is to store the timestamp close to the VM root like vs does
537 stamp_path=self.vm_timestamp_path ()
538 stamp_dir = os.path.dirname (stamp_path)
539 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
540 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
542 # this is called unconditionally at the beginning of the test sequence
543 # just in case this is a rerun, so if the vm is not running it's fine
545 "vserver delete the test myplc"
546 stamp_path=self.vm_timestamp_path()
547 self.run_in_host("rm -f %s"%stamp_path)
548 if self.options.plcs_use_lxc:
549 self.run_in_host("lxc-stop --name %s"%self.vservername)
550 self.run_in_host("lxc-destroy --name %s"%self.vservername)
553 self.run_in_host("vserver --silent %s delete"%self.vservername)
557 # historically the build was being fetched by the tests
558 # now the build pushes itself as a subdir of the tests workdir
559 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
560 def vs_create (self):
561 "vserver creation (no install done)"
562 # push the local build/ dir to the testplc box
564 # a full path for the local calls
565 build_dir=os.path.dirname(sys.argv[0])
566 # sometimes this is empty - set to "." in such a case
567 if not build_dir: build_dir="."
568 build_dir += "/build"
570 # use a standard name - will be relative to remote buildname
572 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
573 self.test_ssh.rmdir(build_dir)
574 self.test_ssh.copy(build_dir,recursive=True)
575 # the repo url is taken from arch-rpms-url
576 # with the last step (i386) removed
577 repo_url = self.options.arch_rpms_url
578 for level in [ 'arch' ]:
579 repo_url = os.path.dirname(repo_url)
580 # pass the vbuild-nightly options to vtest-init-vserver
582 test_env_options += " -p %s"%self.options.personality
583 test_env_options += " -d %s"%self.options.pldistro
584 test_env_options += " -f %s"%self.options.fcdistro
585 if self.options.plcs_use_lxc:
586 script="vtest-init-lxc.sh"
588 script="vtest-init-vserver.sh"
589 vserver_name = self.vservername
590 vserver_options="--netdev eth0 --interface %s"%self.vserverip
592 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
593 vserver_options += " --hostname %s"%vserver_hostname
595 print "Cannot reverse lookup %s"%self.vserverip
596 print "This is considered fatal, as this might pollute the test results"
598 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
599 return self.run_in_host(create_vserver) == 0
602 def plc_install(self):
603 "yum install myplc, noderepo, and the plain bootstrapfs"
605 # workaround for getting pgsql8.2 on centos5
606 if self.options.fcdistro == "centos5":
607 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
610 if self.options.personality == "linux32":
612 elif self.options.personality == "linux64":
615 raise Exception, "Unsupported personality %r"%self.options.personality
616 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
619 pkgs_list.append ("slicerepo-%s"%nodefamily)
620 pkgs_list.append ("myplc")
621 pkgs_list.append ("noderepo-%s"%nodefamily)
622 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
623 pkgs_string=" ".join(pkgs_list)
624 return self.yum_install (pkgs_list)
627 def plc_configure(self):
629 tmpname='%s.plc-config-tty'%(self.name())
630 fileconf=open(tmpname,'w')
631 for var in [ 'PLC_NAME',
636 'PLC_MAIL_SUPPORT_ADDRESS',
639 # Above line was added for integrating SFA Testing
645 'PLC_RESERVATION_GRANULARITY',
647 'PLC_OMF_XMPP_SERVER',
650 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
651 fileconf.write('w\n')
652 fileconf.write('q\n')
654 utils.system('cat %s'%tmpname)
655 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
656 utils.system('rm %s'%tmpname)
661 self.run_in_guest('service plc start')
666 self.run_in_guest('service plc stop')
670 "start the PLC vserver"
675 "stop the PLC vserver"
679 # stores the keys from the config for further use
680 def keys_store(self):
681 "stores test users ssh keys in keys/"
682 for key_spec in self.plc_spec['keys']:
683 TestKey(self,key_spec).store_key()
686 def keys_clean(self):
687 "removes keys cached in keys/"
688 utils.system("rm -rf ./keys")
691 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
692 # for later direct access to the nodes
693 def keys_fetch(self):
694 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
696 if not os.path.isdir(dir):
698 vservername=self.vservername
699 vm_root=self.vm_root_in_host()
701 prefix = 'debug_ssh_key'
702 for ext in [ 'pub', 'rsa' ] :
703 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
704 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
705 if self.test_ssh.fetch(src,dst) != 0: overall=False
709 "create sites with PLCAPI"
710 return self.do_sites()
712 def delete_sites (self):
713 "delete sites with PLCAPI"
714 return self.do_sites(action="delete")
716 def do_sites (self,action="add"):
717 for site_spec in self.plc_spec['sites']:
718 test_site = TestSite (self,site_spec)
719 if (action != "add"):
720 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
721 test_site.delete_site()
722 # deleted with the site
723 #test_site.delete_users()
726 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
727 test_site.create_site()
728 test_site.create_users()
731 def delete_all_sites (self):
732 "Delete all sites in PLC, and related objects"
733 print 'auth_root',self.auth_root()
734 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
736 # keep the automatic site - otherwise we'd shoot ourselves in the foot, as root_auth would no longer be valid
737 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
738 site_id=site['site_id']
739 print 'Deleting site_id',site_id
740 self.apiserver.DeleteSite(self.auth_root(),site_id)
744 "create nodes with PLCAPI"
745 return self.do_nodes()
746 def delete_nodes (self):
747 "delete nodes with PLCAPI"
748 return self.do_nodes(action="delete")
750 def do_nodes (self,action="add"):
751 for site_spec in self.plc_spec['sites']:
752 test_site = TestSite (self,site_spec)
754 utils.header("Deleting nodes in site %s"%test_site.name())
755 for node_spec in site_spec['nodes']:
756 test_node=TestNode(self,test_site,node_spec)
757 utils.header("Deleting %s"%test_node.name())
758 test_node.delete_node()
760 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
761 for node_spec in site_spec['nodes']:
762 utils.pprint('Creating node %s'%node_spec,node_spec)
763 test_node = TestNode (self,test_site,node_spec)
764 test_node.create_node ()
767 def nodegroups (self):
768 "create nodegroups with PLCAPI"
769 return self.do_nodegroups("add")
770 def delete_nodegroups (self):
771 "delete nodegroups with PLCAPI"
772 return self.do_nodegroups("delete")
776 def translate_timestamp (start,grain,timestamp):
777 if timestamp < TestPlc.YEAR: return start+timestamp*grain
778 else: return timestamp
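# (worked example, values made up) with start=1400000000 and grain=1800, a
# relative lease timestamp of 2 translates to 1400000000 + 2*1800 = 1400003600,
# while any value >= TestPlc.YEAR (threshold defined elsewhere in the class)
# is taken as an absolute UNIX timestamp and returned unchanged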
781 def timestamp_printable (timestamp):
782 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
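# (worked example) timestamp_printable(0) -> '01-01 00:00:00 UTC'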
785 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
787 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
788 print 'API answered grain=',grain
789 start=(now/grain)*grain
791 # find out all nodes that are reservable
792 nodes=self.all_reservable_nodenames()
794 utils.header ("No reservable node found - proceeding without leases")
797 # attach them to the leases as specified in plc_specs
798 # this is where the 'leases' field gets interpreted as relative or absolute
799 for lease_spec in self.plc_spec['leases']:
800 # skip the ones that come with a null slice id
801 if not lease_spec['slice']: continue
802 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
803 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
804 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
805 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
806 if lease_addition['errors']:
807 utils.header("Cannot create leases, %s"%lease_addition['errors'])
810 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
811 (nodes,lease_spec['slice'],
812 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
813 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
817 def delete_leases (self):
818 "remove all leases in the myplc side"
819 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
820 utils.header("Cleaning leases %r"%lease_ids)
821 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
824 def list_leases (self):
825 "list all leases known to the myplc"
826 leases = self.apiserver.GetLeases(self.auth_root())
829 current=l['t_until']>=now
830 if self.options.verbose or current:
831 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
832 TestPlc.timestamp_printable(l['t_from']),
833 TestPlc.timestamp_printable(l['t_until'])))
836 # create nodegroups if needed, and populate
837 def do_nodegroups (self, action="add"):
838 # 1st pass to scan contents
840 for site_spec in self.plc_spec['sites']:
841 test_site = TestSite (self,site_spec)
842 for node_spec in site_spec['nodes']:
843 test_node=TestNode (self,test_site,node_spec)
844 if node_spec.has_key('nodegroups'):
845 nodegroupnames=node_spec['nodegroups']
846 if isinstance(nodegroupnames,StringTypes):
847 nodegroupnames = [ nodegroupnames ]
848 for nodegroupname in nodegroupnames:
849 if not groups_dict.has_key(nodegroupname):
850 groups_dict[nodegroupname]=[]
851 groups_dict[nodegroupname].append(test_node.name())
852 auth=self.auth_root()
854 for (nodegroupname,group_nodes) in groups_dict.iteritems():
856 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
857 # first, check if the nodetagtype is here
858 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
860 tag_type_id = tag_types[0]['tag_type_id']
862 tag_type_id = self.apiserver.AddTagType(auth,
863 {'tagname':nodegroupname,
864 'description': 'for nodegroup %s'%nodegroupname,
866 print 'located tag (type)',nodegroupname,'as',tag_type_id
868 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
870 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
871 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
872 # set node tag on all nodes, value='yes'
873 for nodename in group_nodes:
875 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
877 traceback.print_exc()
878 print 'node',nodename,'seems to already have tag',nodegroupname
881 expect_yes = self.apiserver.GetNodeTags(auth,
882 {'hostname':nodename,
883 'tagname':nodegroupname},
884 ['value'])[0]['value']
885 if expect_yes != "yes":
886 print 'Mismatch node tag on node',nodename,'got',expect_yes
889 if not self.options.dry_run:
890 print 'Cannot find tag',nodegroupname,'on node',nodename
894 print 'cleaning nodegroup',nodegroupname
895 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
897 traceback.print_exc()
901 # a list of TestNode objs
902 def all_nodes (self):
904 for site_spec in self.plc_spec['sites']:
905 test_site = TestSite (self,site_spec)
906 for node_spec in site_spec['nodes']:
907 nodes.append(TestNode (self,test_site,node_spec))
910 # return a list of tuples (nodename,qemuname)
911 def all_node_infos (self) :
913 for site_spec in self.plc_spec['sites']:
914 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
915 for node_spec in site_spec['nodes'] ]
918 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
919 def all_reservable_nodenames (self):
921 for site_spec in self.plc_spec['sites']:
922 for node_spec in site_spec['nodes']:
923 node_fields=node_spec['node_fields']
924 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
925 res.append(node_fields['hostname'])
928 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
929 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period_seconds=15):
930 if self.options.dry_run:
934 class CompleterTaskBootState (CompleterTask):
935 def __init__ (self, test_plc,hostname):
936 self.test_plc=test_plc
937 self.hostname=hostname
938 self.last_boot_state='undef'
939 def actual_run (self):
941 node = self.test_plc.apiserver.GetNodes(self.test_plc.auth_root(), [ self.hostname ],
943 self.last_boot_state = node['boot_state']
944 return self.last_boot_state == target_boot_state
948 return "CompleterTaskBootState with node %s"%self.hostname
949 def failure_message (self):
950 return "node %s in state %s - expected %s"%(self.hostname,self.last_boot_state,target_boot_state)
952 timeout = timedelta(minutes=timeout_minutes)
953 graceout = timedelta(minutes=silent_minutes)
954 period = timedelta(seconds=period_seconds)
955 # the nodes that haven't checked yet - start with a full list and shrink over time
956 utils.header("checking nodes boot state (expected %s)"%target_boot_state)
957 tasks = [ CompleterTaskBootState (self,hostname) \
958 for (hostname,_) in self.all_node_infos() ]
959 return Completer (tasks).run (timeout, graceout, period)
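# (illustration) this is the recurring polling pattern in this file: a
# CompleterTask subclass provides actual_run()/failure_message(), and
# Completer(tasks).run(timeout, graceout, period) presumably retries the
# pending tasks every 'period' until 'timeout', keeping quiet during the
# initial 'graceout' - e.g. nodes_booted below ends up calling run() with
# timeout=30mn, graceout=28mn and the default 15s period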
961 def nodes_booted(self):
962 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
964 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
965 class CompleterTaskNodeSsh (CompleterTask):
966 def __init__ (self, hostname, qemuname, boot_state, local_key):
967 self.hostname=hostname
968 self.qemuname=qemuname
969 self.boot_state=boot_state
970 self.local_key=local_key
971 def run (self, silent):
972 command = TestSsh (self.hostname,key=self.local_key).actual_command("hostname;uname -a")
973 return utils.system (command, silent=silent)==0
974 def failure_message (self):
975 return "Cannot reach %s @ %s in %s mode"%(self.hostname, self.qemuname, self.boot_state)
978 timeout = timedelta(minutes=timeout_minutes)
979 graceout = timedelta(minutes=silent_minutes)
980 period = timedelta(seconds=period_seconds)
981 vservername=self.vservername
984 local_key = "keys/%(vservername)s-debug.rsa"%locals()
987 local_key = "keys/key_admin.rsa"
988 utils.header("checking ssh access to nodes (expected in %s mode)"%message)
989 node_infos = self.all_node_infos()
990 tasks = [ CompleterTaskNodeSsh (nodename, qemuname, message, local_key) \
991 for (nodename,qemuname) in node_infos ]
992 return Completer (tasks).run (timeout, graceout, period)
994 def ssh_node_debug(self):
995 "Tries to ssh into nodes in debug mode with the debug ssh key"
996 return self.check_nodes_ssh(debug=True,
997 timeout_minutes=self.ssh_node_debug_timeout,
998 silent_minutes=self.ssh_node_debug_silent)
1000 def ssh_node_boot(self):
1001 "Tries to ssh into nodes in production mode with the root ssh key"
1002 return self.check_nodes_ssh(debug=False,
1003 timeout_minutes=self.ssh_node_boot_timeout,
1004 silent_minutes=self.ssh_node_boot_silent)
1006 def node_bmlogs(self):
1007 "Checks that there's a non-empty dir. /var/log/bm/raw"
1008 return utils.system(self.actual_command_in_guest("ls /var/log/bm/raw"))==0
1011 def qemu_local_init (self): pass
1013 def bootcd (self): pass
1015 def qemu_local_config (self): pass
1017 def nodestate_reinstall (self): pass
1019 def nodestate_safeboot (self): pass
1021 def nodestate_boot (self): pass
1023 def nodestate_show (self): pass
1025 def qemu_export (self): pass
1027 ### check hooks : invoke scripts from hooks/{node,slice}
1028 def check_hooks_node (self):
1029 return self.locate_first_node().check_hooks()
1030 def check_hooks_sliver (self) :
1031 return self.locate_first_sliver().check_hooks()
1033 def check_hooks (self):
1034 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1035 return self.check_hooks_node() and self.check_hooks_sliver()
1038 def do_check_initscripts(self):
1039 class CompleterTaskInitscript (CompleterTask):
1040 def __init__ (self, test_sliver, stamp):
1041 self.test_sliver=test_sliver
1043 def actual_run (self):
1044 return self.test_sliver.check_initscript_stamp (self.stamp)
1046 return "initscript checker for %s"%self.test_sliver.name()
1047 def failure_message (self):
1048 return "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
1051 for slice_spec in self.plc_spec['slices']:
1052 if not slice_spec.has_key('initscriptstamp'):
1054 stamp=slice_spec['initscriptstamp']
1055 slicename=slice_spec['slice_fields']['name']
1056 for nodename in slice_spec['nodenames']:
1057 print 'nodename',nodename,'slicename',slicename,'stamp',stamp
1058 (site,node) = self.locate_node (nodename)
1059 # xxx - passing the wrong site - probably harmless
1060 test_site = TestSite (self,site)
1061 test_slice = TestSlice (self,test_site,slice_spec)
1062 test_node = TestNode (self,test_site,node)
1063 test_sliver = TestSliver (self, test_node, test_slice)
1064 tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
1065 return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
1067 def check_initscripts(self):
1068 "check that the initscripts have triggered"
1069 return self.do_check_initscripts()
1071 def initscripts (self):
1072 "create initscripts with PLCAPI"
1073 for initscript in self.plc_spec['initscripts']:
1074 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1075 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1078 def delete_initscripts (self):
1079 "delete initscripts with PLCAPI"
1080 for initscript in self.plc_spec['initscripts']:
1081 initscript_name = initscript['initscript_fields']['name']
1082 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1084 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1085 print initscript_name,'deleted'
1087 print 'deletion went wrong - probably did not exist'
1092 "create slices with PLCAPI"
1093 return self.do_slices(action="add")
1095 def delete_slices (self):
1096 "delete slices with PLCAPI"
1097 return self.do_slices(action="delete")
1099 def fill_slices (self):
1100 "add nodes in slices with PLCAPI"
1101 return self.do_slices(action="fill")
1103 def empty_slices (self):
1104 "remove nodes from slices with PLCAPI"
1105 return self.do_slices(action="empty")
1107 def do_slices (self, action="add"):
1108 for slice in self.plc_spec['slices']:
1109 site_spec = self.locate_site (slice['sitename'])
1110 test_site = TestSite(self,site_spec)
1111 test_slice=TestSlice(self,test_site,slice)
1112 if action == "delete":
1113 test_slice.delete_slice()
1114 elif action=="fill":
1115 test_slice.add_nodes()
1116 elif action=="empty":
1117 test_slice.delete_nodes()
1119 test_slice.create_slice()
1123 def ssh_slice(self): pass
1125 def ssh_slice_off (self): pass
1127 def ssh_slice_basics(self): pass
1130 def check_vsys_defaults(self): pass
1133 def keys_clear_known_hosts (self): pass
1135 def plcapi_urls (self):
1136 return PlcapiUrlScanner (self.auth_root(),ip=self.vserverip).scan()
1138 def speed_up_slices (self):
1139 "tweak nodemanager settings on all nodes using a conf file"
1140 # create the template on the server-side
1141 template="%s.nodemanager"%self.name()
1142 template_file = open (template,"w")
1143 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1144 template_file.close()
1145 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1146 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1147 self.test_ssh.copy_abs(template,remote)
1149 self.apiserver.AddConfFile (self.auth_root(),
1150 {'dest':'/etc/sysconfig/nodemanager',
1151 'source':'PlanetLabConf/nodemanager',
1152 'postinstall_cmd':'service nm restart',})
1155 def debug_nodemanager (self):
1156 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1157 template="%s.nodemanager"%self.name()
1158 template_file = open (template,"w")
1159 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1160 template_file.close()
1161 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1162 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1163 self.test_ssh.copy_abs(template,remote)
1167 def qemu_start (self) : pass
1170 def timestamp_qemu (self) : pass
1172 # when a spec refers to a node possibly on another plc
1173 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1174 for plc in [ self ] + other_plcs:
1176 return plc.locate_sliver_obj (nodename, slicename)
1179 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1181 # implement this one as a cross step so that we can take advantage of different nodes
1182 # in multi-plcs mode
1183 def cross_check_tcp (self, other_plcs):
1184 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1185 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1186 utils.header ("check_tcp: no/empty config found")
1188 specs = self.plc_spec['tcp_specs']
1193 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1194 if not s_test_sliver.run_tcp_server(port,timeout=20):
1198 # idem for the client side
1199 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1200 # use the nodename from the located sliver, unless 'client_connect' is set
1201 if 'client_connect' in spec:
1202 destination = spec['client_connect']
1204 destination=s_test_sliver.test_node.name()
1205 if not c_test_sliver.run_tcp_client(destination,port):
1209 # painfully enough, we need to allow for some time as netflow might show up last
1210 def check_system_slice (self):
1211 "all nodes: check that a system slice is alive"
1212 # netflow currently not working in the lxc distro
1213 # drl not built at all in the wtx distro
1214 # if we find either of them we're happy
1215 return self.check_netflow() or self.check_drl()
1218 def check_netflow (self): return self._check_system_slice ('netflow')
1219 def check_drl (self): return self._check_system_slice ('drl')
1221 # we have the slices up already here, so it should not take too long
1222 def _check_system_slice (self, slicename, timeout_minutes=5, period_seconds=15):
1223 class CompleterTaskSystemSlice (CompleterTask):
1224 def __init__ (self, test_node, dry_run):
1225 self.test_node=test_node
1226 self.dry_run=dry_run
1227 def actual_run (self):
1228 return self.test_node._check_system_slice (slicename, dry_run=self.dry_run)
1230 return "System slice %s @ %s"%(slicename, self.test_node.name())
1231 def failure_message (self):
1232 return "COULD not find system slice %s @ %s"%(slicename, self.test_node.name())
1233 timeout = timedelta(minutes=timeout_minutes)
1234 silent = timedelta (0)
1235 period = timedelta (seconds=period_seconds)
1236 tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
1237 for test_node in self.all_nodes() ]
1238 return Completer (tasks) . run (timeout, silent, period)
1240 def plcsh_stress_test (self):
1241 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1242 # install the stress-test in the plc image
1243 location = "/usr/share/plc_api/plcsh_stress_test.py"
1244 remote="%s/%s"%(self.vm_root_in_host(),location)
1245 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1247 command += " -- --check"
1248 if self.options.size == 1:
1249 command += " --tiny"
1250 return ( self.run_in_guest(command) == 0)
1252 # populate runs the same utility with slightly different options
1253 # in particular runs with --preserve (don't cleanup) and without --check
1254 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1256 def sfa_install_all (self):
1257 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1258 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1260 def sfa_install_core(self):
1262 return self.yum_install ("sfa")
1264 def sfa_install_plc(self):
1265 "yum install sfa-plc"
1266 return self.yum_install("sfa-plc")
1268 def sfa_install_sfatables(self):
1269 "yum install sfa-sfatables"
1270 return self.yum_install ("sfa-sfatables")
1272 # for some very odd reason, this sometimes fails with the following symptom
1273 # # yum install sfa-client
1274 # Setting up Install Process
1276 # Downloading Packages:
1277 # Running rpm_check_debug
1278 # Running Transaction Test
1279 # Transaction Test Succeeded
1280 # Running Transaction
1281 # Transaction couldn't start:
1282 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1283 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1284 # even though in the same context I have
1285 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1286 # Filesystem Size Used Avail Use% Mounted on
1287 # /dev/hdv1 806G 264G 501G 35% /
1288 # none 16M 36K 16M 1% /tmp
1290 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1291 def sfa_install_client(self):
1292 "yum install sfa-client"
1293 first_try=self.yum_install("sfa-client")
1294 if first_try: return True
1295 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1296 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1297 utils.header("rpm_path=<<%s>>"%cached_rpm_path)
1299 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1300 return self.yum_check_installed ("sfa-client")
1302 def sfa_dbclean(self):
1303 "thoroughly wipes off the SFA database"
1304 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1305 self.run_in_guest("sfa-nuke.py")==0 or \
1306 self.run_in_guest("sfa-nuke-plc.py")==0
1308 def sfa_fsclean(self):
1309 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1310 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1313 def sfa_plcclean(self):
1314 "cleans the PLC entries that were created as a side effect of running the script"
1316 sfa_spec=self.plc_spec['sfa']
1318 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1319 login_base=auth_sfa_spec['login_base']
1320 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1321 except: print "Site %s already absent from PLC db"%login_base
1323 for spec_name in ['pi_spec','user_spec']:
1324 user_spec=auth_sfa_spec[spec_name]
1325 username=user_spec['email']
1326 try: self.apiserver.DeletePerson(self.auth_root(),username)
1328 # this in fact is expected as sites delete their members
1329 #print "User %s already absent from PLC db"%username
1332 print "REMEMBER TO RUN sfa_import AGAIN"
1335 def sfa_uninstall(self):
1336 "uses rpm to uninstall sfa - ignore result"
1337 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1338 self.run_in_guest("rm -rf /var/lib/sfa")
1339 self.run_in_guest("rm -rf /etc/sfa")
1340 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1342 self.run_in_guest("rpm -e --noscripts sfa-plc")
1345 ### run unit tests for SFA
1346 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1347 # Running Transaction
1348 # Transaction couldn't start:
1349 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1350 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1351 # no matter how many Gbs are available on the testplc
1352 # could not figure out what's wrong, so...
1353 # if the yum install phase fails, consider the test successful
1354 # other combinations will eventually run it hopefully
1355 def sfa_utest(self):
1356 "yum install sfa-tests and run SFA unittests"
1357 self.run_in_guest("yum -y install sfa-tests")
1358 # failed to install - forget it
1359 if self.run_in_guest("rpm -q sfa-tests")!=0:
1360 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1362 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1366 dirname="conf.%s"%self.plc_spec['name']
1367 if not os.path.isdir(dirname):
1368 utils.system("mkdir -p %s"%dirname)
1369 if not os.path.isdir(dirname):
1370 raise Exception,"Cannot create config dir for plc %s"%self.name()
1373 def conffile(self,filename):
1374 return "%s/%s"%(self.confdir(),filename)
1375 def confsubdir(self,dirname,clean,dry_run=False):
1376 subdirname="%s/%s"%(self.confdir(),dirname)
1378 utils.system("rm -rf %s"%subdirname)
1379 if not os.path.isdir(subdirname):
1380 utils.system("mkdir -p %s"%subdirname)
1381 if not dry_run and not os.path.isdir(subdirname):
1382 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1385 def conffile_clean (self,filename):
1386 filename=self.conffile(filename)
1387 return utils.system("rm -rf %s"%filename)==0
1390 def sfa_configure(self):
1391 "run sfa-config-tty"
1392 tmpname=self.conffile("sfa-config-tty")
1393 fileconf=open(tmpname,'w')
1394 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1395 'SFA_INTERFACE_HRN',
1396 'SFA_REGISTRY_LEVEL1_AUTH',
1397 'SFA_REGISTRY_HOST',
1398 'SFA_AGGREGATE_HOST',
1408 'SFA_GENERIC_FLAVOUR',
1409 'SFA_AGGREGATE_ENABLED',
1411 if self.plc_spec['sfa'].has_key(var):
1412 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1413 # the way plc_config handles booleans just sucks..
1416 if self.plc_spec['sfa'][var]: val='true'
1417 fileconf.write ('e %s\n%s\n'%(var,val))
1418 fileconf.write('w\n')
1419 fileconf.write('R\n')
1420 fileconf.write('q\n')
1422 utils.system('cat %s'%tmpname)
1423 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1426 def aggregate_xml_line(self):
1427 port=self.plc_spec['sfa']['neighbours-port']
1428 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1429 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1431 def registry_xml_line(self):
1432 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1433 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1436 # a cross step that takes all other plcs in argument
1437 def cross_sfa_configure(self, other_plcs):
1438 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1439 # of course with a single plc, other_plcs is an empty list
1442 agg_fname=self.conffile("agg.xml")
1443 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1444 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1445 utils.header ("(Over)wrote %s"%agg_fname)
1446 reg_fname=self.conffile("reg.xml")
1447 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1448 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1449 utils.header ("(Over)wrote %s"%reg_fname)
1450 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1451 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1453 def sfa_import(self):
1454 "use sfaadmin to import from plc"
1455 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1457 return self.run_in_guest('sfaadmin reg import_registry')==0
1458 # not needed anymore
1459 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1461 def sfa_start(self):
1463 return self.run_in_guest('service sfa start')==0
1465 def sfi_configure(self):
1466 "Create /root/sfi on the plc side for sfi client configuration"
1467 if self.options.dry_run:
1468 utils.header("DRY RUN - skipping step")
1470 sfa_spec=self.plc_spec['sfa']
1471 # cannot use auth_sfa_mapper to pass dir_name
1472 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1473 test_slice=TestAuthSfa(self,slice_spec)
1474 dir_basename=os.path.basename(test_slice.sfi_path())
1475 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1476 test_slice.sfi_configure(dir_name)
1477 # push into the remote /root/sfi area
1478 location = test_slice.sfi_path()
1479 remote="%s/%s"%(self.vm_root_in_host(),location)
1480 self.test_ssh.mkdir(remote,abs=True)
1481 # need to strip the last level of remote, otherwise we get an extra dir level
1482 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1486 def sfi_clean (self):
1487 "clean up /root/sfi on the plc side"
1488 self.run_in_guest("rm -rf /root/sfi")
1492 def sfa_add_site (self): pass
1494 def sfa_add_pi (self): pass
1496 def sfa_add_user(self): pass
1498 def sfa_update_user(self): pass
1500 def sfa_add_slice(self): pass
1502 def sfa_renew_slice(self): pass
1504 def sfa_discover(self): pass
1506 def sfa_create_slice(self): pass
1508 def sfa_check_slice_plc(self): pass
1510 def sfa_update_slice(self): pass
1512 def sfi_list(self): pass
1514 def sfi_show(self): pass
1516 def sfi_slices(self): pass
1518 def ssh_slice_sfa(self): pass
1520 def sfa_delete_user(self): pass
1522 def sfa_delete_slice(self): pass
1526 self.run_in_guest('service sfa stop')==0
1529 def populate (self):
1530 "creates random entries in the PLCAPI"
1531 # install the stress-test in the plc image
1532 location = "/usr/share/plc_api/plcsh_stress_test.py"
1533 remote="%s/%s"%(self.vm_root_in_host(),location)
1534 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1536 command += " -- --preserve --short-names"
1537 local = (self.run_in_guest(command) == 0);
1538 # second run with --foreign
1539 command += ' --foreign'
1540 remote = (self.run_in_guest(command) == 0);
1541 return ( local and remote)
1543 def gather_logs (self):
1544 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1545 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1546 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1547 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1548 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1549 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1550 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1552 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1553 self.gather_var_logs ()
1555 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/pgsql/data/pg_log/"
1556 self.gather_pgsql_logs ()
1558 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1559 self.gather_root_sfi ()
1561 print "-------------------- TestPlc.gather_logs : nodes' QEMU logs"
1562 for site_spec in self.plc_spec['sites']:
1563 test_site = TestSite (self,site_spec)
1564 for node_spec in site_spec['nodes']:
1565 test_node=TestNode(self,test_site,node_spec)
1566 test_node.gather_qemu_logs()
1568 print "-------------------- TestPlc.gather_logs : nodes' /var/log"
1569 self.gather_nodes_var_logs()
1571 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1572 self.gather_slivers_var_logs()
1575 def gather_slivers_var_logs(self):
1576 for test_sliver in self.all_sliver_objs():
1577 remote = test_sliver.tar_var_logs()
1578 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1579 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1580 utils.system(command)
1583 def gather_var_logs (self):
1584 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1585 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1586 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1587 utils.system(command)
1588 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1589 utils.system(command)
1591 def gather_pgsql_logs (self):
1592 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1593 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1594 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1595 utils.system(command)
1597 def gather_root_sfi (self):
1598 utils.system("mkdir -p logs/sfi.%s"%self.name())
1599 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1600 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1601 utils.system(command)
1603 def gather_nodes_var_logs (self):
1604 for site_spec in self.plc_spec['sites']:
1605 test_site = TestSite (self,site_spec)
1606 for node_spec in site_spec['nodes']:
1607 test_node=TestNode(self,test_site,node_spec)
1608 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1609 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1610 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1611 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1612 utils.system(command)
1615 # returns the filename to use for sql dump/restore, using options.dbname if set
1616 def dbfile (self, database):
1617 # uses options.dbname if it is found
1619 name=self.options.dbname
1620 if not isinstance(name,StringTypes):
1626 return "/root/%s-%s.sql"%(database,name)
1628 def plc_db_dump(self):
1629 'dump the planetlab5 DB in /root in the PLC - filename has time'
1630 dump=self.dbfile("planetlab5")
1631 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1632 utils.header('Dumped planetlab5 database in %s'%dump)
1635 def plc_db_restore(self):
1636 'restore the planetlab5 DB - looks broken, but run -n might help'
1637 dump=self.dbfile("planetlab5")
1638 ##stop httpd service
1639 self.run_in_guest('service httpd stop')
1640 # xxx - need another wrapper
1641 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1642 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1643 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1644 ##start httpd service
1645 self.run_in_guest('service httpd start')
1647 utils.header('Database restored from ' + dump)
1649 def standby_1_through_20(self):
1650 """convenience function to wait for a specified number of minutes"""
1653 def standby_1(): pass
1655 def standby_2(): pass
1657 def standby_3(): pass
1659 def standby_4(): pass
1661 def standby_5(): pass
1663 def standby_6(): pass
1665 def standby_7(): pass
1667 def standby_8(): pass
1669 def standby_9(): pass
1671 def standby_10(): pass
1673 def standby_11(): pass
1675 def standby_12(): pass
1677 def standby_13(): pass
1679 def standby_14(): pass
1681 def standby_15(): pass
1683 def standby_16(): pass
1685 def standby_17(): pass
1687 def standby_18(): pass
1689 def standby_19(): pass
1691 def standby_20(): pass