# Thierry Parmentelat <thierry.parmentelat@inria.fr>
# Copyright (C) 2010 INRIA
#
import sys
import os, os.path
import time
import datetime
import socket
import traceback
from types import StringTypes

import utils
from TestSite import TestSite
from TestNode import TestNode
from TestUser import TestUser
from TestKey import TestKey
from TestSlice import TestSlice
from TestSliver import TestSliver
from TestBoxQemu import TestBoxQemu
from TestSsh import TestSsh
from TestApiserver import TestApiserver
from TestAuthSfa import TestAuthSfa
# step methods must take (self) and return a boolean (options is a member of the class)

def standby(minutes, dry_run):
    utils.header('Entering StandBy for %d minutes' % minutes)
    if dry_run:
        print 'dry_run'
    else:
        time.sleep(60*minutes)
    return True

def standby_generic(func):
    def actual(self):
        minutes = int(func.__name__.split("_")[1])
        return standby(minutes, self.options.dry_run)
    return actual
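# hedged usage sketch (not in the original file): standby_generic is meant to
# decorate placeholder methods whose name encodes the duration, e.g.
#
#   @standby_generic
#   def standby_5(): pass
#
# so that running the 'standby_5' step sleeps for 5 minutes (or just prints
# 'dry_run' when dry_run is set) - see the standby_* steps at the bottom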
def node_mapper(method):
    def actual(self, *args, **kwds):
        overall = True
        node_method = TestNode.__dict__[method.__name__]
        for test_node in self.all_nodes():
            if not node_method(test_node, *args, **kwds): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestNode.__dict__[method.__name__].__doc__
    return actual
def slice_mapper(method):
    def actual(self):
        overall = True
        slice_method = TestSlice.__dict__[method.__name__]
        for slice_spec in self.plc_spec['slices']:
            site_spec = self.locate_site(slice_spec['sitename'])
            test_site = TestSite(self, site_spec)
            test_slice = TestSlice(self, test_site, slice_spec)
            if not slice_method(test_slice, self.options): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestSlice.__dict__[method.__name__].__doc__
    return actual
def auth_sfa_mapper(method):
    def actual(self):
        overall = True
        auth_method = TestAuthSfa.__dict__[method.__name__]
        for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_auth = TestAuthSfa(self, auth_spec)
            if not auth_method(test_auth, self.options): overall = False
        return overall
    # restore the doc text
    actual.__doc__ = TestAuthSfa.__dict__[method.__name__].__doc__
    return actual
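# hedged illustration of the mapper pattern shared by node_mapper,
# slice_mapper and auth_sfa_mapper: the decorated body is a placeholder,
# e.g. assuming TestNode defines qemu_start:
#
#   @node_mapper
#   def qemu_start(self): pass
#
# calling plc.qemu_start() then loops over self.all_nodes(), invokes
# TestNode.qemu_start on each, and returns the and-combination as one boolean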
SEP = '<sep>'
SEPSFA = '<sep-sfa>'

class TestPlc:

    default_steps = [
        'vs_delete', 'timestamp_vs', 'vs_create', SEP,
        'plc_install', 'plc_configure', 'plc_start', SEP,
        'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
        'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
        'nodestate_reinstall', 'qemu_local_init', 'bootcd', 'qemu_local_config', SEP,
        'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
        'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
        'sfi_configure@1', 'sfa_add_site@1', 'sfa_add_pi@1', SEPSFA,
        'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
        'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
        'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
        # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
        # but as the stress test might take a while, we sometimes missed the debug mode..
        'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
        'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
        'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
        'cross_check_tcp@1', 'check_system_slice', SEP,
        'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
        'force_gather_logs', SEP,
        ]
    other_steps = [
        'export', 'show_boxes', SEP,
        'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
        'delete_initscripts', 'delete_nodegroups', 'delete_all_sites', SEP,
        'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
        'delete_leases', 'list_leases', SEP,
        'populate', SEP,
        'nodestate_show', 'nodestate_safeboot', 'nodestate_boot', SEP,
        'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
        'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
        'sfa_plcclean', 'sfa_dbclean', 'sfa_stop', 'sfa_uninstall', 'sfi_clean', SEPSFA,
        'plc_db_dump', 'plc_db_restore', SEP,
        'check_netflow', 'check_drl', SEP,
        'debug_nodemanager', SEP,
        'standby_1_through_20', SEP,
        ]
    @staticmethod
    def printable_steps(list):
        single_line = " ".join(list) + " "
        return single_line.replace(" "+SEP+" ", " \\\n").replace(" "+SEPSFA+" ", " \\\n")

    @staticmethod
    def valid_step(step):
        return step != SEP and step != SEPSFA
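    # a hedged worked example for printable_steps, with SEP='<sep>':
    #   printable_steps(['plc_install', 'plc_configure', SEP, 'plc_start'])
    # joins the names with spaces and turns each ' <sep> ' into a
    # backslash-newline, yielding roughly:
    #   plc_install plc_configure \
    #   plc_start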
    # turn off the sfa-related steps when build has skipped SFA
    # this was originally for centos5 but is still valid
    # for up to f12 as recent SFAs with sqlalchemy won't build before f14
    @staticmethod
    def check_whether_build_has_sfa(rpms_url):
        utils.header("Checking if build provides SFA package...")
        # warning, we're now building 'sface' so let's be a bit more picky
        retcod = os.system("curl --silent %s/ | grep -q sfa-" % rpms_url)
        # full builds are expected to return with 0 here
        if retcod == 0:
            utils.header("build does provide SFA")
        else:
            # move all steps containing 'sfa' from default_steps to other_steps
            utils.header("SFA package not found - removing steps with sfa or sfi")
            sfa_steps = [ step for step in TestPlc.default_steps
                          if step.find('sfa') >= 0 or step.find('sfi') >= 0 ]
            TestPlc.other_steps += sfa_steps
            for step in sfa_steps: TestPlc.default_steps.remove(step)
    def __init__(self, plc_spec, options):
        self.plc_spec = plc_spec
        self.options = options
        self.test_ssh = TestSsh(self.plc_spec['host_box'], self.options.buildname)
        self.vserverip = plc_spec['vserverip']
        self.vservername = plc_spec['vservername']
        self.url = "https://%s:443/PLCAPI/" % plc_spec['vserverip']
        self.apiserver = TestApiserver(self.url, options.dry_run)

    def has_addresses_api(self):
        return self.apiserver.has_method('AddIpAddress')
    def name(self):
        name = self.plc_spec['name']
        return "%s.%s" % (name, self.vservername)

    def hostname(self):
        return self.plc_spec['host_box']

    def is_local(self):
        return self.test_ssh.is_local()
    # define the API methods on this object through xmlrpc
    # would help, but not strictly necessary
    def actual_command_in_guest(self, command):
        return self.test_ssh.actual_command(self.host_to_guest(command))

    def start_guest(self):
        return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))

    def stop_guest(self):
        return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))

    def run_in_guest(self, command):
        return utils.system(self.actual_command_in_guest(command))

    def run_in_host(self, command):
        return self.test_ssh.run_in_buildname(command)
    # command gets run in the plc's vm
    def host_to_guest(self, command):
        if self.options.plcs_use_lxc:
            return "ssh -o StrictHostKeyChecking=no %s %s" % (self.vserverip, command)
        else:
            return "vserver %s exec %s" % (self.vservername, command)

    def vm_root_in_host(self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/rootfs/" % (self.vservername)
        else:
            return "/vservers/%s" % (self.vservername)

    def vm_timestamp_path(self):
        if self.options.plcs_use_lxc:
            return "/var/lib/lxc/%s/%s.timestamp" % (self.vservername, self.vservername)
        else:
            return "/vservers/%s.timestamp" % (self.vservername)

    # start/stop the vserver
    def start_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-start --daemon --name=%s" % (self.vservername)
        else:
            return "vserver %s start" % (self.vservername)

    def stop_guest_in_host(self):
        if self.options.plcs_use_lxc:
            return "lxc-stop --name=%s" % (self.vservername)
        else:
            return "vserver %s stop" % (self.vservername)
    def run_in_guest_piped(self, local, remote):
        return utils.system(local + " | " + self.test_ssh.actual_command(self.host_to_guest(remote), keep_stdin=True))
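    # a hedged example of how the wrappers above compose, assuming a
    # vserver-based deployment whose host_box is remote:
    #   self.run_in_guest('service plc status')
    # amounts to running, from the test master:
    #   ssh root@<host_box> vserver <vservername> exec service plc status
    # i.e. host_to_guest() wraps for the guest, test_ssh wraps for the host box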
    def yum_check_installed(self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        return self.run_in_guest("rpm -q %s" % rpms) == 0

    # does a yum install in the vs, ignore yum retcod, check with rpm
    def yum_install(self, rpms):
        if isinstance(rpms, list):
            rpms = " ".join(rpms)
        self.run_in_guest("yum -y install %s" % rpms)
        # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
        self.run_in_guest("yum-complete-transaction -y")
        return self.yum_check_installed(rpms)
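    # hedged usage sketch: yum_install accepts a list or a plain string and
    # deliberately trusts rpm rather than yum's exit code, e.g.
    #   self.yum_install(['myplc', 'noderepo-onelab-f14-i386'])
    # returns True only if 'rpm -q myplc noderepo-onelab-f14-i386' then
    # succeeds inside the guest (the package names here are illustrative)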
    def auth_root(self):
        return {'Username': self.plc_spec['PLC_ROOT_USER'],
                'AuthMethod': 'password',
                'AuthString': self.plc_spec['PLC_ROOT_PASSWORD'],
                'Role': self.plc_spec['role'],
                }
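    # hedged sketch: this auth struct is the first argument of every PLCAPI
    # call made through self.apiserver, as in e.g.
    #   self.apiserver.GetNodes(self.auth_root(), {}, ['hostname', 'boot_state'])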
    def locate_site(self, sitename):
        for site in self.plc_spec['sites']:
            if site['site_fields']['name'] == sitename:
                return site
            if site['site_fields']['login_base'] == sitename:
                return site
        raise Exception, "Cannot locate site %s" % sitename

    def locate_node(self, nodename):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['name'] == nodename:
                    return (site, node)
        raise Exception, "Cannot locate node %s" % nodename

    def locate_hostname(self, hostname):
        for site in self.plc_spec['sites']:
            for node in site['nodes']:
                if node['node_fields']['hostname'] == hostname:
                    return (site, node)
        raise Exception, "Cannot locate hostname %s" % hostname

    def locate_key(self, key_name):
        for key in self.plc_spec['keys']:
            if key['key_name'] == key_name:
                return key
        raise Exception, "Cannot locate key %s" % key_name
    def locate_private_key_from_key_names(self, key_names):
        # locate the first available key
        found = False
        for key_name in key_names:
            key_spec = self.locate_key(key_name)
            test_key = TestKey(self, key_spec)
            publickey = test_key.publicpath()
            privatekey = test_key.privatepath()
            if os.path.isfile(publickey) and os.path.isfile(privatekey):
                found = True
        if found: return privatekey
        else:     return None
    def locate_slice(self, slicename):
        for slice in self.plc_spec['slices']:
            if slice['slice_fields']['name'] == slicename:
                return slice
        raise Exception, "Cannot locate slice %s" % slicename

    def all_sliver_objs(self):
        result = []
        for slice_spec in self.plc_spec['slices']:
            slicename = slice_spec['slice_fields']['name']
            for nodename in slice_spec['nodenames']:
                result.append(self.locate_sliver_obj(nodename, slicename))
        return result
    def locate_sliver_obj(self, nodename, slicename):
        (site, node) = self.locate_node(nodename)
        slice = self.locate_slice(slicename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        # xxx the slice site is assumed to be the node site - mhh - probably harmless
        test_slice = TestSlice(self, test_site, slice)
        return TestSliver(self, test_node, test_slice)

    def locate_first_node(self):
        nodename = self.plc_spec['slices'][0]['nodenames'][0]
        (site, node) = self.locate_node(nodename)
        test_site = TestSite(self, site)
        test_node = TestNode(self, test_site, node)
        return test_node

    def locate_first_sliver(self):
        slice_spec = self.plc_spec['slices'][0]
        slicename = slice_spec['slice_fields']['name']
        nodename = slice_spec['nodenames'][0]
        return self.locate_sliver_obj(nodename, slicename)
    # all different hostboxes used in this plc
    def gather_hostBoxes(self):
        # maps on sites and nodes, return [ (host_box,test_node) ]
        tuples = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if not test_node.is_real():
                    tuples.append((test_node.host_box(), test_node))
        # transform into a dict { 'host_box' -> [ test_node .. ] }
        result = {}
        for (box, node) in tuples:
            if not result.has_key(box):
                result[box] = []
            result[box].append(node)
        return result
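    # hedged illustration of the shape returned by gather_hostBoxes, with two
    # qemu nodes sharing one box (names are made up):
    #   { 'testbox1.example.org': [ <TestNode vnode01>, <TestNode vnode02> ] }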
    # a step for checking this stuff
    def show_boxes(self):
        'print summary of nodes location'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            print box, ":", " + ".join([ node.name() for node in nodes ])
        return True

    # make this a valid step
    def qemu_kill_all(self):
        'kill all qemu instances on the qemu boxes involved by this setup'
        # this is the brute force version, kill all qemus on that host box
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # pass the first nodename, as we don't push template-qemu on testboxes
            nodedir = nodes[0].nodedir()
            TestBoxQemu(box, self.options.buildname).qemu_kill_all(nodedir)
        return True
    # make this a valid step
    def qemu_list_all(self):
        'list all qemu instances on the qemu boxes involved by this setup'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # this is the brute force version, list all qemus on that host box
            TestBoxQemu(box, self.options.buildname).qemu_list_all()
        return True

    # list only the right qemus
    def qemu_list_mine(self):
        'list qemu instances for our nodes'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.list_qemu()
        return True

    # kill only the right qemus
    def qemu_kill_mine(self):
        'kill the qemu instances for our nodes'
        for (box, nodes) in self.gather_hostBoxes().iteritems():
            # the fine-grain version
            for node in nodes:
                node.kill_qemu()
        return True
    #################### display config
    def show(self):
        "show test configuration after localization"
        self.show_pass(1)
        self.show_pass(2)
        return True

    # ugly hack to make sure 'run export' only reports about the 1st plc
    # to avoid confusion - also we use 'inri_slice1' in various aliases..
    exported_id = 1
    def export(self):
        "print cut'n paste-able stuff to export env variables to your shell"
        # guess local domain from hostname
        if TestPlc.exported_id > 1:
            print "export GUESTHOSTNAME%d=%s" % (TestPlc.exported_id, self.plc_spec['vservername'])
            return True
        TestPlc.exported_id += 1
        domain = socket.gethostname().split('.',1)[1]
        fqdn = "%s.%s" % (self.plc_spec['host_box'], domain)
        print "export BUILD=%s" % self.options.buildname
        if self.options.plcs_use_lxc:
            print "export PLCHOSTLXC=%s" % fqdn
        else:
            print "export PLCHOSTVS=%s" % fqdn
        print "export GUESTNAME=%s" % self.plc_spec['vservername']
        vplcname = self.plc_spec['vservername'].split('-')[-1]
        print "export GUESTHOSTNAME=%s.%s" % (vplcname, domain)
        # find hostname of first node
        (hostname, qemubox) = self.all_node_infos()[0]
        print "export KVMHOST=%s.%s" % (qemubox, domain)
        print "export NODE=%s" % (hostname)
        return True
    always_display_keys = ['PLC_WWW_HOST', 'nodes', 'sites', ]
    def show_pass(self, passno):
        for (key, val) in self.plc_spec.iteritems():
            if not self.options.verbose and key not in TestPlc.always_display_keys: continue
            if passno == 2:
                if key == 'sites':
                    for site in val:
                        self.display_site_spec(site)
                        for node in site['nodes']:
                            self.display_node_spec(node)
                elif key == 'initscripts':
                    for initscript in val:
                        self.display_initscript_spec(initscript)
                elif key == 'slices':
                    for slice in val:
                        self.display_slice_spec(slice)
                elif key == 'keys':
                    for key in val:
                        self.display_key_spec(key)
            elif passno == 1:
                if key not in ['sites', 'initscripts', 'slices', 'keys', 'sfa']:
                    print '+ ', key, ':', val
    def display_site_spec(self, site):
        print '+ ======== site', site['site_fields']['name']
        for (k, v) in site.iteritems():
            if not self.options.verbose and k not in TestPlc.always_display_keys: continue
            if k == 'nodes':
                if v:
                    print '+ ', 'nodes : ',
                    for node in v:
                        print node['node_fields']['hostname'], '',
                    print ''
            elif k == 'users':
                if v:
                    print '+ ', 'users : ',
                    for user in v:
                        print user['name'], '',
                    print ''
            elif k == 'site_fields':
                print '+ login_base', ':', v['login_base']
            elif k == 'address_fields':
                pass
            else:
                print '+ ', k, ':', v
    def display_initscript_spec(self, initscript):
        print '+ ======== initscript', initscript['initscript_fields']['name']

    def display_key_spec(self, key):
        print '+ ======== key', key['key_name']

    def display_slice_spec(self, slice):
        print '+ ======== slice', slice['slice_fields']['name']
        for (k, v) in slice.iteritems():
            if k == 'nodenames':
                if v:
                    print '+ nodes : ',
                    for nodename in v:
                        print nodename, '',
                    print ''
            elif k == 'usernames':
                if v:
                    print '+ users : ',
                    for username in v:
                        print username, '',
                    print ''
            elif k == 'slice_fields':
                print '+ fields', ':',
                print 'max_nodes=', v['max_nodes'],
                print ''
            else:
                print '+ ', k, ':', v

    def display_node_spec(self, node):
        print "+ node=%s host_box=%s" % (node['name'], node['host_box']),
        print "hostname=", node['node_fields']['hostname'],
        print "ip=", node['interface_fields']['ip']
        if self.options.verbose:
            utils.pprint("node details", node, depth=3)
    # another entry point for just showing the boxes involved
    def display_mapping(self):
        TestPlc.display_mapping_plc(self.plc_spec)
        return True

    @staticmethod
    def display_mapping_plc(plc_spec):
        print '+ MyPLC', plc_spec['name']
        # WARNING this would not be right for lxc-based PLC's - should be harmless though
        print '+\tvserver address = root@%s:/vservers/%s' % (plc_spec['host_box'], plc_spec['vservername'])
        print '+\tIP = %s/%s' % (plc_spec['PLC_API_HOST'], plc_spec['vserverip'])
        for site_spec in plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                TestPlc.display_mapping_node(node_spec)

    @staticmethod
    def display_mapping_node(node_spec):
        print '+ NODE %s' % (node_spec['name'])
        print '+\tqemu box %s' % node_spec['host_box']
        print '+\thostname=%s' % node_spec['node_fields']['hostname']
    # write a timestamp in /vservers/<>.timestamp
    # cannot be inside the vserver, that causes vserver .. build to cough
    def timestamp_vs(self):
        "Create a timestamp to remember creation date for this plc"
        now = int(time.time())
        # TODO-lxc check this one
        # a first approx. is to store the timestamp close to the VM root like vs does
        stamp_path = self.vm_timestamp_path()
        stamp_dir = os.path.dirname(stamp_path)
        utils.system(self.test_ssh.actual_command("mkdir -p %s" % stamp_dir))
        return utils.system(self.test_ssh.actual_command("echo %d > %s" % (now, stamp_path))) == 0
    # this is called unconditionally at the beginning of the test sequence
    # just in case this is a rerun, so if the vm is not running it's fine
    def vs_delete(self):
        "vserver delete the test myplc"
        stamp_path = self.vm_timestamp_path()
        self.run_in_host("rm -f %s" % stamp_path)
        if self.options.plcs_use_lxc:
            self.run_in_host("lxc-stop --name %s" % self.vservername)
            self.run_in_host("lxc-destroy --name %s" % self.vservername)
            return True
        else:
            self.run_in_host("vserver --silent %s delete" % self.vservername)
            return True
    # historically the build was being fetched by the tests
    # now the build pushes itself as a subdir of the tests workdir
    # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
    def vs_create(self):
        "vserver creation (no install done)"
        # push the local build/ dir to the testplc box
        if self.is_local():
            # a full path for the local calls
            build_dir = os.path.dirname(sys.argv[0])
            # sometimes this is empty - set to "." in such a case
            if not build_dir: build_dir = "."
            build_dir += "/build"
        else:
            # use a standard name - will be relative to remote buildname
            build_dir = "build"
            # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
            self.test_ssh.rmdir(build_dir)
            self.test_ssh.copy(build_dir, recursive=True)
        # the repo url is taken from arch-rpms-url
        # with the last step (i386) removed
        repo_url = self.options.arch_rpms_url
        for level in [ 'arch' ]:
            repo_url = os.path.dirname(repo_url)
        # pass the vbuild-nightly options to vtest-init-vserver
        test_env_options = ""
        test_env_options += " -p %s" % self.options.personality
        test_env_options += " -d %s" % self.options.pldistro
        test_env_options += " -f %s" % self.options.fcdistro
        if self.options.plcs_use_lxc:
            script = "vtest-init-lxc.sh"
        else:
            script = "vtest-init-vserver.sh"
        vserver_name = self.vservername
        vserver_options = "--netdev eth0 --interface %s" % self.vserverip
        try:
            vserver_hostname = socket.gethostbyaddr(self.vserverip)[0]
            vserver_options += " --hostname %s" % vserver_hostname
        except:
            print "Cannot reverse lookup %s" % self.vserverip
            print "This is considered fatal, as this might pollute the test results"
            return False
        create_vserver = "%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s" % locals()
        return self.run_in_host(create_vserver) == 0
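    # hedged example of the command line assembled above (all values are
    # illustrative): with the vserver flavour this boils down to
    #   build/vtest-init-vserver.sh -p linux64 -d onelab -f f14 \
    #       vplc01 http://build.example.org/2012.05.23--f14 -- \
    #       --netdev eth0 --interface 138.96.250.131 --hostname vplc01.example.org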
    def plc_install(self):
        "yum install myplc, noderepo, and the plain bootstrapfs"

        # workaround for getting pgsql8.2 on centos5
        if self.options.fcdistro == "centos5":
            self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")

        # compute nodefamily from the personality
        if self.options.personality == "linux32":
            arch = "i386"
        elif self.options.personality == "linux64":
            arch = "x86_64"
        else:
            raise Exception, "Unsupported personality %r" % self.options.personality
        nodefamily = "%s-%s-%s" % (self.options.pldistro, self.options.fcdistro, arch)

        pkgs_list = []
        pkgs_list.append("slicerepo-%s" % nodefamily)
        pkgs_list.append("myplc")
        pkgs_list.append("noderepo-%s" % nodefamily)
        pkgs_list.append("nodeimage-%s-plain" % nodefamily)
        return self.yum_install(pkgs_list)
    def plc_configure(self):
        "run plc-config-tty"
        tmpname = '%s.plc-config-tty' % (self.name())
        fileconf = open(tmpname, 'w')
        for var in [ 'PLC_NAME',
                     'PLC_MAIL_SUPPORT_ADDRESS',
                     # Above line was added for integrating SFA Testing
                     'PLC_RESERVATION_GRANULARITY',
                     'PLC_OMF_XMPP_SERVER',
                     ]:
            fileconf.write('e %s\n%s\n' % (var, self.plc_spec[var]))
        fileconf.write('w\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s' % tmpname)
        self.run_in_guest_piped('cat %s' % tmpname, 'plc-config-tty')
        utils.system('rm %s' % tmpname)
        return True

    def plc_start(self):
        "service plc start"
        self.run_in_guest('service plc start')
        return True

    def plc_stop(self):
        "service plc stop"
        self.run_in_guest('service plc stop')
        return True

    def vs_start(self):
        "start the PLC vserver"
        self.start_guest()
        return True

    def vs_stop(self):
        "stop the PLC vserver"
        self.stop_guest()
        return True
    # stores the keys from the config for further use
    def keys_store(self):
        "stores test users ssh keys in keys/"
        for key_spec in self.plc_spec['keys']:
            TestKey(self, key_spec).store_key()
        return True

    def keys_clean(self):
        "removes keys cached in keys/"
        utils.system("rm -rf ./keys")
        return True

    # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
    # for later direct access to the nodes
    def keys_fetch(self):
        "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
        dir = "./keys"
        if not os.path.isdir(dir):
            os.mkdir(dir)
        vservername = self.vservername
        vm_root = self.vm_root_in_host()
        overall = True
        prefix = 'debug_ssh_key'
        for ext in [ 'pub', 'rsa' ]:
            src = "%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s" % locals()
            dst = "keys/%(vservername)s-debug.%(ext)s" % locals()
            if self.test_ssh.fetch(src, dst) != 0: overall = False
        return overall
700 "create sites with PLCAPI"
701 return self.do_sites()
703 def delete_sites (self):
704 "delete sites with PLCAPI"
705 return self.do_sites(action="delete")
707 def do_sites (self,action="add"):
708 for site_spec in self.plc_spec['sites']:
709 test_site = TestSite (self,site_spec)
710 if (action != "add"):
711 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
712 test_site.delete_site()
713 # deleted with the site
714 #test_site.delete_users()
717 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
718 test_site.create_site()
719 test_site.create_users()
    def delete_all_sites(self):
        "Delete all sites in PLC, and related objects"
        print 'auth_root', self.auth_root()
        sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id', 'login_base'])
        for site in sites:
            # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
            if site['login_base'] == self.plc_spec['PLC_SLICE_PREFIX']: continue
            site_id = site['site_id']
            print 'Deleting site_id', site_id
            self.apiserver.DeleteSite(self.auth_root(), site_id)
        return True

    def nodes(self):
        "create nodes with PLCAPI"
        return self.do_nodes()

    def delete_nodes(self):
        "delete nodes with PLCAPI"
        return self.do_nodes(action="delete")
    def do_nodes(self, action="add"):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            if action != "add":
                utils.header("Deleting nodes in site %s" % test_site.name())
                for node_spec in site_spec['nodes']:
                    test_node = TestNode(self, test_site, node_spec)
                    utils.header("Deleting %s" % test_node.name())
                    test_node.delete_node()
            else:
                utils.header("Creating nodes for site %s in %s" % (test_site.name(), self.name()))
                for node_spec in site_spec['nodes']:
                    utils.pprint('Creating node %s' % node_spec, node_spec)
                    test_node = TestNode(self, test_site, node_spec)
                    test_node.create_node()
        return True
    def nodegroups(self):
        "create nodegroups with PLCAPI"
        return self.do_nodegroups("add")

    def delete_nodegroups(self):
        "delete nodegroups with PLCAPI"
        return self.do_nodegroups("delete")

    YEAR = 365*24*3600
    @staticmethod
    def translate_timestamp(start, grain, timestamp):
        if timestamp < TestPlc.YEAR: return start + timestamp * grain
        else: return timestamp

    @staticmethod
    def timestamp_printable(timestamp):
        return time.strftime('%m-%d %H:%M:%S UTC', time.gmtime(timestamp))
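    # hedged worked example for translate_timestamp: with grain=1800 and
    # start aligned on the grain, a spec value smaller than one YEAR is taken
    # as relative, so t_from=0 maps to start and t_until=2 maps to
    # start+3600; anything larger is assumed to already be an absolute
    # UNIX timestamp and is returned unchanged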
776 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
778 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
779 print 'API answered grain=',grain
780 start=(now/grain)*grain
782 # find out all nodes that are reservable
783 nodes=self.all_reservable_nodenames()
785 utils.header ("No reservable node found - proceeding without leases")
788 # attach them to the leases as specified in plc_specs
789 # this is where the 'leases' field gets interpreted as relative of absolute
790 for lease_spec in self.plc_spec['leases']:
791 # skip the ones that come with a null slice id
792 if not lease_spec['slice']: continue
793 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
794 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
795 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
796 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
797 if lease_addition['errors']:
798 utils.header("Cannot create leases, %s"%lease_addition['errors'])
801 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
802 (nodes,lease_spec['slice'],
803 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
804 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
    def delete_leases(self):
        "remove all leases in the myplc side"
        lease_ids = [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root()) ]
        utils.header("Cleaning leases %r" % lease_ids)
        self.apiserver.DeleteLeases(self.auth_root(), lease_ids)
        return True

    def list_leases(self):
        "list all leases known to the myplc"
        leases = self.apiserver.GetLeases(self.auth_root())
        now = int(time.time())
        for l in leases:
            current = l['t_until'] >= now
            if self.options.verbose or current:
                utils.header("%s %s from %s until %s" % (l['hostname'], l['name'],
                                                         TestPlc.timestamp_printable(l['t_from']),
                                                         TestPlc.timestamp_printable(l['t_until'])))
        return True
    # create nodegroups if needed, and populate
    def do_nodegroups(self, action="add"):
        # 1st pass to scan contents
        groups_dict = {}
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                if node_spec.has_key('nodegroups'):
                    nodegroupnames = node_spec['nodegroups']
                    if isinstance(nodegroupnames, StringTypes):
                        nodegroupnames = [ nodegroupnames ]
                    for nodegroupname in nodegroupnames:
                        if not groups_dict.has_key(nodegroupname):
                            groups_dict[nodegroupname] = []
                        groups_dict[nodegroupname].append(test_node.name())
        auth = self.auth_root()
        overall = True
        for (nodegroupname, group_nodes) in groups_dict.iteritems():
            if action == "add":
                print 'nodegroups:', 'dealing with nodegroup', nodegroupname, 'on nodes', group_nodes
                # first, check if the nodetagtype is here
                tag_types = self.apiserver.GetTagTypes(auth, {'tagname': nodegroupname})
                if tag_types:
                    tag_type_id = tag_types[0]['tag_type_id']
                else:
                    tag_type_id = self.apiserver.AddTagType(auth,
                                                            {'tagname': nodegroupname,
                                                             'description': 'for nodegroup %s' % nodegroupname,
                                                             'category': 'test'})
                print 'located tag (type)', nodegroupname, 'as', tag_type_id
                # create the nodegroup if needed
                nodegroups = self.apiserver.GetNodeGroups(auth, {'groupname': nodegroupname})
                if not nodegroups:
                    self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
                    print 'created nodegroup', nodegroupname, 'from tagname', nodegroupname, 'and value', 'yes'
                # set node tag on all nodes, value='yes'
                for nodename in group_nodes:
                    try:
                        self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
                    except:
                        traceback.print_exc()
                        print 'node', nodename, 'seems to already have tag', nodegroupname
                    # check anyway
                    try:
                        expect_yes = self.apiserver.GetNodeTags(auth,
                                                                {'hostname': nodename,
                                                                 'tagname': nodegroupname},
                                                                ['value'])[0]['value']
                        if expect_yes != "yes":
                            print 'Mismatch node tag on node', nodename, 'got', expect_yes
                            overall = False
                    except:
                        if not self.options.dry_run:
                            print 'Cannot find tag', nodegroupname, 'on node', nodename
                            overall = False
            else:
                try:
                    print 'cleaning nodegroup', nodegroupname
                    self.apiserver.DeleteNodeGroup(auth, nodegroupname)
                except:
                    traceback.print_exc()
                    overall = False
        return overall
    # a list of TestNode objs
    def all_nodes(self):
        nodes = []
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                nodes.append(TestNode(self, test_site, node_spec))
        return nodes

    # return a list of tuples (nodename,qemuname)
    def all_node_infos(self):
        node_infos = []
        for site_spec in self.plc_spec['sites']:
            node_infos += [ (node_spec['node_fields']['hostname'], node_spec['host_box'])
                            for node_spec in site_spec['nodes'] ]
        return node_infos

    def all_hostnames(self):
        return [ x[0] for x in self.all_node_infos() ]

    def all_reservable_nodenames(self):
        res = []
        for site_spec in self.plc_spec['sites']:
            for node_spec in site_spec['nodes']:
                node_fields = node_spec['node_fields']
                if 'node_type' in node_fields and node_fields['node_type'] == 'reservable':
                    res.append(node_fields['hostname'])
        return res
    # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
    def nodes_check_boot_state(self, target_boot_state, timeout_minutes, silent_minutes, period=15):
        if self.options.dry_run:
            print 'dry_run'
            return True
        # compute timeouts
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now() + datetime.timedelta(minutes=silent_minutes)
        # the nodes that haven't checked yet - start with a full list and shrink over time
        tocheck = self.all_hostnames()
        utils.header("checking nodes %r" % tocheck)
        # create a dict hostname -> status
        status = dict([ (hostname, 'undef') for hostname in tocheck ])
        while tocheck:
            # get their status
            tocheck_status = self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname', 'boot_state'])
            # update status
            for array in tocheck_status:
                hostname = array['hostname']
                boot_state = array['boot_state']
                if boot_state == target_boot_state:
                    utils.header("%s has reached the %s state" % (hostname, target_boot_state))
                else:
                    # if it's a real node, never mind
                    (site_spec, node_spec) = self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING - Real node %s in %s - ignored" % (hostname, boot_state))
                        # let's cheat
                        boot_state = target_boot_state
                    elif datetime.datetime.now() > graceout:
                        utils.header("%s still in '%s' state" % (hostname, boot_state))
                        graceout = datetime.datetime.now() + datetime.timedelta(1)
                status[hostname] = boot_state
            # refresh tocheck
            tocheck = [ hostname for (hostname, boot_state) in status.iteritems()
                        if boot_state != target_boot_state ]
            if not tocheck:
                return True
            if datetime.datetime.now() > timeout:
                for hostname in tocheck:
                    utils.header("FAILURE due to %s in '%s' state" % (hostname, status[hostname]))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def nodes_booted(self):
        return self.nodes_check_boot_state('boot', timeout_minutes=30, silent_minutes=28)
    def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period=15):
        # compute timeouts
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        graceout = datetime.datetime.now() + datetime.timedelta(minutes=silent_minutes)
        vservername = self.vservername
        if debug:
            message = "debug"
            local_key = "keys/%(vservername)s-debug.rsa" % locals()
        else:
            message = "boot"
            local_key = "keys/key_admin.rsa"
        node_infos = self.all_node_infos()
        utils.header("checking ssh access (expected in %s mode) to nodes:" % message)
        for (nodename, qemuname) in node_infos:
            utils.header("hostname=%s -- qemubox=%s" % (nodename, qemuname))
        utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)" % \
                     (timeout_minutes, silent_minutes, period))
        while node_infos:
            # iterate over a copy so we can safely remove entries as we go
            for node_info in node_infos[:]:
                (hostname, qemuname) = node_info
                # try to run 'hostname' in the node
                command = TestSsh(hostname, key=local_key).actual_command("hostname;uname -a")
                # don't spam logs - show the command only after the grace period
                success = utils.system(command, silent=datetime.datetime.now() < graceout)
                if success == 0:
                    utils.header('Successfully entered root@%s (%s)' % (hostname, message))
                    # refresh node_infos
                    node_infos.remove(node_info)
                else:
                    # we will have tried real nodes once, in case they're up - but if not, just skip
                    (site_spec, node_spec) = self.locate_hostname(hostname)
                    if TestNode.is_real_model(node_spec['node_fields']['model']):
                        utils.header("WARNING : check ssh access into real node %s - skipped" % hostname)
                        node_infos.remove(node_info)
            if not node_infos:
                return True
            if datetime.datetime.now() > timeout:
                for (hostname, qemuname) in node_infos:
                    utils.header("FAILURE to ssh into %s (on %s)" % (hostname, qemuname))
                return False
            # otherwise, sleep for a while
            time.sleep(period)
        # only useful in empty plcs
        return True

    def ssh_node_debug(self):
        "Tries to ssh into nodes in debug mode with the debug ssh key"
        return self.check_nodes_ssh(debug=True, timeout_minutes=10, silent_minutes=8)

    def ssh_node_boot(self):
        "Tries to ssh into nodes in production mode with the root ssh key"
        return self.check_nodes_ssh(debug=False, timeout_minutes=40, silent_minutes=38)
    @node_mapper
    def qemu_local_init(self): pass
    @node_mapper
    def bootcd(self): pass
    @node_mapper
    def qemu_local_config(self): pass
    @node_mapper
    def nodestate_reinstall(self): pass
    @node_mapper
    def nodestate_safeboot(self): pass
    @node_mapper
    def nodestate_boot(self): pass
    @node_mapper
    def nodestate_show(self): pass
    @node_mapper
    def qemu_export(self): pass
    ### check hooks : invoke scripts from hooks/{node,slice}
    def check_hooks_node(self):
        return self.locate_first_node().check_hooks()
    def check_hooks_sliver(self):
        return self.locate_first_sliver().check_hooks()

    def check_hooks(self):
        "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
        return self.check_hooks_node() and self.check_hooks_sliver()
    def do_check_initscripts(self):
        overall = True
        for slice_spec in self.plc_spec['slices']:
            if not slice_spec.has_key('initscriptstamp'):
                continue
            stamp = slice_spec['initscriptstamp']
            for nodename in slice_spec['nodenames']:
                (site, node) = self.locate_node(nodename)
                # xxx - passing the wrong site - probably harmless
                test_site = TestSite(self, site)
                test_slice = TestSlice(self, test_site, slice_spec)
                test_node = TestNode(self, test_site, node)
                test_sliver = TestSliver(self, test_node, test_slice)
                if not test_sliver.check_initscript_stamp(stamp):
                    overall = False
        return overall

    def check_initscripts(self):
        "check that the initscripts have triggered"
        return self.do_check_initscripts()
    def initscripts(self):
        "create initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            utils.pprint('Adding Initscript in plc %s' % self.plc_spec['name'], initscript)
            self.apiserver.AddInitScript(self.auth_root(), initscript['initscript_fields'])
        return True

    def delete_initscripts(self):
        "delete initscripts with PLCAPI"
        for initscript in self.plc_spec['initscripts']:
            initscript_name = initscript['initscript_fields']['name']
            print('Attempting to delete %s in plc %s' % (initscript_name, self.plc_spec['name']))
            try:
                self.apiserver.DeleteInitScript(self.auth_root(), initscript_name)
                print initscript_name, 'deleted'
            except:
                print 'deletion went wrong - probably did not exist'
        return True
1091 "create slices with PLCAPI"
1092 return self.do_slices(action="add")
1094 def delete_slices (self):
1095 "delete slices with PLCAPI"
1096 return self.do_slices(action="delete")
1098 def fill_slices (self):
1099 "add nodes in slices with PLCAPI"
1100 return self.do_slices(action="fill")
1102 def empty_slices (self):
1103 "remove nodes from slices with PLCAPI"
1104 return self.do_slices(action="empty")
1106 def do_slices (self, action="add"):
1107 for slice in self.plc_spec['slices']:
1108 site_spec = self.locate_site (slice['sitename'])
1109 test_site = TestSite(self,site_spec)
1110 test_slice=TestSlice(self,test_site,slice)
1111 if action == "delete":
1112 test_slice.delete_slice()
1113 elif action=="fill":
1114 test_slice.add_nodes()
1115 elif action=="empty":
1116 test_slice.delete_nodes()
1118 test_slice.create_slice()
    @slice_mapper
    def ssh_slice(self): pass
    @slice_mapper
    def ssh_slice_off(self): pass

    @node_mapper
    def keys_clear_known_hosts(self): pass
    def speed_up_slices(self):
        "tweak nodemanager settings on all nodes using a conf file"
        # create the template on the server-side
        template = "%s.nodemanager" % self.name()
        template_file = open(template, "w")
        template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
        template_file.close()
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        # declare the conf file to the PLCAPI so nodes pick it up
        self.apiserver.AddConfFile(self.auth_root(),
                                   {'dest': '/etc/sysconfig/nodemanager',
                                    'source': 'PlanetLabConf/nodemanager',
                                    'postinstall_cmd': 'service nm restart', })
        return True
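    # hedged sketch of the mechanism: nodes periodically fetch conf_files
    # declared through the PLCAPI, so the template pushed under
    # PlanetLabConf/nodemanager lands in /etc/sysconfig/nodemanager on each
    # node, and 'service nm restart' makes nodemanager poll much more often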
    def debug_nodemanager(self):
        "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
        template = "%s.nodemanager" % self.name()
        template_file = open(template, "w")
        template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
        template_file.close()
        in_vm = "/var/www/html/PlanetLabConf/nodemanager"
        remote = "%s/%s" % (self.vm_root_in_host(), in_vm)
        self.test_ssh.copy_abs(template, remote)
        return True

    @node_mapper
    def qemu_start(self): pass

    @node_mapper
    def timestamp_qemu(self): pass
    # when a spec refers to a node possibly on another plc
    def locate_sliver_obj_cross(self, nodename, slicename, other_plcs):
        for plc in [ self ] + other_plcs:
            try:
                return plc.locate_sliver_obj(nodename, slicename)
            except:
                pass
        raise Exception, "Cannot locate sliver %s@%s among all PLCs" % (nodename, slicename)
    # implement this one as a cross step so that we can take advantage of different nodes
    # in multi-plcs mode
    def cross_check_tcp(self, other_plcs):
        "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
        if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
            utils.header("check_tcp: no/empty config found")
            return True
        specs = self.plc_spec['tcp_specs']
        overall = True
        for spec in specs:
            port = spec['port']
            # server side
            s_test_sliver = self.locate_sliver_obj_cross(spec['server_node'], spec['server_slice'], other_plcs)
            if not s_test_sliver.run_tcp_server(port, timeout=20):
                overall = False
                break
            # idem for the client side
            c_test_sliver = self.locate_sliver_obj_cross(spec['client_node'], spec['client_slice'], other_plcs)
            # use nodename from located sliver, unless 'client_connect' is set
            if 'client_connect' in spec:
                destination = spec['client_connect']
            else:
                destination = s_test_sliver.test_node.name()
            if not c_test_sliver.run_tcp_client(destination, port):
                overall = False
        return overall
    # painfully enough, we need to allow for some time as netflow might show up last
    def check_system_slice(self):
        "all nodes: check that a system slice is alive"
        # netflow currently not working in the lxc distro
        # drl not built at all in the wtx distro
        # if we find either of them we're happy
        return self.check_netflow() or self.check_drl()

    def check_netflow(self): return self._check_system_slice('netflow')
    def check_drl(self): return self._check_system_slice('drl')

    # we have the slices up already here, so it should not take too long
    def _check_system_slice(self, slicename, timeout_minutes=5, period=15):
        timeout = datetime.datetime.now() + datetime.timedelta(minutes=timeout_minutes)
        test_nodes = self.all_nodes()
        while test_nodes:
            # iterate over a copy so we can safely remove entries as we go
            for test_node in test_nodes[:]:
                if test_node._check_system_slice(slicename, dry_run=self.options.dry_run):
                    utils.header("ok")
                    test_nodes.remove(test_node)
            if not test_nodes:
                return True
            if datetime.datetime.now() > timeout:
                for test_node in test_nodes:
                    utils.header("can't find system slice %s in %s" % (slicename, test_node.name()))
                return False
            time.sleep(period)
        return True
    def plcsh_stress_test(self):
        "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "%s/%s" % (self.vm_root_in_host(), location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --check"
        if self.options.size == 1:
            command += " --tiny"
        return (self.run_in_guest(command) == 0)

    # populate runs the same utility with slightly different options
    # in particular runs with --preserve (don't cleanup) and without --check
    # also it gets run twice, once with the --foreign option for creating fake foreign entries
    def sfa_install_all(self):
        "yum install sfa sfa-plc sfa-sfatables sfa-client"
        return self.yum_install("sfa sfa-plc sfa-sfatables sfa-client")

    def sfa_install_core(self):
        "yum install sfa"
        return self.yum_install("sfa")

    def sfa_install_plc(self):
        "yum install sfa-plc"
        return self.yum_install("sfa-plc")

    def sfa_install_sfatables(self):
        "yum install sfa-sfatables"
        return self.yum_install("sfa-sfatables")
    # for some very odd reason, this sometimes fails with the following symptom
    # # yum install sfa-client
    # Setting up Install Process
    # Downloading Packages:
    # Running rpm_check_debug
    # Running Transaction Test
    # Transaction Test Succeeded
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
    # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
    # even though in the same context I have
    # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
    # Filesystem            Size  Used Avail Use% Mounted on
    # /dev/hdv1             806G  264G  501G  35% /
    # none                   16M   36K   16M   1% /tmp
    #
    # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
    def sfa_install_client(self):
        "yum install sfa-client"
        first_try = self.yum_install("sfa-client")
        if first_try: return True
        utils.header("********** Regular yum failed - special workaround in place, 2nd chance")
        (code, cached_rpm_path) = utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
        utils.header("rpm_path=<<%s>>" % cached_rpm_path)
        self.run_in_guest("rpm -i %s" % cached_rpm_path)
        return self.yum_check_installed("sfa-client")
    def sfa_dbclean(self):
        "thoroughly wipes off the SFA database"
        return self.run_in_guest("sfaadmin reg nuke") == 0 or \
               self.run_in_guest("sfa-nuke.py") == 0 or \
               self.run_in_guest("sfa-nuke-plc.py") == 0

    def sfa_fsclean(self):
        "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
        self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
        return True
    def sfa_plcclean(self):
        "cleans the PLC entries that were created as a side effect of running the script"
        sfa_spec = self.plc_spec['sfa']

        for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
            login_base = auth_sfa_spec['login_base']
            try: self.apiserver.DeleteSite(self.auth_root(), login_base)
            except: print "Site %s already absent from PLC db" % login_base

            for spec_name in ['pi_spec', 'user_spec']:
                user_spec = auth_sfa_spec[spec_name]
                username = user_spec['email']
                try: self.apiserver.DeletePerson(self.auth_root(), username)
                except:
                    # this in fact is expected as sites delete their members
                    #print "User %s already absent from PLC db"%username
                    pass

        print "REMEMBER TO RUN sfa_import AGAIN"
        return True
    def sfa_uninstall(self):
        "uses rpm to uninstall sfa - ignore result"
        self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
        self.run_in_guest("rm -rf /var/lib/sfa")
        self.run_in_guest("rm -rf /etc/sfa")
        self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
        self.run_in_guest("rpm -e --noscripts sfa-plc")
        return True
    ### run unit tests for SFA
    # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
    # Running Transaction
    # Transaction couldn't start:
    # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
    # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
    # no matter how many Gbs are available on the testplc
    # could not figure out what's wrong, so...
    # if the yum install phase fails, consider the test is successful
    # other combinations will eventually run it hopefully
    def sfa_utest(self):
        "yum install sfa-tests and run SFA unittests"
        self.run_in_guest("yum -y install sfa-tests")
        # failed to install - forget it
        if self.run_in_guest("rpm -q sfa-tests") != 0:
            utils.header("WARNING: SFA unit tests failed to install, ignoring")
            return True
        return self.run_in_guest("/usr/share/sfa/tests/testAll.py") == 0

    def confdir(self):
        dirname = "conf.%s" % self.plc_spec['name']
        if not os.path.isdir(dirname):
            utils.system("mkdir -p %s" % dirname)
        if not os.path.isdir(dirname):
            raise Exception, "Cannot create config dir for plc %s" % self.name()
        return dirname
    def conffile(self, filename):
        return "%s/%s" % (self.confdir(), filename)

    def confsubdir(self, dirname, clean, dry_run=False):
        subdirname = "%s/%s" % (self.confdir(), dirname)
        if clean:
            utils.system("rm -rf %s" % subdirname)
        if not os.path.isdir(subdirname):
            utils.system("mkdir -p %s" % subdirname)
        if not dry_run and not os.path.isdir(subdirname):
            raise Exception, "Cannot create config subdir %s for plc %s" % (dirname, self.name())
        return subdirname

    def conffile_clean(self, filename):
        filename = self.conffile(filename)
        return utils.system("rm -rf %s" % filename) == 0
    def sfa_configure(self):
        "run sfa-config-tty"
        tmpname = self.conffile("sfa-config-tty")
        fileconf = open(tmpname, 'w')
        for var in [ 'SFA_REGISTRY_ROOT_AUTH',
                     'SFA_INTERFACE_HRN',
                     'SFA_REGISTRY_LEVEL1_AUTH',
                     'SFA_REGISTRY_HOST',
                     'SFA_AGGREGATE_HOST',
                     'SFA_GENERIC_FLAVOUR',
                     'SFA_AGGREGATE_ENABLED',
                     ]:
            if self.plc_spec['sfa'].has_key(var):
                fileconf.write('e %s\n%s\n' % (var, self.plc_spec['sfa'][var]))
        # the way plc_config handles booleans just sucks..
        for var in []:
            val = 'false'
            if self.plc_spec['sfa'][var]: val = 'true'
            fileconf.write('e %s\n%s\n' % (var, val))
        fileconf.write('w\n')
        fileconf.write('R\n')
        fileconf.write('q\n')
        fileconf.close()
        utils.system('cat %s' % tmpname)
        self.run_in_guest_piped('cat %s' % tmpname, 'sfa-config-tty')
        return True
    def aggregate_xml_line(self):
        port = self.plc_spec['sfa']['neighbours-port']
        return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
               (self.vserverip, self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'], port)

    def registry_xml_line(self):
        return '<registry addr="%s" hrn="%s" port="12345"/>' % \
               (self.vserverip, self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
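    # hedged sample of the lines produced above (values are illustrative):
    #   <aggregate addr="138.96.250.131" hrn="onelab" port="12346"/>
    #   <registry addr="138.96.250.131" hrn="onelab" port="12345"/>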
    # a cross step that takes all other plcs in argument
    def cross_sfa_configure(self, other_plcs):
        "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
        # of course with a single plc, other_plcs is an empty list
        if not other_plcs:
            return True
        agg_fname = self.conffile("agg.xml")
        file(agg_fname, "w").write("<aggregates>%s</aggregates>\n" % \
                                   " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
        utils.header("(Over)wrote %s" % agg_fname)
        reg_fname = self.conffile("reg.xml")
        file(reg_fname, "w").write("<registries>%s</registries>\n" % \
                                   " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
        utils.header("(Over)wrote %s" % reg_fname)
        return self.test_ssh.copy_abs(agg_fname, '/%s/etc/sfa/aggregates.xml' % self.vm_root_in_host()) == 0 \
           and self.test_ssh.copy_abs(reg_fname, '/%s/etc/sfa/registries.xml' % self.vm_root_in_host()) == 0
    def sfa_import(self):
        "use sfaadmin to import from plc"
        auth = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
        return \
            self.run_in_guest('sfaadmin reg import_registry') == 0
        # not needed anymore
        # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))

    def sfa_start(self):
        "service sfa start"
        return self.run_in_guest('service sfa start') == 0
    def sfi_configure(self):
        "Create /root/sfi on the plc side for sfi client configuration"
        if self.options.dry_run:
            utils.header("DRY RUN - skipping step")
            return True
        sfa_spec = self.plc_spec['sfa']
        # cannot use auth_sfa_mapper to pass dir_name
        for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
            test_slice = TestAuthSfa(self, slice_spec)
            dir_basename = os.path.basename(test_slice.sfi_path())
            dir_name = self.confsubdir("dot-sfi/%s" % dir_basename, clean=True, dry_run=self.options.dry_run)
            test_slice.sfi_configure(dir_name)
            # push into the remote /root/sfi area
            location = test_slice.sfi_path()
            remote = "%s/%s" % (self.vm_root_in_host(), location)
            self.test_ssh.mkdir(remote, abs=True)
            # need to strip last level of remote otherwise we get an extra dir level
            self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
        return True

    def sfi_clean(self):
        "clean up /root/sfi on the plc side"
        self.run_in_guest("rm -rf /root/sfi")
        return True
    @auth_sfa_mapper
    def sfa_add_site(self): pass
    @auth_sfa_mapper
    def sfa_add_pi(self): pass
    @auth_sfa_mapper
    def sfa_add_user(self): pass
    @auth_sfa_mapper
    def sfa_update_user(self): pass
    @auth_sfa_mapper
    def sfa_add_slice(self): pass
    @auth_sfa_mapper
    def sfa_renew_slice(self): pass
    @auth_sfa_mapper
    def sfa_discover(self): pass
    @auth_sfa_mapper
    def sfa_create_slice(self): pass
    @auth_sfa_mapper
    def sfa_check_slice_plc(self): pass
    @auth_sfa_mapper
    def sfa_update_slice(self): pass
    @auth_sfa_mapper
    def sfi_list(self): pass
    @auth_sfa_mapper
    def sfi_show(self): pass
    @auth_sfa_mapper
    def sfi_slices(self): pass
    @auth_sfa_mapper
    def ssh_slice_sfa(self): pass
    @auth_sfa_mapper
    def sfa_delete_user(self): pass
    @auth_sfa_mapper
    def sfa_delete_slice(self): pass

    def sfa_stop(self):
        "service sfa stop"
        self.run_in_guest('service sfa stop')
        return True
    def populate(self):
        "creates random entries in the PLCAPI"
        # install the stress-test in the plc image
        location = "/usr/share/plc_api/plcsh_stress_test.py"
        remote = "%s/%s" % (self.vm_root_in_host(), location)
        self.test_ssh.copy_abs("plcsh_stress_test.py", remote)
        command = location
        command += " -- --preserve --short-names"
        local = (self.run_in_guest(command) == 0)
        # second run with --foreign
        command += ' --foreign'
        remote = (self.run_in_guest(command) == 0)
        return (local and remote)
    def gather_logs(self):
        "gets all possible logs from plc's/qemu node's/slice's for future reference"
        # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
        # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
        # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
        # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
        # (3) get the nodes /var/log and store it as logs/node.var-log.<node>/*
        # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
        print "-------------------- TestPlc.gather_logs : PLC's /var/log"
        self.gather_var_logs()
        print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
        self.gather_pgsql_logs()
        print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
        self.gather_root_sfi()
        print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_node.gather_qemu_logs()
        print "-------------------- TestPlc.gather_logs : nodes's /var/log"
        self.gather_nodes_var_logs()
        print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
        self.gather_slivers_var_logs()
        return True
    def gather_slivers_var_logs(self):
        for test_sliver in self.all_sliver_objs():
            remote = test_sliver.tar_var_logs()
            utils.system("mkdir -p logs/sliver.var-log.%s" % test_sliver.name())
            command = remote + " | tar -C logs/sliver.var-log.%s -xf -" % test_sliver.name()
            utils.system(command)
        return True
    def gather_var_logs(self):
        utils.system("mkdir -p logs/myplc.var-log.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -" % self.name()
        utils.system(command)
        command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd" % self.name()
        utils.system(command)

    def gather_pgsql_logs(self):
        utils.system("mkdir -p logs/myplc.pgsql-log.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
        command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -" % self.name()
        utils.system(command)

    def gather_root_sfi(self):
        utils.system("mkdir -p logs/sfi.%s" % self.name())
        to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
        command = to_plc + "| tar -C logs/sfi.%s -xf -" % self.name()
        utils.system(command)
    def gather_nodes_var_logs(self):
        for site_spec in self.plc_spec['sites']:
            test_site = TestSite(self, site_spec)
            for node_spec in site_spec['nodes']:
                test_node = TestNode(self, test_site, node_spec)
                test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
                command = test_ssh.actual_command("tar -C /var/log -cf - .")
                command = command + "| tar -C logs/node.var-log.%s -xf -" % test_node.name()
                utils.system("mkdir -p logs/node.var-log.%s" % test_node.name())
                utils.system(command)
    # returns the filename to use for sql dump/restore, using options.dbname if set
    def dbfile(self, database):
        # uses options.dbname if it is found
        try:
            name = self.options.dbname
            if not isinstance(name, StringTypes):
                raise Exception
        except:
            t = datetime.datetime.now()
            d = t.date()
            name = str(d)
        return "/root/%s-%s.sql" % (database, name)
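    # hedged example: with options.dbname unset, dbfile('planetlab5') falls
    # back on today's date and yields something like
    #   /root/planetlab5-2012-05-23.sql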
    def plc_db_dump(self):
        'dump the planetlab5 DB in /root in the PLC - filename has time'
        dump = self.dbfile("planetlab5")
        self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f ' + dump)
        utils.header('Dumped planetlab5 database in %s' % dump)
        return True

    def plc_db_restore(self):
        'restore the planetlab5 DB - looks broken, but run -n might help'
        dump = self.dbfile("planetlab5")
        ##stop httpd service
        self.run_in_guest('service httpd stop')
        # xxx - need another wrapper
        self.run_in_guest_piped('echo drop database planetlab5', 'psql --user=pgsqluser template1')
        self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
        self.run_in_guest('psql -U pgsqluser planetlab5 -f ' + dump)
        ##starting httpd service
        self.run_in_guest('service httpd start')

        utils.header('Database restored from ' + dump)
        return True
    def standby_1_through_20(self):
        """convenience function to wait for a specified number of minutes"""
        pass
    @standby_generic
    def standby_1(): pass
    @standby_generic
    def standby_2(): pass
    @standby_generic
    def standby_3(): pass
    @standby_generic
    def standby_4(): pass
    @standby_generic
    def standby_5(): pass
    @standby_generic
    def standby_6(): pass
    @standby_generic
    def standby_7(): pass
    @standby_generic
    def standby_8(): pass
    @standby_generic
    def standby_9(): pass
    @standby_generic
    def standby_10(): pass
    @standby_generic
    def standby_11(): pass
    @standby_generic
    def standby_12(): pass
    @standby_generic
    def standby_13(): pass
    @standby_generic
    def standby_14(): pass
    @standby_generic
    def standby_15(): pass
    @standby_generic
    def standby_16(): pass
    @standby_generic
    def standby_17(): pass
    @standby_generic
    def standby_18(): pass
    @standby_generic
    def standby_19(): pass
    @standby_generic
    def standby_20(): pass