1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
23 from PlcapiUrlScanner import PlcapiUrlScanner
25 # step methods must take (self) and return a boolean (options is a member of the class)
# module-level helper: pause the test sequence (dry_run branch elided in this dump)
27 def standby(minutes,dry_run):
28 utils.header('Entering StandBy for %d mn'%minutes)
32 time.sleep(60*minutes)
# decorator factory for the standby_* steps; the inner wrapper def is elided here.
# NOTE(review): parses the minute count out of the step name, e.g. standby_3_... -> 3
35 def standby_generic (func):
37 minutes=int(func.__name__.split("_")[1])
38 return standby(minutes,self.options.dry_run)
# decorator: promote a TestNode method of the same name into a TestPlc step
# that loops over all nodes ('overall' init and returns are on elided lines)
41 def node_mapper (method):
42 def actual(self,*args, **kwds):
44 node_method = TestNode.__dict__[method.__name__]
45 for test_node in self.all_nodes():
46 if not node_method(test_node, *args, **kwds): overall=False
48 # restore the doc text
49 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
# decorator: same mapping pattern, one TestSlice call per slice spec
52 def slice_mapper (method):
55 slice_method = TestSlice.__dict__[method.__name__]
56 for slice_spec in self.plc_spec['slices']:
57 site_spec = self.locate_site (slice_spec['sitename'])
58 test_site = TestSite(self,site_spec)
59 test_slice=TestSlice(self,test_site,slice_spec)
60 if not slice_method(test_slice,self.options): overall=False
62 # restore the doc text
63 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
# decorator: same mapping pattern over plc_spec['sfa']['auth_sfa_specs']
66 def auth_sfa_mapper (method):
69 auth_method = TestAuthSfa.__dict__[method.__name__]
70 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
71 test_auth=TestAuthSfa(self,auth_spec)
72 if not auth_method(test_auth,self.options): overall=False
74 # restore the doc text
75 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
# fragment of the class-level step catalogs (list openings elided in this dump);
# entries suffixed '@1' presumably only run on the first plc — TODO confirm
85 'vs_delete','timestamp_vs','vs_create', SEP,
86 'plc_install', 'plc_configure', 'plc_start', SEP,
87 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', SEP,
88 'plcapi_urls','speed_up_slices', SEP,
89 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
90 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
91 # keep this out of the way for now
92 # 'check_vsys_defaults', SEP,
93 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
94 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
95 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
96 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
97 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
98 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
99 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
100 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
101 # but as the stress test might take a while, we sometimes missed the debug mode..
102 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
103 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
104 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
105 'cross_check_tcp@1', 'check_system_slice', SEP,
106 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
107 'force_gather_logs', SEP,
# second catalog (opening elided): manual / diagnostic steps, not run by default
110 'export', 'show_boxes', SEP,
111 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
112 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
113 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
114 'delete_leases', 'list_leases', SEP,
116 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
117 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
118 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
119 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
120 'plc_db_dump' , 'plc_db_restore', SEP,
121 'check_netflow','check_drl', SEP,
122 'debug_nodemanager', SEP,
123 'standby_1_through_20',SEP,
def printable_steps (list):
    "Render a step list as one line, breaking it at SEP/SEPSFA markers."
    # note: the parameter shadows the builtin 'list'; kept for interface compatibility.
    # the trailing space lets the patterns below match a separator in final position
    flat = " ".join(list) + " "
    for marker in (SEP, SEPSFA):
        flat = flat.replace(" " + marker + " ", " \\\n")
    return flat
def valid_step (step):
    "True for actual step names, False for the SEP/SEPSFA layout separators."
    return step not in (SEP, SEPSFA)
134 # turn off the sfa-related steps when build has skipped SFA
135 # this was originally for centos5 but is still valid
136 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
# class-level helper (decorator elided in this dump): prune sfa/sfi steps
# from default_steps when the build publishes no sfa- rpm
138 def check_whether_build_has_sfa (rpms_url):
139 utils.header ("Checking if build provides SFA package...")
140 # warning, we're now building 'sface' so let's be a bit more picky
# NOTE(review): rpms_url is interpolated into a shell command; assumed to be
# trusted test configuration, not user input — confirm
141 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
142 # full builds are expected to return with 0 here
144 utils.header("build does provide SFA")
146 # move all steps containing 'sfa' from default_steps to other_steps
147 utils.header("SFA package not found - removing steps with sfa or sfi")
148 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
149 TestPlc.other_steps += sfa_steps
150 for step in sfa_steps: TestPlc.default_steps.remove(step)
# constructor: wire the plc spec and command-line options together.
# NOTE(review): self.options is read on line 155 but its assignment sits on an
# elided line of this dump — confirm against the full file
152 def __init__ (self,plc_spec,options):
153 self.plc_spec=plc_spec
155 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
156 self.vserverip=plc_spec['vserverip']
157 self.vservername=plc_spec['vservername']
158 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
159 self.apiserver=TestApiserver(self.url,options.dry_run)
# per-state ssh timers, as (timeout,silent) minute pairs taken from the spec
160 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
161 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
def has_addresses_api (self):
    "True when the attached PLCAPI server exposes the AddIpAddress method."
    api = self.apiserver
    return api.has_method('AddIpAddress')
# fragments of small accessors — their def lines are elided in this dump.
# presumably the body of name(): "<plc name>.<vservername>" — TODO confirm
167 name=self.plc_spec['name']
168 return "%s.%s"%(name,self.vservername)
# presumably the body of a host_box accessor — TODO confirm
171 return self.plc_spec['host_box']
# presumably the body of is_local() — TODO confirm
174 return self.test_ssh.is_local()
176 # define the API methods on this object through xmlrpc
177 # would help, but not strictly necessary
def actual_command_in_guest (self,command):
    "Wrap <command> so it runs inside the plc guest, via the host box's ssh."
    guest_command = self.host_to_guest(command)
    return self.test_ssh.actual_command(guest_command)
def start_guest (self):
    "Start the guest from the host box; returns the shell exit status."
    command = self.test_ssh.actual_command(self.start_guest_in_host())
    return utils.system(command)
def stop_guest (self):
    "Stop the guest from the host box; returns the shell exit status."
    command = self.test_ssh.actual_command(self.stop_guest_in_host())
    return utils.system(command)
def run_in_guest (self,command):
    "Run <command> inside the plc guest; returns the shell exit status."
    full_command = self.actual_command_in_guest(command)
    return utils.system(full_command)
def run_in_host (self,command):
    "Run <command> on the host box itself (not inside the guest)."
    ssh = self.test_ssh
    return ssh.run_in_buildname(command)
196 #command gets run in the plc's vm
# builds the host-side command string that executes <command> inside the guest;
# the vserver 'else' branches below sit on elided lines of this dump
197 def host_to_guest(self,command):
198 if self.options.plcs_use_lxc:
199 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
201 return "vserver %s exec %s"%(self.vservername,command)
# root of the guest filesystem as seen from the host box
203 def vm_root_in_host(self):
204 if self.options.plcs_use_lxc:
205 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
207 return "/vservers/%s"%(self.vservername)
# host-side path of this guest's creation-timestamp file (see timestamp_vs)
209 def vm_timestamp_path (self):
210 if self.options.plcs_use_lxc:
211 return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
213 return "/vservers/%s.timestamp"%(self.vservername)
215 #start/stop the vserver
216 def start_guest_in_host(self):
217 if self.options.plcs_use_lxc:
218 return "lxc-start --daemon --name=%s"%(self.vservername)
220 return "vserver %s start"%(self.vservername)
222 def stop_guest_in_host(self):
223 if self.options.plcs_use_lxc:
224 return "lxc-stop --name=%s"%(self.vservername)
226 return "vserver %s stop"%(self.vservername)
def run_in_guest_piped (self,local,remote):
    "Run <local> on the host and pipe its output into <remote> inside the guest."
    # keep_stdin so the remote end keeps reading the pipe
    guest_part = self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True)
    return utils.system(local+" | "+guest_part)
# True when every rpm in <rpms> (list or space-separated string) is installed
# in the guest; the list-to-string join sits on an elided line of this dump
232 def yum_check_installed (self, rpms):
233 if isinstance (rpms, list):
235 return self.run_in_guest("rpm -q %s"%rpms)==0
237 # does a yum install in the vs, ignore yum retcod, check with rpm
238 def yum_install (self, rpms):
239 if isinstance (rpms, list):
241 self.run_in_guest("yum -y install %s"%rpms)
242 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
243 self.run_in_guest("yum-complete-transaction -y")
244 return self.yum_check_installed (rpms)
# root credential dict for PLCAPI calls (closing brace elided in this dump)
246 def auth_root (self):
247 return {'Username':self.plc_spec['PLC_ROOT_USER'],
248 'AuthMethod':'password',
249 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
250 'Role' : self.plc_spec['role']
# spec lookup helpers: scan plc_spec and hand back the matching sub-spec.
# the success-path 'return' lines are elided in this dump; each helper raises
# (python2 'raise Exception,msg' syntax) when nothing matches
252 def locate_site (self,sitename):
253 for site in self.plc_spec['sites']:
254 if site['site_fields']['name'] == sitename:
256 if site['site_fields']['login_base'] == sitename:
258 raise Exception,"Cannot locate site %s"%sitename
260 def locate_node (self,nodename):
261 for site in self.plc_spec['sites']:
262 for node in site['nodes']:
263 if node['name'] == nodename:
265 raise Exception,"Cannot locate node %s"%nodename
267 def locate_hostname (self,hostname):
268 for site in self.plc_spec['sites']:
269 for node in site['nodes']:
270 if node['node_fields']['hostname'] == hostname:
272 raise Exception,"Cannot locate hostname %s"%hostname
274 def locate_key (self,key_name):
275 for key in self.plc_spec['keys']:
276 if key['key_name'] == key_name:
278 raise Exception,"Cannot locate key %s"%key_name
# first key name whose public AND private key files both exist locally
280 def locate_private_key_from_key_names (self, key_names):
281 # locate the first avail. key
283 for key_name in key_names:
284 key_spec=self.locate_key(key_name)
285 test_key=TestKey(self,key_spec)
286 publickey=test_key.publicpath()
287 privatekey=test_key.privatepath()
288 if os.path.isfile(publickey) and os.path.isfile(privatekey):
290 if found: return privatekey
293 def locate_slice (self, slicename):
294 for slice in self.plc_spec['slices']:
295 if slice['slice_fields']['name'] == slicename:
297 raise Exception,"Cannot locate slice %s"%slicename
# one TestSliver per (slice,node) pair in the spec ('result' init elided)
299 def all_sliver_objs (self):
301 for slice_spec in self.plc_spec['slices']:
302 slicename = slice_spec['slice_fields']['name']
303 for nodename in slice_spec['nodenames']:
304 result.append(self.locate_sliver_obj (nodename,slicename))
# build the TestSliver object for (nodename,slicename)
307 def locate_sliver_obj (self,nodename,slicename):
308 (site,node) = self.locate_node(nodename)
309 slice = self.locate_slice (slicename)
311 test_site = TestSite (self, site)
312 test_node = TestNode (self, test_site,node)
313 # xxx the slice site is assumed to be the node site - mhh - probably harmless
314 test_slice = TestSlice (self, test_site, slice)
315 return TestSliver (self, test_node, test_slice)
# TestNode for the first node of the first slice in the spec
# (the final 'return test_node' line is elided in this dump)
317 def locate_first_node(self):
318 nodename=self.plc_spec['slices'][0]['nodenames'][0]
319 (site,node) = self.locate_node(nodename)
320 test_site = TestSite (self, site)
321 test_node = TestNode (self, test_site,node)
def locate_first_sliver (self):
    "Sliver object for the first node of the first slice in the spec."
    first_slice = self.plc_spec['slices'][0]
    return self.locate_sliver_obj(first_slice['nodenames'][0],
                                  first_slice['slice_fields']['name'])
330 # all different hostboxes used in this plc
# groups the plc's qemu-backed nodes by the host box they run on;
# 'tuples'/'result' inits and the return sit on elided lines of this dump
331 def gather_hostBoxes(self):
332 # maps on sites and nodes, return [ (host_box,test_node) ]
334 for site_spec in self.plc_spec['sites']:
335 test_site = TestSite (self,site_spec)
336 for node_spec in site_spec['nodes']:
337 test_node = TestNode (self, test_site, node_spec)
# real (physical) nodes have no qemu host box, so they are skipped
338 if not test_node.is_real():
339 tuples.append( (test_node.host_box(),test_node) )
340 # transform into a dict { 'host_box' -> [ test_node .. ] }
342 for (box,node) in tuples:
343 if not result.has_key(box):
346 result[box].append(node)
349 # a step for checking this stuff
350 def show_boxes (self):
351 'print summary of nodes location'
352 for (box,nodes) in self.gather_hostBoxes().iteritems():
353 print box,":"," + ".join( [ node.name() for node in nodes ] )
356 # make this a valid step
357 def qemu_kill_all(self):
358 'kill all qemu instances on the qemu boxes involved by this setup'
359 # this is the brute force version, kill all qemus on that host box
360 for (box,nodes) in self.gather_hostBoxes().iteritems():
361 # pass the first nodename, as we don't push template-qemu on testboxes
362 nodedir=nodes[0].nodedir()
363 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
366 # make this a valid step
367 def qemu_list_all(self):
368 'list all qemu instances on the qemu boxes involved by this setup'
369 for (box,nodes) in self.gather_hostBoxes().iteritems():
370 # this is the brute force version, kill all qemus on that host box
371 TestBoxQemu(box,self.options.buildname).qemu_list_all()
374 # kill only the right qemus
# per-node variants; loop bodies sit on elided lines of this dump
375 def qemu_list_mine(self):
376 'list qemu instances for our nodes'
377 for (box,nodes) in self.gather_hostBoxes().iteritems():
378 # the fine-grain version
383 # kill only the right qemus
384 def qemu_kill_mine(self):
385 'kill the qemu instances for our nodes'
386 for (box,nodes) in self.gather_hostBoxes().iteritems():
387 # the fine-grain version
392 #################### display config
# fragments of the 'show' and 'export' steps — their def lines are elided
394 "show test configuration after localization"
399 # ugly hack to make sure 'run export' only reports about the 1st plc
400 # to avoid confusion - also we use 'inri_slice1' in various aliases..
403 "print cut'n paste-able stuff to export env variables to your shell"
404 # guess local domain from hostname
405 if TestPlc.exported_id>1:
406 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
408 TestPlc.exported_id+=1
409 domain=socket.gethostname().split('.',1)[1]
410 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
411 print "export BUILD=%s"%self.options.buildname
412 if self.options.plcs_use_lxc:
413 print "export PLCHOSTLXC=%s"%fqdn
415 print "export PLCHOSTVS=%s"%fqdn
416 print "export GUESTNAME=%s"%self.plc_spec['vservername']
417 vplcname=self.plc_spec['vservername'].split('-')[-1]
418 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
419 # find hostname of first node
420 (hostname,qemubox) = self.all_node_infos()[0]
421 print "export KVMHOST=%s.%s"%(qemubox,domain)
422 print "export NODE=%s"%(hostname)
# keys always shown even without --verbose
426 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
# dispatch each plc_spec entry to its dedicated display helper
# (several elif branches sit on elided lines of this dump)
427 def show_pass (self,passno):
428 for (key,val) in self.plc_spec.iteritems():
429 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
433 self.display_site_spec(site)
434 for node in site['nodes']:
435 self.display_node_spec(node)
436 elif key=='initscripts':
437 for initscript in val:
438 self.display_initscript_spec (initscript)
441 self.display_slice_spec (slice)
444 self.display_key_spec (key)
446 if key not in ['sites','initscripts','slices','keys', 'sfa']:
447 print '+ ',key,':',val
# pretty-print one site spec; several elif branches are elided in this dump
449 def display_site_spec (self,site):
450 print '+ ======== site',site['site_fields']['name']
451 for (k,v) in site.iteritems():
452 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
455 print '+ ','nodes : ',
457 print node['node_fields']['hostname'],'',
463 print user['name'],'',
465 elif k == 'site_fields':
466 print '+ login_base',':',v['login_base']
467 elif k == 'address_fields':
473 def display_initscript_spec (self,initscript):
474 print '+ ======== initscript',initscript['initscript_fields']['name']
476 def display_key_spec (self,key):
477 print '+ ======== key',key['key_name']
# pretty-print one slice spec; several elif branches are elided in this dump
479 def display_slice_spec (self,slice):
480 print '+ ======== slice',slice['slice_fields']['name']
481 for (k,v) in slice.iteritems():
494 elif k=='slice_fields':
495 print '+ fields',':',
496 print 'max_nodes=',v['max_nodes'],
# one-line node summary; the trailing commas chain the python2 print
# statements onto a single output line
501 def display_node_spec (self,node):
502 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
503 print "hostname=",node['node_fields']['hostname'],
504 print "ip=",node['interface_fields']['ip']
505 if self.options.verbose:
506 utils.pprint("node details",node,depth=3)
508 # another entry point for just showing the boxes involved
# the per-node loop that follows is elided in this dump
509 def display_mapping (self):
510 TestPlc.display_mapping_plc(self.plc_spec)
514 def display_mapping_plc (plc_spec):
515 print '+ MyPLC',plc_spec['name']
516 # WARNING this would not be right for lxc-based PLC's - should be harmless though
517 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
518 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
519 for site_spec in plc_spec['sites']:
520 for node_spec in site_spec['nodes']:
521 TestPlc.display_mapping_node(node_spec)
524 def display_mapping_node (node_spec):
525 print '+ NODE %s'%(node_spec['name'])
526 print '+\tqemu box %s'%node_spec['host_box']
527 print '+\thostname=%s'%node_spec['node_fields']['hostname']
529 # write a timestamp in /vservers/<>.timestamp
530 # cannot be inside the vserver, that causes vserver .. build to cough
# 'now' is computed on an elided line of this dump (presumably int(time.time()))
531 def timestamp_vs (self):
532 "Create a timestamp to remember creation date for this plc"
534 # TODO-lxc check this one
535 # a first approx. is to store the timestamp close to the VM root like vs does
536 stamp_path=self.vm_timestamp_path ()
537 stamp_dir = os.path.dirname (stamp_path)
538 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
539 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
541 # this is called unconditionally at the beginning of the test sequence
542 # just in case this is a rerun, so if the vm is not running it's fine
# vs_delete step — its def line is elided in this dump
544 "vserver delete the test myplc"
545 stamp_path=self.vm_timestamp_path()
546 self.run_in_host("rm -f %s"%stamp_path)
547 if self.options.plcs_use_lxc:
548 self.run_in_host("lxc-stop --name %s"%self.vservername)
549 self.run_in_host("lxc-destroy --name %s"%self.vservername)
552 self.run_in_host("vserver --silent %s delete"%self.vservername)
556 # historically the build was being fetched by the tests
557 # now the build pushes itself as a subdir of the tests workdir
558 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
# create the guest vm with vtest-init-{lxc,vserver}.sh; several lines
# (test_env_options init, try/except around the reverse lookup) are elided
559 def vs_create (self):
560 "vserver creation (no install done)"
561 # push the local build/ dir to the testplc box
563 # a full path for the local calls
564 build_dir=os.path.dirname(sys.argv[0])
565 # sometimes this is empty - set to "." in such a case
566 if not build_dir: build_dir="."
567 build_dir += "/build"
569 # use a standard name - will be relative to remote buildname
571 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
572 self.test_ssh.rmdir(build_dir)
573 self.test_ssh.copy(build_dir,recursive=True)
574 # the repo url is taken from arch-rpms-url
575 # with the last step (i386) removed
576 repo_url = self.options.arch_rpms_url
577 for level in [ 'arch' ]:
578 repo_url = os.path.dirname(repo_url)
579 # pass the vbuild-nightly options to vtest-init-vserver
581 test_env_options += " -p %s"%self.options.personality
582 test_env_options += " -d %s"%self.options.pldistro
583 test_env_options += " -f %s"%self.options.fcdistro
584 if self.options.plcs_use_lxc:
585 script="vtest-init-lxc.sh"
587 script="vtest-init-vserver.sh"
588 vserver_name = self.vservername
589 vserver_options="--netdev eth0 --interface %s"%self.vserverip
# reverse lookup failure is treated as fatal (handler lines elided)
591 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
592 vserver_options += " --hostname %s"%vserver_hostname
594 print "Cannot reverse lookup %s"%self.vserverip
595 print "This is considered fatal, as this might pollute the test results"
597 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
598 return self.run_in_host(create_vserver) == 0
# yum-install the myplc stack in the guest; the arch selection branches and
# pkgs_list initialization sit on elided lines of this dump
601 def plc_install(self):
602 "yum install myplc, noderepo, and the plain bootstrapfs"
604 # workaround for getting pgsql8.2 on centos5
605 if self.options.fcdistro == "centos5":
606 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
609 if self.options.personality == "linux32":
611 elif self.options.personality == "linux64":
614 raise Exception, "Unsupported personality %r"%self.options.personality
615 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
618 pkgs_list.append ("slicerepo-%s"%nodefamily)
619 pkgs_list.append ("myplc")
620 pkgs_list.append ("noderepo-%s"%nodefamily)
621 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
# NOTE(review): pkgs_string is built but yum_install is called with the list —
# pkgs_string looks unused here; confirm against the full file
622 pkgs_string=" ".join(pkgs_list)
623 return self.yum_install (pkgs_list)
# drive plc-config-tty through a generated answer file; most of the
# variable list and the final return are elided in this dump
626 def plc_configure(self):
628 tmpname='%s.plc-config-tty'%(self.name())
629 fileconf=open(tmpname,'w')
630 for var in [ 'PLC_NAME',
635 'PLC_MAIL_SUPPORT_ADDRESS',
638 # Above line was added for integrating SFA Testing
644 'PLC_RESERVATION_GRANULARITY',
646 'PLC_OMF_XMPP_SERVER',
# 'e VAR' then the value: the plc-config-tty edit dialog format
649 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
650 fileconf.write('w\n')
651 fileconf.write('q\n')
653 utils.system('cat %s'%tmpname)
654 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
655 utils.system('rm %s'%tmpname)
# plc_start / plc_stop / vs_start / vs_stop step fragments (defs elided)
660 self.run_in_guest('service plc start')
665 self.run_in_guest('service plc stop')
669 "start the PLC vserver"
674 "stop the PLC vserver"
678 # stores the keys from the config for further use
679 def keys_store(self):
680 "stores test users ssh keys in keys/"
681 for key_spec in self.plc_spec['keys']:
682 TestKey(self,key_spec).store_key()
685 def keys_clean(self):
686 "removes keys cached in keys/"
687 utils.system("rm -rf ./keys")
690 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
691 # for later direct access to the nodes
# 'dir' creation and 'overall' init/return sit on elided lines of this dump
692 def keys_fetch(self):
693 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
695 if not os.path.isdir(dir):
697 vservername=self.vservername
698 vm_root=self.vm_root_in_host()
700 prefix = 'debug_ssh_key'
701 for ext in [ 'pub', 'rsa' ] :
702 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
703 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
704 if self.test_ssh.fetch(src,dst) != 0: overall=False
# body fragment of the sites() step — its def line is elided in this dump
708 "create sites with PLCAPI"
709 return self.do_sites()
def delete_sites (self):
    "delete sites with PLCAPI"
    # mirror of sites(): same worker routine, driven with the delete action
    outcome = self.do_sites(action="delete")
    return outcome
# shared worker behind sites()/delete_sites(); the else keyword and the
# final return sit on elided lines of this dump
715 def do_sites (self,action="add"):
716 for site_spec in self.plc_spec['sites']:
717 test_site = TestSite (self,site_spec)
718 if (action != "add"):
719 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
720 test_site.delete_site()
721 # deleted with the site
722 #test_site.delete_users()
725 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
726 test_site.create_site()
727 test_site.create_users()
# wipe every site known to the live PLC (the per-site loop header and the
# final return sit on elided lines of this dump)
730 def delete_all_sites (self):
731 "Delete all sites in PLC, and related objects"
732 print 'auth_root',self.auth_root()
733 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
735 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
736 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
737 site_id=site['site_id']
738 print 'Deleting site_id',site_id
739 self.apiserver.DeleteSite(self.auth_root(),site_id)
# body fragment of the nodes() step — its def line is elided in this dump
743 "create nodes with PLCAPI"
744 return self.do_nodes()
def delete_nodes (self):
    "delete nodes with PLCAPI"
    # mirror of nodes(): same worker routine, driven with the delete action
    outcome = self.do_nodes(action="delete")
    return outcome
# shared worker behind nodes()/delete_nodes(); the action test and the final
# return sit on elided lines of this dump
749 def do_nodes (self,action="add"):
750 for site_spec in self.plc_spec['sites']:
751 test_site = TestSite (self,site_spec)
753 utils.header("Deleting nodes in site %s"%test_site.name())
754 for node_spec in site_spec['nodes']:
755 test_node=TestNode(self,test_site,node_spec)
756 utils.header("Deleting %s"%test_node.name())
757 test_node.delete_node()
759 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
760 for node_spec in site_spec['nodes']:
761 utils.pprint('Creating node %s'%node_spec,node_spec)
762 test_node = TestNode (self,test_site,node_spec)
763 test_node.create_node ()
def nodegroups (self):
    "create nodegroups with PLCAPI"
    # thin wrapper: do_nodegroups carries the actual logic
    action = "add"
    return self.do_nodegroups(action)

def delete_nodegroups (self):
    "delete nodegroups with PLCAPI"
    action = "delete"
    return self.do_nodegroups(action)
def translate_timestamp (start,grain,timestamp):
    "Values below one year are relative offsets in grains; larger values pass through as absolute."
    if timestamp >= TestPlc.YEAR:
        return timestamp
    return start + timestamp*grain
def timestamp_printable (timestamp):
    "Render an epoch timestamp as 'MM-DD HH:MM:SS UTC'."
    broken_down = time.gmtime(timestamp)
    return time.strftime('%m-%d %H:%M:%S UTC', broken_down)
# the leases() step — its def line, 'now' computation and several control
# lines are elided in this dump
784 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
786 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
787 print 'API answered grain=',grain
# align the starting point on a grain boundary (python2 integer division)
788 start=(now/grain)*grain
790 # find out all nodes that are reservable
791 nodes=self.all_reservable_nodenames()
793 utils.header ("No reservable node found - proceeding without leases")
796 # attach them to the leases as specified in plc_specs
797 # this is where the 'leases' field gets interpreted as relative of absolute
798 for lease_spec in self.plc_spec['leases']:
799 # skip the ones that come with a null slice id
800 if not lease_spec['slice']: continue
801 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
802 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
803 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
804 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
805 if lease_addition['errors']:
806 utils.header("Cannot create leases, %s"%lease_addition['errors'])
809 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
810 (nodes,lease_spec['slice'],
811 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
812 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
# drop every lease the myplc knows about (the final return is elided)
816 def delete_leases (self):
817 "remove all leases in the myplc side"
818 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
819 utils.header("Cleaning leases %r"%lease_ids)
820 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
# print leases; the loop header and 'now' computation are elided in this dump.
# without --verbose only leases still current are shown
823 def list_leases (self):
824 "list all leases known to the myplc"
825 leases = self.apiserver.GetLeases(self.auth_root())
828 current=l['t_until']>=now
829 if self.options.verbose or current:
830 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
831 TestPlc.timestamp_printable(l['t_from']),
832 TestPlc.timestamp_printable(l['t_until'])))
835 # create nodegroups if needed, and populate
# two-pass worker: first collect {nodegroupname: [nodenames]}, then create
# (or delete) the tag types, nodegroups and node tags through PLCAPI.
# several control lines (groups_dict init, if/else, try/except) are elided
836 def do_nodegroups (self, action="add"):
837 # 1st pass to scan contents
839 for site_spec in self.plc_spec['sites']:
840 test_site = TestSite (self,site_spec)
841 for node_spec in site_spec['nodes']:
842 test_node=TestNode (self,test_site,node_spec)
843 if node_spec.has_key('nodegroups'):
844 nodegroupnames=node_spec['nodegroups']
# a single name is accepted as well as a list of names
845 if isinstance(nodegroupnames,StringTypes):
846 nodegroupnames = [ nodegroupnames ]
847 for nodegroupname in nodegroupnames:
848 if not groups_dict.has_key(nodegroupname):
849 groups_dict[nodegroupname]=[]
850 groups_dict[nodegroupname].append(test_node.name())
851 auth=self.auth_root()
853 for (nodegroupname,group_nodes) in groups_dict.iteritems():
855 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
856 # first, check if the nodetagtype is here
857 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
859 tag_type_id = tag_types[0]['tag_type_id']
861 tag_type_id = self.apiserver.AddTagType(auth,
862 {'tagname':nodegroupname,
863 'description': 'for nodegroup %s'%nodegroupname,
865 print 'located tag (type)',nodegroupname,'as',tag_type_id
# nodegroup itself, created on demand with value 'yes'
867 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
869 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
870 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
871 # set node tag on all nodes, value='yes'
872 for nodename in group_nodes:
874 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
# an existing tag is tolerated: keep going and re-check below
876 traceback.print_exc()
877 print 'node',nodename,'seems to already have tag',nodegroupname
879 # check anyway (comment reconstructed; sanity pass over the tag value)
880 expect_yes = self.apiserver.GetNodeTags(auth,
881 {'hostname':nodename,
882 'tagname':nodegroupname},
883 ['value'])[0]['value']
884 if expect_yes != "yes":
885 print 'Mismatch node tag on node',nodename,'got',expect_yes
888 if not self.options.dry_run:
889 print 'Cannot find tag',nodegroupname,'on node',nodename
# delete path (branch header elided in this dump)
893 print 'cleaning nodegroup',nodegroupname
894 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
896 traceback.print_exc()
900 # a list of TestNode objs
# 'nodes' init and the final return sit on elided lines of this dump
901 def all_nodes (self):
903 for site_spec in self.plc_spec['sites']:
904 test_site = TestSite (self,site_spec)
905 for node_spec in site_spec['nodes']:
906 nodes.append(TestNode (self,test_site,node_spec))
909 # return a list of tuples (nodename,qemuname)
# 'node_infos' init and the final return sit on elided lines of this dump
910 def all_node_infos (self) :
912 for site_spec in self.plc_spec['sites']:
913 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
914 for node_spec in site_spec['nodes'] ]
def all_nodenames (self):
    "Hostnames only, extracted from all_node_infos()'s (hostname,qemubox) pairs."
    return [ hostname for (hostname, qemubox) in self.all_node_infos() ]
# hostnames of all spec nodes flagged node_type='reservable'
# ('res' init and the final return sit on elided lines of this dump)
918 def all_reservable_nodenames (self):
920 for site_spec in self.plc_spec['sites']:
921 for node_spec in site_spec['nodes']:
922 node_fields=node_spec['node_fields']
923 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
924 res.append(node_fields['hostname'])
927 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
# poll PLCAPI until every node reaches <target_boot_state>, or timeout;
# the outer polling loop, sleeps and returns are on elided lines of this dump
928 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
929 if self.options.dry_run:
933 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
934 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
935 # the nodes that haven't checked yet - start with a full list and shrink over time
936 tocheck = self.all_hostnames()
937 utils.header("checking nodes %r"%tocheck)
938 # create a dict hostname -> status
939 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
942 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
944 for array in tocheck_status:
945 hostname=array['hostname']
946 boot_state=array['boot_state']
947 if boot_state == target_boot_state:
948 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
950 # if it's a real node, never mind
951 (site_spec,node_spec)=self.locate_hostname(hostname)
952 if TestNode.is_real_model(node_spec['node_fields']['model']):
953 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
# treat real nodes as if they had reached the target state
955 boot_state = target_boot_state
956 elif datetime.datetime.now() > graceout:
957 utils.header ("%s still in '%s' state"%(hostname,boot_state))
# push graceout a day ahead so the message prints only once per node
958 graceout=datetime.datetime.now()+datetime.timedelta(1)
959 status[hostname] = boot_state
961 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
964 if datetime.datetime.now() > timeout:
965 for hostname in tocheck:
966 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
968 # otherwise, sleep for a while
970 # only useful in empty plcs
def nodes_booted(self):
    "Wait for all nodes to reach 'boot'; 30 min timeout, quiet for the first 28."
    target_state = 'boot'
    return self.nodes_check_boot_state(target_state,
                                       timeout_minutes=30,
                                       silent_minutes=28)
# poll ssh access to every node, in debug or production mode; the polling
# loop, the key/message selection branches and returns are elided in this dump
976 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
978 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
979 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
980 vservername=self.vservername
# debug mode uses the plc's debug key fetched by keys_fetch
983 local_key = "keys/%(vservername)s-debug.rsa"%locals()
# production mode uses the admin key
986 local_key = "keys/key_admin.rsa"
987 node_infos = self.all_node_infos()
988 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
989 for (nodename,qemuname) in node_infos:
990 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
991 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
992 (timeout_minutes,silent_minutes,period))
994 for node_info in node_infos:
995 (hostname,qemuname) = node_info
996 # try to run 'hostname' in the node
997 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
998 # don't spam logs - show the command only after the grace period
999 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
1001 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
1002 # refresh node_infos
1003 node_infos.remove(node_info)
1005 # we will have tried real nodes once, in case they're up - but if not, just skip
1006 (site_spec,node_spec)=self.locate_hostname(hostname)
1007 if TestNode.is_real_model(node_spec['node_fields']['model']):
1008 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
1009 node_infos.remove(node_info)
1012 if datetime.datetime.now() > timeout:
1013 for (hostname,qemuname) in node_infos:
1014 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
1016 # otherwise, sleep for a while
1018 # only useful in empty plcs
def ssh_node_debug(self):
    "Tries to ssh into nodes in debug mode with the debug ssh key"
    # timeouts are per-instance settings
    return self.check_nodes_ssh(
        debug=True,
        timeout_minutes=self.ssh_node_debug_timeout,
        silent_minutes=self.ssh_node_debug_silent)
def ssh_node_boot(self):
    "Tries to ssh into nodes in production mode with the root ssh key"
    # same machinery as ssh_node_debug, production key and timeouts
    return self.check_nodes_ssh(
        debug=False,
        timeout_minutes=self.ssh_node_boot_timeout,
        silent_minutes=self.ssh_node_boot_silent)
# node-mapped pass-throughs: the bodies are empty on purpose -- the decorator
# lines (elided from this extract, presumably @node_mapper as defined at the
# top of the file) replace each of these with a loop over all nodes invoking
# the same-named TestNode method -- TODO confirm against upstream
def qemu_local_init (self): pass
def bootcd (self): pass
def qemu_local_config (self): pass
def nodestate_reinstall (self): pass
def nodestate_safeboot (self): pass
def nodestate_boot (self): pass
def nodestate_show (self): pass
def qemu_export (self): pass
### check hooks : invoke scripts from hooks/{node,slice}
def check_hooks_node (self):
    # run the hook scripts on the first node only
    first_node = self.locate_first_node()
    return first_node.check_hooks()
def check_hooks_sliver (self) :
    # run the hook scripts in the first sliver only
    first_sliver = self.locate_first_sliver()
    return first_sliver.check_hooks()
def check_hooks (self):
    "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
    # both contexts must pass; 'and' short-circuits like the original
    node_result = self.check_hooks_node()
    return node_result and self.check_hooks_sliver()
def do_check_initscripts(self):
    # for every slice that defines an 'initscriptstamp', check that the stamp
    # shows up in the matching sliver on each of the slice's nodes
    # NOTE(review): some lines are elided from this extract -- the 'overall'
    # accumulator init/return, the 'continue' for stamp-less slices, and the
    # failure branch after check_initscript_stamp; indentation is approximate
    for slice_spec in self.plc_spec['slices']:
        if not slice_spec.has_key('initscriptstamp'):
        stamp=slice_spec['initscriptstamp']
        for nodename in slice_spec['nodenames']:
            (site,node) = self.locate_node (nodename)
            # xxx - passing the wrong site - probably harmless
            test_site = TestSite (self,site)
            test_slice = TestSlice (self,test_site,slice_spec)
            test_node = TestNode (self,test_site,node)
            test_sliver = TestSliver (self, test_node, test_slice)
            if not test_sliver.check_initscript_stamp(stamp):
def check_initscripts(self):
    "check that the initscripts have triggered"
    # thin step wrapper around the actual checker
    result = self.do_check_initscripts()
    return result
def initscripts (self):
    "create initscripts with PLCAPI"
    # push every initscript declared in the spec into the PLCAPI
    for initscript in self.plc_spec['initscripts']:
        utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
        self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
    # fix: step methods must return a boolean (see note at the top of the
    # file); without this the step would report None i.e. failure
    return True
1089 def delete_initscripts (self):
1090 "delete initscripts with PLCAPI"
1091 for initscript in self.plc_spec['initscripts']:
1092 initscript_name = initscript['initscript_fields']['name']
1093 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1095 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1096 print initscript_name,'deleted'
1098 print 'deletion went wrong - probably did not exist'
# NOTE(review): the 'def slices(self):' header for this step appears to be
# elided from this extract -- TODO confirm against upstream
"create slices with PLCAPI"
return self.do_slices(action="add")
def delete_slices (self):
    "delete slices with PLCAPI"
    # same machinery as the other slice steps, different action verb
    verb = "delete"
    return self.do_slices(action=verb)
def fill_slices (self):
    "add nodes in slices with PLCAPI"
    # delegate to the generic slice dispatcher
    verb = "fill"
    return self.do_slices(action=verb)
def empty_slices (self):
    "remove nodes from slices with PLCAPI"
    # delegate to the generic slice dispatcher
    verb = "empty"
    return self.do_slices(action=verb)
def do_slices (self, action="add"):
    """Apply <action> to every slice in the spec.

    action is one of 'add' (default, creates), 'delete', 'fill' (add
    nodes) or 'empty' (remove nodes).
    """
    for slice_spec in self.plc_spec['slices']:
        site_spec = self.locate_site (slice_spec['sitename'])
        test_site = TestSite(self,site_spec)
        test_slice=TestSlice(self,test_site,slice_spec)
        if action == "delete":
            test_slice.delete_slice()
        elif action=="fill":
            test_slice.add_nodes()
        elif action=="empty":
            test_slice.delete_nodes()
        else:
            # fix: 'add' (and anything else) must be the fallback branch;
            # the previous text ran create_slice unconditionally
            test_slice.create_slice()
    # fix: step methods must return a boolean
    return True
# mapper pass-throughs: bodies are empty on purpose -- the decorator lines
# (elided from this extract, presumably @slice_mapper / @node_mapper from the
# top of the file) substitute a loop over all slices resp. nodes
def ssh_slice(self): pass
def ssh_slice_off (self): pass
def check_vsys_defaults(self): pass
def keys_clear_known_hosts (self): pass
def plcapi_urls (self):
    # probe the PLCAPI endpoint under its various candidate URLs
    scanner = PlcapiUrlScanner(self.auth_root(), ip=self.vserverip)
    return scanner.scan()
def speed_up_slices (self):
    "tweak nodemanager settings on all nodes using a conf file"
    # create the template on the server-side
    template="%s.nodemanager"%self.name()
    template_file = open (template,"w")
    template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
    template_file.close()
    # push it under the myplc PlanetLabConf area
    in_vm="/var/www/html/PlanetLabConf/nodemanager"
    remote="%s/%s"%(self.vm_root_in_host(),in_vm)
    self.test_ssh.copy_abs(template,remote)
    # and declare it to the API so nodes pick it up
    self.apiserver.AddConfFile (self.auth_root(),
                                {'dest':'/etc/sysconfig/nodemanager',
                                 'source':'PlanetLabConf/nodemanager',
                                 'postinstall_cmd':'service nm restart',})
    # fix: step methods must return a boolean
    return True
def debug_nodemanager (self):
    "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
    # overwrite the nodemanager conf template with verbose/faster options
    template="%s.nodemanager"%self.name()
    template_file = open (template,"w")
    template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
    template_file.close()
    in_vm="/var/www/html/PlanetLabConf/nodemanager"
    remote="%s/%s"%(self.vm_root_in_host(),in_vm)
    self.test_ssh.copy_abs(template,remote)
    # fix: step methods must return a boolean
    return True
# node-mapped pass-throughs (decorator lines elided from this extract,
# presumably @node_mapper)
def qemu_start (self) : pass
def timestamp_qemu (self) : pass
# when a spec refers to a node possibly on another plc
def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
    """Locate a sliver object on this plc or, failing that, on the others.

    Each plc is tried in turn (self first); a plc that cannot resolve the
    sliver raises, in which case the next one is tried.
    """
    for plc in [ self ] + other_plcs:
        # fix: without this try/except the trailing raise was unreachable
        try:
            return plc.locate_sliver_obj (nodename, slicename)
        except:
            pass
    # fix: use Exception(...) instead of the py2-only 'raise E, msg' form
    raise Exception("Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename))
# implement this one as a cross step so that we can take advantage of different nodes
# in multi-plcs mode
def cross_check_tcp (self, other_plcs):
    "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
    # NOTE(review): many lines are elided from this extract -- the early
    # return on empty config, the loop over specs / port extraction, and the
    # failure/success returns; statements are kept as-is, indentation is
    # approximate -- confirm against upstream
    if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
        utils.header ("check_tcp: no/empty config found")
    specs = self.plc_spec['tcp_specs']
    # server side: spawn a tcp server in the server slice
    s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
    if not s_test_sliver.run_tcp_server(port,timeout=20):
    # idem for the client side
    c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
    # use nodename from located sliver, unless 'client_connect' is set
    if 'client_connect' in spec:
        destination = spec['client_connect']
    destination=s_test_sliver.test_node.name()
    if not c_test_sliver.run_tcp_client(destination,port):
# painfully enough, we need to allow for some time as netflow might show up last
def check_system_slice (self):
    "all nodes: check that a system slice is alive"
    # netflow currently not working in the lxc distro
    # drl not built at all in the wtx distro
    # if we find either of them we're happy
    netflow_ok = self.check_netflow()
    return netflow_ok or self.check_drl()
def check_netflow (self):
    # system-slice probe for the netflow slice
    return self._check_system_slice('netflow')
def check_drl (self):
    # system-slice probe for the drl slice
    return self._check_system_slice('drl')
# we have the slices up already here, so it should not take too long
def _check_system_slice (self, slicename, timeout_minutes=5, period=15):
    # poll each node until <slicename> shows up as a system slice there,
    # giving up after timeout_minutes
    # NOTE(review): the outer polling loop, the per-node success header, the
    # sleep and the True/False returns are elided from this extract;
    # indentation below is approximate
    timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
    test_nodes=self.all_nodes()
    for test_node in test_nodes:
        if test_node._check_system_slice (slicename,dry_run=self.options.dry_run):
            # note: removes from the list being iterated (original behavior)
            test_nodes.remove(test_node)
    if datetime.datetime.now () > timeout:
        for test_node in test_nodes:
            utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
def plcsh_stress_test (self):
    "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # fix: 'command' was referenced without being initialized -- start the
    # command line from the installed script's location
    command = location
    command += " -- --check"
    if self.options.size == 1:
        command +=  " --tiny"
    return ( self.run_in_guest(command) == 0)
# populate runs the same utility without slightly different options
# in particular runs with --preserve (dont cleanup) and without --check
# also it gets run twice, once with the --foreign option for creating fake foreign entries

def sfa_install_all (self):
    "yum install sfa sfa-plc sfa-sfatables sfa-client"
    # one-shot install of the whole sfa stack
    packages = "sfa sfa-plc sfa-sfatables sfa-client"
    return self.yum_install(packages)
def sfa_install_core(self):
    # yum install of the core 'sfa' package only
    # NOTE(review): the step docstring appears elided from this extract
    return self.yum_install ("sfa")
def sfa_install_plc(self):
    "yum install sfa-plc"
    # the plc-side sfa driver package
    package = "sfa-plc"
    return self.yum_install(package)
def sfa_install_sfatables(self):
    "yum install sfa-sfatables"
    # the sfatables filtering add-on
    package = "sfa-sfatables"
    return self.yum_install(package)
# for some very odd reason, this sometimes fails with the following symptom
# # yum install sfa-client
# Setting up Install Process
# Downloading Packages:
# Running rpm_check_debug
# Running Transaction Test
# Transaction Test Succeeded
# Running Transaction
# Transaction couldn't start:
# installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
# [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
# even though in the same context I have
# [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
# Filesystem            Size  Used Avail Use% Mounted on
# /dev/hdv1             806G  264G  501G  35% /
# none                   16M   36K   16M   1% /tmp
#
# so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
def sfa_install_client(self):
    "yum install sfa-client"
    first_try=self.yum_install("sfa-client")
    if first_try: return True
    utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
    (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
    # fix: this used to interpolate the undefined name 'rpm_path' (NameError)
    utils.header("rpm_path=<<%s>>"%cached_rpm_path)
    self.run_in_guest("rpm -i %s"%cached_rpm_path)
    return self.yum_check_installed ("sfa-client")
def sfa_dbclean(self):
    "thoroughly wipes off the SFA database"
    # try the successive generations of the nuke command, newest first,
    # and stop at the first one that succeeds (same short-circuit as 'or')
    for nuke_command in ("sfaadmin reg nuke", "sfa-nuke.py", "sfa-nuke-plc.py"):
        if self.run_in_guest(nuke_command) == 0:
            return True
    return False
def sfa_fsclean(self):
    "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
    # best-effort cleanup; the command status is deliberately ignored
    self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
    # fix: step methods must return a boolean
    return True
def sfa_plcclean(self):
    "cleans the PLC entries that were created as a side effect of running the script"
    # NOTE(review): some lines are elided from this extract -- notably the
    # 'except:' (and its body) matching the DeletePerson try below;
    # indentation is approximate
    sfa_spec=self.plc_spec['sfa']
    for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
        # delete the site itself first; it may already be gone
        login_base=auth_sfa_spec['login_base']
        try: self.apiserver.DeleteSite (self.auth_root(),login_base)
        except: print "Site %s already absent from PLC db"%login_base
        # then the PI and regular user of that site
        for spec_name in ['pi_spec','user_spec']:
            user_spec=auth_sfa_spec[spec_name]
            username=user_spec['email']
            try: self.apiserver.DeletePerson(self.auth_root(),username)
            # this in fact is expected as sites delete their members
            #print "User %s already absent from PLC db"%username
    print "REMEMBER TO RUN sfa_import AGAIN"
def sfa_uninstall(self):
    "uses rpm to uninstall sfa - ignore result"
    self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
    self.run_in_guest("rm -rf /var/lib/sfa")
    self.run_in_guest("rm -rf /etc/sfa")
    self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
    # make sure sfa-plc is really gone even if its scriptlets misbehave
    self.run_in_guest("rpm -e --noscripts sfa-plc")
    # fix: step methods must return a boolean; per the docstring the
    # individual command results are deliberately ignored
    return True
### run unit tests for SFA
# NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
# Running Transaction
# Transaction couldn't start:
# installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
# [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
# no matter how many Gbs are available on the testplc
# could not figure out what's wrong, so...
# if the yum install phase fails, consider the test is successful
# other combinations will eventually run it hopefully
def sfa_utest(self):
    "yum install sfa-tests and run SFA unittests"
    self.run_in_guest("yum -y install sfa-tests")
    # failed to install - forget it
    if self.run_in_guest("rpm -q sfa-tests")!=0:
        utils.header("WARNING: SFA unit tests failed to install, ignoring")
        # fix: per the note above, a failed install must not fail the step
        return True
    return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
# NOTE(review): the 'def confdir(self):' header and the trailing
# 'return dirname' appear to be elided from this extract -- TODO confirm;
# this creates (if needed) the per-plc local config directory conf.<name>
dirname="conf.%s"%self.plc_spec['name']
if not os.path.isdir(dirname):
    utils.system("mkdir -p %s"%dirname)
if not os.path.isdir(dirname):
    raise Exception,"Cannot create config dir for plc %s"%self.name()
def conffile(self,filename):
    # full path of <filename> under this plc's private conf directory
    base = self.confdir()
    return "%s/%s" % (base, filename)
def confsubdir(self,dirname,clean,dry_run=False):
    """Return (creating it if needed) a subdir of this plc's conf dir.

    With clean=True the subdir is wiped first; with dry_run=True the
    final existence check is skipped.
    """
    subdirname="%s/%s"%(self.confdir(),dirname)
    if clean:
        utils.system("rm -rf %s"%subdirname)
    if not os.path.isdir(subdirname):
        utils.system("mkdir -p %s"%subdirname)
    if not dry_run and not os.path.isdir(subdirname):
        # fix: raising a plain string is illegal (removed in python 2.6) --
        # raise a real exception instead
        raise Exception("Cannot create config subdir %s for plc %s"%(dirname,self.name()))
    # callers (e.g. sfi_configure) rely on getting the path back
    return subdirname
def conffile_clean (self,filename):
    # remove a local config file; True when the rm succeeded
    path = self.conffile(filename)
    return utils.system("rm -rf %s" % path) == 0
def sfa_configure(self):
    "run sfa-config-tty"
    # feed 'e <var>\n<value>' commands for each spec-provided setting into
    # sfa-config-tty, then write ('w'), refresh ('R') and quit ('q')
    # NOTE(review): part of the variable list, the closing bracket of that
    # list, the boolean-variable loop header and the file close appear
    # elided from this extract; indentation is approximate
    tmpname=self.conffile("sfa-config-tty")
    fileconf=open(tmpname,'w')
    for var in [ 'SFA_REGISTRY_ROOT_AUTH',
         'SFA_INTERFACE_HRN',
         'SFA_REGISTRY_LEVEL1_AUTH',
         'SFA_REGISTRY_HOST',
         'SFA_AGGREGATE_HOST',
         'SFA_GENERIC_FLAVOUR',
         'SFA_AGGREGATE_ENABLED',
        if self.plc_spec['sfa'].has_key(var):
            fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
    # the way plc_config handles booleans just sucks..
    if self.plc_spec['sfa'][var]: val='true'
    fileconf.write ('e %s\n%s\n'%(var,val))
    fileconf.write('w\n')
    fileconf.write('R\n')
    fileconf.write('q\n')
    # show the generated script locally, then pipe it into the guest tool
    utils.system('cat %s'%tmpname)
    self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
def aggregate_xml_line(self):
    # xml fragment describing this plc's aggregate, for aggregates.xml
    sfa_settings = self.plc_spec['sfa']
    return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
        (self.vserverip, sfa_settings['SFA_REGISTRY_ROOT_AUTH'], sfa_settings['neighbours-port'])
def registry_xml_line(self):
    # xml fragment describing this plc's registry, for registries.xml;
    # the registry port is hard-wired to 12345
    hrn = self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    return '<registry addr="%s" hrn="%s" port="12345"/>' % (self.vserverip, hrn)
# a cross step that takes all other plcs in argument
def cross_sfa_configure(self, other_plcs):
    "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
    # of course with a single plc, other_plcs is an empty list
    # NOTE(review): an early exit for the empty-list case appears elided
    # from this extract
    agg_fname=self.conffile("agg.xml")
    file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
        " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%agg_fname)
    reg_fname=self.conffile("reg.xml")
    file(reg_fname,"w").write("<registries>%s</registries>\n" % \
        " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
    utils.header ("(Over)wrote %s"%reg_fname)
    # push both files into the guest; True only when both copies succeed
    return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
        and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
def sfa_import(self):
    "use sfaadmin to import from plc"
    # auth is only needed by the (disabled) key-copy command kept below
    auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
    # fix: the '==0' comparison result was computed and dropped -- return it
    return self.run_in_guest('sfaadmin reg import_registry')==0
    # not needed anymore
    # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
def sfa_start(self):
    # start the sfa service inside the guest; True on success
    # NOTE(review): the step docstring appears elided from this extract
    return self.run_in_guest('service sfa start')==0
def sfi_configure(self):
    "Create /root/sfi on the plc side for sfi client configuration"
    # NOTE(review): a couple of lines are elided from this extract -- the
    # early return after the dry-run header, and the trailing 'return True';
    # indentation is approximate
    if self.options.dry_run:
        utils.header("DRY RUN - skipping step")
    sfa_spec=self.plc_spec['sfa']
    # cannot use auth_sfa_mapper to pass dir_name
    for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
        test_slice=TestAuthSfa(self,slice_spec)
        dir_basename=os.path.basename(test_slice.sfi_path())
        dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
        test_slice.sfi_configure(dir_name)
        # push into the remote /root/sfi area
        location = test_slice.sfi_path()
        remote="%s/%s"%(self.vm_root_in_host(),location)
        self.test_ssh.mkdir(remote,abs=True)
        # need to strip last level or remote otherwise we get an extra dir level
        self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
def sfi_clean (self):
    "clean up /root/sfi on the plc side"
    # fix: propagate the command's exit status so the step reports
    # failures instead of returning None
    return self.run_in_guest("rm -rf /root/sfi") == 0
# auth-sfa pass-throughs: bodies are deliberately empty -- the decorator
# lines (elided from this extract, presumably @auth_sfa_mapper as defined at
# the top of the file) replace each with a loop over all auth_sfa_specs
# calling the same-named TestAuthSfa method
def sfa_add_site (self): pass
def sfa_add_pi (self): pass
def sfa_add_user(self): pass
def sfa_update_user(self): pass
def sfa_add_slice(self): pass
def sfa_renew_slice(self): pass
def sfa_discover(self): pass
def sfa_create_slice(self): pass
def sfa_check_slice_plc(self): pass
def sfa_update_slice(self): pass
def sfi_list(self): pass
def sfi_show(self): pass
def sfi_slices(self): pass
def ssh_slice_sfa(self): pass
def sfa_delete_user(self): pass
def sfa_delete_slice(self): pass
# NOTE(review): this line is the tail of a step (presumably sfa_stop) whose
# 'def' and docstring lines are elided from this extract; as written the
# '==0' result is discarded -- confirm a 'return' on the elided line
self.run_in_guest('service sfa stop')==0
def populate (self):
    "creates random entries in the PLCAPI"
    # install the stress-test in the plc image
    location = "/usr/share/plc_api/plcsh_stress_test.py"
    remote="%s/%s"%(self.vm_root_in_host(),location)
    self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
    # fix: 'command' was referenced without being initialized -- start the
    # command line from the installed script's location
    command = location
    # first run: preserve contents, no --check
    command += " -- --preserve --short-names"
    local = (self.run_in_guest(command) == 0);
    # second run with --foreign
    command += ' --foreign'
    remote = (self.run_in_guest(command) == 0);
    return ( local and remote)
1553 def gather_logs (self):
1554 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1555 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1556 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1557 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1558 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1559 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1560 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1562 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1563 self.gather_var_logs ()
1565 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1566 self.gather_pgsql_logs ()
1568 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1569 self.gather_root_sfi ()
1571 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1572 for site_spec in self.plc_spec['sites']:
1573 test_site = TestSite (self,site_spec)
1574 for node_spec in site_spec['nodes']:
1575 test_node=TestNode(self,test_site,node_spec)
1576 test_node.gather_qemu_logs()
1578 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1579 self.gather_nodes_var_logs()
1581 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1582 self.gather_slivers_var_logs()
def gather_slivers_var_logs(self):
    # fetch /var/log from one sliver per slice into logs/sliver.var-log.<sliver>/
    # NOTE(review): the line(s) right after this loop (possibly a
    # 'return True') appear elided from this extract -- confirm upstream
    for test_sliver in self.all_sliver_objs():
        remote = test_sliver.tar_var_logs()
        utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
        command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
        utils.system(command)
def gather_var_logs (self):
    # fetch the plc's /var/log into logs/myplc.var-log.<name>/
    target = "logs/myplc.var-log.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    tar_out = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % target)
    # open up the httpd subdir so anyone can browse it
    utils.system("chmod a+r,a+x %s/httpd" % target)
def gather_pgsql_logs (self):
    # fetch the plc's postgres logs into logs/myplc.pgsql-log.<name>/
    target = "logs/myplc.pgsql-log.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    tar_out = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % target)
def gather_root_sfi (self):
    # fetch the plc's /root/sfi into logs/sfi.<name>/
    target = "logs/sfi.%s" % self.name()
    utils.system("mkdir -p %s" % target)
    tar_out = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
    utils.system(tar_out + "| tar -C %s -xf -" % target)
def gather_nodes_var_logs (self):
    # fetch /var/log from every node (over ssh with the admin key) into
    # logs/node.var-log.<node>/
    for site_spec in self.plc_spec['sites']:
        test_site = TestSite(self, site_spec)
        for node_spec in site_spec['nodes']:
            test_node = TestNode(self, test_site, node_spec)
            target = "logs/node.var-log.%s" % test_node.name()
            test_ssh = TestSsh(test_node.name(), key="keys/key_admin.rsa")
            fetch = test_ssh.actual_command("tar -C /var/log -cf - .")
            fetch = fetch + "| tar -C %s -xf -" % target
            utils.system("mkdir -p %s" % target)
            utils.system(fetch)
# returns the filename to use for sql dump/restore, using options.dbname if set
def dbfile (self, database):
    # uses options.dbname if it is found
    # NOTE(review): several lines appear elided from this extract -- the
    # guard around reading options.dbname and the timestamp-based fallback
    # built from 't'; indentation is approximate
    name=self.options.dbname
    if not isinstance(name,StringTypes):
    t=datetime.datetime.now()
    return "/root/%s-%s.sql"%(database,name)
def plc_db_dump(self):
    'dump the planetlab5 DB in /root in the PLC - filename has time'
    # NOTE: the dump basename really is 'planetab5' -- kept as-is so new
    # dumps sit next to (and restore matches) existing ones
    dump=self.dbfile("planetab5")
    self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
    utils.header('Dumped planetlab5 database in %s'%dump)
    # fix: step methods must return a boolean
    return True
def plc_db_restore(self):
    'restore the planetlab5 DB - looks broken, but run -n might help'
    dump=self.dbfile("planetab5")
    # stop httpd while the database is being swapped out
    self.run_in_guest('service httpd stop')
    # xxx - need another wrapper
    self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
    self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
    self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
    # restart httpd once the restore is done
    self.run_in_guest('service httpd start')
    utils.header('Database restored from ' + dump)
def standby_1_through_20(self):
    """convenience function to wait for a specified number of minutes"""
# NOTE(review): the body of the convenience step above and the decorator
# lines for the twenty steps below are elided from this extract --
# presumably @standby_generic (see top of file), which derives the number
# of minutes to wait from the trailing digits of each method name
def standby_1(): pass
def standby_2(): pass
def standby_3(): pass
def standby_4(): pass
def standby_5(): pass
def standby_6(): pass
def standby_7(): pass
def standby_8(): pass
def standby_9(): pass
def standby_10(): pass
def standby_11(): pass
def standby_12(): pass
def standby_13(): pass
def standby_14(): pass
def standby_15(): pass
def standby_16(): pass
def standby_17(): pass
def standby_18(): pass
def standby_19(): pass
def standby_20(): pass