1 # Thierry Parmentelat <thierry.parmentelat@inria.fr>
2 # Copyright (C) 2010 INRIA
9 from types import StringTypes
13 from TestSite import TestSite
14 from TestNode import TestNode
15 from TestUser import TestUser
16 from TestKey import TestKey
17 from TestSlice import TestSlice
18 from TestSliver import TestSliver
19 from TestBoxQemu import TestBoxQemu
20 from TestSsh import TestSsh
21 from TestApiserver import TestApiserver
22 from TestAuthSfa import TestAuthSfa
24 # step methods must take (self) and return a boolean (options is a member of the class)
26 def standby(minutes,dry_run):
27 utils.header('Entering StandBy for %d mn'%minutes)
31 time.sleep(60*minutes)
34 def standby_generic (func):
36 minutes=int(func.__name__.split("_")[1])
37 return standby(minutes,self.options.dry_run)
40 def node_mapper (method):
41 def actual(self,*args, **kwds):
43 node_method = TestNode.__dict__[method.__name__]
44 for test_node in self.all_nodes():
45 if not node_method(test_node, *args, **kwds): overall=False
47 # restore the doc text
48 actual.__doc__=TestNode.__dict__[method.__name__].__doc__
51 def slice_mapper (method):
54 slice_method = TestSlice.__dict__[method.__name__]
55 for slice_spec in self.plc_spec['slices']:
56 site_spec = self.locate_site (slice_spec['sitename'])
57 test_site = TestSite(self,site_spec)
58 test_slice=TestSlice(self,test_site,slice_spec)
59 if not slice_method(test_slice,self.options): overall=False
61 # restore the doc text
62 actual.__doc__=TestSlice.__dict__[method.__name__].__doc__
65 def auth_sfa_mapper (method):
68 auth_method = TestAuthSfa.__dict__[method.__name__]
69 for auth_spec in self.plc_spec['sfa']['auth_sfa_specs']:
70 test_auth=TestAuthSfa(self,auth_spec)
71 if not auth_method(test_auth,self.options): overall=False
73 # restore the doc text
74 actual.__doc__=TestAuthSfa.__dict__[method.__name__].__doc__
84 'vs_delete','timestamp_vs','vs_create', SEP,
85 'plc_install', 'plc_configure', 'plc_start', SEP,
86 'keys_fetch', 'keys_store', 'keys_clear_known_hosts', 'speed_up_slices', SEP,
87 'initscripts', 'sites', 'nodes', 'slices', 'nodegroups', 'leases', SEP,
88 # slices created under plcsh interactively seem to be fine but these ones don't have the tags
89 # keep this our of the way for now
90 # 'check_vsys_defaults', SEP,
91 'nodestate_reinstall', 'qemu_local_init','bootcd', 'qemu_local_config', SEP,
92 'qemu_export', 'qemu_kill_mine', 'qemu_start', 'timestamp_qemu', SEP,
93 'sfa_install_all', 'sfa_configure', 'cross_sfa_configure', 'sfa_start', 'sfa_import', SEPSFA,
94 'sfi_configure@1', 'sfa_add_site@1','sfa_add_pi@1', SEPSFA,
95 'sfa_add_user@1', 'sfa_update_user@1', 'sfa_add_slice@1', 'sfa_renew_slice@1', SEPSFA,
96 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
97 'sfi_list@1', 'sfi_show@1', 'sfi_slices@1', 'sfa_utest@1', SEPSFA,
98 # we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
99 # but as the stress test might take a while, we sometimes missed the debug mode..
100 'ssh_node_debug@1', 'plcsh_stress_test@1', SEP,
101 'ssh_node_boot@1', 'ssh_slice', 'check_initscripts', SEP,
102 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
103 'cross_check_tcp@1', 'check_system_slice', SEP,
104 'empty_slices', 'ssh_slice_off', 'fill_slices', SEP,
105 'force_gather_logs', SEP,
108 'export', 'show_boxes', SEP,
109 'check_hooks', 'plc_stop', 'vs_start', 'vs_stop', SEP,
110 'delete_initscripts', 'delete_nodegroups','delete_all_sites', SEP,
111 'delete_sites', 'delete_nodes', 'delete_slices', 'keys_clean', SEP,
112 'delete_leases', 'list_leases', SEP,
114 'nodestate_show','nodestate_safeboot','nodestate_boot', SEP,
115 'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
116 'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
117 'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
118 'plc_db_dump' , 'plc_db_restore', SEP,
119 'check_netflow','check_drl', SEP,
120 'debug_nodemanager', SEP,
121 'standby_1_through_20',SEP,
125 def printable_steps (list):
126 single_line=" ".join(list)+" "
127 return single_line.replace(" "+SEP+" "," \\\n").replace(" "+SEPSFA+" "," \\\n")
129 def valid_step (step):
130 return step != SEP and step != SEPSFA
132 # turn off the sfa-related steps when build has skipped SFA
133 # this was originally for centos5 but is still valid
134 # for up to f12 as recent SFAs with sqlalchemy won't build before f14
136 def check_whether_build_has_sfa (rpms_url):
137 utils.header ("Checking if build provides SFA package...")
138 # warning, we're now building 'sface' so let's be a bit more picky
139 retcod=os.system ("curl --silent %s/ | grep -q sfa-"%rpms_url)
140 # full builds are expected to return with 0 here
142 utils.header("build does provide SFA")
144 # move all steps containing 'sfa' from default_steps to other_steps
145 utils.header("SFA package not found - removing steps with sfa or sfi")
146 sfa_steps= [ step for step in TestPlc.default_steps if step.find('sfa')>=0 or step.find("sfi")>=0 ]
147 TestPlc.other_steps += sfa_steps
148 for step in sfa_steps: TestPlc.default_steps.remove(step)
150 def __init__ (self,plc_spec,options):
151 self.plc_spec=plc_spec
153 self.test_ssh=TestSsh(self.plc_spec['host_box'],self.options.buildname)
154 self.vserverip=plc_spec['vserverip']
155 self.vservername=plc_spec['vservername']
156 self.url="https://%s:443/PLCAPI/"%plc_spec['vserverip']
157 self.apiserver=TestApiserver(self.url,options.dry_run)
158 (self.ssh_node_boot_timeout,self.ssh_node_boot_silent)=plc_spec['ssh_node_boot_timers']
159 (self.ssh_node_debug_timeout,self.ssh_node_debug_silent)=plc_spec['ssh_node_debug_timers']
161 def has_addresses_api (self):
162 return self.apiserver.has_method('AddIpAddress')
165 name=self.plc_spec['name']
166 return "%s.%s"%(name,self.vservername)
169 return self.plc_spec['host_box']
172 return self.test_ssh.is_local()
174 # define the API methods on this object through xmlrpc
175 # would help, but not strictly necessary
179 def actual_command_in_guest (self,command):
180 return self.test_ssh.actual_command(self.host_to_guest(command))
182 def start_guest (self):
183 return utils.system(self.test_ssh.actual_command(self.start_guest_in_host()))
185 def stop_guest (self):
186 return utils.system(self.test_ssh.actual_command(self.stop_guest_in_host()))
188 def run_in_guest (self,command):
189 return utils.system(self.actual_command_in_guest(command))
191 def run_in_host (self,command):
192 return self.test_ssh.run_in_buildname(command)
194 #command gets run in the plc's vm
195 def host_to_guest(self,command):
196 if self.options.plcs_use_lxc:
197 return "ssh -o StrictHostKeyChecking=no %s %s"%(self.vserverip,command)
199 return "vserver %s exec %s"%(self.vservername,command)
201 def vm_root_in_host(self):
202 if self.options.plcs_use_lxc:
203 return "/var/lib/lxc/%s/rootfs/"%(self.vservername)
205 return "/vservers/%s"%(self.vservername)
207 def vm_timestamp_path (self):
208 if self.options.plcs_use_lxc:
209 return "/var/lib/lxc/%s/%s.timestamp"%(self.vservername,self.vservername)
211 return "/vservers/%s.timestamp"%(self.vservername)
213 #start/stop the vserver
214 def start_guest_in_host(self):
215 if self.options.plcs_use_lxc:
216 return "lxc-start --daemon --name=%s"%(self.vservername)
218 return "vserver %s start"%(self.vservername)
220 def stop_guest_in_host(self):
221 if self.options.plcs_use_lxc:
222 return "lxc-stop --name=%s"%(self.vservername)
224 return "vserver %s stop"%(self.vservername)
227 def run_in_guest_piped (self,local,remote):
228 return utils.system(local+" | "+self.test_ssh.actual_command(self.host_to_guest(remote),keep_stdin=True))
230 def yum_check_installed (self, rpms):
231 if isinstance (rpms, list):
233 return self.run_in_guest("rpm -q %s"%rpms)==0
235 # does a yum install in the vs, ignore yum retcod, check with rpm
236 def yum_install (self, rpms):
237 if isinstance (rpms, list):
239 self.run_in_guest("yum -y install %s"%rpms)
240 # yum-complete-transaction comes with yum-utils, that is in vtest.pkgs
241 self.run_in_guest("yum-complete-transaction -y")
242 return self.yum_check_installed (rpms)
244 def auth_root (self):
245 return {'Username':self.plc_spec['PLC_ROOT_USER'],
246 'AuthMethod':'password',
247 'AuthString':self.plc_spec['PLC_ROOT_PASSWORD'],
248 'Role' : self.plc_spec['role']
250 def locate_site (self,sitename):
251 for site in self.plc_spec['sites']:
252 if site['site_fields']['name'] == sitename:
254 if site['site_fields']['login_base'] == sitename:
256 raise Exception,"Cannot locate site %s"%sitename
258 def locate_node (self,nodename):
259 for site in self.plc_spec['sites']:
260 for node in site['nodes']:
261 if node['name'] == nodename:
263 raise Exception,"Cannot locate node %s"%nodename
265 def locate_hostname (self,hostname):
266 for site in self.plc_spec['sites']:
267 for node in site['nodes']:
268 if node['node_fields']['hostname'] == hostname:
270 raise Exception,"Cannot locate hostname %s"%hostname
272 def locate_key (self,key_name):
273 for key in self.plc_spec['keys']:
274 if key['key_name'] == key_name:
276 raise Exception,"Cannot locate key %s"%key_name
278 def locate_private_key_from_key_names (self, key_names):
279 # locate the first avail. key
281 for key_name in key_names:
282 key_spec=self.locate_key(key_name)
283 test_key=TestKey(self,key_spec)
284 publickey=test_key.publicpath()
285 privatekey=test_key.privatepath()
286 if os.path.isfile(publickey) and os.path.isfile(privatekey):
288 if found: return privatekey
291 def locate_slice (self, slicename):
292 for slice in self.plc_spec['slices']:
293 if slice['slice_fields']['name'] == slicename:
295 raise Exception,"Cannot locate slice %s"%slicename
297 def all_sliver_objs (self):
299 for slice_spec in self.plc_spec['slices']:
300 slicename = slice_spec['slice_fields']['name']
301 for nodename in slice_spec['nodenames']:
302 result.append(self.locate_sliver_obj (nodename,slicename))
305 def locate_sliver_obj (self,nodename,slicename):
306 (site,node) = self.locate_node(nodename)
307 slice = self.locate_slice (slicename)
309 test_site = TestSite (self, site)
310 test_node = TestNode (self, test_site,node)
311 # xxx the slice site is assumed to be the node site - mhh - probably harmless
312 test_slice = TestSlice (self, test_site, slice)
313 return TestSliver (self, test_node, test_slice)
315 def locate_first_node(self):
316 nodename=self.plc_spec['slices'][0]['nodenames'][0]
317 (site,node) = self.locate_node(nodename)
318 test_site = TestSite (self, site)
319 test_node = TestNode (self, test_site,node)
322 def locate_first_sliver (self):
323 slice_spec=self.plc_spec['slices'][0]
324 slicename=slice_spec['slice_fields']['name']
325 nodename=slice_spec['nodenames'][0]
326 return self.locate_sliver_obj(nodename,slicename)
328 # all different hostboxes used in this plc
329 def gather_hostBoxes(self):
330 # maps on sites and nodes, return [ (host_box,test_node) ]
332 for site_spec in self.plc_spec['sites']:
333 test_site = TestSite (self,site_spec)
334 for node_spec in site_spec['nodes']:
335 test_node = TestNode (self, test_site, node_spec)
336 if not test_node.is_real():
337 tuples.append( (test_node.host_box(),test_node) )
338 # transform into a dict { 'host_box' -> [ test_node .. ] }
340 for (box,node) in tuples:
341 if not result.has_key(box):
344 result[box].append(node)
347 # a step for checking this stuff
348 def show_boxes (self):
349 'print summary of nodes location'
350 for (box,nodes) in self.gather_hostBoxes().iteritems():
351 print box,":"," + ".join( [ node.name() for node in nodes ] )
354 # make this a valid step
355 def qemu_kill_all(self):
356 'kill all qemu instances on the qemu boxes involved by this setup'
357 # this is the brute force version, kill all qemus on that host box
358 for (box,nodes) in self.gather_hostBoxes().iteritems():
359 # pass the first nodename, as we don't push template-qemu on testboxes
360 nodedir=nodes[0].nodedir()
361 TestBoxQemu(box,self.options.buildname).qemu_kill_all(nodedir)
364 # make this a valid step
365 def qemu_list_all(self):
366 'list all qemu instances on the qemu boxes involved by this setup'
367 for (box,nodes) in self.gather_hostBoxes().iteritems():
368 # this is the brute force version, kill all qemus on that host box
369 TestBoxQemu(box,self.options.buildname).qemu_list_all()
372 # kill only the right qemus
373 def qemu_list_mine(self):
374 'list qemu instances for our nodes'
375 for (box,nodes) in self.gather_hostBoxes().iteritems():
376 # the fine-grain version
381 # kill only the right qemus
382 def qemu_kill_mine(self):
383 'kill the qemu instances for our nodes'
384 for (box,nodes) in self.gather_hostBoxes().iteritems():
385 # the fine-grain version
390 #################### display config
392 "show test configuration after localization"
397 # uggly hack to make sure 'run export' only reports about the 1st plc
398 # to avoid confusion - also we use 'inri_slice1' in various aliases..
401 "print cut'n paste-able stuff to export env variables to your shell"
402 # guess local domain from hostname
403 if TestPlc.exported_id>1:
404 print "export GUESTHOSTNAME%d=%s"%(TestPlc.exported_id,self.plc_spec['vservername'])
406 TestPlc.exported_id+=1
407 domain=socket.gethostname().split('.',1)[1]
408 fqdn="%s.%s"%(self.plc_spec['host_box'],domain)
409 print "export BUILD=%s"%self.options.buildname
410 if self.options.plcs_use_lxc:
411 print "export PLCHOSTLXC=%s"%fqdn
413 print "export PLCHOSTVS=%s"%fqdn
414 print "export GUESTNAME=%s"%self.plc_spec['vservername']
415 vplcname=self.plc_spec['vservername'].split('-')[-1]
416 print "export GUESTHOSTNAME=%s.%s"%(vplcname,domain)
417 # find hostname of first node
418 (hostname,qemubox) = self.all_node_infos()[0]
419 print "export KVMHOST=%s.%s"%(qemubox,domain)
420 print "export NODE=%s"%(hostname)
424 always_display_keys=['PLC_WWW_HOST','nodes','sites',]
425 def show_pass (self,passno):
426 for (key,val) in self.plc_spec.iteritems():
427 if not self.options.verbose and key not in TestPlc.always_display_keys: continue
431 self.display_site_spec(site)
432 for node in site['nodes']:
433 self.display_node_spec(node)
434 elif key=='initscripts':
435 for initscript in val:
436 self.display_initscript_spec (initscript)
439 self.display_slice_spec (slice)
442 self.display_key_spec (key)
444 if key not in ['sites','initscripts','slices','keys', 'sfa']:
445 print '+ ',key,':',val
447 def display_site_spec (self,site):
448 print '+ ======== site',site['site_fields']['name']
449 for (k,v) in site.iteritems():
450 if not self.options.verbose and k not in TestPlc.always_display_keys: continue
453 print '+ ','nodes : ',
455 print node['node_fields']['hostname'],'',
461 print user['name'],'',
463 elif k == 'site_fields':
464 print '+ login_base',':',v['login_base']
465 elif k == 'address_fields':
471 def display_initscript_spec (self,initscript):
472 print '+ ======== initscript',initscript['initscript_fields']['name']
474 def display_key_spec (self,key):
475 print '+ ======== key',key['key_name']
477 def display_slice_spec (self,slice):
478 print '+ ======== slice',slice['slice_fields']['name']
479 for (k,v) in slice.iteritems():
492 elif k=='slice_fields':
493 print '+ fields',':',
494 print 'max_nodes=',v['max_nodes'],
499 def display_node_spec (self,node):
500 print "+ node=%s host_box=%s"%(node['name'],node['host_box']),
501 print "hostname=",node['node_fields']['hostname'],
502 print "ip=",node['interface_fields']['ip']
503 if self.options.verbose:
504 utils.pprint("node details",node,depth=3)
506 # another entry point for just showing the boxes involved
507 def display_mapping (self):
508 TestPlc.display_mapping_plc(self.plc_spec)
512 def display_mapping_plc (plc_spec):
513 print '+ MyPLC',plc_spec['name']
514 # WARNING this would not be right for lxc-based PLC's - should be harmless though
515 print '+\tvserver address = root@%s:/vservers/%s'%(plc_spec['host_box'],plc_spec['vservername'])
516 print '+\tIP = %s/%s'%(plc_spec['PLC_API_HOST'],plc_spec['vserverip'])
517 for site_spec in plc_spec['sites']:
518 for node_spec in site_spec['nodes']:
519 TestPlc.display_mapping_node(node_spec)
522 def display_mapping_node (node_spec):
523 print '+ NODE %s'%(node_spec['name'])
524 print '+\tqemu box %s'%node_spec['host_box']
525 print '+\thostname=%s'%node_spec['node_fields']['hostname']
527 # write a timestamp in /vservers/<>.timestamp
528 # cannot be inside the vserver, that causes vserver .. build to cough
529 def timestamp_vs (self):
530 "Create a timestamp to remember creation date for this plc"
532 # TODO-lxc check this one
533 # a first approx. is to store the timestamp close to the VM root like vs does
534 stamp_path=self.vm_timestamp_path ()
535 stamp_dir = os.path.dirname (stamp_path)
536 utils.system(self.test_ssh.actual_command("mkdir -p %s"%stamp_dir))
537 return utils.system(self.test_ssh.actual_command("echo %d > %s"%(now,stamp_path)))==0
539 # this is called inconditionnally at the beginning of the test sequence
540 # just in case this is a rerun, so if the vm is not running it's fine
542 "vserver delete the test myplc"
543 stamp_path=self.vm_timestamp_path()
544 self.run_in_host("rm -f %s"%stamp_path)
545 if self.options.plcs_use_lxc:
546 self.run_in_host("lxc-stop --name %s"%self.vservername)
547 self.run_in_host("lxc-destroy --name %s"%self.vservername)
550 self.run_in_host("vserver --silent %s delete"%self.vservername)
554 # historically the build was being fetched by the tests
555 # now the build pushes itself as a subdir of the tests workdir
556 # so that the tests do not have to worry about extracting the build (svn, git, or whatever)
557 def vs_create (self):
558 "vserver creation (no install done)"
559 # push the local build/ dir to the testplc box
561 # a full path for the local calls
562 build_dir=os.path.dirname(sys.argv[0])
563 # sometimes this is empty - set to "." in such a case
564 if not build_dir: build_dir="."
565 build_dir += "/build"
567 # use a standard name - will be relative to remote buildname
569 # remove for safety; do *not* mkdir first, otherwise we end up with build/build/
570 self.test_ssh.rmdir(build_dir)
571 self.test_ssh.copy(build_dir,recursive=True)
572 # the repo url is taken from arch-rpms-url
573 # with the last step (i386) removed
574 repo_url = self.options.arch_rpms_url
575 for level in [ 'arch' ]:
576 repo_url = os.path.dirname(repo_url)
577 # pass the vbuild-nightly options to vtest-init-vserver
579 test_env_options += " -p %s"%self.options.personality
580 test_env_options += " -d %s"%self.options.pldistro
581 test_env_options += " -f %s"%self.options.fcdistro
582 if self.options.plcs_use_lxc:
583 script="vtest-init-lxc.sh"
585 script="vtest-init-vserver.sh"
586 vserver_name = self.vservername
587 vserver_options="--netdev eth0 --interface %s"%self.vserverip
589 vserver_hostname=socket.gethostbyaddr(self.vserverip)[0]
590 vserver_options += " --hostname %s"%vserver_hostname
592 print "Cannot reverse lookup %s"%self.vserverip
593 print "This is considered fatal, as this might pollute the test results"
595 create_vserver="%(build_dir)s/%(script)s %(test_env_options)s %(vserver_name)s %(repo_url)s -- %(vserver_options)s"%locals()
596 return self.run_in_host(create_vserver) == 0
599 def plc_install(self):
600 "yum install myplc, noderepo, and the plain bootstrapfs"
602 # workaround for getting pgsql8.2 on centos5
603 if self.options.fcdistro == "centos5":
604 self.run_in_guest("rpm -Uvh http://download.fedora.redhat.com/pub/epel/5/i386/epel-release-5-3.noarch.rpm")
607 if self.options.personality == "linux32":
609 elif self.options.personality == "linux64":
612 raise Exception, "Unsupported personality %r"%self.options.personality
613 nodefamily="%s-%s-%s"%(self.options.pldistro,self.options.fcdistro,arch)
616 pkgs_list.append ("slicerepo-%s"%nodefamily)
617 pkgs_list.append ("myplc")
618 pkgs_list.append ("noderepo-%s"%nodefamily)
619 pkgs_list.append ("nodeimage-%s-plain"%nodefamily)
620 pkgs_string=" ".join(pkgs_list)
621 return self.yum_install (pkgs_list)
624 def plc_configure(self):
626 tmpname='%s.plc-config-tty'%(self.name())
627 fileconf=open(tmpname,'w')
628 for var in [ 'PLC_NAME',
633 'PLC_MAIL_SUPPORT_ADDRESS',
636 # Above line was added for integrating SFA Testing
642 'PLC_RESERVATION_GRANULARITY',
644 'PLC_OMF_XMPP_SERVER',
647 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec[var]))
648 fileconf.write('w\n')
649 fileconf.write('q\n')
651 utils.system('cat %s'%tmpname)
652 self.run_in_guest_piped('cat %s'%tmpname,'plc-config-tty')
653 utils.system('rm %s'%tmpname)
658 self.run_in_guest('service plc start')
663 self.run_in_guest('service plc stop')
667 "start the PLC vserver"
672 "stop the PLC vserver"
676 # stores the keys from the config for further use
677 def keys_store(self):
678 "stores test users ssh keys in keys/"
679 for key_spec in self.plc_spec['keys']:
680 TestKey(self,key_spec).store_key()
683 def keys_clean(self):
684 "removes keys cached in keys/"
685 utils.system("rm -rf ./keys")
688 # fetches the ssh keys in the plc's /etc/planetlab and stores them in keys/
689 # for later direct access to the nodes
690 def keys_fetch(self):
691 "gets ssh keys in /etc/planetlab/ and stores them locally in keys/"
693 if not os.path.isdir(dir):
695 vservername=self.vservername
696 vm_root=self.vm_root_in_host()
698 prefix = 'debug_ssh_key'
699 for ext in [ 'pub', 'rsa' ] :
700 src="%(vm_root)s/etc/planetlab/%(prefix)s.%(ext)s"%locals()
701 dst="keys/%(vservername)s-debug.%(ext)s"%locals()
702 if self.test_ssh.fetch(src,dst) != 0: overall=False
706 "create sites with PLCAPI"
707 return self.do_sites()
709 def delete_sites (self):
710 "delete sites with PLCAPI"
711 return self.do_sites(action="delete")
713 def do_sites (self,action="add"):
714 for site_spec in self.plc_spec['sites']:
715 test_site = TestSite (self,site_spec)
716 if (action != "add"):
717 utils.header("Deleting site %s in %s"%(test_site.name(),self.name()))
718 test_site.delete_site()
719 # deleted with the site
720 #test_site.delete_users()
723 utils.header("Creating site %s & users in %s"%(test_site.name(),self.name()))
724 test_site.create_site()
725 test_site.create_users()
728 def delete_all_sites (self):
729 "Delete all sites in PLC, and related objects"
730 print 'auth_root',self.auth_root()
731 sites = self.apiserver.GetSites(self.auth_root(), {}, ['site_id','login_base'])
733 # keep automatic site - otherwise we shoot in our own foot, root_auth is not valid anymore
734 if site['login_base']==self.plc_spec['PLC_SLICE_PREFIX']: continue
735 site_id=site['site_id']
736 print 'Deleting site_id',site_id
737 self.apiserver.DeleteSite(self.auth_root(),site_id)
741 "create nodes with PLCAPI"
742 return self.do_nodes()
743 def delete_nodes (self):
744 "delete nodes with PLCAPI"
745 return self.do_nodes(action="delete")
747 def do_nodes (self,action="add"):
748 for site_spec in self.plc_spec['sites']:
749 test_site = TestSite (self,site_spec)
751 utils.header("Deleting nodes in site %s"%test_site.name())
752 for node_spec in site_spec['nodes']:
753 test_node=TestNode(self,test_site,node_spec)
754 utils.header("Deleting %s"%test_node.name())
755 test_node.delete_node()
757 utils.header("Creating nodes for site %s in %s"%(test_site.name(),self.name()))
758 for node_spec in site_spec['nodes']:
759 utils.pprint('Creating node %s'%node_spec,node_spec)
760 test_node = TestNode (self,test_site,node_spec)
761 test_node.create_node ()
764 def nodegroups (self):
765 "create nodegroups with PLCAPI"
766 return self.do_nodegroups("add")
767 def delete_nodegroups (self):
768 "delete nodegroups with PLCAPI"
769 return self.do_nodegroups("delete")
773 def translate_timestamp (start,grain,timestamp):
774 if timestamp < TestPlc.YEAR: return start+timestamp*grain
775 else: return timestamp
778 def timestamp_printable (timestamp):
779 return time.strftime('%m-%d %H:%M:%S UTC',time.gmtime(timestamp))
782 "create leases (on reservable nodes only, use e.g. run -c default -c resa)"
784 grain=self.apiserver.GetLeaseGranularity(self.auth_root())
785 print 'API answered grain=',grain
786 start=(now/grain)*grain
788 # find out all nodes that are reservable
789 nodes=self.all_reservable_nodenames()
791 utils.header ("No reservable node found - proceeding without leases")
794 # attach them to the leases as specified in plc_specs
795 # this is where the 'leases' field gets interpreted as relative of absolute
796 for lease_spec in self.plc_spec['leases']:
797 # skip the ones that come with a null slice id
798 if not lease_spec['slice']: continue
799 lease_spec['t_from']=TestPlc.translate_timestamp(start,grain,lease_spec['t_from'])
800 lease_spec['t_until']=TestPlc.translate_timestamp(start,grain,lease_spec['t_until'])
801 lease_addition=self.apiserver.AddLeases(self.auth_root(),nodes,
802 lease_spec['slice'],lease_spec['t_from'],lease_spec['t_until'])
803 if lease_addition['errors']:
804 utils.header("Cannot create leases, %s"%lease_addition['errors'])
807 utils.header('Leases on nodes %r for %s from %d (%s) until %d (%s)'%\
808 (nodes,lease_spec['slice'],
809 lease_spec['t_from'],TestPlc.timestamp_printable(lease_spec['t_from']),
810 lease_spec['t_until'],TestPlc.timestamp_printable(lease_spec['t_until'])))
814 def delete_leases (self):
815 "remove all leases in the myplc side"
816 lease_ids= [ l['lease_id'] for l in self.apiserver.GetLeases(self.auth_root())]
817 utils.header("Cleaning leases %r"%lease_ids)
818 self.apiserver.DeleteLeases(self.auth_root(),lease_ids)
821 def list_leases (self):
822 "list all leases known to the myplc"
823 leases = self.apiserver.GetLeases(self.auth_root())
826 current=l['t_until']>=now
827 if self.options.verbose or current:
828 utils.header("%s %s from %s until %s"%(l['hostname'],l['name'],
829 TestPlc.timestamp_printable(l['t_from']),
830 TestPlc.timestamp_printable(l['t_until'])))
833 # create nodegroups if needed, and populate
834 def do_nodegroups (self, action="add"):
835 # 1st pass to scan contents
837 for site_spec in self.plc_spec['sites']:
838 test_site = TestSite (self,site_spec)
839 for node_spec in site_spec['nodes']:
840 test_node=TestNode (self,test_site,node_spec)
841 if node_spec.has_key('nodegroups'):
842 nodegroupnames=node_spec['nodegroups']
843 if isinstance(nodegroupnames,StringTypes):
844 nodegroupnames = [ nodegroupnames ]
845 for nodegroupname in nodegroupnames:
846 if not groups_dict.has_key(nodegroupname):
847 groups_dict[nodegroupname]=[]
848 groups_dict[nodegroupname].append(test_node.name())
849 auth=self.auth_root()
851 for (nodegroupname,group_nodes) in groups_dict.iteritems():
853 print 'nodegroups:','dealing with nodegroup',nodegroupname,'on nodes',group_nodes
854 # first, check if the nodetagtype is here
855 tag_types = self.apiserver.GetTagTypes(auth,{'tagname':nodegroupname})
857 tag_type_id = tag_types[0]['tag_type_id']
859 tag_type_id = self.apiserver.AddTagType(auth,
860 {'tagname':nodegroupname,
861 'description': 'for nodegroup %s'%nodegroupname,
863 print 'located tag (type)',nodegroupname,'as',tag_type_id
865 nodegroups = self.apiserver.GetNodeGroups (auth, {'groupname':nodegroupname})
867 self.apiserver.AddNodeGroup(auth, nodegroupname, tag_type_id, 'yes')
868 print 'created nodegroup',nodegroupname,'from tagname',nodegroupname,'and value','yes'
869 # set node tag on all nodes, value='yes'
870 for nodename in group_nodes:
872 self.apiserver.AddNodeTag(auth, nodename, nodegroupname, "yes")
874 traceback.print_exc()
875 print 'node',nodename,'seems to already have tag',nodegroupname
878 expect_yes = self.apiserver.GetNodeTags(auth,
879 {'hostname':nodename,
880 'tagname':nodegroupname},
881 ['value'])[0]['value']
882 if expect_yes != "yes":
883 print 'Mismatch node tag on node',nodename,'got',expect_yes
886 if not self.options.dry_run:
887 print 'Cannot find tag',nodegroupname,'on node',nodename
891 print 'cleaning nodegroup',nodegroupname
892 self.apiserver.DeleteNodeGroup(auth,nodegroupname)
894 traceback.print_exc()
898 # a list of TestNode objs
899 def all_nodes (self):
901 for site_spec in self.plc_spec['sites']:
902 test_site = TestSite (self,site_spec)
903 for node_spec in site_spec['nodes']:
904 nodes.append(TestNode (self,test_site,node_spec))
907 # return a list of tuples (nodename,qemuname)
908 def all_node_infos (self) :
910 for site_spec in self.plc_spec['sites']:
911 node_infos += [ (node_spec['node_fields']['hostname'],node_spec['host_box']) \
912 for node_spec in site_spec['nodes'] ]
915 def all_nodenames (self): return [ x[0] for x in self.all_node_infos() ]
916 def all_reservable_nodenames (self):
918 for site_spec in self.plc_spec['sites']:
919 for node_spec in site_spec['nodes']:
920 node_fields=node_spec['node_fields']
921 if 'node_type' in node_fields and node_fields['node_type']=='reservable':
922 res.append(node_fields['hostname'])
925 # silent_minutes : during the first <silent_minutes> minutes nothing gets printed
926 def nodes_check_boot_state (self, target_boot_state, timeout_minutes, silent_minutes,period=15):
927 if self.options.dry_run:
931 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
932 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
933 # the nodes that haven't checked yet - start with a full list and shrink over time
934 tocheck = self.all_hostnames()
935 utils.header("checking nodes %r"%tocheck)
936 # create a dict hostname -> status
937 status = dict ( [ (hostname,'undef') for hostname in tocheck ] )
940 tocheck_status=self.apiserver.GetNodes(self.auth_root(), tocheck, ['hostname','boot_state' ] )
942 for array in tocheck_status:
943 hostname=array['hostname']
944 boot_state=array['boot_state']
945 if boot_state == target_boot_state:
946 utils.header ("%s has reached the %s state"%(hostname,target_boot_state))
948 # if it's a real node, never mind
949 (site_spec,node_spec)=self.locate_hostname(hostname)
950 if TestNode.is_real_model(node_spec['node_fields']['model']):
951 utils.header("WARNING - Real node %s in %s - ignored"%(hostname,boot_state))
953 boot_state = target_boot_state
954 elif datetime.datetime.now() > graceout:
955 utils.header ("%s still in '%s' state"%(hostname,boot_state))
956 graceout=datetime.datetime.now()+datetime.timedelta(1)
957 status[hostname] = boot_state
959 tocheck = [ hostname for (hostname,boot_state) in status.iteritems() if boot_state != target_boot_state ]
962 if datetime.datetime.now() > timeout:
963 for hostname in tocheck:
964 utils.header("FAILURE due to %s in '%s' state"%(hostname,status[hostname]))
966 # otherwise, sleep for a while
968 # only useful in empty plcs
971 def nodes_booted(self):
972 return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
974 def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period=15):
976 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
977 graceout = datetime.datetime.now()+datetime.timedelta(minutes=silent_minutes)
978 vservername=self.vservername
981 local_key = "keys/%(vservername)s-debug.rsa"%locals()
984 local_key = "keys/key_admin.rsa"
985 node_infos = self.all_node_infos()
986 utils.header("checking ssh access (expected in %s mode) to nodes:"%message)
987 for (nodename,qemuname) in node_infos:
988 utils.header("hostname=%s -- qemubox=%s"%(nodename,qemuname))
989 utils.header("max timeout is %d minutes, silent for %d minutes (period is %s)"%\
990 (timeout_minutes,silent_minutes,period))
992 for node_info in node_infos:
993 (hostname,qemuname) = node_info
994 # try to run 'hostname' in the node
995 command = TestSsh (hostname,key=local_key).actual_command("hostname;uname -a")
996 # don't spam logs - show the command only after the grace period
997 success = utils.system ( command, silent=datetime.datetime.now() < graceout)
999 utils.header('Successfully entered root@%s (%s)'%(hostname,message))
1000 # refresh node_infos
1001 node_infos.remove(node_info)
1003 # we will have tried real nodes once, in case they're up - but if not, just skip
1004 (site_spec,node_spec)=self.locate_hostname(hostname)
1005 if TestNode.is_real_model(node_spec['node_fields']['model']):
1006 utils.header ("WARNING : check ssh access into real node %s - skipped"%hostname)
1007 node_infos.remove(node_info)
1010 if datetime.datetime.now() > timeout:
1011 for (hostname,qemuname) in node_infos:
1012 utils.header("FAILURE to ssh into %s (on %s)"%(hostname,qemuname))
1014 # otherwise, sleep for a while
1016 # only useful in empty plcs
1019 def ssh_node_debug(self):
1020 "Tries to ssh into nodes in debug mode with the debug ssh key"
1021 return self.check_nodes_ssh(debug=True,
1022 timeout_minutes=self.ssh_node_debug_timeout,
1023 silent_minutes=self.ssh_node_debug_silent)
1025 def ssh_node_boot(self):
1026 "Tries to ssh into nodes in production mode with the root ssh key"
1027 return self.check_nodes_ssh(debug=False,
1028 timeout_minutes=self.ssh_node_boot_timeout,
1029 silent_minutes=self.ssh_node_boot_silent)
1032 def qemu_local_init (self): pass
1034 def bootcd (self): pass
1036 def qemu_local_config (self): pass
1038 def nodestate_reinstall (self): pass
1040 def nodestate_safeboot (self): pass
1042 def nodestate_boot (self): pass
1044 def nodestate_show (self): pass
1046 def qemu_export (self): pass
1048 ### check hooks : invoke scripts from hooks/{node,slice}
1049 def check_hooks_node (self):
1050 return self.locate_first_node().check_hooks()
1051 def check_hooks_sliver (self) :
1052 return self.locate_first_sliver().check_hooks()
1054 def check_hooks (self):
1055 "runs unit tests in the node and slice contexts - see hooks/{node,slice}"
1056 return self.check_hooks_node() and self.check_hooks_sliver()
1059 def do_check_initscripts(self):
1061 for slice_spec in self.plc_spec['slices']:
1062 if not slice_spec.has_key('initscriptstamp'):
1064 stamp=slice_spec['initscriptstamp']
1065 for nodename in slice_spec['nodenames']:
1066 (site,node) = self.locate_node (nodename)
1067 # xxx - passing the wrong site - probably harmless
1068 test_site = TestSite (self,site)
1069 test_slice = TestSlice (self,test_site,slice_spec)
1070 test_node = TestNode (self,test_site,node)
1071 test_sliver = TestSliver (self, test_node, test_slice)
1072 if not test_sliver.check_initscript_stamp(stamp):
1076 def check_initscripts(self):
1077 "check that the initscripts have triggered"
1078 return self.do_check_initscripts()
1080 def initscripts (self):
1081 "create initscripts with PLCAPI"
1082 for initscript in self.plc_spec['initscripts']:
1083 utils.pprint('Adding Initscript in plc %s'%self.plc_spec['name'],initscript)
1084 self.apiserver.AddInitScript(self.auth_root(),initscript['initscript_fields'])
1087 def delete_initscripts (self):
1088 "delete initscripts with PLCAPI"
1089 for initscript in self.plc_spec['initscripts']:
1090 initscript_name = initscript['initscript_fields']['name']
1091 print('Attempting to delete %s in plc %s'%(initscript_name,self.plc_spec['name']))
1093 self.apiserver.DeleteInitScript(self.auth_root(),initscript_name)
1094 print initscript_name,'deleted'
1096 print 'deletion went wrong - probably did not exist'
1101 "create slices with PLCAPI"
1102 return self.do_slices(action="add")
1104 def delete_slices (self):
1105 "delete slices with PLCAPI"
1106 return self.do_slices(action="delete")
1108 def fill_slices (self):
1109 "add nodes in slices with PLCAPI"
1110 return self.do_slices(action="fill")
1112 def empty_slices (self):
1113 "remove nodes from slices with PLCAPI"
1114 return self.do_slices(action="empty")
1116 def do_slices (self, action="add"):
1117 for slice in self.plc_spec['slices']:
1118 site_spec = self.locate_site (slice['sitename'])
1119 test_site = TestSite(self,site_spec)
1120 test_slice=TestSlice(self,test_site,slice)
1121 if action == "delete":
1122 test_slice.delete_slice()
1123 elif action=="fill":
1124 test_slice.add_nodes()
1125 elif action=="empty":
1126 test_slice.delete_nodes()
1128 test_slice.create_slice()
1132 def ssh_slice(self): pass
1134 def ssh_slice_off (self): pass
1137 def check_vsys_defaults(self): pass
1140 def keys_clear_known_hosts (self): pass
1142 def speed_up_slices (self):
1143 "tweak nodemanager settings on all nodes using a conf file"
1144 # create the template on the server-side
1145 template="%s.nodemanager"%self.name()
1146 template_file = open (template,"w")
1147 template_file.write('OPTIONS="-p 30 -r 11 -d"\n')
1148 template_file.close()
1149 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1150 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1151 self.test_ssh.copy_abs(template,remote)
1153 self.apiserver.AddConfFile (self.auth_root(),
1154 {'dest':'/etc/sysconfig/nodemanager',
1155 'source':'PlanetLabConf/nodemanager',
1156 'postinstall_cmd':'service nm restart',})
1159 def debug_nodemanager (self):
1160 "sets verbose mode for nodemanager, and speeds up cycle even more (needs speed_up_slices first)"
1161 template="%s.nodemanager"%self.name()
1162 template_file = open (template,"w")
1163 template_file.write('OPTIONS="-p 10 -r 6 -v -d"\n')
1164 template_file.close()
1165 in_vm="/var/www/html/PlanetLabConf/nodemanager"
1166 remote="%s/%s"%(self.vm_root_in_host(),in_vm)
1167 self.test_ssh.copy_abs(template,remote)
1171 def qemu_start (self) : pass
1174 def timestamp_qemu (self) : pass
1176 # when a spec refers to a node possibly on another plc
1177 def locate_sliver_obj_cross (self, nodename, slicename, other_plcs):
1178 for plc in [ self ] + other_plcs:
1180 return plc.locate_sliver_obj (nodename, slicename)
1183 raise Exception, "Cannot locate sliver %s@%s among all PLCs"%(nodename,slicename)
1185 # implement this one as a cross step so that we can take advantage of different nodes
1186 # in multi-plcs mode
1187 def cross_check_tcp (self, other_plcs):
1188 "check TCP connectivity between 2 slices (or in loopback if only one is defined)"
1189 if 'tcp_specs' not in self.plc_spec or not self.plc_spec['tcp_specs']:
1190 utils.header ("check_tcp: no/empty config found")
1192 specs = self.plc_spec['tcp_specs']
1197 s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
1198 if not s_test_sliver.run_tcp_server(port,timeout=20):
1202 # idem for the client side
1203 c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
1204 # use nodename from locatesd sliver, unless 'client_connect' is set
1205 if 'client_connect' in spec:
1206 destination = spec['client_connect']
1208 destination=s_test_sliver.test_node.name()
1209 if not c_test_sliver.run_tcp_client(destination,port):
1213 # painfully enough, we need to allow for some time as netflow might show up last
1214 def check_system_slice (self):
1215 "all nodes: check that a system slice is alive"
1216 # netflow currently not working in the lxc distro
1217 # drl not built at all in the wtx distro
1218 # if we find either of them we're happy
1219 return self.check_netflow() or self.check_drl()
1222 def check_netflow (self): return self._check_system_slice ('netflow')
1223 def check_drl (self): return self._check_system_slice ('drl')
1225 # we have the slices up already here, so it should not take too long
1226 def _check_system_slice (self, slicename, timeout_minutes=5, period=15):
1227 timeout = datetime.datetime.now()+datetime.timedelta(minutes=timeout_minutes)
1228 test_nodes=self.all_nodes()
1230 for test_node in test_nodes:
1231 if test_node._check_system_slice (slicename,dry_run=self.options.dry_run):
1233 test_nodes.remove(test_node)
1238 if datetime.datetime.now () > timeout:
1239 for test_node in test_nodes:
1240 utils.header ("can't find system slice %s in %s"%(slicename,test_node.name()))
1245 def plcsh_stress_test (self):
1246 "runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
1247 # install the stress-test in the plc image
1248 location = "/usr/share/plc_api/plcsh_stress_test.py"
1249 remote="%s/%s"%(self.vm_root_in_host(),location)
1250 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1252 command += " -- --check"
1253 if self.options.size == 1:
1254 command += " --tiny"
1255 return ( self.run_in_guest(command) == 0)
1257 # populate runs the same utility without slightly different options
1258 # in particular runs with --preserve (dont cleanup) and without --check
1259 # also it gets run twice, once with the --foreign option for creating fake foreign entries
1261 def sfa_install_all (self):
1262 "yum install sfa sfa-plc sfa-sfatables sfa-client"
1263 return self.yum_install ("sfa sfa-plc sfa-sfatables sfa-client")
1265 def sfa_install_core(self):
1267 return self.yum_install ("sfa")
1269 def sfa_install_plc(self):
1270 "yum install sfa-plc"
1271 return self.yum_install("sfa-plc")
1273 def sfa_install_sfatables(self):
1274 "yum install sfa-sfatables"
1275 return self.yum_install ("sfa-sfatables")
1277 # for some very odd reason, this sometimes fails with the following symptom
1278 # # yum install sfa-client
1279 # Setting up Install Process
1281 # Downloading Packages:
1282 # Running rpm_check_debug
1283 # Running Transaction Test
1284 # Transaction Test Succeeded
1285 # Running Transaction
1286 # Transaction couldn't start:
1287 # installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem
1288 # [('installing package sfa-client-2.1-7.onelab.2012.05.23.i686 needs 68KB on the / filesystem', (9, '/', 69632L))]
1289 # even though in the same context I have
1290 # [2012.05.23--f14-32-sfastd1-1-vplc07] / # df -h
1291 # Filesystem Size Used Avail Use% Mounted on
1292 # /dev/hdv1 806G 264G 501G 35% /
1293 # none 16M 36K 16M 1% /tmp
1295 # so as a workaround, we first try yum install, and then invoke rpm on the cached rpm...
1296 def sfa_install_client(self):
1297 "yum install sfa-client"
1298 first_try=self.yum_install("sfa-client")
1299 if first_try: return True
1300 utils.header ("********** Regular yum failed - special workaround in place, 2nd chance")
1301 (code,cached_rpm_path)=utils.output_of(self.actual_command_in_guest('find /var/cache/yum -name sfa-client\*.rpm'))
1302 utils.header("rpm_path=<<%s>>"%rpm_path)
1304 self.run_in_guest("rpm -i %s"%cached_rpm_path)
1305 return self.yum_check_installed ("sfa-client")
1307 def sfa_dbclean(self):
1308 "thoroughly wipes off the SFA database"
1309 return self.run_in_guest("sfaadmin reg nuke")==0 or \
1310 self.run_in_guest("sfa-nuke.py")==0 or \
1311 self.run_in_guest("sfa-nuke-plc.py")==0
1313 def sfa_fsclean(self):
1314 "cleanup /etc/sfa/trusted_roots and /var/lib/sfa"
1315 self.run_in_guest("rm -rf /etc/sfa/trusted_roots /var/lib/sfa/authorities")
1318 def sfa_plcclean(self):
1319 "cleans the PLC entries that were created as a side effect of running the script"
1321 sfa_spec=self.plc_spec['sfa']
1323 for auth_sfa_spec in sfa_spec['auth_sfa_specs']:
1324 login_base=auth_sfa_spec['login_base']
1325 try: self.apiserver.DeleteSite (self.auth_root(),login_base)
1326 except: print "Site %s already absent from PLC db"%login_base
1328 for spec_name in ['pi_spec','user_spec']:
1329 user_spec=auth_sfa_spec[spec_name]
1330 username=user_spec['email']
1331 try: self.apiserver.DeletePerson(self.auth_root(),username)
1333 # this in fact is expected as sites delete their members
1334 #print "User %s already absent from PLC db"%username
1337 print "REMEMBER TO RUN sfa_import AGAIN"
1340 def sfa_uninstall(self):
1341 "uses rpm to uninstall sfa - ignore result"
1342 self.run_in_guest("rpm -e sfa sfa-sfatables sfa-client sfa-plc")
1343 self.run_in_guest("rm -rf /var/lib/sfa")
1344 self.run_in_guest("rm -rf /etc/sfa")
1345 self.run_in_guest("rm -rf /var/log/sfa_access.log /var/log/sfa_import_plc.log /var/log/sfa.daemon")
1347 self.run_in_guest("rpm -e --noscripts sfa-plc")
1350 ### run unit tests for SFA
1351 # NOTE: for some reason on f14/i386, yum install sfa-tests fails for no reason
1352 # Running Transaction
1353 # Transaction couldn't start:
1354 # installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem
1355 # [('installing package sfa-tests-1.0-21.onelab.i686 needs 204KB on the / filesystem', (9, '/', 208896L))]
1356 # no matter how many Gbs are available on the testplc
1357 # could not figure out what's wrong, so...
1358 # if the yum install phase fails, consider the test is successful
1359 # other combinations will eventually run it hopefully
1360 def sfa_utest(self):
1361 "yum install sfa-tests and run SFA unittests"
1362 self.run_in_guest("yum -y install sfa-tests")
1363 # failed to install - forget it
1364 if self.run_in_guest("rpm -q sfa-tests")!=0:
1365 utils.header("WARNING: SFA unit tests failed to install, ignoring")
1367 return self.run_in_guest("/usr/share/sfa/tests/testAll.py")==0
1371 dirname="conf.%s"%self.plc_spec['name']
1372 if not os.path.isdir(dirname):
1373 utils.system("mkdir -p %s"%dirname)
1374 if not os.path.isdir(dirname):
1375 raise Exception,"Cannot create config dir for plc %s"%self.name()
1378 def conffile(self,filename):
1379 return "%s/%s"%(self.confdir(),filename)
1380 def confsubdir(self,dirname,clean,dry_run=False):
1381 subdirname="%s/%s"%(self.confdir(),dirname)
1383 utils.system("rm -rf %s"%subdirname)
1384 if not os.path.isdir(subdirname):
1385 utils.system("mkdir -p %s"%subdirname)
1386 if not dry_run and not os.path.isdir(subdirname):
1387 raise "Cannot create config subdir %s for plc %s"%(dirname,self.name())
1390 def conffile_clean (self,filename):
1391 filename=self.conffile(filename)
1392 return utils.system("rm -rf %s"%filename)==0
1395 def sfa_configure(self):
1396 "run sfa-config-tty"
1397 tmpname=self.conffile("sfa-config-tty")
1398 fileconf=open(tmpname,'w')
1399 for var in [ 'SFA_REGISTRY_ROOT_AUTH',
1400 'SFA_INTERFACE_HRN',
1401 'SFA_REGISTRY_LEVEL1_AUTH',
1402 'SFA_REGISTRY_HOST',
1403 'SFA_AGGREGATE_HOST',
1413 'SFA_GENERIC_FLAVOUR',
1414 'SFA_AGGREGATE_ENABLED',
1416 if self.plc_spec['sfa'].has_key(var):
1417 fileconf.write ('e %s\n%s\n'%(var,self.plc_spec['sfa'][var]))
1418 # the way plc_config handles booleans just sucks..
1421 if self.plc_spec['sfa'][var]: val='true'
1422 fileconf.write ('e %s\n%s\n'%(var,val))
1423 fileconf.write('w\n')
1424 fileconf.write('R\n')
1425 fileconf.write('q\n')
1427 utils.system('cat %s'%tmpname)
1428 self.run_in_guest_piped('cat %s'%tmpname,'sfa-config-tty')
1431 def aggregate_xml_line(self):
1432 port=self.plc_spec['sfa']['neighbours-port']
1433 return '<aggregate addr="%s" hrn="%s" port="%r"/>' % \
1434 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'],port)
1436 def registry_xml_line(self):
1437 return '<registry addr="%s" hrn="%s" port="12345"/>' % \
1438 (self.vserverip,self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH'])
1441 # a cross step that takes all other plcs in argument
1442 def cross_sfa_configure(self, other_plcs):
1443 "writes aggregates.xml and registries.xml that point to all other PLCs in the test"
1444 # of course with a single plc, other_plcs is an empty list
1447 agg_fname=self.conffile("agg.xml")
1448 file(agg_fname,"w").write("<aggregates>%s</aggregates>\n" % \
1449 " ".join([ plc.aggregate_xml_line() for plc in other_plcs ]))
1450 utils.header ("(Over)wrote %s"%agg_fname)
1451 reg_fname=self.conffile("reg.xml")
1452 file(reg_fname,"w").write("<registries>%s</registries>\n" % \
1453 " ".join([ plc.registry_xml_line() for plc in other_plcs ]))
1454 utils.header ("(Over)wrote %s"%reg_fname)
1455 return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
1456 and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
1458 def sfa_import(self):
1459 "use sfaadmin to import from plc"
1460 auth=self.plc_spec['sfa']['SFA_REGISTRY_ROOT_AUTH']
1462 self.run_in_guest('sfaadmin reg import_registry')==0
1463 # not needed anymore
1464 # self.run_in_guest('cp /etc/sfa/authorities/%s/%s.pkey /etc/sfa/authorities/server.key'%(auth,auth))
1466 def sfa_start(self):
1468 return self.run_in_guest('service sfa start')==0
1470 def sfi_configure(self):
1471 "Create /root/sfi on the plc side for sfi client configuration"
1472 if self.options.dry_run:
1473 utils.header("DRY RUN - skipping step")
1475 sfa_spec=self.plc_spec['sfa']
1476 # cannot use auth_sfa_mapper to pass dir_name
1477 for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
1478 test_slice=TestAuthSfa(self,slice_spec)
1479 dir_basename=os.path.basename(test_slice.sfi_path())
1480 dir_name=self.confsubdir("dot-sfi/%s"%dir_basename,clean=True,dry_run=self.options.dry_run)
1481 test_slice.sfi_configure(dir_name)
1482 # push into the remote /root/sfi area
1483 location = test_slice.sfi_path()
1484 remote="%s/%s"%(self.vm_root_in_host(),location)
1485 self.test_ssh.mkdir(remote,abs=True)
1486 # need to strip last level or remote otherwise we get an extra dir level
1487 self.test_ssh.copy_abs(dir_name, os.path.dirname(remote), recursive=True)
1491 def sfi_clean (self):
1492 "clean up /root/sfi on the plc side"
1493 self.run_in_guest("rm -rf /root/sfi")
1497 def sfa_add_site (self): pass
1499 def sfa_add_pi (self): pass
1501 def sfa_add_user(self): pass
1503 def sfa_update_user(self): pass
1505 def sfa_add_slice(self): pass
1507 def sfa_renew_slice(self): pass
1509 def sfa_discover(self): pass
1511 def sfa_create_slice(self): pass
1513 def sfa_check_slice_plc(self): pass
1515 def sfa_update_slice(self): pass
1517 def sfi_list(self): pass
1519 def sfi_show(self): pass
1521 def sfi_slices(self): pass
1523 def ssh_slice_sfa(self): pass
1525 def sfa_delete_user(self): pass
1527 def sfa_delete_slice(self): pass
1531 self.run_in_guest('service sfa stop')==0
1534 def populate (self):
1535 "creates random entries in the PLCAPI"
1536 # install the stress-test in the plc image
1537 location = "/usr/share/plc_api/plcsh_stress_test.py"
1538 remote="%s/%s"%(self.vm_root_in_host(),location)
1539 self.test_ssh.copy_abs("plcsh_stress_test.py",remote)
1541 command += " -- --preserve --short-names"
1542 local = (self.run_in_guest(command) == 0);
1543 # second run with --foreign
1544 command += ' --foreign'
1545 remote = (self.run_in_guest(command) == 0);
1546 return ( local and remote)
1548 def gather_logs (self):
1549 "gets all possible logs from plc's/qemu node's/slice's for future reference"
1550 # (1.a) get the plc's /var/log/ and store it locally in logs/myplc.var-log.<plcname>/*
1551 # (1.b) get the plc's /var/lib/pgsql/data/pg_log/ -> logs/myplc.pgsql-log.<plcname>/*
1552 # (1.c) get the plc's /root/sfi -> logs/sfi.<plcname>/
1553 # (2) get all the nodes qemu log and store it as logs/node.qemu.<node>.log
1554 # (3) get the nodes /var/log and store is as logs/node.var-log.<node>/*
1555 # (4) as far as possible get the slice's /var/log as logs/sliver.var-log.<sliver>/*
1557 print "-------------------- TestPlc.gather_logs : PLC's /var/log"
1558 self.gather_var_logs ()
1560 print "-------------------- TestPlc.gather_logs : PLC's /var/lib/psql/data/pg_log/"
1561 self.gather_pgsql_logs ()
1563 print "-------------------- TestPlc.gather_logs : PLC's /root/sfi/"
1564 self.gather_root_sfi ()
1566 print "-------------------- TestPlc.gather_logs : nodes's QEMU logs"
1567 for site_spec in self.plc_spec['sites']:
1568 test_site = TestSite (self,site_spec)
1569 for node_spec in site_spec['nodes']:
1570 test_node=TestNode(self,test_site,node_spec)
1571 test_node.gather_qemu_logs()
1573 print "-------------------- TestPlc.gather_logs : nodes's /var/log"
1574 self.gather_nodes_var_logs()
1576 print "-------------------- TestPlc.gather_logs : sample sliver's /var/log"
1577 self.gather_slivers_var_logs()
1580 def gather_slivers_var_logs(self):
1581 for test_sliver in self.all_sliver_objs():
1582 remote = test_sliver.tar_var_logs()
1583 utils.system("mkdir -p logs/sliver.var-log.%s"%test_sliver.name())
1584 command = remote + " | tar -C logs/sliver.var-log.%s -xf -"%test_sliver.name()
1585 utils.system(command)
1588 def gather_var_logs (self):
1589 utils.system("mkdir -p logs/myplc.var-log.%s"%self.name())
1590 to_plc = self.actual_command_in_guest("tar -C /var/log/ -cf - .")
1591 command = to_plc + "| tar -C logs/myplc.var-log.%s -xf -"%self.name()
1592 utils.system(command)
1593 command = "chmod a+r,a+x logs/myplc.var-log.%s/httpd"%self.name()
1594 utils.system(command)
1596 def gather_pgsql_logs (self):
1597 utils.system("mkdir -p logs/myplc.pgsql-log.%s"%self.name())
1598 to_plc = self.actual_command_in_guest("tar -C /var/lib/pgsql/data/pg_log/ -cf - .")
1599 command = to_plc + "| tar -C logs/myplc.pgsql-log.%s -xf -"%self.name()
1600 utils.system(command)
1602 def gather_root_sfi (self):
1603 utils.system("mkdir -p logs/sfi.%s"%self.name())
1604 to_plc = self.actual_command_in_guest("tar -C /root/sfi/ -cf - .")
1605 command = to_plc + "| tar -C logs/sfi.%s -xf -"%self.name()
1606 utils.system(command)
1608 def gather_nodes_var_logs (self):
1609 for site_spec in self.plc_spec['sites']:
1610 test_site = TestSite (self,site_spec)
1611 for node_spec in site_spec['nodes']:
1612 test_node=TestNode(self,test_site,node_spec)
1613 test_ssh = TestSsh (test_node.name(),key="keys/key_admin.rsa")
1614 command = test_ssh.actual_command("tar -C /var/log -cf - .")
1615 command = command + "| tar -C logs/node.var-log.%s -xf -"%test_node.name()
1616 utils.system("mkdir -p logs/node.var-log.%s"%test_node.name())
1617 utils.system(command)
1620 # returns the filename to use for sql dump/restore, using options.dbname if set
1621 def dbfile (self, database):
1622 # uses options.dbname if it is found
1624 name=self.options.dbname
1625 if not isinstance(name,StringTypes):
1628 t=datetime.datetime.now()
1631 return "/root/%s-%s.sql"%(database,name)
1633 def plc_db_dump(self):
1634 'dump the planetlab5 DB in /root in the PLC - filename has time'
1635 dump=self.dbfile("planetab5")
1636 self.run_in_guest('pg_dump -U pgsqluser planetlab5 -f '+ dump)
1637 utils.header('Dumped planetlab5 database in %s'%dump)
1640 def plc_db_restore(self):
1641 'restore the planetlab5 DB - looks broken, but run -n might help'
1642 dump=self.dbfile("planetab5")
1643 ##stop httpd service
1644 self.run_in_guest('service httpd stop')
1645 # xxx - need another wrapper
1646 self.run_in_guest_piped('echo drop database planetlab5','psql --user=pgsqluser template1')
1647 self.run_in_guest('createdb -U postgres --encoding=UNICODE --owner=pgsqluser planetlab5')
1648 self.run_in_guest('psql -U pgsqluser planetlab5 -f '+dump)
1649 ##starting httpd service
1650 self.run_in_guest('service httpd start')
1652 utils.header('Database restored from ' + dump)
1654 def standby_1_through_20(self):
1655 """convenience function to wait for a specified number of minutes"""
1658 def standby_1(): pass
1660 def standby_2(): pass
1662 def standby_3(): pass
1664 def standby_4(): pass
1666 def standby_5(): pass
1668 def standby_6(): pass
1670 def standby_7(): pass
1672 def standby_8(): pass
1674 def standby_9(): pass
1676 def standby_10(): pass
1678 def standby_11(): pass
1680 def standby_12(): pass
1682 def standby_13(): pass
1684 def standby_14(): pass
1686 def standby_15(): pass
1688 def standby_16(): pass
1690 def standby_17(): pass
1692 def standby_18(): pass
1694 def standby_19(): pass
1696 def standby_20(): pass