test_site = TestSite(self,site_spec)
test_slice=TestSlice(self,test_site,slice_spec)
tasks += slice_method (test_slice, self.options)
- return Completer (tasks).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
+ return Completer (tasks, message=method.__name__).run (decorator_self.timeout, decorator_self.silent, decorator_self.period)
# restore the doc text from the TestSlice method even if a bit odd
wrappee.__name__ = method.__name__
wrappee.__doc__ = slice_method.__doc__
'sfa_register_user@1', 'sfa_update_user@1', 'sfa_register_slice@1', 'sfa_renew_slice@1', SEPSFA,
'sfa_remove_user_from_slice@1','sfi_show_slice_researchers@1',
'sfa_insert_user_in_slice@1','sfi_show_slice_researchers@1', SEPSFA,
- 'sfa_discover@1', 'sfa_create_slice@1', 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
+ 'sfa_discover@1', 'sfa_rspec@1', 'sfa_allocate@1', 'sfa_provision@1', SEPSFA,
+ 'sfa_check_slice_plc@1', 'sfa_update_slice@1', SEPSFA,
'sfi_list@1', 'sfi_show_site@1', 'sfa_utest@1', SEPSFA,
# we used to run plcsh_stress_test, and then ssh_node_debug and ssh_node_boot
# but as the stress test might take a while, we sometimes missed the debug mode..
'probe_kvm_iptables',
'ping_node', 'ssh_node_debug', 'plcsh_stress_test@1', SEP,
'ssh_node_boot', 'node_bmlogs', 'ssh_slice', 'ssh_slice_basics', 'check_initscripts_ignore', SEP,
- 'ssh_slice_sfa@1', 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
+ 'ssh_slice_sfa@1', SEPSFA,
+ 'sfa_rspec_empty@1', 'sfa_allocate_empty@1', 'sfa_provision_empty@1','sfa_check_slice_plc_empty@1', SEPSFA,
+ 'sfa_delete_slice@1', 'sfa_delete_user@1', SEPSFA,
'cross_check_tcp@1', 'check_system_slice', SEP,
+ # for inspecting the slice while it runs the first time
+ #'fail',
# check slices are turned off properly
'empty_slices', 'ssh_slice_off', 'slice_fs_deleted_ignore', SEP,
# check they are properly re-created with the same name
'qemu_list_all', 'qemu_list_mine', 'qemu_kill_all', SEP,
'sfa_install_core', 'sfa_install_sfatables', 'sfa_install_plc', 'sfa_install_client', SEPSFA,
'sfa_plcclean', 'sfa_dbclean', 'sfa_stop','sfa_uninstall', 'sfi_clean', SEPSFA,
+ 'sfa_get_expires', SEPSFA,
'plc_db_dump' , 'plc_db_restore', SEP,
'check_netflow','check_drl', SEP,
'debug_nodemanager', 'slice_fs_present', SEP,
# see e.g. plc_start esp. the version for f14
#command gets run in the plc's vm
def host_to_guest(self,command):
+ vservername=self.vservername
+ personality=self.options.personality
+ raw="%(personality)s virsh -c lxc:/// lxc-enter-namespace %(vservername)s"%locals()
# f14 still needs some extra help
if self.options.fcdistro == 'f14':
- raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %s" %(self.vservername,command)
+ raw +=" -- /usr/bin/env PATH=/bin:/sbin:/usr/bin:/usr/sbin %(command)s" %locals()
else:
- raw="virsh -c lxc:/// lxc-enter-namespace %s -- /usr/bin/env %s" %(self.vservername,command)
+ raw +=" -- /usr/bin/env %(command)s"%locals()
return raw
# this /vservers thing is legacy...
utils.header("checking nodes boot state (expected %s)"%target_boot_state)
tasks = [ CompleterTaskBootState (self,hostname) \
for (hostname,_) in self.all_node_infos() ]
- return Completer (tasks).run (timeout, graceout, period)
+ message = 'check_boot_state={}'.format(target_boot_state)
+ return Completer (tasks, message=message).run (timeout, graceout, period)
def nodes_booted(self):
return self.nodes_check_boot_state('boot',timeout_minutes=30,silent_minutes=28)
return True
# probing nodes
- def check_nodes_ping(self,timeout_seconds=120,period_seconds=10):
- class CompleterTaskPingNode (CompleterTask):
+ def check_nodes_ping(self, timeout_seconds=30, period_seconds=10):
+ class CompleterTaskPingNode(CompleterTask):
def __init__ (self, hostname):
self.hostname=hostname
- def run(self,silent):
+ def run(self, silent):
command="ping -c 1 -w 1 %s >& /dev/null"%self.hostname
return utils.system (command, silent=silent)==0
def failure_epilogue (self):
period=timedelta (seconds=period_seconds)
node_infos = self.all_node_infos()
tasks = [ CompleterTaskPingNode (h) for (h,_) in node_infos ]
- return Completer (tasks).run (timeout, graceout, period)
+ return Completer (tasks, message='ping_node').run (timeout, graceout, period)
# ping node before we try to reach ssh, helpful for troubleshooting failing bootCDs
def ping_node (self):
"Ping nodes"
return self.check_nodes_ping ()
- def check_nodes_ssh(self,debug,timeout_minutes,silent_minutes,period_seconds=15):
+ def check_nodes_ssh(self, debug, timeout_minutes, silent_minutes, period_seconds=15):
# various delays
timeout = timedelta(minutes=timeout_minutes)
graceout = timedelta(minutes=silent_minutes)
vservername=self.vservername
if debug:
message="debug"
+ completer_message = 'ssh_node_debug'
local_key = "keys/%(vservername)s-debug.rsa"%locals()
else:
message="boot"
+ completer_message = 'ssh_node_boot'
local_key = "keys/key_admin.rsa"
utils.header("checking ssh access to nodes (expected in %s mode)"%message)
node_infos = self.all_node_infos()
- tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key, boot_state=message) \
+ tasks = [ CompleterTaskNodeSsh (nodename, qemuname, local_key,
+ boot_state=message, dry_run=self.options.dry_run) \
for (nodename,qemuname) in node_infos ]
- return Completer (tasks).run (timeout, graceout, period)
+ return Completer (tasks, message=completer_message).run (timeout, graceout, period)
def ssh_node_debug(self):
"Tries to ssh into nodes in debug mode with the debug ssh key"
### initscripts
def do_check_initscripts(self):
class CompleterTaskInitscript (CompleterTask):
- def __init__ (self, test_sliver, stamp):
+ def __init__(self, test_sliver, stamp):
self.test_sliver=test_sliver
self.stamp=stamp
- def actual_run (self):
- return self.test_sliver.check_initscript_stamp (self.stamp)
- def message (self):
+ def actual_run(self):
+ return self.test_sliver.check_initscript_stamp(self.stamp)
+ def message(self):
return "initscript checker for %s"%self.test_sliver.name()
- def failure_epilogue (self):
+ def failure_epilogue(self):
print "initscript stamp %s not found in sliver %s"%(self.stamp,self.test_sliver.name())
tasks=[]
test_slice = TestSlice (self,test_site,slice_spec)
test_node = TestNode (self,test_site,node)
test_sliver = TestSliver (self, test_node, test_slice)
- tasks.append ( CompleterTaskInitscript (test_sliver, stamp))
- return Completer (tasks).run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
+ tasks.append(CompleterTaskInitscript(test_sliver, stamp))
+ return Completer(tasks, message='check_initscripts').\
+ run (timedelta(minutes=5), timedelta(minutes=4), timedelta(seconds=10))
def check_initscripts(self):
"check that the initscripts have triggered"
utils.header ("check_tcp: no/empty config found")
return True
specs = self.plc_spec['tcp_specs']
- overall=True
+ overall = True
+
+ # first wait for the network to be up and ready from the slices
+ class CompleterTaskNetworkReadyInSliver(CompleterTask):
+ def __init__(self, test_sliver):
+ self.test_sliver = test_sliver
+ def actual_run(self):
+ return self.test_sliver.check_tcp_ready(port=9999)
+ def message(self):
+ return "network ready checker for %s" % self.test_sliver.name()
+ def failure_epilogue(self):
+ print "could not bind port from sliver %s" % self.test_sliver.name()
+
+ sliver_specs = {}
+ tasks = []
+ managed_sliver_names = set()
+ for spec in specs:
+ # locate the TestSliver instances involved, and cache them in the spec instance
+ spec['s_sliver'] = self.locate_sliver_obj_cross (spec['server_node'], spec['server_slice'], other_plcs)
+ spec['c_sliver'] = self.locate_sliver_obj_cross (spec['client_node'], spec['client_slice'], other_plcs)
+ message = "Will check TCP between s=%s and c=%s" % \
+ (spec['s_sliver'].name(), spec['c_sliver'].name())
+ if 'client_connect' in spec:
+ message += " (using %s)" % spec['client_connect']
+ utils.header(message)
+ # we need to check network presence in both slivers, but also
+ # avoid inserting a sliver several times
+ for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
+ if sliver.name() not in managed_sliver_names:
+ tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
+ # add this sliver's name in the set
+ managed_sliver_names .update ( {sliver.name()} )
+
+ # wait for the network to be OK on all server sides
+ if not Completer(tasks, message='check for network readiness in slivers').\
+ run(timedelta(seconds=30), timedelta(seconds=24), period=timedelta(seconds=5)):
+ return False
+
+ # run server and client
for spec in specs:
port = spec['port']
# server side
- s_test_sliver = self.locate_sliver_obj_cross (spec['server_node'],spec['server_slice'],other_plcs)
- if not s_test_sliver.run_tcp_server(port,timeout=20):
- overall=False
+ # the issue here is that the server runs in the background,
+ # so we have no clue whether it actually started properly or not
+ # looks like in some cases it does not
+ if not spec['s_sliver'].run_tcp_server(port, timeout=20):
+ overall = False
break
# idem for the client side
- c_test_sliver = self.locate_sliver_obj_cross (spec['client_node'],spec['client_slice'],other_plcs)
- # use nodename from locatesd sliver, unless 'client_connect' is set
+ # use nodename from located sliver, unless 'client_connect' is set
if 'client_connect' in spec:
destination = spec['client_connect']
else:
- destination=s_test_sliver.test_node.name()
- if not c_test_sliver.run_tcp_client(destination,port):
- overall=False
+ destination = spec['s_sliver'].test_node.name()
+ if not spec['c_sliver'].run_tcp_client(destination, port):
+ overall = False
return overall
# painfully enough, we need to allow for some time as netflow might show up last
period = timedelta (seconds=period_seconds)
tasks = [ CompleterTaskSystemSlice (test_node, self.options.dry_run) \
for test_node in self.all_nodes() ]
- return Completer (tasks) . run (timeout, silent, period)
+ return Completer (tasks, message='_check_system_slice') . run (timeout, silent, period)
def plcsh_stress_test (self):
"runs PLCAPI stress test, that checks Add/Update/Delete on all types - preserves contents"
" ".join([ plc.registry_xml_line() for plc in other_plcs ]))
utils.header ("(Over)wrote %s"%reg_fname)
return self.test_ssh.copy_abs(agg_fname,'/%s/etc/sfa/aggregates.xml'%self.vm_root_in_host())==0 \
- and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
+ and self.test_ssh.copy_abs(reg_fname,'/%s/etc/sfa/registries.xml'%self.vm_root_in_host())==0
def sfa_import(self):
"use sfaadmin to import from plc"
self.run_in_guest("rm -rf /root/sfi")
return True
+ def sfa_rspec_empty(self):
+ "expose a static empty rspec (ships with the tests module) in the sfi directory"
+ filename="empty-rspec.xml"
+ overall=True
+ for slice_spec in self.plc_spec['sfa']['auth_sfa_specs']:
+ test_slice=TestAuthSfa(self,slice_spec)
+ in_vm = test_slice.sfi_path()
+ remote="%s/%s"%(self.vm_root_in_host(),in_vm)
+ if self.test_ssh.copy_abs (filename, remote) !=0: overall=False
+ return overall
+
@auth_sfa_mapper
def sfa_register_site (self): pass
@auth_sfa_mapper
@auth_sfa_mapper
def sfa_renew_slice(self): pass
@auth_sfa_mapper
+ def sfa_get_expires(self): pass
+ @auth_sfa_mapper
def sfa_discover(self): pass
@auth_sfa_mapper
- def sfa_create_slice(self): pass
+ def sfa_rspec(self): pass
+ @auth_sfa_mapper
+ def sfa_allocate(self): pass
+ @auth_sfa_mapper
+ def sfa_allocate_empty(self): pass
+ @auth_sfa_mapper
+ def sfa_provision(self): pass
+ @auth_sfa_mapper
+ def sfa_provision_empty(self): pass
@auth_sfa_mapper
def sfa_check_slice_plc(self): pass
@auth_sfa_mapper
+ def sfa_check_slice_plc_empty(self): pass
+ @auth_sfa_mapper
def sfa_update_slice(self): pass
@auth_sfa_mapper
def sfa_remove_user_from_slice(self): pass
# convenience for debugging the test logic
def yes (self): return True
def no (self): return False
+ def fail (self): return False