period_seconds=int(period.total_seconds())
if self.verbose:
if timeout_seconds >= 120:
- utils.header("max timeout is %d minutes, silent for %d minutes (period is %s s)"%\
- (timeout_minutes,silent_minutes,period_seconds))
+ utils.header("Completer [%d tasks]: max timeout is %d minutes, "
+ "silent for %d minutes (period is %s s)"%\
+ (len(self.tasks), timeout_minutes,
+ silent_minutes, period_seconds))
else:
- utils.header("max timeout is %d seconds, silent for %d seconds (period is %s s)"%\
- (timeout_seconds,silent_seconds,period_seconds))
+ utils.header("Completer [%d tasks]: max timeout is %d seconds, "
+ "silent for %d seconds (period is %s s)"%\
+ (len(self.tasks), timeout_seconds,
+ silent_seconds, period_seconds))
tasks=self.tasks
while tasks:
fine=[]
def failure_epilogue(self):
print "could not bind port from sliver %s" % self.test_sliver.name()
+ sliver_specs = {}
tasks = []
+ managed_sliver_names = set()
for spec in specs:
# locate the TestSliver instances involved, and cache them in the spec instance
spec['s_sliver'] = self.locate_sliver_obj_cross (spec['server_node'], spec['server_slice'], other_plcs)
if 'client_connect' in spec:
message += " (using %s)" % spec['client_connect']
utils.header(message)
- tasks.append(CompleterTaskNetworkReadyInSliver (spec['s_sliver']))
+ # we need to check network presence in both slivers, but also
+ # avoid inserting the same sliver several times
+ for sliver in [ spec['s_sliver'], spec['c_sliver'] ]:
+ if sliver.name() not in managed_sliver_names:
+ tasks.append(CompleterTaskNetworkReadyInSliver(sliver))
+ # add this sliver's name to the set
+ managed_sliver_names .update ( {sliver.name()} )
# wait for the network to be OK on all server sides
if not Completer(tasks, message='check for network readiness in slivers').\
utils.header("Checking for initscript stamp %s on sliver %s"%(stamp, self.name()))
return self.test_ssh.run("ls -l /var/tmp/%s.stamp"%stamp)==0
+ def check_tcp_ready (self, port):
+ ready_command = "./tcptest.py ready -p %d"%(port)
+ return self.test_ssh.copy("tcptest.py") == 0 and \
+ self.test_ssh.run(ready_command) == 0
+
def run_tcp_server (self, port, timeout=10):
server_command = "./tcptest.py server -p %d -t %d"%(port, timeout)
return self.test_ssh.copy("tcptest.py") == 0 and \
self.test_ssh.run(server_command, background=True)==0
- def check_tcp_ready (self, port):
- server_command = "./tcptest.py ready -p %d"%(port)
- return self.test_ssh.copy("tcptest.py") == 0 and \
- self.test_ssh.run(server_command) == 0
-
def run_tcp_client (self, servername, port, retry=5):
client_command="./tcptest.py client -a %s -p %d"%(servername, port)
if self.test_ssh.copy("tcptest.py") != 0:
return False
- # allow for 2 attempts
- attempts = 2
- for attempt in range (attempts):
- if attempt != 0:
- time.sleep(retry)
- utils.header ("tcp client - attempt # %s" % (attempt+1))
- if self.test_ssh.run(client_command) == 0:
- return True
+ if self.test_ssh.run(client_command) == 0:
+ return True
return False
# use the node's main ssh root entrance, as the slice entrance might be down
specs += [ same_node_same_slice ]
# worth another try
specs += [ same_node_2_slices ]
- if options.size >1 :
+ if options.size > 1:
specs += [ two_nodes_same_slice, two_nodes_2_slices ]
return specs
parser.print_help()
sys.exit(1)
+ myprint("==================== tcptest.py server", id='server')
show_network_status(id='server')
server = SocketServer.TCPServer((options.address, options.port),
UppercaseRequestHandler)
default=socket.gethostname(), help="address")
(options, args) = parser.parse_args()
+ myprint("==================== tcptest.py ready", id='ready')
def can_bind ():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
parser.print_help()
sys.exit(1)
+ myprint("==================== tcptest.py client", id='client')
result=True
for i in range(1,options.loops+1):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)