### Compute metric: Avg number of Interests seen per content name
### normalized by the number of nodes in the shortest path
- content_name_count = len(content_names.values())
+ content_name_count = len(content_names)
nodes_in_shortest_path = len(shortest_path) - 1
metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
\r
# statistics on RTT\r
rtts = [content_names[content_name]["rtt"] \\r
- for content_name in content_names.keys()]\r
+ for content_name in content_names]
\r
# sample mean and standard deviation\r
sample = numpy.array(rtts)\r
### Compute metric: Avg number of Interests seen per content name
### normalized by the number of nodes in the shortest path
- content_name_count = len(content_names.values())
+ content_name_count = len(content_names)
nodes_in_shortest_path = len(shortest_path) - 1
metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
# statistics on RTT
rtts = [content_names[content_name]["rtt"] \
- for content_name in content_names.keys()]
+ for content_name in content_names]
# sample mean and standard deviation
sample = numpy.array(rtts)
# BASH command -> ' ccndstart ; ccndc add ccnx:/ udp host ; ccnr '
command = "ccndstart && "
- peers = map(lambda peer: "ccndc add ccnx:/ udp %s" % peer, peers)
+ peers = ["ccndc add ccnx:/ udp %s" % peer for peer in peers]
command += " ; ".join(peers) + " && "
command += " ccnr & "
# BASH command -> ' ccndstart ; ccndc add ccnx:/ udp host ; ccnr '
command = "ccndstart && "
- peers = map(lambda peer: "ccndc add ccnx:/ udp %s" % peer, peers)
+ peers = ["ccndc add ccnx:/ udp %s" % peer for peer in peers]
command += " ; ".join(peers) + " && "
command += " ccnr & "
# Register a collector to automatically collect traces
collector = add_collector(ec, "stderr")
- for ccnd in ccnds.values():
+ for ccnd in ccnds:
ec.register_connection(collector, ccnd)
# deploy all ResourceManagers
CCNR_DIRECTORY="/root" CCNR_STATUS_PORT="8080"'
# BASH command -> ' ccndstart ; ccndc add ccnx:/ udp host ; ccnr '
- peers = map(lambda peer: "ccndc add ccnx:/ udp %s" % peer, peers)
+ peers = ["ccndc add ccnx:/ udp %s" % peer for peer in peers]
#command += " ; ".join(peers) + " && "
command = peers[0]
nodes = dict()
chann = add_channel(ec, channel, xmpp_slice, xmpp_host)
- for i in xrange(len(all_hosts)):
- node = add_node(ec,all_hosts[i], xmpp_slice, xmpp_host)
+ for i, host in enumerate(all_hosts):
+ node = add_node(ec, host, xmpp_slice, xmpp_host)
iface = add_interface(ec, all_ip[i], xmpp_slice, xmpp_host)
ec.register_connection(node, iface)
ec.register_connection(iface, chann)
- nodes[all_hosts[i]] = node
+ nodes[host] = node
# CCN setup for the node
ccnds = dict()
ccnrs = dict()
- for i in xrange(len(all_hosts)):
- ccndstart = add_app(ec, nodes[all_hosts[i]], "#ccndstart", "ccndstart &",
+ for i, host in enumerate(all_hosts):
+ ccndstart = add_app(ec, nodes[host], "#ccndstart", "ccndstart &",
env, xmpp_slice, xmpp_host)
- ccnr = add_app(ec, nodes[all_hosts[i]], "#ccnr", "ccnr &",
+ ccnr = add_app(ec, nodes[host], "#ccnr", "ccnr &",
env, xmpp_slice, xmpp_host)
- ccnds[all_hosts[i]] = ccndstart
- ccnrs[all_hosts[i]] = ccnr
+ ccnds[host] = ccndstart
+ ccnrs[host] = ccnr
ec.register_condition(ccnr, ResourceAction.START, ccndstart, ResourceState.STARTED, "1s")
# CCNDC setup
link = [l1u, l1d, l2u, l2d, l3u, l3d, l4u, l4d, l5u, l5d, b1u, b1d, b2u, b2d]
# List of condition
- for i in xrange(len(all_hosts)):
- ec.register_condition(ccnrs[all_hosts[i]], ResourceAction.START, ccnds[all_hosts[i]], ResourceState.STARTED, "1s")
- ec.register_condition(link, ResourceAction.START, ccnrs[all_hosts[i]], ResourceState.STARTED, "1s")
+ for host in all_hosts:
+ ec.register_condition(ccnrs[host], ResourceAction.START, ccnds[host], ResourceState.STARTED, "1s")
+ ec.register_condition(link, ResourceAction.START, ccnrs[host], ResourceState.STARTED, "1s")
# Streaming Server
pub = add_publish(ec, nodes[host5], movie, xmpp_slice, xmpp_host)
# Cleaning when the experiment stop
ccndstops = []
- for i in xrange(len(all_hosts)):
- ccndstop = add_app(ec, nodes[all_hosts[i]], "#ccndstop", "ccndstop", env, xmpp_slice, xmpp_host)
+ for host in all_hosts:
+ ccndstop = add_app(ec, nodes[host], "#ccndstop", "ccndstop", env, xmpp_slice, xmpp_host)
ccndstops.append(ccndstop)
killall = add_app(ec, nodes[host6], "#kill", "killall sh", "", xmpp_slice, xmpp_host)
# Condition to stop and clean the experiment
apps = []
- for i in xrange(len(all_hosts)):
- apps.append(ccnds[all_hosts[i]])
- apps.append(ccnrs[all_hosts[i]])
+ for host in all_hosts:
+ apps.append(ccnds[host])
+ apps.append(ccnrs[host])
apps += link
apps.append(pub)
apps.append(stream)
nodes = dict()
chann = add_channel(ec, channel, xmpp_slice, xmpp_host)
- for i in xrange(len(all_hosts)):
- node = add_node(ec,all_hosts[i], xmpp_slice, xmpp_host)
+ for i, host in enumerate(all_hosts):
+ node = add_node(ec,host, xmpp_slice, xmpp_host)
iface = add_interface(ec, all_ip[i], xmpp_slice, xmpp_host)
ec.register_connection(node, iface)
ec.register_connection(iface, chann)
- nodes[all_hosts[i]] = node
+ nodes[host] = node
#### CCN setup for the node
### ccnds = dict()
# Do the iperf
iperfserv = dict()
iperfclient = dict()
- for i in xrange(len(all_hosts)):
- perfserv = add_app(ec, nodes[all_hosts[i]], "#perfserv", "iperf -s > /opt/iperfserv.txt",
+ for i, host in enumerate(all_hosts):
+ perfserv = add_app(ec, nodes[host], "#perfserv", "iperf -s > /opt/iperfserv.txt",
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]] = []
+ iperfclient[host] = []
if i > 0:
cmd = "iperf -c " + all_ip[i-1] + " > /opt/iperclient1.txt"
- perfclient1 = add_app(ec, nodes[all_hosts[i]], "#perfclient1", cmd,
+ perfclient1 = add_app(ec, nodes[host], "#perfclient1", cmd,
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]].append(perfclient1)
+ iperfclient[host].append(perfclient1)
if i < (len(all_hosts)-1):
cmd = "iperf -c " + all_ip[i+1] + " > /opt/iperclient2.txt"
- perfclient2 = add_app(ec, nodes[all_hosts[i]], "#perfclient2", cmd,
+ perfclient2 = add_app(ec, nodes[host], "#perfclient2", cmd,
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]].append(perfclient2)
+ iperfclient[host].append(perfclient2)
- iperfserv[all_hosts[i]] = perfserv
+ iperfserv[host] = perfserv
- for i in xrange(len(all_hosts)):
- #ec.register_condition(iperfserv[all_hosts[i]], ResourceAction.START, link, ResourceState.STARTED, "2s")
- for elt in iperfclient[all_hosts[i]]:
- ec.register_condition(elt, ResourceAction.START, iperfserv[all_hosts[i]], ResourceState.STARTED, "3s")
+ for host in all_hosts:
+ #ec.register_condition(iperfserv[host], ResourceAction.START, link, ResourceState.STARTED, "2s")
+ for elt in iperfclient[host]:
+ ec.register_condition(elt, ResourceAction.START, iperfserv[host], ResourceState.STARTED, "3s")
## Streaming Server
## ccndstop = add_app(ec, nodes[all_hosts[i]], "#ccndstop", "ccndstop", env, xmpp_slice, xmpp_host)
## ccndstops.append(ccndstop)
perfkill = dict()
- for i in xrange(len(all_hosts)):
- kill = add_app(ec, nodes[all_hosts[i]], "#kill", "killall iperf", "", xmpp_slice, xmpp_host)
- perfkill[all_hosts[i]] = kill
+ for host in all_hosts:
+ kill = add_app(ec, nodes[host], "#kill", "killall iperf", "", xmpp_slice, xmpp_host)
+ perfkill[host] = kill
# Condition to stop and clean the experiment
apps = []
- for i in xrange(len(all_hosts)):
-# apps.append(ccnds[all_hosts[i]])
-# apps.append(ccnrs[all_hosts[i]])
- apps.append(iperfserv[all_hosts[i]])
- for elt in iperfclient[all_hosts[i]]:
+ for host in all_hosts:
+# apps.append(ccnds[host])
+# apps.append(ccnrs[host])
+ apps.append(iperfserv[host])
+ for elt in iperfclient[host]:
apps.append(elt)
# apps += link
#apps.append(pub)
# ec.register_condition(ccndstops + [killall], ResourceAction.STOP, ccndstops, ResourceState.STARTED, "1s")
killall = []
- for i in xrange(len(all_hosts)):
- killall.append(perfkill[all_hosts[i]])
+ for host in all_hosts:
+ killall.append(perfkill[host])
ec.register_condition(killall, ResourceAction.START, apps, ResourceState.STOPPED, "1s")
ec.register_condition(killall, ResourceAction.STOP, killall, ResourceState.STARTED, "1s")
})
# Ping from all resources to all other resources
-for r1, (n1, ip1) in r2ip.iteritems():
- for r2, (n2, ip2) in r2ip.iteritems():
+for r1, (n1, ip1) in r2ip.items():
+ for r2, (n2, ip2) in r2ip.items():
if r1 == r2:
continue
ec.wait_finished(apps.values())
# collect results
-for key, app in apps.iteritems():
+for key, app in apps.items():
stdout = ec.trace(app, "stdout")
print("***************************", key, "************************")
print(stdout)
nodes = []
-for i in xrange(node_count):
+for i in range(node_count):
node = add_node(ec, pl_slice, pl_ssh_key, pl_user, pl_password, country, os)
nodes.append(node)
pl_password = options.pl_password
hosts = options.hosts
-hosts = map(str.strip, hosts.split(","))
+hosts = [host.strip() for host in hosts.split(",")]
apps = []
## Create the experiment controller
fname = graph.node[nid]["history"]
history = load_content_history(fname)
- for content_name in history.keys():
+ for content_name in history:
hist = history[content_name]
for (timestamp, message_type, nid1, nid2, nonce, size, line) in hist:
# Compute the time elapsed between the time an interest is sent
# in the consumer node and when the content is received back
- for content_name in content_names.keys():
+ for content_name in content_names:
# order content and interest messages by timestamp
content_names[content_name]["content"] = sorted(
content_names[content_name]["content"])
for nonce, timestamps in content_names[content_name][
- "interest"].iteritems():
+ "interest"].items():
content_names[content_name]["interest"][nonce] = sorted(
timestamps)
"""
rms = []
- for guid, rm in self._resources.iteritems():
+ for guid, rm in self._resources.items():
if rm.get_rtype() == rtype:
rms.append(rm)
return rms
:rtype: list
"""
- keys = self._resources.keys()
+ keys = list(self._resources.keys())
return keys
"""
rms = []
- for guid, rm in self._resources.iteritems():
+ for guid, rm in self._resources.items():
if rm.get_rtype() == rtype:
rms.append(rm.guid)
return rms
if not guids:
# If no guids list was passed, all 'NEW' RMs will be deployed
guids = []
- for guid, rm in self._resources.iteritems():
+ for guid, rm in self._resources.items():
if rm.state == ResourceState.NEW:
guids.append(guid)
""" Returns a copy of the attributes
"""
- return copy.deepcopy(cls._attributes.values())
+ return copy.deepcopy(list(cls._attributes.values()))
@classmethod
def get_attribute(cls, name):
""" Returns a copy of the traces
"""
- return copy.deepcopy(cls._traces.values())
+ return copy.deepcopy(list(cls._traces.values()))
@classmethod
def get_help(cls):
if not isinstance(group, list):
group = [group]
- for act, conditions in self.conditions.iteritems():
+ for act, conditions in self.conditions.items():
if action and act != action:
continue
if sources:
self.info("Uploading sources ")
- sources = map(str.strip, sources.split(";"))
+ sources = [str.strip(source) for source in sources.split(";")]
# Separate sources that should be downloaded from
# the web, from sources that should be uploaded from
})
env = self.path
- env += " ".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))) \
- if self.get(k) else "", envs.keys()))
+ env += " ".join(["%s=%s" % (envs.get(k), str(self.get(k))) \
+ if self.get(k) else "" for k in list(envs.keys())])
return env
})
env = self.ccnd.path
- env += " ".join(map(lambda k: "%s=%s" % (envs.get(k), self.get(k)) \
- if self.get(k) else "", envs.keys()))
+ env += " ".join(["%s=%s" % (envs.get(k), self.get(k)) \
+ if self.get(k) else "" for k in list(envs.keys())])
return env
if not isinstance(packages, list):
packages = [packages]
- cmd = " && ".join(map(lambda p:
- " { dpkg -s %(package)s || sudo -S apt-get -y install %(package)s ; } " % {
- 'package': p}, packages))
+ cmd = " && ".join([" { dpkg -s %(package)s || sudo -S apt-get -y install %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { dpkg -s vim || sudo -S apt-get -y install vim ; } && ..
return cmd
if not isinstance(packages, list):
packages = [packages]
- cmd = " && ".join(map(lambda p:
- " { dpkg -s %(package)s && sudo -S apt-get -y purge %(package)s ; } " % {
- 'package': p}, packages))
+ cmd = " && ".join([" { dpkg -s %(package)s && sudo -S apt-get -y purge %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { dpkg -s vim && sudo -S apt-get -y purge vim ; } && ..
return cmd
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import os
import socket
msg = [msg_type, args, kwargs]
def encode(item):
- item = cPickle.dumps(item)
+ item = pickle.dumps(item)
return base64.b64encode(item)
encoded = "|".join(map(encode, msg))
self.emulation.error(msg, reply, err)
raise RuntimeError(msg)
- reply = cPickle.loads(base64.b64decode(reply))
+ reply = pickle.loads(base64.b64decode(reply))
return reply
"""
command = " [ -e %s ] && echo 'DONE' " % self.remote_socket
- for i in xrange(200):
+ for i in range(200):
(out, err), proc = self.node.execute(command, retry = 1,
with_lock = True)
# adding the avoided pids filtered above (avoid_kill) to allow users keep process
# alive when using besides ssh connections
kill_pids = set(pids_temp.items()) - set(pids.items())
- kill_pids = ' '.join(dict(kill_pids).keys())
+ # py2/py3 : join the keys, not the (key, value) tuples
+ kill_pids = ' '.join(dict(kill_pids).keys())
# removing pids from beside connections and its process
kill_pids = kill_pids.split(' ')
if text and not os.path.isfile(src):
# src is text input that should be uploaded as file
# create a temporal file with the content to upload
- f = tempfile.NamedTemporaryFile(delete=False)
+ # in python3 we need to open in binary mode if str is bytes
+ mode = 'w' if isinstance(src, str) else 'wb'
+ f = tempfile.NamedTemporaryFile(mode=mode, delete=False)
f.write(src)
f.close()
src = f.name
# If dst files should not be overwritten, check that the files do not
- # exits already
+ # exist already
if isinstance(src, str):
- src = map(str.strip, src.split(";"))
+ src = [s.strip() for s in src.split(";")]
if overwrite == False:
src = self.filter_existing_files(src, dst)
if isinstance(paths, str):
paths = [paths]
- cmd = " ; ".join(map(lambda path: "rm -rf {}".format(path), paths))
+ cmd = " ; ".join(["rm -rf {}".format(path) for path in paths])
return self.execute(cmd, with_lock = True)
pid = ppid = None
delay = 1.0
- for i in xrange(2):
+ for i in range(2):
pidtuple = self.getpid(home = home, pidfile = pidfile)
if pidtuple:
if len(src) > 1 else {dst: src[0]}
command = []
- for d in dests.keys():
+ for d in dests:
command.append(" [ -f {dst} ] && echo '{dst}' ".format(dst=d) )
command = ";".join(command)
(out, err), proc = self.execute(command, retry = 1, with_lock = True)
- for d in dests.keys():
+ for d in list(dests):
if out.find(d) > -1:
del dests[d]
"prefix" : "CCND_PREFIX",
})
- env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
- [k for k in envs.keys() if self.get(k)]))
+ env = ";".join(["%s=%s" % (envs.get(k), str(self.get(k))) for k in [k for k in list(envs.keys()) if self.get(k)]])
return env
"ccnsSyncScope": "CCNS_SYNC_SCOPE",
})
- env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
- [k for k in envs.keys() if self.get(k)]))
+ env = ";".join(["%s=%s" % (envs.get(k), str(self.get(k))) for k in [k for k in list(envs.keys()) if self.get(k)]])
return env
"home": "HOME",
})
- env = ";".join(map(lambda k: "%s=%s" % (envs.get(k), str(self.get(k))),
- [k for k in envs.keys() if self.get(k)]))
+ env = ";".join(["%s=%s" % (envs.get(k), str(self.get(k))) for k in [k for k in list(envs.keys()) if self.get(k)]])
return env
node = self.endpoint_node(endpoint)
run_home = self.run_home(endpoint)
- for i in xrange(20):
+ for i in range(20):
(out, err), proc = node.check_output(run_home, filename)
if out:
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import os
import socket
msg = [msg_type, args, kwargs]
def encode(item):
- item = cPickle.dumps(item)
+ item = pickle.dumps(item)
return base64.b64encode(item)
encoded = "|".join(map(encode, msg))
self.simulation.error(msg, reply, err)
raise RuntimeError(msg)
- reply = cPickle.loads(base64.b64decode(reply))
+ reply = pickle.loads(base64.b64decode(reply))
return reply
"""
command = " [ -e %s ] && echo 'DONE' " % self.remote_socket
- for i in xrange(200):
+ for i in range(200):
(out, err), proc = self.node.execute(command, retry = 1,
with_lock = True)
cmd = install_rpmfusion_command(os)
if cmd: cmd += " ; "
- cmd += " && ".join(map(lambda p:
- " { rpm -q %(package)s || sudo -S yum -y install --nogpgcheck %(package)s ; } " % {
- 'package': p}, packages))
+ cmd += " && ".join([" { rpm -q %(package)s || sudo -S yum -y install --nogpgcheck %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { rpm -q rpmfusion-free-release || sudo -s rpm -i ... ; } && { rpm -q vim || sudo yum -y install vim ; } && ..
return cmd
if not isinstance(packages, list):
packages = [packages]
- cmd = " && ".join(map(lambda p:
- " { rpm -q %(package)s && sudo -S yum -y remove %(package)s ; } " % {
- 'package': p}, packages))
+ cmd = " && ".join([" { rpm -q %(package)s && sudo -S yum -y remove %(package)s ; } " % {
+ 'package': p} for p in packages])
#cmd = { rpm -q vim && sudo yum -y remove vim ; } && ..
return cmd
# even if the file exists and had the port number,
# the read operation returns empty string!
# Maybe a race condition?
- for i in xrange(10):
+ for i in range(10):
with open(remote_port_file, 'r') as f:
remote_port = f.read()
# even if the file exists and had the port number,
# the read operation returns empty string!
# Maybe a race condition?
- for i in xrange(10):
+ for i in range(10):
with open(remote_port_file, 'r') as f:
remote_port = f.read()
import time
def ipfmt(ip):
- ipbytes = map(ord,ip.decode("hex"))
- return '.'.join(map(str,ipbytes))
+ return '.'.join(str(b) for b in bytes.fromhex(ip))
tagtype = {
'0806' : 'arp',
buf,
))
-_padmap = [ chr(padding) * padding for padding in xrange(127) ]
+_padmap = [ chr(padding) * padding for padding in range(127) ]
del padding
def encrypt(packet, crypter, len=len, padmap=_padmap):
sent = 0
try:
try:
- for x in xrange(maxbatch):
+ for x in range(maxbatch):
packet = pullPacket(fwbuf)
if crypto_mode:
bwfree -= sent
if tun in wrdy:
try:
- for x in xrange(maxtbatch):
+ for x in range(maxtbatch):
packet = pullPacket(bkbuf)
twrite(tunfd, packet)
#wt += 1
# check incoming data packets
if tun in rdrdy:
try:
- for x in xrange(maxbatch):
+ for x in range(maxbatch):
packet = tread(tunfd,2000) # tun.read blocks until it gets 2k!
if not packet:
continue
if remote in rdrdy:
try:
try:
- for x in xrange(maxbatch):
+ for x in range(maxbatch):
packet = rread(remote,2000)
#rr += 1
def udp_connect(TERMINATE, local_addr, local_port, peer_addr, peer_port):
rsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
retrydelay = 1.0
- for i in xrange(30):
+ for i in range(30):
# TERMINATE is a array. An item can be added to TERMINATE, from
# outside this function to force termination of the loop
if TERMINATE:
pass
keepalive_thread = threading.Thread(target=keepalive)
keepalive_thread.start()
- for i in xrange(900):
+ for i in range(900):
if TERMINATE:
raise OSError("Killed")
try:
# will be forever blocked in the connect, so we put a reasonable timeout.
rsock.settimeout(10)
# We wait for
- for i in xrange(30):
+ for i in range(30):
if stop:
break
if TERMINATE:
retrydelay = 1.0
# We try to bind to the local virtual interface.
# It might not exist yet so we wait in a loop.
- for i in xrange(30):
+ for i in range(30):
if stop:
break
if TERMINATE:
# The other side might not be ready yet, so we also wait in a loop for timeouts.
timeout = 1
lsock.listen(1)
- for i in xrange(30):
+ for i in range(30):
if TERMINATE:
raise OSError("Killed")
rlist, wlist, xlist = select.select([lsock], [], [], timeout)
end = False
sock = None
- for i in xrange(0, 50):
+ for i in range(0, 50):
if end:
break
if TERMINATE:
result = None
delay = 1.0
- for i in xrange(20):
+ for i in range(20):
(out, err), proc = self.node.check_output(home, filename)
if out:
result = out.strip()
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import logging
import os
def decode(item):
item = base64.b64decode(item).rstrip()
- return cPickle.loads(item)
+ return pickle.loads(item)
- decoded = map(decode, msg.split("|"))
+ decoded = [ decode(x) for x in msg.split("|")]
# decoded message
dmsg_type = decoded.pop(0)
return (dmsg_type, dargs, dkwargs)
def send_reply(conn, reply):
- encoded = base64.b64encode(cPickle.dumps(reply))
+ encoded = base64.b64encode(pickle.dumps(reply))
conn.send("%s\n" % encoded)
def get_options():
def replace_kwargs(self, kwargs):
realkwargs = dict([(k, self.get_object(v) \
if str(v).startswith("uuid") else v) \
- for k,v in kwargs.iteritems()])
+ for k,v in kwargs.items()])
return realkwargs
return pprint.pformat(value)
def format_args(self, args):
- fargs = map(self.format_value, args)
- return "[%s]" % ",".join(fargs)
+ return "[%s]" % ",".join(self.format_value(arg) for arg in args)
def format_kwargs(self, kwargs):
- fkwargs = map(lambda (k,w):
- "%s: %s" % (self.format_value(k), self.format_value(w)),
- kwargs.iteritems())
+ fkwargs = ["%s: %s" % (self.format_value(k), self.format_value(v)) for (k, v) in kwargs.items()]
return "dict({%s})" % ",".join(fkwargs)
from nepi.execution.trace import Trace
from nepi.resources.ns3.ns3base import NS3Base
-import ipaddr
-
@clsinit_copy
class NS3Route(NS3Base):
_rtype = "ns3::Route"
# Author: Alina Quereilhac <alina.quereilhac@inria.fr>
import base64
-import cPickle
+import pickle
import errno
import logging
import os
def decode(item):
item = base64.b64decode(item).rstrip()
- return cPickle.loads(item)
+ return pickle.loads(item)
- decoded = map(decode, msg.split("|"))
+ decoded = [ decode(x) for x in msg.split("|") ]
# decoded message
dmsg_type = decoded.pop(0)
return (dmsg_type, dargs, dkwargs)
def send_reply(conn, reply):
- encoded = base64.b64encode(cPickle.dumps(reply))
+ encoded = base64.b64encode(pickle.dumps(reply))
conn.send("%s\n" % encoded)
def get_options():
tid_count = type_id.GetRegisteredN()
base = type_id.LookupByName("ns3::Object")
- for i in xrange(tid_count):
+ for i in range(tid_count):
tid = type_id.GetRegistered(i)
if tid.MustHideFromDocumentation() or \
factory = self.ns3.ObjectFactory()
factory.SetTypeId(type_name)
- for name, value in kwargs.iteritems():
+ for name, value in kwargs.items():
ns3_value = self._attr_from_string_to_ns3_value(type_name, name, value)
factory.Set(name, ns3_value)
def replace_kwargs(self, kwargs):
realkwargs = dict([(k, self.get_object(v) \
if str(v).startswith("uuid") else v) \
- for k,v in kwargs.iteritems()])
+ for k,v in kwargs.items()])
realkwargs = dict([(k, self._singleton(v) \
if str(v).startswith(SINGLETON) else v )\
- for k, v in realkwargs.iteritems()])
+ for k, v in realkwargs.items()])
return realkwargs
# For all the interfaces registered with the ipv4 object, find
# the one that matches the network of the nexthop
nifaces = ipv4.GetNInterfaces()
- for ifidx in xrange(nifaces):
+ for ifidx in range(nifaces):
iface = ipv4.GetInterface(ifidx)
naddress = iface.GetNAddresses()
- for addridx in xrange(naddress):
+ for addridx in range(naddress):
ifaddr = iface.GetAddress(addridx)
ifmask = ifaddr.GetMask()
newuuid = None
if search:
# search object
- for ouuid, oobj in self._objects.iteritems():
+ for ouuid, oobj in self._objects.items():
if nobj == oobj:
newuuid = ouuid
break
return pprint.pformat(value)
def format_args(self, args):
- fargs = map(self.format_value, args)
- return "[%s]" % ",".join(fargs)
+ return "[%s]" % ",".join(self.format_value(arg) for arg in args)
def format_kwargs(self, kwargs):
- fkwargs = map(lambda (k,w):
- "%s: %s" % (self.format_value(k), self.format_value(w)),
- kwargs.iteritems())
+ fkwargs = ["%s: %s" % (self.format_value(k), self.format_value(v)) for (k, v) in kwargs.items()]
return "dict({%s})" % ",".join(fkwargs)
base = type_id.LookupByName("ns3::Object")
# Create a .py file using the ns-3 RM template for each ns-3 TypeId
- for i in xrange(tid_count):
+ for i in range(tid_count):
tid = type_id.GetRegistered(i)
(base_class_import, base_class) = select_base_class(ns3, tid)
attributes = ""
attr_count = tid.GetAttributeN()
- for i in xrange(attr_count):
+ for i in range(attr_count):
attr_info = tid.GetAttribute(i)
if not attr_info.accessor.HasGetter():
continue
elif isinstance(value, ns3.EnumValue):
attr_type = "Types.Enumerate"
allowed = checker.GetUnderlyingTypeInformation().split("|")
- attr_allowed = "[%s]" % ",".join(map(lambda x: "\"%s\"" % x, allowed))
+ attr_allowed = "[%s]" % ",".join(["\"%s\"" % x for x in allowed])
elif isinstance(value, ns3.DoubleValue):
attr_type = "Types.Double"
# TODO: range
traces = ""
trace_count = tid.GetTraceSourceN()
- for i in xrange(trace_count):
+ for i in range(trace_count):
trace_info = tid.GetTraceSource(i)
trace_name = trace_info.name
trace_help = trace_info.help.replace('"', '\\"').replace("'", "\\'")
else:
properties = self._attr_element(payload,"props","")
- for prop in props.keys():
+ for prop in props:
if isinstance(props[prop],str):
self._attr_element(properties,prop,props[prop],type_key="type", type_value = "string")
elif isinstance(props[prop],dict):
key = self._attr_element(properties,prop,"",type_key="type", type_value = "hash")
- for comp in props[prop].keys():
+ for comp in props[prop]:
self._attr_element(key,comp,props[prop][comp],type_key="type", type_value = "string")
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in guards:
self._attr_element(guardians,guard,guards[guard],type_key="type", type_value = "string")
return payload
if props :
properties = self._attr_element(payload,"props","")
- for prop in props.keys():
+ for prop in props:
self._attr_element(properties,prop,props[prop],type_key="type", type_value = "symbol")
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in guards:
self._attr_element(guardians,guard,guards[guard],type_key="type", type_value = "string")
return payload
if props :
properties = self._attr_element(payload,"props","")
- for prop in props.keys():
+ for prop in list(props.keys()):
self._attr_element(properties,prop,props[prop])
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in list(guards.keys()):
self._attr_element(guardians,guard,guards[guard])
return payload
if props :
properties = self._id_element(payload,"props","xmlns:frcp",
"http://schema.mytestbed.net/omf/6.0/protocol")
- for prop in props.keys():
+ for prop in list(props.keys()):
self._attr_element(properties,prop,props[prop])
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in guards.keys():
+ for guard in list(guards.keys()):
self._attr_element(guardians,guard,guards[guard])
return payload
def check_ready(self, xmpp):
delay = 1.0
- for i in xrange(15):
+ for i in range(15):
if xmpp.ready:
break
else:
event = self._check_for_tag(root, namespaces, "event")
log = "STATUS -- "
- for elt in props.keys():
+ for elt in props:
ns, tag = elt.split('}')
if tag == "it":
log = log + "membership : " + props[elt]+" -- "
attribute hostname, it will over write the previous value.
"""
hosts_hrn = self.sfaapi.get_resources_hrn()
- for host, hrn in hosts_hrn.iteritems():
+ for host, hrn in hosts_hrn.items():
if hrn == node:
host = host + '.wilab2.ilabt.iminds.be'
self.set("host", host)
nodes_id = []
filters = {}
- for attr_name, attr_obj in self._attrs.iteritems():
+ for attr_name, attr_obj in self._attrs.items():
attr_value = self.get(attr_name)
if attr_value is not None and attr_obj.has_flag(Flags.Filter) and \
def _retry(fn):
def rv(*p, **kw):
- for x in xrange(5):
+ for x in range(5):
try:
return fn(*p, **kw)
except (socket.error, IOError, OSError):
filters = filters, peer=None, **kw)
)
else:
- peer_filter = map(name_to_id, peer)
+ peer_filter = [name_to_id(x) for x in peer]
elif peer is None or peer == self._local_peer:
peer_filter = None
if not self._blacklisted(host_hrn):
if not self._reserved(host_hrn):
print(self.sfaapi._reserved ,self.guid)
- for hostname, hrn in nodes.iteritems():
+ for hostname, hrn in nodes.items():
if host_hrn == hrn:
print('hostname' ,hostname)
ping_ok = self._do_ping(hostname)
attribute hostname, it will over write the previous value.
"""
hosts_hrn = self.sfaapi.get_resources_hrn()
- for hostname, hrn in hosts_hrn.iteritems():
+ for hostname, hrn in hosts_hrn.items():
if hrn == node:
self.set("hostname", hostname)
if not exec_run_home:
exec_run_home = self.run_home
- for i in xrange(20):
+ for i in range(20):
(out, err), proc = self.node.check_output(self.run_home, "vif_name")
if proc.poll() > 0:
"""
if env:
export = ''
- for envkey, envval in env.iteritems():
+ for envkey, envval in env.items():
export += '%s=%s ' % (envkey, envval)
command = "%s %s" % (export, command)
if out:
try:
- return map(int,out.strip().split(' ',1))
+ return [ int(x) for x in out.strip().split(' ',1) ]
except:
# Ignore, many ways to fail that don't matter that much
return None
filters = self._map_attr_to_resource_filters(filters)
qfilters = list()
- for filtername, filtervalue in filters.iteritems():
+ for filtername, filtervalue in filters.items():
newfilter = [filtername, "==", filtervalue]
qfilters.append(newfilter)
}
mapped_filters = dict()
- for filtername, filtervalue in filters.iteritems():
+ for filtername, filtervalue in filters.items():
if attr_to_filter[filtername]:
new_filtername = attr_to_filter[filtername]
mapped_filters[new_filtername] = filtervalue
nodesinbranch = (node_count - 1)/ BRANCHES
c = 1
- for i in xrange(BRANCHES):
+ for i in range(BRANCHES):
prev = 0
- for n in xrange(1, nodesinbranch + 1):
+ for n in range(1, nodesinbranch + 1):
graph.add_node(c)
graph.add_edge(prev, c)
prev = c
source = leaves.pop(random.randint(0, len(leaves) - 1))
else:
# options must not be already sources or targets
- options = [ k for k,v in self.topology.degree().iteritems() \
+ options = [ k for k,v in self.topology.degree().items() \
if (not kwargs.get("is_leaf") or v == 1) \
and not self.topology.node[k].get("source") \
and not self.topology.node[k].get("target")]
self.workers = []
# initialize workers
- for x in xrange(maxthreads):
+ for x in range(maxthreads):
worker = WorkerThread()
worker.attach(self.queue, self.rvqueue, self.delayed_exceptions)
worker.setDaemon(True)
rmsnode = doc.createElement("rms")
ecnode.appendChild(rmsnode)
- for guid, rm in ec._resources.iteritems():
+ for guid, rm in ec._resources.items():
self._rm_to_xml(doc, rmsnode, ec, guid, rm)
return doc
cnnode = doc.createElement("conditions")
conditions = False
- for action, conds in rm._conditions.iteritems():
+ for action, conds in rm._conditions.items():
conditions = True
for (group, state, time) in conds:
ccnnode = doc.createElement("condition")
networkx.draw(graph, pos = pos, node_color="white",
node_size = 500, with_labels=True)
- label = "\n".join(map(lambda v: "%s: %s" % (v[0], v[1]), labels.iteritems()))
+ label = "\n".join(["%s: %s" % (v[0], v[1]) for v in iter(labels.items())])
plt.annotate(label, xy=(0.05, 0.95), xycoords='axes fraction')
fpath += ".png"
labels = dict()
connections = set()
- for guid, rm in ec._resources.iteritems():
+ for guid, rm in ec._resources.items():
label = rm.get_rtype()
graph.add_node(guid,
else: slice_resources = []
if slice_resources:
slice_resources_hrn = self.get_resources_hrn(slice_resources)
- for s_hrn_key, s_hrn_value in slice_resources_hrn.iteritems():
+ for s_hrn_key, s_hrn_value in slice_resources_hrn.items():
s_parts = s_hrn_value.split('.')
s_hrn = '.'.join(s_parts[:2]) + '.' + '\\.'.join(s_parts[2:])
resources_hrn_new.append(s_hrn)
@functools.wraps(func)
def rv(*p, **kw):
retry = kw.pop("_retry", False)
- for i in xrange(0 if retry else 4):
+ for i in range(0 if retry else 4):
try:
return func(*p, **kw)
except (select.error, socket.error) as args:
if out:
try:
- return map(int,out.strip().split(' ',1))
+ return [ int(x) for x in out.strip().split(' ',1) ]
except:
# Ignore, many ways to fail that don't matter that much
return None
tmp_known_hosts = None,
blocking = True):
- for x in xrange(retry):
+ for x in range(retry):
# display command actually invoked when debug is turned on
message = " ".join( [ "'{}'".format(arg) for arg in args ] )
log("sshfuncs: invoking {}".format(message), logging.DEBUG)
# The method communicate was re implemented for performance issues
# when using python subprocess communicate method the ssh commands
# last one minute each
+ #log("BEFORE communicate", level=logging.INFO); import time; beg=time.time()
out, err = _communicate(proc, input=None)
+ #log("AFTER communicate - {}s".format(time.time()-beg), level=logging.INFO)
elif stdout:
out = proc.stdout.read()
node = ec.register_resource("Node")
apps = list()
- for i in xrange(1000):
+ for i in range(1000):
app = ec.register_resource("Application")
ec.register_connection(app, node)
apps.append(app)
ec.register_connection(eapp, node)
apps.append(eapp)
- for i in xrange(10):
+ for i in range(10):
app = ec.register_resource("Application")
ec.register_connection(app, node)
apps.append(app)
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
argss = list(args)
argss.pop(0)
- for i in xrange(len(argss)/2):
+ for i in range(len(argss)//2):
username = argss[i*2]
hostname = argss[i*2+1]
node, ec = create_node(hostname, username)
def wrapped(*args, **kwargs):
argss = list(args)
argss.pop(0)
- for i in xrange(len(argss)/3):
+ for i in range(len(argss)//3):
username = argss[i*3]
hostname = argss[i*3+1]
identity = argss[i*3+2]
ec.set(node, "cleanProcesses", True)
apps = list()
- for i in xrange(50):
+ for i in range(50):
app = ec.register_resource("linux::Application")
cmd = "ping -c5 %s" % self.target
ec.set(app, "command", cmd)
def inc(count):
count[0] += 1
- for x in xrange(10):
+ for x in range(10):
runner.put(inc, count)
runner.destroy()
runner = ParallelRun(maxthreads = 4)
runner.start()
- for x in xrange(100):
+ for x in range(100):
runner.put(sleep)
runner.empty()
runner = ParallelRun(maxthreads = 4)
runner.start()
- for x in xrange(4):
+ for x in range(4):
runner.put(inc, count)
runner.put(error)
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
apps = list()
ifaces = list()
- for i in xrange(node_count):
+ for i in range(node_count):
node = ec.register_resource("dummy::Node")
nodes.append(node)
ec.register_connection(node, iface)
ifaces.append(iface)
- for i in xrange(app_count):
+ for i in range(app_count):
app = ec.register_resource("dummy::Application")
ec.register_connection(node, app)
apps.append(app)
files.extend(names)
- os.path.walk(destdir, recls, files)
+ # os.path.walk was removed in Python 3; os.walk yields (dirpath, dirnames, filenames)
+ for dirname, dirnames, filenames in os.walk(destdir):
+     recls(files, dirname, dirnames + filenames)
- origfiles = map(lambda s: os.path.basename(s), [dirpath, f.name, f1.name])
+ origfiles = [os.path.basename(s) for s in [dirpath, f.name, f1.name]]
- self.assertEquals(sorted(origfiles), sorted(files))
+ self.assertEqual(sorted(origfiles), sorted(files))
os.remove(f1.name)
shutil.rmtree(dirpath)
files.extend(names)
- os.path.walk(destdir, recls, files)
+ # os.path.walk was removed in Python 3; os.walk yields (dirpath, dirnames, filenames)
+ for dirname, dirnames, filenames in os.walk(destdir):
+     recls(files, dirname, dirnames + filenames)
- origfiles = map(lambda s: os.path.basename(s), [dirpath, f.name, f1.name])
+ origfiles = [os.path.basename(s) for s in [dirpath, f.name, f1.name]]
- self.assertEquals(sorted(origfiles), sorted(files))
+ self.assertEqual(sorted(origfiles), sorted(files))
def test_rproc_manage(self):
env = test_environment()