### Compute metric: Avg number of Interests seen per content name
### normalized by the number of nodes in the shortest path
- content_name_count = len(list(content_names.values()))
+ content_name_count = len(content_names)
nodes_in_shortest_path = len(shortest_path) - 1
metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
# statistics on RTT
rtts = [content_names[content_name]["rtt"] \
- for content_name in list(content_names.keys())]
+ for content_name in content_names]
# sample mean and standard deviation
sample = numpy.array(rtts)
low = numpy.array([float(m[1]) for m in metrics])
high = numpy.array([float(m[2]) for m in metrics])
error = [y - low, high - y]
- x = list(range(1,runs + 1))
+ x = range(1,runs + 1)
# plot average RTT and confidence interval for each iteration
pyplot.errorbar(x, y, yerr = error, fmt='o')
### Compute metric: Avg number of Interests seen per content name
### normalized by the number of nodes in the shortest path
- content_name_count = len(list(content_names.values()))
+ content_name_count = len(content_names)
nodes_in_shortest_path = len(shortest_path) - 1
metric = interest_count / (float(content_name_count) * float(nodes_in_shortest_path))
# statistics on RTT
rtts = [content_names[content_name]["rtt"] \
- for content_name in list(content_names.keys())]
+ for content_name in content_names]
# sample mean and standard deviation
sample = numpy.array(rtts)
# Register a collector to automatically collect traces
collector = add_collector(ec, "stderr")
- for ccnd in list(ccnds.values()):
+ for ccnd in ccnds.values():
ec.register_connection(collector, ccnd)
# deploy all ResourceManagers
nodes = dict()
chann = add_channel(ec, channel, xmpp_slice, xmpp_host)
- for i in range(len(all_hosts)):
- node = add_node(ec,all_hosts[i], xmpp_slice, xmpp_host)
+ for i, host in enumerate(all_hosts):
+ node = add_node(ec, host, xmpp_slice, xmpp_host)
iface = add_interface(ec, all_ip[i], xmpp_slice, xmpp_host)
ec.register_connection(node, iface)
ec.register_connection(iface, chann)
- nodes[all_hosts[i]] = node
+ nodes[host] = node
# CCN setup for the node
ccnds = dict()
ccnrs = dict()
- for i in range(len(all_hosts)):
- ccndstart = add_app(ec, nodes[all_hosts[i]], "#ccndstart", "ccndstart &",
+ for i, host in enumerate(all_hosts):
+ ccndstart = add_app(ec, nodes[host], "#ccndstart", "ccndstart &",
env, xmpp_slice, xmpp_host)
- ccnr = add_app(ec, nodes[all_hosts[i]], "#ccnr", "ccnr &",
+ ccnr = add_app(ec, nodes[host], "#ccnr", "ccnr &",
env, xmpp_slice, xmpp_host)
- ccnds[all_hosts[i]] = ccndstart
- ccnrs[all_hosts[i]] = ccnr
+ ccnds[host] = ccndstart
+ ccnrs[host] = ccnr
ec.register_condition(ccnr, ResourceAction.START, ccndstart, ResourceState.STARTED, "1s")
# CCNDC setup
link = [l1u, l1d, l2u, l2d, l3u, l3d, l4u, l4d, l5u, l5d, b1u, b1d, b2u, b2d]
# List of condition
- for i in range(len(all_hosts)):
- ec.register_condition(ccnrs[all_hosts[i]], ResourceAction.START, ccnds[all_hosts[i]], ResourceState.STARTED, "1s")
- ec.register_condition(link, ResourceAction.START, ccnrs[all_hosts[i]], ResourceState.STARTED, "1s")
+ for host in all_hosts:
+ ec.register_condition(ccnrs[host], ResourceAction.START, ccnds[host], ResourceState.STARTED, "1s")
+ ec.register_condition(link, ResourceAction.START, ccnrs[host], ResourceState.STARTED, "1s")
# Streaming Server
pub = add_publish(ec, nodes[host5], movie, xmpp_slice, xmpp_host)
nodes = dict()
chann = add_channel(ec, channel, xmpp_slice, xmpp_host)
- for i in range(len(all_hosts)):
- node = add_node(ec,all_hosts[i], xmpp_slice, xmpp_host)
+ for i, host in enumerate(all_hosts):
+ node = add_node(ec,host, xmpp_slice, xmpp_host)
iface = add_interface(ec, all_ip[i], xmpp_slice, xmpp_host)
ec.register_connection(node, iface)
ec.register_connection(iface, chann)
- nodes[all_hosts[i]] = node
+ nodes[host] = node
#### CCN setup for the node
### ccnds = dict()
# Do the iperf
iperfserv = dict()
iperfclient = dict()
- for i in range(len(all_hosts)):
- perfserv = add_app(ec, nodes[all_hosts[i]], "#perfserv", "iperf -s > /opt/iperfserv.txt",
+ for i, host in enumerate(all_hosts):
+ perfserv = add_app(ec, nodes[host], "#perfserv", "iperf -s > /opt/iperfserv.txt",
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]] = []
+ iperfclient[host] = []
if i > 0:
cmd = "iperf -c " + all_ip[i-1] + " > /opt/iperclient1.txt"
- perfclient1 = add_app(ec, nodes[all_hosts[i]], "#perfclient1", cmd,
+ perfclient1 = add_app(ec, nodes[host], "#perfclient1", cmd,
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]].append(perfclient1)
+ iperfclient[host].append(perfclient1)
if i < (len(all_hosts)-1):
cmd = "iperf -c " + all_ip[i+1] + " > /opt/iperclient2.txt"
- perfclient2 = add_app(ec, nodes[all_hosts[i]], "#perfclient2", cmd,
+ perfclient2 = add_app(ec, nodes[host], "#perfclient2", cmd,
env, xmpp_slice, xmpp_host)
- iperfclient[all_hosts[i]].append(perfclient2)
+ iperfclient[host].append(perfclient2)
- iperfserv[all_hosts[i]] = perfserv
+ iperfserv[host] = perfserv
- for i in range(len(all_hosts)):
- #ec.register_condition(iperfserv[all_hosts[i]], ResourceAction.START, link, ResourceState.STARTED, "2s")
- for elt in iperfclient[all_hosts[i]]:
- ec.register_condition(elt, ResourceAction.START, iperfserv[all_hosts[i]], ResourceState.STARTED, "3s")
+ for host in all_hosts:
+ #ec.register_condition(iperfserv[host], ResourceAction.START, link, ResourceState.STARTED, "2s")
+ for elt in iperfclient[host]:
+ ec.register_condition(elt, ResourceAction.START, iperfserv[host], ResourceState.STARTED, "3s")
## Streaming Server
## ccndstop = add_app(ec, nodes[all_hosts[i]], "#ccndstop", "ccndstop", env, xmpp_slice, xmpp_host)
## ccndstops.append(ccndstop)
perfkill = dict()
- for i in range(len(all_hosts)):
- kill = add_app(ec, nodes[all_hosts[i]], "#kill", "killall iperf", "", xmpp_slice, xmpp_host)
- perfkill[all_hosts[i]] = kill
+ for host in all_hosts:
+ kill = add_app(ec, nodes[host], "#kill", "killall iperf", "", xmpp_slice, xmpp_host)
+ perfkill[host] = kill
# Condition to stop and clean the experiment
apps = []
- for i in range(len(all_hosts)):
-# apps.append(ccnds[all_hosts[i]])
-# apps.append(ccnrs[all_hosts[i]])
- apps.append(iperfserv[all_hosts[i]])
- for elt in iperfclient[all_hosts[i]]:
+ for host in all_hosts:
+# apps.append(ccnds[host])
+# apps.append(ccnrs[host])
+ apps.append(iperfserv[host])
+ for elt in iperfclient[host]:
apps.append(elt)
# apps += link
#apps.append(pub)
# ec.register_condition(ccndstops + [killall], ResourceAction.STOP, ccndstops, ResourceState.STARTED, "1s")
killall = []
- for i in range(len(all_hosts)):
- killall.append(perfkill[all_hosts[i]])
+ for host in all_hosts:
+ killall.append(perfkill[host])
ec.register_condition(killall, ResourceAction.START, apps, ResourceState.STOPPED, "1s")
ec.register_condition(killall, ResourceAction.STOP, killall, ResourceState.STARTED, "1s")
ec.deploy()
-ec.wait_finished(list(apps.values()))
+# py3: no need to transform into a list
+# as wait_finished (wait in fact) will do it anyway
+ec.wait_finished(apps.values())
# collect results
for key, app in apps.items():
pl_password = options.pl_password
hosts = options.hosts
-hosts = list(map(str.strip, hosts.split(",")))
+hosts = [host.strip() for host in hosts.split(",")]
apps = []
## Create the experiment controller
fname = graph.node[nid]["history"]
history = load_content_history(fname)
- for content_name in list(history.keys()):
+ for content_name in history:
hist = history[content_name]
for (timestamp, message_type, nid1, nid2, nonce, size, line) in hist:
# Compute the time elapsed between the time an interest is sent
# in the consumer node and when the content is received back
- for content_name in list(content_names.keys()):
+ for content_name in content_names:
# order content and interest messages by timestamp
content_names[content_name]["content"] = sorted(
content_names[content_name]["content"])
if sources:
self.info("Uploading sources ")
- sources = list(map(str.strip, sources.split(";")))
+ sources = [str.strip(source) for source in sources.split(";")]
# Separate sources that should be downloaded from
# the web, from sources that should be uploaded from
# adding the avoided pids filtered above (avoid_kill) to allow users keep process
# alive when using besides ssh connections
kill_pids = set(pids_temp.items()) - set(pids.items())
- kill_pids = ' '.join(list(dict(kill_pids).keys()))
+ # py2/py3 : keep it simple
+ kill_pids = ' '.join(dict(kill_pids))
# removing pids from beside connections and its process
kill_pids = kill_pids.split(' ')
src = f.name
# If dst files should not be overwritten, check that the files do not
- # exits already
+ # exist already
if isinstance(src, str):
- src = list(map(str.strip, src.split(";")))
+ src = [s.strip() for s in src.split(";")]
if overwrite == False:
src = self.filter_existing_files(src, dst)
if len(src) > 1 else {dst: src[0]}
command = []
- for d in list(dests.keys()):
+ for d in dests:
command.append(" [ -f {dst} ] && echo '{dst}' ".format(dst=d) )
command = ";".join(command)
(out, err), proc = self.execute(command, retry = 1, with_lock = True)
- for d in list(dests.keys()):
+ for d in list(dests):
if out.find(d) > -1:
del dests[d]
if not dests:
return []
+ # list(..) here added by 2to3 - leaving for safety
return list(dests.values())
import time
def ipfmt(ip):
- ipbytes = list(map(ord,ip.decode("hex")))
- return '.'.join(map(str,ipbytes))
+ return '.'.join(str(ord(x)) for x in ip.decode("hex"))
tagtype = {
'0806' : 'arp',
# If the result is an object (not a base value),
# then keep track of the object a return the object
# reference (newuuid)
+ # xxx the two instances of `int` are from 2to3
if not (result is None or type(result) in [
bool, float, int, str, int]):
self._objects[newuuid] = result
return pprint.pformat(value)
def format_args(self, args):
- fargs = list(map(self.format_value, args))
- return "[%s]" % ",".join(fargs)
+ return "[%s]" % ",".join(self.format_value(arg) for arg in args)
def format_kwargs(self, kwargs):
- fkwargs = ["%s: %s" % (self.format_value(k_w[0]), self.format_value(k_w[1])) for k_w in iter(kwargs.items())]
+ fkwargs = ["%s: %s" % (self.format_value(k), self.format_value(v)) for (k, v) in kwargs.items()]
return "dict({%s})" % ",".join(fkwargs)
return
kwargs = dict()
- for attr in list(self._attrs.values()):
+ for attr in self._attrs.values():
if not ( attr.has_flag(Flags.Construct) and attr.has_changed ):
continue
item = base64.b64decode(item).rstrip()
return pickle.loads(item)
- decoded = list(map(decode, msg.split("|")))
+ decoded = [ decode(x) for x in msg.split("|") ]
# decoded message
dmsg_type = decoded.pop(0)
def _register_attributes(cls):
standard = Attribute("Standard", "Wireless standard",
default = "WIFI_PHY_STANDARD_80211a",
+ # staying safe with 2to3's generated list
allowed = list(WIFI_STANDARDS.keys()),
type = Types.Enumerate,
flags = Flags.Design)
# If the result is an object (not a base value),
# then keep track of the object a return the object
# reference (newuuid)
+ # xxx the two instances of `int` are from 2to3
if not (result is None or type(result) in [
bool, float, int, str, int]):
self._objects[newuuid] = result
return pprint.pformat(value)
def format_args(self, args):
- fargs = list(map(self.format_value, args))
- return "[%s]" % ",".join(fargs)
+ return "[%s]" % ",".join(self.format_value(arg) for arg in args)
def format_kwargs(self, kwargs):
- fkwargs = ["%s: %s" % (self.format_value(k_w[0]), self.format_value(k_w[1])) for k_w in iter(kwargs.items())]
+ fkwargs = ["%s: %s" % (self.format_value(k), self.format_value(v)) for (k, v) in kwargs.items()]
return "dict({%s})" % ",".join(fkwargs)
else:
properties = self._attr_element(payload,"props","")
- for prop in list(props.keys()):
+ for prop in props:
if isinstance(props[prop],str):
self._attr_element(properties,prop,props[prop],type_key="type", type_value = "string")
elif isinstance(props[prop],dict):
key = self._attr_element(properties,prop,"",type_key="type", type_value = "hash")
- for comp in list(props[prop].keys()):
+ for comp in props[prop]:
self._attr_element(key,comp,props[prop][comp],type_key="type", type_value = "string")
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in list(guards.keys()):
+ for guard in guards:
self._attr_element(guardians,guard,guards[guard],type_key="type", type_value = "string")
return payload
if props :
properties = self._attr_element(payload,"props","")
- for prop in list(props.keys()):
+ for prop in props:
self._attr_element(properties,prop,props[prop],type_key="type", type_value = "symbol")
if guards :
guardians = self._attr_element(payload,"guard","")
- for guard in list(guards.keys()):
+ for guard in guards:
self._attr_element(guardians,guard,guards[guard],type_key="type", type_value = "string")
return payload
event = self._check_for_tag(root, namespaces, "event")
log = "STATUS -- "
- for elt in list(props.keys()):
+ for elt in props:
ns, tag = elt.split('}')
if tag == "it":
log = log + "membership : " + props[elt]+" -- "
slice_nodes = self.sfaapi.get_slice_resources(slicename)['resource']
if slice_nodes:
if len(slice_nodes[0]['services']) != 0:
- slice_nodes_hrn = list(self.sfaapi.get_resources_hrn(slice_nodes).values())
+ slice_nodes_hrn = self.sfaapi.get_resources_hrn(slice_nodes).values()
else: slice_nodes_hrn = []
nodes_inslice = list(set(hosts_hrn) & set(slice_nodes_hrn))
return nodes_inslice
filters = filters, peer=None, **kw)
)
else:
- peer_filter = list(map(name_to_id, peer))
+ peer_filter = [name_to_id(x) for x in peer]
elif peer is None or peer == self._local_peer:
peer_filter = None
super(PlanetlabSfaNode, self).do_discover()
else:
- hosts_hrn = list(nodes.values())
+ hosts_hrn = nodes.values()
nodes_inslice = self._check_if_in_slice(hosts_hrn)
nodes_not_inslice = list(set(hosts_hrn) - set(nodes_inslice))
host_hrn = None
slicename = 'ple.' + slicename
slice_nodes = self.sfaapi.get_slice_resources(slicename)['resource']
if slice_nodes:
- slice_nodes_hrn = list(self.sfaapi.get_resources_hrn(slice_nodes).values())
+ slice_nodes_hrn = self.sfaapi.get_resources_hrn(slice_nodes).values()
else: slice_nodes_hrn = []
nodes_inslice = list(set(hosts_hrn) & set(slice_nodes_hrn))
return nodes_inslice
def gen_auth_keys(pubkey, output, environ):
#opts = ['from="127.0.0.1/32"'] # fails in stupid yans setup
opts = []
- for k, v in list(environ.items()):
+ for k, v in environ.items():
opts.append('environment="%s=%s"' % (k, v))
lines = file(pubkey).readlines()
if out:
try:
- return list(map(int,out.strip().split(' ',1)))
+ return [ int(x) for x in out.strip().split(' ',1) ]
except:
# Ignore, many ways to fail that don't matter that much
return None
anode = doc.createElement("attributes")
attributes = False
- for attr in list(rm._attrs.values()):
+ for attr in rm._attrs.values():
if attr.has_changed:
attributes = True
aanode = doc.createElement("attribute")
tnode = doc.createElement("traces")
traces = False
- for trace in list(rm._trcs.values()):
+ for trace in rm._trcs.values():
if trace.enabled:
traces = True
ttnode = doc.createElement("trace")
slice_res = self.get_slice_resources(slicename)['resource']
if slice_res:
if len(slice_res[0]['services']) != 0:
+ # 2to3 added list() and it is useful
slice_res_hrn = list(self.get_resources_hrn(slice_res).values())
if self._compare_lists(slice_res_hrn, resources_hrn):
return True
return objcopy
# We thus suppose we have a child of dict
objcopy = {}
- for k, v in list(obj.items()):
+ for k, v in obj.items():
objcopy[k] = self.make_dict_rec(v)
return objcopy
if out:
try:
- return list(map(int,out.strip().split(' ',1)))
+ return [ int(x) for x in out.strip().split(' ',1) ]
except:
# Ignore, many ways to fail that don't matter that much
return None