tools.write_file(auth_keys, lambda f: f.write(new_keys))
# set access permissions and ownership properly
- os.chmod(dot_ssh, 0700)
+ os.chmod(dot_ssh, 0o700)
os.chown(dot_ssh, uid, gid)
- os.chmod(auth_keys, 0600)
+ os.chmod(auth_keys, 0o600)
os.chown(auth_keys, uid, gid)
# set self.keys to new_keys only when all of the above ops succeed
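Note: the ordering in this hunk is deliberate: write the new keys, then tighten permissions and ownership, and only record the new state once every step has succeeded. A minimal sketch of the pattern (the helper name and signature are illustrative, not the module's API):

    import os

    def install_keys(dot_ssh, auth_keys, new_keys, uid, gid):
        with open(auth_keys, 'w') as f:
            f.write(new_keys)
        os.chmod(dot_ssh, 0o700)      # Python 3 requires the 0o octal prefix
        os.chmod(auth_keys, 0o600)
        os.chown(dot_ssh, uid, gid)
        os.chown(auth_keys, uid, gid)
        return new_keys               # caller assigns this to self.keys last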
with the forward_api_calls shell.
"""
-import SimpleXMLRPCServer
-import SocketServer
+import xmlrpc.server
+import socketserver
import errno
import os
import pwd
import socket
import struct
import threading
-import xmlrpclib
+import xmlrpc.client
import sys
import database
API_SERVER_PORT = 812
UNIX_ADDR = '/tmp/nodemanager.api'
-class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
+class APIRequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
# overriding _dispatch to achieve this effect is officially deprecated,
# but I can't figure out how to get access to .request without
# duplicating SimpleXMLRPCServer code here, which is more likely to
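Note: the deprecated `_dispatch` override is kept because it is the only convenient way to reach `self.request`, the accepted socket, from which the caller can be identified on the AF_UNIX endpoint. A sketch of the idea, assuming Linux's SO_PEERCRED credential passing (the unpack format is illustrative):

    def _dispatch(self, method_name_unicode, args):
        # self.request is the connected socket; on AF_UNIX, SO_PEERCRED
        # returns the peer's (pid, uid, gid)
        ucred = self.request.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED,
                                        struct.calcsize('3i'))
        pid, uid, gid = struct.unpack('3i', ucred)
        caller_name = pwd.getpwuid(uid).pw_name
        ...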
method_name = str(method_name_unicode)
try: method = api_method_dict[method_name]
except KeyError:
- api_method_list = api_method_dict.keys()
- api_method_list.sort()
+ api_method_list = sorted(api_method_dict.keys())
- raise xmlrpclib.Fault(100, 'Invalid API method %s. Valid choices are %s' % \
+ raise xmlrpc.client.Fault(100, 'Invalid API method %s. Valid choices are %s' % \
(method_name, ', '.join(api_method_list)))
expected_nargs = nargs_dict[method_name]
if len(args) != expected_nargs:
- raise xmlrpclib.Fault(101, 'Invalid argument count: got %d, expecting %d.' % \
+ raise xmlrpc.client.Fault(101, 'Invalid argument count: got %d, expecting %d.' % \
(len(args), expected_nargs))
else:
# Figure out who's calling.
# Special case : the sfa component manager
if caller_name == PLC_SLICE_PREFIX+"_sfacm":
try: result = method(*args)
- except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
+ except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err)
# Anyone can call these functions
elif method_name in ('Help', 'Ticket', 'GetXIDs', 'GetSSHKeys'):
try: result = method(*args)
- except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
+ except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err)
else: # Execute anonymous call.
# Authenticate the caller if not in the above fncts.
if method_name == "GetRecord":
# only work on slivers or self. Sanity check.
if not (target_rec and target_rec['type'].startswith('sliver.')):
- raise xmlrpclib.Fault(102, \
+ raise xmlrpc.client.Fault(102, \
'Invalid argument: the first argument must be a sliver name.')
# only manipulate slivers who delegate you authority
if caller_name in (target_name, target_rec['delegations']):
try: result = method(target_rec, *args[1:])
- except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
+ except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err)
else:
- raise xmlrpclib.Fault(108, '%s: Permission denied.' % caller_name)
+ raise xmlrpc.client.Fault(108, '%s: Permission denied.' % caller_name)
if result == None: result = 1
return result
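Note: clients observe these error paths as `xmlrpc.client.Fault` instances. A quick smoke test against the INET listener bound below (loopback only, port 812):

    import xmlrpc.client
    proxy = xmlrpc.client.ServerProxy('http://127.0.0.1:812/')
    try:
        proxy.NoSuchMethod()
    except xmlrpc.client.Fault as fault:
        print(fault.faultCode, fault.faultString)   # 100, plus the list of valid methods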
-class APIServer_INET(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer): allow_reuse_address = True
+class APIServer_INET(socketserver.ThreadingMixIn, xmlrpc.server.SimpleXMLRPCServer): allow_reuse_address = True
class APIServer_UNIX(APIServer_INET): address_family = socket.AF_UNIX
serv1 = APIServer_INET(('127.0.0.1', API_SERVER_PORT), requestHandler=APIRequestHandler, logRequests=0)
tools.as_daemon_thread(serv1.serve_forever)
try: os.unlink(UNIX_ADDR)
- except OSError, e:
+ except OSError as e:
if e.errno != errno.ENOENT: raise
serv2 = APIServer_UNIX(UNIX_ADDR, requestHandler=APIRequestHandler, logRequests=0)
tools.as_daemon_thread(serv2.serve_forever)
- os.chmod(UNIX_ADDR, 0666)
+ os.chmod(UNIX_ADDR, 0o666)
with the forward_api_calls shell.
"""
-import SimpleXMLRPCServer
-import SocketServer
+import xmlrpc.server
+import socketserver
import errno
import os
import pwd
import socket
import struct
import threading
-import xmlrpclib
+import xmlrpc.client
import slivermanager
try:
def export(method):
def args():
# Inspect method. Remove self from the argument list.
- max_args = method.func_code.co_varnames[0:method.func_code.co_argcount]
- defaults = method.func_defaults
+ max_args = method.__code__.co_varnames[0:method.__code__.co_argcount]
+ defaults = method.__defaults__
if defaults is None:
defaults = ()
min_args = max_args[0:len(max_args) - len(defaults)]
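Note: `__code__` and `__defaults__` are the Python 3 spellings of `func_code` and `func_defaults`. The same required-versus-optional split can be read off `inspect.signature`, which might be a cleaner follow-up; a sketch, assuming a plain function whose first parameter is self:

    import inspect

    def split_args(method):
        params = list(inspect.signature(method).parameters)[1:]    # drop self
        n_defaults = len(method.__defaults__ or ())
        min_args = params[:len(params) - n_defaults]                # no default: required
        return min_args, params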
@export_to_api(0)
def Help():
"""Get a list of functions currently supported by the Node Manager API"""
- names=api_method_dict.keys()
- names.sort()
+ names = sorted(api_method_dict.keys())
return ''.join(['**** ' + api_method_dict[name].__name__ + '\n' + api_method_dict[name].__doc__ + '\n'
for name in names])
deliver_ticket(data)
logger.log('api_calls: Ticket delivered for %s' % name)
Create(database.db.get(name))
- except Exception, err:
- raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))
+ except Exception as err:
+ raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
accepts=[Parameter(str, 'A ticket returned from GetSlivers()')],
def AdminTicket(ticket):
"""Admin interface to create slivers based on ticket returned by GetSlivers()."""
try:
- data, = xmlrpclib.loads(ticket)[0]
+ data, = xmlrpc.client.loads(ticket)[0]
name = data['slivers'][0]['name']
if data != None:
deliver_ticket(data)
logger.log('api_calls: Admin Ticket delivered for %s' % name)
Create(database.db.get(name))
- except Exception, err:
- raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))
+ except Exception as err:
+ raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
def GetSSHKeys():
"""Return an dictionary mapping slice names to SSH keys"""
keydict = {}
- for rec in database.db.itervalues():
+ for rec in database.db.values():
if 'keys' in rec:
keydict[rec['name']] = rec['keys']
return keydict
account.get(rec['name']).ensure_created(rec)
logger.log("api_calls: Create %s"%rec['name'])
else:
- raise Exception, "Only PLC can create non delegated slivers."
+ raise Exception("Only PLC can create non delegated slivers.")
@export_to_docbook(roles=['nm-controller', 'self'],
account.get(rec['name']).ensure_destroyed()
logger.log("api_calls: Destroy %s"%rec['name'])
else:
- raise Exception, "Only PLC can destroy non delegated slivers."
+ raise Exception("Only PLC can destroy non delegated slivers.")
@export_to_docbook(roles=['nm-controller', 'self'],
of loss of resources."""
rec = sliver_name
if not validate_loans(loans):
- raise xmlrpclib.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification')
+ raise xmlrpc.client.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification')
rec['_loans'] = loans
database.db.sync()
# Help
def usage():
- print "Usage: %s [OPTION]..." % sys.argv[0]
- print "Options:"
- print " -f, --config=FILE PLC configuration file (default: /etc/planetlab/plc_config)"
- print " -n, --node-id=FILE Node ID (or file)"
- print " -k, --key=FILE Node key (or file)"
- print " --help This message"
+ print("Usage: %s [OPTION]..." % sys.argv[0])
+ print("Options:")
+ print(" -f, --config=FILE PLC configuration file (default: /etc/planetlab/plc_config)")
+ print(" -n, --node-id=FILE Node ID (or file)")
+ print(" -k, --key=FILE Node key (or file)")
+ print(" --help This message")
sys.exit(1)
# Get options
"node=", "nodeid=", "node-id", "node_id",
"key=",
"help"])
- except getopt.GetoptError, err:
- print "Error: " + err.msg
+ except getopt.GetoptError as err:
+ print("Error: " + err.msg)
usage()
for (opt, optval) in opts:
plc = PLCAPI(config.plc_api_uri, config.cacert, session)
assert session == plc.GetSession()
- print session
+ print(session)
if __name__ == '__main__':
main()
# Since root is required for sanity, it's not in the API/plc database, so pass {}
# to use defaults.
- if root_xid not in slices.keys():
+ if root_xid not in slices:
slices[root_xid] = Slice(root_xid, "root", {})
slices[root_xid].reset({}, {})
# Used by bwlimit. pass {} since there is no rspec (like above).
- if default_xid not in slices.keys():
+ if default_xid not in slices:
slices[default_xid] = Slice(default_xid, "default", {})
slices[default_xid].reset({}, {})
live = {}
# Get running slivers that should be on this node (from plc). {xid: name}
# db keys on name, bwmon keys on xid. db doesn't have xid either.
- for plcSliver in nmdbcopy.keys():
+ for plcSliver in nmdbcopy:
live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
- logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
- logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())
+ logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__())
+ logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__())
# Get actual running values from tc.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())
+ logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__())
# The dat file has HTBs for slices, but the HTBs aren't running
nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
# Reset tc counts.
for nohtbslice in nohtbslices:
- if live.has_key(nohtbslice):
+ if nohtbslice in live:
slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
else:
logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
for slicenodat in slicesnodat:
# But slice is running
- if live.has_key(slicenodat):
+ if slicenodat in live:
# init the slice. which means start accounting over since kernel
# htb was already there.
slices[slicenodat] = Slice(slicenodat,
for newslice in newslicesxids:
# Delegated slices don't have xids (which are uids) since they haven't been
# instantiated yet.
- if newslice != None and live[newslice].has_key('_rspec') == True:
+ if newslice is not None and '_rspec' in live[newslice]:
# Check to see if we recently deleted this slice.
- if live[newslice]['name'] not in deaddb.keys():
+ if live[newslice]['name'] not in deaddb:
logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
# _rspec is the computed rspec: NM retrieved data from PLC, computed loans
# and made a dict of computed values.
if deadxid == root_xid or deadxid == default_xid:
continue
logger.log("bwmon: removing dead slice %s " % deadxid)
- if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
+ if deadxid in slices and deadxid in kernelhtbs:
# add slice (by name) to deaddb
logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
del slices[deadxid]
- if kernelhtbs.has_key(deadxid):
+ if deadxid in kernelhtbs:
logger.verbose("bwmon: Removing HTB for %s." % deadxid)
bwlimit.off(deadxid, dev = dev_default)
# Clean up deaddb
- for deadslice in deaddb.keys():
+ for deadslice in list(deaddb.keys()):
if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
logger.log("bwmon: Removing dead slice %s from dat." \
% deaddb[deadslice]['slice'].name)
# Get actual running values from tc since we've added and removed buckets.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())
+ logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__())
# Update all byte limits on all slices
- for (xid, slice) in slices.iteritems():
+ for (xid, slice) in slices.items():
# Monitor only the specified slices
if xid == root_xid or xid == default_xid: continue
if names and name not in names:
# Update byte counts
slice.update(kernelhtbs[xid], live[xid]['_rspec'])
- logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(), DB_FILE))
+ logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE))
f = open(DB_FILE, "w")
pickle.dump((version, slices, deaddb), f)
f.close()
kernelhtbs = gethtbs(root_xid, default_xid)
if len(kernelhtbs):
logger.log("bwmon: Disabling all running HTBs.")
- for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)
+ for htb in kernelhtbs: bwlimit.off(htb, dev = dev_default)
lock = threading.Event()
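Note: the reconciliation above boils down to set arithmetic over three views of the node: what PLC says should run (`live`), what the dat file remembers (`slices`), and what tc actually has (`kernelhtbs`). Schematically (names match the code above; this is a summary, not new logic):

    # schematic only; all three are dicts keyed by xid
    nohtbslices   = set(slices)     - set(kernelhtbs)   # accounted for but not running
    slicesnodat   = set(kernelhtbs) - set(slices)       # running but not in the dat file
    newslicesxids = set(live)       - set(slices)       # expected but never seen
    dead          = set(slices)     - set(live)         # remembered but no longer expected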
import os, os.path
import pyinotify
import logger
+from functools import reduce
# Base dir for libvirt
BASE_DIR = '/sys/fs/cgroup'
def get_cgroups():
""" Returns the list of cgroups active at this moment on the node """
- return map(os.path.basename, get_cgroup_paths())
+ return list(map(os.path.basename, get_cgroup_paths()))
def write(name, key, value, subsystem="cpuset"):
""" Writes a value to the file key with the cgroup with name """
base_path = get_cgroup_path(name, subsystem)
with open(os.path.join(base_path, key), 'w') as f:
- print >>f, value
+ print(value, file=f)
logger.verbose("cgroups.write: overwrote {}".format(base_path))
def append(name, key, value, subsystem="cpuset"):
""" Appends a value to the file key with the cgroup with name """
base_path = get_cgroup_path(name, subsystem)
with open(os.path.join(base_path, key), 'a') as f:
- print >>f, value
+ print(value, file=f)
logger.verbose("cgroups.append: appended {}".format(base_path))
if __name__ == '__main__':
subsystems = 'blkio cpu cpu,cpuacct cpuacct cpuset devices freezer memory net_cls perf_event systemd'.split()
for subsystem in subsystems:
- print 'get_cgroup_path({}, {}) = {}'.\
- format(name, subsystem, get_cgroup_path(name, subsystem))
+ print('get_cgroup_path({}, {}) = {}'
+ .format(name, subsystem, get_cgroup_path(name, subsystem)))
# print 'get_cgroup_paths = {}'.format(get_cgroup_paths(subsystem))
import curlwrapper
import logger
import tools
-import xmlrpclib
+import xmlrpc.client
from config import Config
# right after net
try:
logger.verbose("conf_files: retrieving URL=%s"%url)
contents = curlwrapper.retrieve(url, self.config.cacert)
- except xmlrpclib.ProtocolError as e:
+ except xmlrpc.client.ProtocolError as e:
logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url))
return
if not cf_rec['always_update'] and sha(contents).digest() == self.checksum(dest):
if self.system(cf_rec['postinstall_cmd']): self.system(err_cmd)
def run_once(self, data):
- if data.has_key("conf_files"):
+ if "conf_files" in data:
for f in data['conf_files']:
try: self.update_conf_file(f)
except: logger.log_exc("conf_files: failed to update conf_file")
def __init__(self, file = "/etc/planetlab/plc_config"):
try:
- execfile(file, self.__dict__)
+ exec(compile(open(file).read(), file, 'exec'), self.__dict__)
except:
- raise Exception, "Could not parse " + file
+ raise Exception("Could not parse " + file)
if int(self.PLC_API_PORT) == 443:
uri = "https://"
elif os.path.exists('/usr/boot/cacert.pem'):
self.cacert = '/usr/boot/cacert.pem'
else:
- raise Exception, "No boot server certificate bundle available"
+ raise Exception("No boot server certificate bundle available")
else:
uri = "http://"
self.cacert = None
if __name__ == '__main__':
from pprint import pprint
- for (k, v) in Config().__dict__.iteritems():
+ for (k, v) in Config().__dict__.items():
if k not in ['__builtins__']:
pprint ( (k, v), )
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
- print >>etc_shells, shell
+ print(shell, file=etc_shells)
etc_shells.close()
import os
import os.path
import cgroups
+from functools import reduce
glo_coresched_simulate = False
joinpath = os.path.join
# allocate the cores to the slivers that have them reserved
# TODO: Need to sort this from biggest cpu_cores to smallest
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
# now check and see if any of our slices had the besteffort flag
# set
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
self.freezeUnits("freezer.state", freezeList)
def freezeUnits (self, var_name, freezeList):
for (slicename, freeze) in freezeList.items():
try:
cgroup_path = cgroups.get_cgroup_path(slicename, 'freezer')
logger.verbose("CoreSched: setting freezer for {} to {} - path={} var={}"
break
if glo_coresched_simulate:
- print "F", cgroup
+ print("F", cgroup)
else:
with open(cgroup, "w") as f:
f.write(freeze)
cpus = default
if glo_coresched_simulate:
- print "R", cgroup + "/" + var_name, self.listToRange(cpus)
+ print("R", cgroup + "/" + var_name, self.listToRange(cpus))
else:
cgroups.write(cgroup, var_name, self.listToRange(cpus))
x = CoreSched()
- print "cgroups:", ",".join(x.get_cgroups())
+ print("cgroups:", ",".join(x.get_cgroups()))
- print "cpus:", x.listToRange(x.get_cpus())
- print "sibling map:"
+ print("cpus:", x.listToRange(x.get_cpus()))
+ print("sibling map:")
for item in x.get_cpus():
- print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]))
- print "mems:", x.listToRange(x.get_mems())
- print "cpu to memory map:"
+ print("mems:", x.listToRange(x.get_mems()))
+ print("cpu to memory map:")
for item in x.get_mems():
- print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]))
rspec_sl_test1 = {"cpu_cores": "1"}
rec_sl_test1 = {"_rspec": rspec_sl_test1}
# allocate the cores to the slivers that have them reserved
# TODO: Need to sort this from biggest cpu_cores to smallest
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
# now check and see if any of our slices had the besteffort flag
# set
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
cpus = default
if glo_coresched_simulate:
- print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus)
+ print("R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus))
else:
- with opwn("/dev/cgroup/{}/{}".format(cgroup, var_name), "w") as f:
+ with open("/dev/cgroup/{}/{}".format(cgroup, var_name), "w") as f:
f.write( self.listToRange(cpus) + "\n" )
os.makedirs("/etc/vservers/.defaults/cgroup")
if glo_coresched_simulate:
- print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)
+ print("RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus))
else:
with open("/etc/vservers/.defaults/cgroup/{}".format(var_name), "w") as f:
f.write( self.listToRange(cpus) + "\n" )
x = CoreSched()
- print "cgroups:", ",".join(x.get_cgroups())
+ print("cgroups:", ",".join(x.get_cgroups()))
- print "cpus:", x.listToRange(x.get_cpus())
- print "sibling map:"
+ print("cpus:", x.listToRange(x.get_cpus()))
+ print("sibling map:")
for item in x.get_cpus():
- print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]))
- print "mems:", x.listToRange(x.get_mems())
- print "cpu to memory map:"
+ print("mems:", x.listToRange(x.get_mems()))
+ print("cpu to memory map:")
for item in x.get_mems():
- print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]))
rspec_sl_test1 = {"cpu_cores": "1"}
rec_sl_test1 = {"_rspec": rspec_sl_test1}
from subprocess import PIPE, Popen
from select import select
-import xmlrpclib
+import xmlrpc.client
import signal
import os
command += ('--connect-timeout', str(timeout))
command += (url, )
if verbose:
- print 'Invoking ', command
- if postdata: print 'with postdata=', postdata
+ print('Invoking ', command)
+ if postdata: print('with postdata=', postdata)
p = Sopen(command , stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if postdata: p.stdin.write(postdata)
p.stdin.close()
if rc != 0:
# when this triggers, the error sometimes doesn't get printed
logger.log ("curlwrapper: retrieve, got stderr <%s>"%err)
- raise xmlrpclib.ProtocolError(url, rc, err, postdata)
+ raise xmlrpc.client.ProtocolError(url, rc, err, postdata)
else:
return data
import sys
-import cPickle
+import pickle
import threading
import time
'net_i2_max_rate': 8,
'net_share': 1,
}
-LOANABLE_RESOURCES = MINIMUM_ALLOCATION.keys()
+LOANABLE_RESOURCES = list(MINIMUM_ALLOCATION.keys())
DB_FILE = '/var/lib/nodemanager/database.pickle'
* and variable resid_rspec, which is the amount of resources the sliver
has after giving out loans but not receiving any."""
slivers = {}
- for name, rec in self.iteritems():
+ for name, rec in self.items():
if 'rspec' in rec:
rec['_rspec'] = rec['rspec'].copy()
slivers[name] = rec
- for rec in slivers.itervalues():
+ for rec in slivers.values():
eff_rspec = rec['_rspec']
resid_rspec = rec['rspec'].copy()
for target, resource_name, amount in rec.get('_loans', []):
old_rec = self.get(name)
if old_rec == None: self[name] = rec
elif rec['timestamp'] > old_rec['timestamp']:
- for key in old_rec.keys():
+ for key in list(old_rec.keys()):
if not key.startswith('_'): del old_rec[key]
old_rec.update(rec)
We use it to determine if a record is stale.
This method should be called whenever new GetSlivers() data comes in."""
self._min_timestamp = ts
- for name, rec in self.items():
+ for name, rec in list(self.items()):
if rec['timestamp'] < ts: del self[name]
def sync(self):
# delete expired records
now = time.time()
- for name, rec in self.items():
+ for name, rec in list(self.items()):
if rec.get('expires', now) < now: del self[name]
self._compute_effective_rspecs()
if name not in self:
logger.verbose("database: sync : ensure_destroy'ing %s"%name)
account.get(name).ensure_destroyed()
- for name, rec in self.iteritems():
+ for name, rec in self.items():
# protect this; if anything fails for a given sliver
# we still need the other ones to be handled
try:
while True:
db_lock.acquire()
while not dump_requested: db_cond.wait()
- db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL)
+ db_pickle = pickle.dumps(db, pickle.HIGHEST_PROTOCOL)
dump_requested = False
db_lock.release()
try:
global db
try:
- f = open(DB_FILE)
+ f = open(DB_FILE, "rb")  # pickle streams are bytes in Python 3
- try: db = cPickle.load(f)
+ try: db = pickle.load(f)
finally: f.close()
except IOError:
logger.log ("database: Could not load %s -- starting from a fresh database"%DB_FILE)
for func in dir(api_calls):
try:
f = api_calls.__getattribute__(func)
- if 'group' in f.__dict__.keys():
+ if 'group' in f.__dict__:
api_function_list += [api_calls.__getattribute__(func)]
except:
pass
self.initscript = new_initscript
code = self.initscript
sliver_initscript = "/vservers/%s/etc/rc.d/init.d/vinit.slice" % self.name
- if tools.replace_file_with_string(sliver_initscript, code, remove_if_empty=True, chmod=0755):
+ if tools.replace_file_with_string(sliver_initscript, code, remove_if_empty=True, chmod=0o755):
if code:
logger.log("Initscript: %s: Installed new initscript in %s" % (self.name, sliver_initscript))
if self.is_running():
# install in sliver
with open(vinit_source) as f:
code = f.read()
- if tools.replace_file_with_string(vinit_script, code, chmod=0755):
+ if tools.replace_file_with_string(vinit_script, code, chmod=0o755):
logger.log("Initscript: %s: installed generic vinit rc script" % self.name)
# create symlink for runlevel 3
if not os.path.islink(enable_link):
# install in sliver
with open(vinit_source) as f:
code = f.read()
- if tools.replace_file_with_string(vinit_unit_file, code, chmod=0755):
+ if tools.replace_file_with_string(vinit_unit_file, code, chmod=0o755):
logger.log("Initscript: %s: installed vinit.service unit file" % self.name)
# create symlink for enabling this unit
if not os.path.islink(enable_link):
if level > LOG_LEVEL:
return
try:
- fd = os.open(LOG_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
+ fd = os.open(LOG_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o600)
if not msg.endswith('\n'):
msg += '\n'
- os.write(fd, '%s: %s' % (time.asctime(time.gmtime()), msg))
+ os.write(fd, ('%s: %s' % (time.asctime(time.gmtime()), msg)).encode())  # os.write takes bytes in Python 3
# query running network interfaces
devs = sioc.gifconf()
- ips = dict(zip(devs.values(), devs.keys()))
+ ips = {ip: dev for (dev, ip) in devs.items()}
macs = {}
for dev in devs:
macs[sioc.gifhwaddr(dev).lower()] = dev
# Get interface name preferably from MAC address, falling
# back on IP address.
hwaddr=interface['mac']
- if hwaddr <> None: hwaddr=hwaddr.lower()
+ if hwaddr is not None: hwaddr = hwaddr.lower()
if hwaddr in macs:
dev = macs[interface['mac']]
elif interface['ip'] in ips:
# query running network interfaces
devs = sioc.gifconf()
- ips = dict(zip(devs.values(), devs.keys()))
+ ips = {ip: dev for (dev, ip) in devs.items()}
macs = {}
for dev in devs:
macs[sioc.gifhwaddr(dev).lower()] = dev
# Get interface name preferably from MAC address, falling
# back on IP address.
hwaddr=interface['mac']
- if hwaddr <> None: hwaddr=hwaddr.lower()
+ if hwaddr is not None: hwaddr = hwaddr.lower()
if hwaddr in macs:
dev = macs[interface['mac']]
elif interface['ip'] in ips:
import optparse
import time
-import xmlrpclib
+import xmlrpc.client
import socket
import os
import sys
try:
other_pid = tools.pid_file()
if other_pid != None:
- print """There might be another instance of the node manager running as pid {}.
-If this is not the case, please remove the pid file {}. -- exiting""".format(other_pid, tools.PID_FILE)
+ print("""There might be another instance of the node manager running as pid {}.
+If this is not the case, please remove the pid file {}. -- exiting""".format(other_pid, tools.PID_FILE))
return
- except OSError, err:
- print "Warning while writing PID file:", err
+ except OSError as err:
+ print("Warning while writing PID file:", err)
# load modules
self.loaded_modules = []
if isinstance(auth, (tuple, list)):
(self.node_id, self.key) = auth
self.session = None
- elif isinstance(auth, (str, unicode)):
+ elif isinstance(auth, str):
self.node_id = self.key = None
self.session = auth
else:
# Yes, the comments in the old implementation are
# misleading. Keys of dicts are not included in the
# hash.
- values += canonicalize(arg.values())
+ values += canonicalize(list(arg.values()))
else:
- # We use unicode() instead of str().
+ # str() is unicode-clean in Python 3.
- values.append(unicode(arg))
+ values.append(str(arg))
return values
codemuxslices = {}
# XXX Hack for planetflow
- if slicesinconf.has_key("root"):
+ if "root" in slicesinconf:
_writeconf = False
else:
_writeconf = True
# Check to see if sliver is running. If not, continue
if slivermanager.is_running(sliver['name']):
# Check if new or needs updating
- if (sliver['name'] not in slicesinconf.keys()) \
+ if (sliver['name'] not in slicesinconf) \
or (params not in slicesinconf.get(sliver['name'], [])):
logger.log("codemux: Updating slice %s using %s" % \
(sliver['name'], params['host']))
f.write("* root 1080 %s\n" % Config().PLC_API_HOST)
# Sort items for like domains
for mapping in slivers:
- for (host, params) in mapping.iteritems():
+ for (host, params) in mapping.items():
if params['slice'] == "root": continue
f.write("%s %s %s %s\n" % (host, params['slice'], params['port'], params['ip']))
f.truncate()
def sortDomains(slivers):
'''Given a dict of {slice: {domainname, port}}, return array of slivers with lower order domains first'''
dnames = {} # {host: slice}
- for (slice, params) in slivers.iteritems():
+ for (slice, params) in slivers.items():
for mapping in params:
dnames[mapping['host']] = {"slice":slice, "port": mapping['port'], "ip": mapping['ip']}
- hosts = dnames.keys()
+ hosts = list(dnames.keys())
# sort by length
hosts.sort(key=str.__len__)
# longer first
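Note: codemux needs the most specific (longest) domain to match first, which is why the hosts are sorted by length and then, per the comment, reversed. Under Python 3 the whole dance collapses to one expression, should this ever be revisited:

    hosts = sorted(dnames.keys(), key=len, reverse=True)   # longest first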
import os
import curlwrapper
import re
-import xmlrpclib
+import xmlrpc.client
try:
from hashlib import sha1 as sha
except ImportError:
import logger
import os
import curlwrapper
-import xmlrpclib
+import xmlrpc.client
try:
from hashlib import sha1 as sha
except ImportError:
url = mydict['url']
try:
contents = curlwrapper.retrieve(url)
- except xmlrpclib.ProtocolError as e:
+ except xmlrpc.client.ProtocolError as e:
logger.log('interfaces (%s): failed to retrieve %s' % (slicename, url))
continue
else:
logger.log('interfaces (%s): no DEVICE specified' % slicename)
continue
for key, value in mydict.items():
if key in ['bridge', 'vlan']:
continue
contents += '%s="%s"\n' % (key, value)
""" Private Bridge configurator. """
-import httplib
+import http.client
import os
import select
import shutil
for i in os.listdir("/sys/block"):
if not i.startswith("dm-"):
continue
- in_vg.extend(map(lambda x: x.replace("!", "/"),
- os.listdir("/sys/block/%s/slaves" % i)))
+ in_vg.extend([x.replace("!", "/") for x in os.listdir("/sys/block/%s/slaves" % i)])
# Read the list of partitions
with open("/proc/partitions") as partitions:
- pat = re.compile("\s+")
+ pat = re.compile(r"\s+")  # raw string: "\s" is a deprecated escape in Python 3
except:
pass
try:
- os.makedirs(os.path.dirname(path), 0755)
+ os.makedirs(os.path.dirname(path), 0o755)
except:
pass
os.mknod(path, st.st_mode, st.st_rdev)
- return ((int(time)+granularity/2)/granularity)*granularity
+ return ((int(time)+granularity//2)//granularity)*granularity  # "/" is float division in Python 3
def clear_timers (self):
- for timer in self.timers.values():
+ for timer in list(self.timers.values()):
timer.cancel()
self.timers={}
timer.start()
def list_timers(self):
- timestamps=self.timers.keys()
- timestamps.sort()
+ timestamps = sorted(self.timers.keys())
for timestamp in timestamps:
logger.log('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
@staticmethod
def lease_printable (lease):
- d=dict ( lease.iteritems())
+ d = dict(lease.items())
d['from']=reservation.time_printable(lease['t_from'])
- d['until']=reservation.time_printable(lease['t_from'])
+ d['until']=reservation.time_printable(lease['t_until'])
s=[]
trusted_gid_names.append(relative_filename)
gid_filename = trusted_certs_dir + os.sep + relative_filename
if verbose:
- print "Writing GID for %s as %s" % (gid.get_hrn(), gid_filename)
+ print("Writing GID for %s as %s" % (gid.get_hrn(), gid_filename))
gid.save_to_file(gid_filename, save_parents=True)
# remove old certs
for gid_name in all_gids_names:
if gid_name not in trusted_gid_names:
if verbose:
- print "Removing old gid ", gid_name
+ print("Removing old gid ", gid_name)
os.unlink(trusted_certs_dir + os.sep + gid_name)
if not hmac:
# let python do its thing
random.seed()
- d = [random.choice(string.letters) for x in xrange(32)]
+ d = [random.choice(string.ascii_letters) for _ in range(32)]  # string.letters is gone in Python 3
hmac = "".join(d)
SetSliverTag(plc, sliver['name'], 'hmac', hmac)
logger.log("sliverauth: %s: setting hmac" % sliver['name'])
path = '/vservers/%s/etc/planetlab' % sliver['name']
if os.path.exists(path):
keyfile = '%s/key' % path
- if (tools.replace_file_with_string(keyfile, hmac, chmod=0400)):
+ if (tools.replace_file_with_string(keyfile, hmac, chmod=0o400)):
logger.log ("sliverauth: (over)wrote hmac into %s " % keyfile)
# create the key if needed and returns the key contents
dotssh=os.path.dirname(keyfile)
# create dir if needed
if not os.path.isdir (dotssh):
- os.mkdir (dotssh, 0700)
+ os.mkdir (dotssh, 0o700)
logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), dotssh ] )
if not os.path.isfile(pubfile):
comment="%s@%s"%(sliver['name'], socket.gethostname())
logger.log_call( [ 'ssh-keygen', '-t', 'rsa', '-N', '', '-f', keyfile , '-C', comment] )
- os.chmod (keyfile, 0400)
+ os.chmod (keyfile, 0o400)
logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), keyfile, pubfile ] )
with open(pubfile) as f:
return f.read().strip()
# if it's lost b/c e.g. the sliver was destroyed we cannot save the tags content
ssh_key = generate_sshkey(sliver)
old_tag = find_tag (sliver, 'ssh_key')
- if ssh_key <> old_tag:
+ if ssh_key != old_tag:
SetSliverTag(plc, sliver['name'], 'ssh_key', ssh_key)
logger.log ("sliverauth: %s: setting ssh_key" % sliver['name'])
logger.log("specialaccounts: keys file changed: %s" % auth_keys)
# always set permissions properly
- os.chmod(dot_ssh, 0700)
+ os.chmod(dot_ssh, 0o700)
os.chown(dot_ssh, uid, gid)
- os.chmod(auth_keys, 0600)
+ os.chmod(auth_keys, 0o600)
os.chown(auth_keys, uid, gid)
logger.log('specialaccounts: installed ssh keys for %s' % name)
""" Syndicate configurator. """
-import httplib
+import http.client
import os
import shutil
import tools
logger.log("Syndicate: Http op %s on url %s to host %s" % (op, mountpoint, syndicate_ip))
try:
- conn = httplib.HTTPSConnection(syndicate_ip, timeout=60)
+ conn = http.client.HTTPSConnection(syndicate_ip, timeout=60)
conn.request(op, mountpoint)
r1 = conn.getresponse()
except:
# add to conf
slices.append(sliver['name'])
_restart = createVsysDir(sliver['name']) or _restart
- if attribute['value'] in scripts.keys():
+ if attribute['value'] in scripts:
scripts[attribute['value']].append(sliver['name'])
# Write the conf
# does not match the length of the new scripts' values,
# or the set difference against the new scripts is non-empty,
# then the dicts are different.
- for (acl, oldslivers) in oldscripts.iteritems():
+ for (acl, oldslivers) in oldscripts.items():
try:
if (len(oldslivers) != len(currentscripts[acl])) or \
(len(set(oldslivers) - set(currentscripts[acl])) != 0):
tag = attribute['tagname']
value = attribute['value']
if tag.startswith('vsys_'):
- if (privs.has_key(slice)):
+ if (slice in privs):
slice_priv = privs[slice]
- if (slice_priv.has_key(tag)):
+ if (tag in slice_priv):
slice_priv[tag].append(value)
else:
slice_priv[tag]=[value]
return cur_privs
def write_privs(cur_privs, privs):
- for slice in privs.keys():
+ for slice in list(privs.keys()):
variables = privs[slice]
slice_dir = os.path.join(VSYS_PRIV_DIR, slice)
if (not os.path.exists(slice_dir)):
os.mkdir(slice_dir)
# Add values that do not exist
- for k in variables.keys():
+ for k in list(variables.keys()):
v = variables[k]
- if (cur_privs.has_key(slice)
- and cur_privs[slice].has_key(k)
+ if (slice in cur_privs
+ and k in cur_privs[slice]
and cur_privs[slice][k] == v):
# The binding has not changed
pass
# Remove files and directories
# that are invalid
- for slice in cur_privs.keys():
+ for slice in list(cur_privs.keys()):
variables = cur_privs[slice]
slice_dir = os.path.join(VSYS_PRIV_DIR, slice)
# Add values that do not exist
- for k in variables.keys():
- if (privs.has_key(slice)
- and cur_privs[slice].has_key(k)):
+ for k in list(variables.keys()):
+ if (slice in privs
+ and k in cur_privs[slice]):
# ok, spare this tag
- print "Sparing %s, %s "%(slice, k)
+ print("Sparing %s, %s "%(slice, k))
else:
v_file = os.path.join(slice_dir, k)
os.remove(v_file)
- if (not privs.has_key(slice)):
+ if (slice not in privs):
os.rmdir(slice_dir)
"""Leverage curl to make XMLRPC requests that check the server's credentials."""
-import xmlrpclib
+import xmlrpc.client
import curlwrapper
-class CertificateCheckingSafeTransport (xmlrpclib.Transport):
+class CertificateCheckingSafeTransport (xmlrpc.client.Transport):
def __init__(self, cacert, timeout):
self.cacert = cacert
cacert = self.cacert,
postdata = request_body,
timeout = self.timeout)
- return xmlrpclib.loads(contents)[0]
+ return xmlrpc.client.loads(contents)[0]
-class ServerProxy(xmlrpclib.ServerProxy):
+class ServerProxy(xmlrpc.client.ServerProxy):
def __init__(self, uri, cacert, timeout = 300, **kwds):
- xmlrpclib.ServerProxy.__init__(self, uri,
+ xmlrpc.client.ServerProxy.__init__(self, uri,
CertificateCheckingSafeTransport(cacert, timeout),
**kwds)
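Note: the subclass keeps the stock `xmlrpc.client.ServerProxy` interface and swaps only the transport, so callers construct it as before (the URI is illustrative; the cacert path matches the boot bundle used elsewhere in this patch):

    plc = ServerProxy('https://plc.example.org/PLCAPI/',
                      cacert='/usr/boot/cacert.pem', timeout=300)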
try:
# create actually means start
self.dom.create()
- except Exception, e:
+ except Exception as e:
# XXX smbaker: attempt to resolve slivers that are stuck in
# "failed to allocate free veth".
if "ailed to allocate free veth" in str(e):
# Btrfs supports quotas per volume
- if rec.has_key("rspec") and rec["rspec"].has_key("tags"):
+ if "rspec" in rec and "tags" in rec["rspec"]:
if cgroups.get_cgroup_path(self.name) == None:
# If configure is called before start, then the cgroups won't exist
# yet. NM will eventually re-run configure on the next iteration.
else:
tags = rec["rspec"]["tags"]
# It will depend on the FS selection
- if tags.has_key('disk_max'):
+ if 'disk_max' in tags:
disk_max = tags['disk_max']
if disk_max == 0:
# unlimited
pass
# Memory allocation
- if tags.has_key('memlock_hard'):
+ if 'memlock_hard' in tags:
mem = str(int(tags['memlock_hard']) * 1024) # hard limit in bytes
cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory")
- if tags.has_key('memlock_soft'):
+ if 'memlock_soft' in tags:
mem = str(int(tags['memlock_soft']) * 1024) # soft limit in bytes
cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory")
# CPU allocation
# Only cpu_shares until figure out how to provide limits and guarantees
# (RT_SCHED?)
- if tags.has_key('cpu_share'):
+ if 'cpu_share' in tags:
cpu_share = tags['cpu_share']
cgroups.write(self.name, 'cpu.shares', cpu_share)
# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
-for rlimit in vserver.RLIMITS.keys():
+for rlimit in vserver.RLIMITS:
rlim = rlimit.lower()
DEFAULT_ALLOCATION["{}_min".format(rlim)] = KEEP_LIMIT
DEFAULT_ALLOCATION["{}_soft".format(rlim)] = KEEP_LIMIT
vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
Account.__init__ (self, name)
Initscript.__init__ (self, name)
- except Exception, err:
+ except Exception as err:
if not isinstance(err, vserver.NoSuchVServer):
# Probably a bad vserver or vserver configuration file
logger.log_exc("sliver_vs:__init__ (first chance)", name=name)
# get/set the min/soft/hard values for all of the vserver
# related RLIMITS. Note that vserver currently only
# implements support for hard limits.
- for limit in vserver.RLIMITS.keys():
+ for limit in vserver.RLIMITS:
type = limit.lower()
minimum = self.rspec['{}_min'.format(type)]
soft = self.rspec['{}_soft'.format(type)]
cpu_share = self.rspec['cpu_share']
count = 1
- for key in self.rspec.keys():
+ for key in self.rspec:
if key.find('sysctl.') == 0:
sysctl = key.split('.')
try:
# /etc/vservers/<guest>/sysctl/<id>/
dirname = "/etc/vservers/{}/sysctl/{}".format(self.name, count)
try:
- os.makedirs(dirname, 0755)
+ os.makedirs(dirname, 0o755)
except:
pass
with open("{}/setting".format(dirname), "w") as setting:
logger.log("sliver_vs: {}: writing {}={}"
.format(self.name, key, self.rspec[key]))
- except IOError, e:
+ except IOError as e:
logger.log("sliver_vs: {}: could not set {}={}"
.format(self.name, key, self.rspec[key]))
logger.log("sliver_vs: {}: error = {}".format(self.name, e))
slivers."""
logger.verbose("slivermanager: Entering GetSlivers with fullupdate=%r"%fullupdate)
- for key in data.keys():
+ for key in data:
logger.verbose('slivermanager: GetSlivers key : ' + key)
node_id = None
except:
logger.log_exc("slivermanager: GetSlivers failed to read /etc/planetlab/node_id")
- if data.has_key('node_id') and data['node_id'] != node_id: return
+ if 'node_id' in data and data['node_id'] != node_id: return
- if data.has_key('networks'):
+ if 'networks' in data:
for network in data['networks']:
if network['is_primary'] and network['bwlimit'] is not None:
DEFAULT_ALLOCATION['net_max_rate'] = network['bwlimit'] / 1000
# extract the implied rspec
rspec = {}
rec['rspec'] = rspec
- for resname, default_amount in DEFAULT_ALLOCATION.iteritems():
+ for resname, default_amount in DEFAULT_ALLOCATION.items():
try:
t = type(default_amount)
amount = t.__new__(t, attributes[resname])
rspec[resname] = amount
# add in sysctl attributes into the rspec
- for key in attributes.keys():
+ for key in attributes:
if key.find("sysctl.") == 0:
rspec[key] = attributes[key]
# No default allocation values for LXC yet, think if it's necessary given
# that they are also default allocation values in this module
if implementation == 'vs':
- for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.iteritems():
+ for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.items():
DEFAULT_ALLOCATION[resname]=default_amount
account.register_class(sliver_class_to_register)
EnvironmentFile=/etc/sysconfig/nodemanager
Type=forking
PIDFile=/var/run/nodemanager.pid
-ExecStart=/usr/bin/env python /usr/share/NodeManager/nodemanager.py -d $OPTIONS
+ExecStart=/usr/bin/env python3 /usr/share/NodeManager/nodemanager.py -d $OPTIONS
[Install]
WantedBy=multi-user.target
"""
from subprocess import PIPE, Popen
-from xmlrpclib import dumps, loads
+from xmlrpc.client import dumps, loads
GPG = '/usr/bin/gpg'
os.setsid()
if os.fork() != 0: os._exit(0)
os.chdir('/')
- os.umask(0022)
+ os.umask(0o022)
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, 0)
# xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
- crashlog = os.open('/var/log/nodemanager.daemon', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
+ crashlog = os.open('/var/log/nodemanager.daemon', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0o644)
os.dup2(crashlog, 1)
os.dup2(crashlog, 2)
handle.close()
# check for a process with that pid by sending signal 0
try: os.kill(other_pid, 0)
- except OSError, e:
+ except OSError as e:
if e.errno == errno.ESRCH: other_pid = None # doesn't exist
else: raise # who knows
if other_pid == None:
class NMLock:
def __init__(self, file):
logger.log("tools: Lock {} initialized.".format(file), 2)
- self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0600)
+ self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0o600)
flags = fcntl.fcntl(self.fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fd, fcntl.F_SETFD, flags)
logger.log("tools: REBOOT {}".format(domain.name()) )
else:
raise Exception()
- except Exception, e:
+ except Exception as e:
logger.log("tools: FAILED to reboot {} ({})".format(domain.name(), e) )
logger.log("tools: Trying to DESTROY/CREATE {} instead...".format(domain.name()) )
try:
if result==0:
logger.log("tools: CREATED {}".format(domain.name()) )
else: logger.log("tools: FAILED in the CREATE call of {}".format(domain.name()) )
- except Exception, e:
+ except Exception as e:
logger.log("tools: FAILED to DESTROY/CREATE {} ({})".format(domain.name(), e) )
###################################################
ipv6candidatestrip = ipv6candidate.strip()
valid = is_valid_ipv6(ipv6candidatestrip)
if not valid:
- print line,
+ print(line, end='')  # each line already ends with a newline
fileinput.close()
logger.log("tools: REMOVED IPv6 address from /etc/hosts file of slice={}"
.format(slicename) )