From 48a73b18fd7daed13c645c1adeddb57b560e7a2d Mon Sep 17 00:00:00 2001 From: Thierry Parmentelat Date: Fri, 28 Dec 2018 22:48:37 +0100 Subject: [PATCH] blind and brutal 2to3 --- account.py | 4 ++-- api.py | 30 +++++++++++++++--------------- api_calls.py | 30 +++++++++++++++--------------- bootauth.py | 18 +++++++++--------- bwmon.py | 34 +++++++++++++++++----------------- cgroups.py | 11 ++++++----- conf_files.py | 6 +++--- config.py | 8 ++++---- controller.py | 2 +- coresched_lxc.py | 25 +++++++++++++------------ coresched_vs.py | 22 +++++++++++----------- curlwrapper.py | 8 ++++---- database.py | 20 ++++++++++---------- doc/DocBookLocal.py | 2 +- initscript.py | 6 +++--- logger.py | 2 +- net.py | 8 ++++---- nodemanager.py | 10 +++++----- plcapi.py | 6 +++--- plugins/codemux.py | 10 +++++----- plugins/hostmap.py | 2 +- plugins/interfaces.py | 6 +++--- plugins/privatebridge.py | 2 +- plugins/rawdisk.py | 5 ++--- plugins/reservation.py | 6 +++--- plugins/sfagids.py | 4 ++-- plugins/sliverauth.py | 10 +++++----- plugins/specialaccounts.py | 4 ++-- plugins/syndicate.py | 4 ++-- plugins/vsys.py | 4 ++-- plugins/vsys_privs.py | 24 ++++++++++++------------ safexmlrpc.py | 10 +++++----- sliver_libvirt.py | 12 ++++++------ sliver_vs.py | 12 ++++++------ slivermanager.py | 12 ++++++------ systemd/nm.service | 2 +- ticket.py | 2 +- tools.py | 14 +++++++------- 38 files changed, 199 insertions(+), 198 deletions(-) diff --git a/account.py b/account.py index 576fe71..d45f533 100644 --- a/account.py +++ b/account.py @@ -125,9 +125,9 @@ class Account: tools.write_file(auth_keys, lambda f: f.write(new_keys)) # set access permissions and ownership properly - os.chmod(dot_ssh, 0700) + os.chmod(dot_ssh, 0o700) os.chown(dot_ssh, uid, gid) - os.chmod(auth_keys, 0600) + os.chmod(auth_keys, 0o600) os.chown(auth_keys, uid, gid) # set self.keys to new_keys only when all of the above ops succeed diff --git a/api.py b/api.py index 7b5ab95..756c894 100644 --- a/api.py +++ b/api.py @@ -9,15 +9,15 @@ domain socket that is accessible by ssh-ing into a delegate account with the forward_api_calls shell. """ -import SimpleXMLRPCServer -import SocketServer +import xmlrpc.server +import socketserver import errno import os import pwd import socket import struct import threading -import xmlrpclib +import xmlrpc.client import sys import database @@ -36,7 +36,7 @@ except: API_SERVER_PORT = 812 UNIX_ADDR = '/tmp/nodemanager.api' -class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): +class APIRequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler): # overriding _dispatch to achieve this effect is officially deprecated, # but I can't figure out how to get access to .request without # duplicating SimpleXMLRPCServer code here, which is more likely to @@ -47,13 +47,13 @@ class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): method_name = str(method_name_unicode) try: method = api_method_dict[method_name] except KeyError: - api_method_list = api_method_dict.keys() + api_method_list = list(api_method_dict.keys()) api_method_list.sort() - raise xmlrpclib.Fault(100, 'Invalid API method %s. Valid choices are %s' % \ + raise xmlrpc.client.Fault(100, 'Invalid API method %s. Valid choices are %s' % \ (method_name, ', '.join(api_method_list))) expected_nargs = nargs_dict[method_name] if len(args) != expected_nargs: - raise xmlrpclib.Fault(101, 'Invalid argument count: got %d, expecting %d.' % \ + raise xmlrpc.client.Fault(101, 'Invalid argument count: got %d, expecting %d.' 
% \ (len(args), expected_nargs)) else: # Figure out who's calling. @@ -66,11 +66,11 @@ class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): # Special case : the sfa component manager if caller_name == PLC_SLICE_PREFIX+"_sfacm": try: result = method(*args) - except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err) + except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err) # Anyone can call these functions elif method_name in ('Help', 'Ticket', 'GetXIDs', 'GetSSHKeys'): try: result = method(*args) - except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err) + except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err) else: # Execute anonymous call. # Authenticate the caller if not in the above fncts. if method_name == "GetRecord": @@ -83,19 +83,19 @@ class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler): # only work on slivers or self. Sanity check. if not (target_rec and target_rec['type'].startswith('sliver.')): - raise xmlrpclib.Fault(102, \ + raise xmlrpc.client.Fault(102, \ 'Invalid argument: the first argument must be a sliver name.') # only manipulate slivers who delegate you authority if caller_name in (target_name, target_rec['delegations']): try: result = method(target_rec, *args[1:]) - except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err) + except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err) else: - raise xmlrpclib.Fault(108, '%s: Permission denied.' % caller_name) + raise xmlrpc.client.Fault(108, '%s: Permission denied.' % caller_name) if result == None: result = 1 return result -class APIServer_INET(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer): allow_reuse_address = True +class APIServer_INET(socketserver.ThreadingMixIn, xmlrpc.server.SimpleXMLRPCServer): allow_reuse_address = True class APIServer_UNIX(APIServer_INET): address_family = socket.AF_UNIX @@ -105,8 +105,8 @@ def start(): serv1 = APIServer_INET(('127.0.0.1', API_SERVER_PORT), requestHandler=APIRequestHandler, logRequests=0) tools.as_daemon_thread(serv1.serve_forever) try: os.unlink(UNIX_ADDR) - except OSError, e: + except OSError as e: if e.errno != errno.ENOENT: raise serv2 = APIServer_UNIX(UNIX_ADDR, requestHandler=APIRequestHandler, logRequests=0) tools.as_daemon_thread(serv2.serve_forever) - os.chmod(UNIX_ADDR, 0666) + os.chmod(UNIX_ADDR, 0o666) diff --git a/api_calls.py b/api_calls.py index 592335d..e39032d 100644 --- a/api_calls.py +++ b/api_calls.py @@ -9,15 +9,15 @@ domain socket that is accessible by ssh-ing into a delegate account with the forward_api_calls shell. """ -import SimpleXMLRPCServer -import SocketServer +import xmlrpc.server +import socketserver import errno import os import pwd import socket import struct import threading -import xmlrpclib +import xmlrpc.client import slivermanager try: @@ -65,8 +65,8 @@ def export_to_docbook(**kwargs): def export(method): def args(): # Inspect method. Remove self from the argument list. 
- max_args = method.func_code.co_varnames[0:method.func_code.co_argcount] - defaults = method.func_defaults + max_args = method.__code__.co_varnames[0:method.__code__.co_argcount] + defaults = method.__defaults__ if defaults is None: defaults = () min_args = max_args[0:len(max_args) - len(defaults)] @@ -97,7 +97,7 @@ def export_to_docbook(**kwargs): @export_to_api(0) def Help(): """Get a list of functions currently supported by the Node Manager API""" - names=api_method_dict.keys() + names=list(api_method_dict.keys()) names.sort() return ''.join(['**** ' + api_method_dict[name].__name__ + '\n' + api_method_dict[name].__doc__ + '\n' for name in names]) @@ -120,8 +120,8 @@ def Ticket(ticket): deliver_ticket(data) logger.log('api_calls: Ticket delivered for %s' % name) Create(database.db.get(name)) - except Exception, err: - raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err)) + except Exception as err: + raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err)) @export_to_docbook(roles=['self'], accepts=[Parameter(str, 'A ticket returned from GetSlivers()')], @@ -130,14 +130,14 @@ def Ticket(ticket): def AdminTicket(ticket): """Admin interface to create slivers based on ticket returned by GetSlivers().""" try: - data, = xmlrpclib.loads(ticket)[0] + data, = xmlrpc.client.loads(ticket)[0] name = data['slivers'][0]['name'] if data != None: deliver_ticket(data) logger.log('api_calls: Admin Ticket delivered for %s' % name) Create(database.db.get(name)) - except Exception, err: - raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err)) + except Exception as err: + raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err)) @export_to_docbook(roles=['self'], @@ -155,7 +155,7 @@ def GetXIDs(): def GetSSHKeys(): """Return an dictionary mapping slice names to SSH keys""" keydict = {} - for rec in database.db.itervalues(): + for rec in database.db.values(): if 'keys' in rec: keydict[rec['name']] = rec['keys'] return keydict @@ -172,7 +172,7 @@ def Create(sliver_name): account.get(rec['name']).ensure_created(rec) logger.log("api_calls: Create %s"%rec['name']) else: - raise Exception, "Only PLC can create non delegated slivers." + raise Exception("Only PLC can create non delegated slivers.") @export_to_docbook(roles=['nm-controller', 'self'], @@ -186,7 +186,7 @@ def Destroy(sliver_name): account.get(rec['name']).ensure_destroyed() logger.log("api_calls: Destroy %s"%rec['name']) else: - raise Exception, "Only PLC can destroy non delegated slivers." + raise Exception("Only PLC can destroy non delegated slivers.") @export_to_docbook(roles=['nm-controller', 'self'], @@ -280,7 +280,7 @@ def SetLoans(sliver_name, loans): of loss of resources.""" rec = sliver_name if not validate_loans(loans): - raise xmlrpclib.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification') + raise xmlrpc.client.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification') rec['_loans'] = loans database.db.sync() diff --git a/bootauth.py b/bootauth.py index 5c9dfa7..3ebe30b 100755 --- a/bootauth.py +++ b/bootauth.py @@ -21,12 +21,12 @@ def main(): # Help def usage(): - print "Usage: %s [OPTION]..." % sys.argv[0] - print "Options:" - print " -f, --config=FILE PLC configuration file (default: /etc/planetlab/plc_config)" - print " -n, --node-id=FILE Node ID (or file)" - print " -k, --key=FILE Node key (or file)" - print " --help This message" + print("Usage: %s [OPTION]..." 
% sys.argv[0]) + print("Options:") + print(" -f, --config=FILE PLC configuration file (default: /etc/planetlab/plc_config)") + print(" -n, --node-id=FILE Node ID (or file)") + print(" -k, --key=FILE Node key (or file)") + print(" --help This message") sys.exit(1) # Get options @@ -36,8 +36,8 @@ def main(): "node=", "nodeid=", "node-id", "node_id", "key=", "help"]) - except getopt.GetoptError, err: - print "Error: " + err.msg + except getopt.GetoptError as err: + print("Error: " + err.msg) usage() for (opt, optval) in opts: @@ -72,7 +72,7 @@ def main(): plc = PLCAPI(config.plc_api_uri, config.cacert, session) assert session == plc.GetSession() - print session + print(session) if __name__ == '__main__': main() diff --git a/bwmon.py b/bwmon.py index 476ef6d..691d4b3 100644 --- a/bwmon.py +++ b/bwmon.py @@ -520,35 +520,35 @@ def sync(nmdbcopy): # Since root is required for sanity, its not in the API/plc database, so pass {} # to use defaults. - if root_xid not in slices.keys(): + if root_xid not in list(slices.keys()): slices[root_xid] = Slice(root_xid, "root", {}) slices[root_xid].reset({}, {}) # Used by bwlimit. pass {} since there is no rspec (like above). - if default_xid not in slices.keys(): + if default_xid not in list(slices.keys()): slices[default_xid] = Slice(default_xid, "default", {}) slices[default_xid].reset({}, {}) live = {} # Get running slivers that should be on this node (from plc). {xid: name} # db keys on name, bwmon keys on xid. db doesnt have xid either. - for plcSliver in nmdbcopy.keys(): + for plcSliver in list(nmdbcopy.keys()): live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver] - logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__()) - logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__()) + logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__()) + logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__()) # Get actual running values from tc. # Update slice totals and bandwidth. {xid: {values}} kernelhtbs = gethtbs(root_xid, default_xid) - logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__()) + logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__()) # The dat file has HTBs for slices, but the HTBs aren't running nohtbslices = set(slices.keys()) - set(kernelhtbs.keys()) logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__()) # Reset tc counts. for nohtbslice in nohtbslices: - if live.has_key(nohtbslice): + if nohtbslice in live: slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] ) else: logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice) @@ -559,7 +559,7 @@ def sync(nmdbcopy): logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__()) for slicenodat in slicesnodat: # But slice is running - if live.has_key(slicenodat): + if slicenodat in live: # init the slice. which means start accounting over since kernel # htb was already there. slices[slicenodat] = Slice(slicenodat, @@ -575,9 +575,9 @@ def sync(nmdbcopy): for newslice in newslicesxids: # Delegated slices dont have xids (which are uids) since they haven't been # instantiated yet. - if newslice != None and live[newslice].has_key('_rspec') == True: + if newslice != None and ('_rspec' in live[newslice]) == True: # Check to see if we recently deleted this slice. 
- if live[newslice]['name'] not in deaddb.keys(): + if live[newslice]['name'] not in list(deaddb.keys()): logger.log( "bwmon: new slice %s" % live[newslice]['name'] ) # _rspec is the computed rspec: NM retrieved data from PLC, computed loans # and made a dict of computed values. @@ -615,17 +615,17 @@ def sync(nmdbcopy): if deadxid == root_xid or deadxid == default_xid: continue logger.log("bwmon: removing dead slice %s " % deadxid) - if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid): + if deadxid in slices and deadxid in kernelhtbs: # add slice (by name) to deaddb logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name) deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]} del slices[deadxid] - if kernelhtbs.has_key(deadxid): + if deadxid in kernelhtbs: logger.verbose("bwmon: Removing HTB for %s." % deadxid) bwlimit.off(deadxid, dev = dev_default) # Clean up deaddb - for deadslice in deaddb.keys(): + for deadslice in list(deaddb.keys()): if (time.time() >= (deaddb[deadslice]['slice'].time + period)): logger.log("bwmon: Removing dead slice %s from dat." \ % deaddb[deadslice]['slice'].name) @@ -634,10 +634,10 @@ def sync(nmdbcopy): # Get actual running values from tc since we've added and removed buckets. # Update slice totals and bandwidth. {xid: {values}} kernelhtbs = gethtbs(root_xid, default_xid) - logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__()) + logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__()) # Update all byte limites on all slices - for (xid, slice) in slices.iteritems(): + for (xid, slice) in slices.items(): # Monitor only the specified slices if xid == root_xid or xid == default_xid: continue if names and name not in names: @@ -656,7 +656,7 @@ def sync(nmdbcopy): # Update byte counts slice.update(kernelhtbs[xid], live[xid]['_rspec']) - logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(), DB_FILE)) + logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE)) f = open(DB_FILE, "w") pickle.dump((version, slices, deaddb), f) f.close() @@ -687,7 +687,7 @@ def allOff(): kernelhtbs = gethtbs(root_xid, default_xid) if len(kernelhtbs): logger.log("bwmon: Disabling all running HTBs.") - for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default) + for htb in list(kernelhtbs.keys()): bwlimit.off(htb, dev = dev_default) lock = threading.Event() diff --git a/cgroups.py b/cgroups.py index a1c38b4..0611d1d 100644 --- a/cgroups.py +++ b/cgroups.py @@ -6,6 +6,7 @@ import os, os.path import pyinotify import logger +from functools import reduce # Base dir for libvirt BASE_DIR = '/sys/fs/cgroup' @@ -51,20 +52,20 @@ def get_base_path(): def get_cgroups(): """ Returns the list of cgroups active at this moment on the node """ - return map(os.path.basename, get_cgroup_paths()) + return list(map(os.path.basename, get_cgroup_paths())) def write(name, key, value, subsystem="cpuset"): """ Writes a value to the file key with the cgroup with name """ base_path = get_cgroup_path(name, subsystem) with open(os.path.join(base_path, key), 'w') as f: - print >>f, value + print(value, file=f) logger.verbose("cgroups.write: overwrote {}".format(base_path)) def append(name, key, value, subsystem="cpuset"): """ Appends a value to the file key with the cgroup with name """ base_path = get_cgroup_path(name, subsystem) with open(os.path.join(base_path, key), 'a') as f: - print >>f, value + print(value, file=f) logger.verbose("cgroups.append: 
appended {}".format(base_path)) if __name__ == '__main__': @@ -75,7 +76,7 @@ if __name__ == '__main__': subsystems = 'blkio cpu cpu,cpuacct cpuacct cpuset devices freezer memory net_cls perf_event systemd'.split() for subsystem in subsystems: - print 'get_cgroup_path({}, {}) = {}'.\ - format(name, subsystem, get_cgroup_path(name, subsystem)) + print('get_cgroup_path({}, {}) = {}'.\ + format(name, subsystem, get_cgroup_path(name, subsystem))) # print 'get_cgroup_paths = {}'.format(get_cgroup_paths(subsystem)) diff --git a/conf_files.py b/conf_files.py index a5b2cb2..7b0cada 100644 --- a/conf_files.py +++ b/conf_files.py @@ -12,7 +12,7 @@ import string import curlwrapper import logger import tools -import xmlrpclib +import xmlrpc.client from config import Config # right after net @@ -63,7 +63,7 @@ class conf_files: try: logger.verbose("conf_files: retrieving URL=%s"%url) contents = curlwrapper.retrieve(url, self.config.cacert) - except xmlrpclib.ProtocolError as e: + except xmlrpc.client.ProtocolError as e: logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url)) return if not cf_rec['always_update'] and sha(contents).digest() == self.checksum(dest): @@ -78,7 +78,7 @@ class conf_files: if self.system(cf_rec['postinstall_cmd']): self.system(err_cmd) def run_once(self, data): - if data.has_key("conf_files"): + if "conf_files" in data: for f in data['conf_files']: try: self.update_conf_file(f) except: logger.log_exc("conf_files: failed to update conf_file") diff --git a/config.py b/config.py index 98bb224..a56fb47 100644 --- a/config.py +++ b/config.py @@ -17,9 +17,9 @@ class Config: def __init__(self, file = "/etc/planetlab/plc_config"): try: - execfile(file, self.__dict__) + exec(compile(open(file).read(), file, 'exec'), self.__dict__) except: - raise Exception, "Could not parse " + file + raise Exception("Could not parse " + file) if int(self.PLC_API_PORT) == 443: uri = "https://" @@ -28,7 +28,7 @@ class Config: elif os.path.exists('/usr/boot/cacert.pem'): self.cacert = '/usr/boot/cacert.pem' else: - raise Exception, "No boot server certificate bundle available" + raise Exception("No boot server certificate bundle available") else: uri = "http://" self.cacert = None @@ -42,6 +42,6 @@ class Config: if __name__ == '__main__': from pprint import pprint - for (k, v) in Config().__dict__.iteritems(): + for (k, v) in Config().__dict__.items(): if k not in ['__builtins__']: pprint ( (k, v), ) diff --git a/controller.py b/controller.py index c66ddff..0732ee5 100644 --- a/controller.py +++ b/controller.py @@ -34,5 +34,5 @@ def add_shell(shell): etc_shells.close() if shell not in valid_shells: etc_shells = open('/etc/shells', 'a') - print >>etc_shells, shell + print(shell, file=etc_shells) etc_shells.close() diff --git a/coresched_lxc.py b/coresched_lxc.py index 50b5330..c6b7f4d 100644 --- a/coresched_lxc.py +++ b/coresched_lxc.py @@ -6,6 +6,7 @@ import logger import os import os.path import cgroups +from functools import reduce glo_coresched_simulate = False joinpath = os.path.join @@ -168,7 +169,7 @@ class CoreSched: # allocate the cores to the slivers that have them reserved # TODO: Need to sort this from biggest cpu_cores to smallest - for name, rec in slivers.iteritems(): + for name, rec in slivers.items(): rspec = rec["_rspec"] cores = rspec.get(self.slice_attr_name, 0) (cores, bestEffort) = self.decodeCoreSpec(cores) @@ -208,7 +209,7 @@ class CoreSched: # now check and see if any of our slices had the besteffort flag # set - for name, rec in slivers.iteritems(): + for 
name, rec in slivers.items(): rspec = rec["_rspec"] cores = rspec.get(self.slice_attr_name, 0) (cores, bestEffort) = self.decodeCoreSpec(cores) @@ -238,7 +239,7 @@ class CoreSched: self.freezeUnits("freezer.state", freezeList) def freezeUnits (self, var_name, freezeList): - for (slicename, freeze) in freezeList.items(): + for (slicename, freeze) in list(freezeList.items()): try: cgroup_path = cgroups.get_cgroup_path(slicename, 'freezer') logger.verbose("CoreSched: setting freezer for {} to {} - path={} var={}" @@ -249,7 +250,7 @@ class CoreSched: break if glo_coresched_simulate: - print "F", cgroup + print("F", cgroup) else: with open(cgroup, "w") as f: f.write(freeze) @@ -283,7 +284,7 @@ class CoreSched: cpus = default if glo_coresched_simulate: - print "R", cgroup + "/" + var_name, self.listToRange(cpus) + print("R", cgroup + "/" + var_name, self.listToRange(cpus)) else: cgroups.write(cgroup, var_name, self.listToRange(cpus)) @@ -396,17 +397,17 @@ if __name__=="__main__": x = CoreSched() - print "cgroups:", ",".join(x.get_cgroups()) + print("cgroups:", ",".join(x.get_cgroups())) - print "cpus:", x.listToRange(x.get_cpus()) - print "sibling map:" + print("cpus:", x.listToRange(x.get_cpus())) + print("sibling map:") for item in x.get_cpus(): - print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]) + print(" ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])) - print "mems:", x.listToRange(x.get_mems()) - print "cpu to memory map:" + print("mems:", x.listToRange(x.get_mems())) + print("cpu to memory map:") for item in x.get_mems(): - print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]) + print(" ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])) rspec_sl_test1 = {"cpu_cores": "1"} rec_sl_test1 = {"_rspec": rspec_sl_test1} diff --git a/coresched_vs.py b/coresched_vs.py index 06aeb54..f940574 100644 --- a/coresched_vs.py +++ b/coresched_vs.py @@ -156,7 +156,7 @@ class CoreSched: # allocate the cores to the slivers that have them reserved # TODO: Need to sort this from biggest cpu_cores to smallest - for name, rec in slivers.iteritems(): + for name, rec in slivers.items(): rspec = rec["_rspec"] cores = rspec.get(self.slice_attr_name, 0) (cores, bestEffort) = self.decodeCoreSpec(cores) @@ -194,7 +194,7 @@ class CoreSched: # now check and see if any of our slices had the besteffort flag # set - for name, rec in slivers.iteritems(): + for name, rec in slivers.items(): rspec = rec["_rspec"] cores = rspec.get(self.slice_attr_name, 0) (cores, bestEffort) = self.decodeCoreSpec(cores) @@ -240,7 +240,7 @@ class CoreSched: cpus = default if glo_coresched_simulate: - print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus) + print("R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus)) else: with opwn("/dev/cgroup/{}/{}".format(cgroup, var_name), "w") as f: f.write( self.listToRange(cpus) + "\n" ) @@ -250,7 +250,7 @@ class CoreSched: os.makedirs("/etc/vservers/.defaults/cgroup") if glo_coresched_simulate: - print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus) + print("RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)) else: with open("/etc/vservers/.defaults/cgroup/{}".format(var_name), "w") as f: f.write( self.listToRange(cpus) + "\n" ) @@ -350,17 +350,17 @@ if __name__=="__main__": x = CoreSched() - print "cgroups:", ",".join(x.get_cgroups()) + print("cgroups:", ",".join(x.get_cgroups())) - print "cpus:", x.listToRange(x.get_cpus()) - print "sibling 
map:" + print("cpus:", x.listToRange(x.get_cpus())) + print("sibling map:") for item in x.get_cpus(): - print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]) + print(" ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])) - print "mems:", x.listToRange(x.get_mems()) - print "cpu to memory map:" + print("mems:", x.listToRange(x.get_mems())) + print("cpu to memory map:") for item in x.get_mems(): - print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]) + print(" ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])) rspec_sl_test1 = {"cpu_cores": "1"} rec_sl_test1 = {"_rspec": rspec_sl_test1} diff --git a/curlwrapper.py b/curlwrapper.py index 4383a70..a877b07 100644 --- a/curlwrapper.py +++ b/curlwrapper.py @@ -7,7 +7,7 @@ from subprocess import PIPE, Popen from select import select -import xmlrpclib +import xmlrpc.client import signal import os @@ -30,8 +30,8 @@ def retrieve(url, cacert=None, postdata=None, timeout=90): command += ('--connect-timeout', str(timeout)) command += (url, ) if verbose: - print 'Invoking ', command - if postdata: print 'with postdata=', postdata + print('Invoking ', command) + if postdata: print('with postdata=', postdata) p = Sopen(command , stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) if postdata: p.stdin.write(postdata) p.stdin.close() @@ -45,6 +45,6 @@ def retrieve(url, cacert=None, postdata=None, timeout=90): if rc != 0: # when this triggers, the error sometimes doesn't get printed logger.log ("curlwrapper: retrieve, got stderr <%s>"%err) - raise xmlrpclib.ProtocolError(url, rc, err, postdata) + raise xmlrpc.client.ProtocolError(url, rc, err, postdata) else: return data diff --git a/database.py b/database.py index 0680b6c..aef900d 100644 --- a/database.py +++ b/database.py @@ -15,7 +15,7 @@ partition, the database is constantly being dumped to disk. import sys -import cPickle +import pickle import threading import time @@ -39,7 +39,7 @@ MINIMUM_ALLOCATION = {'cpu_pct': 0, 'net_i2_max_rate': 8, 'net_share': 1, } -LOANABLE_RESOURCES = MINIMUM_ALLOCATION.keys() +LOANABLE_RESOURCES = list(MINIMUM_ALLOCATION.keys()) DB_FILE = '/var/lib/nodemanager/database.pickle' @@ -77,11 +77,11 @@ In order to do the accounting, we store three different rspecs: * and variable resid_rspec, which is the amount of resources the sliver has after giving out loans but not receiving any.""" slivers = {} - for name, rec in self.iteritems(): + for name, rec in self.items(): if 'rspec' in rec: rec['_rspec'] = rec['rspec'].copy() slivers[name] = rec - for rec in slivers.itervalues(): + for rec in slivers.values(): eff_rspec = rec['_rspec'] resid_rspec = rec['rspec'].copy() for target, resource_name, amount in rec.get('_loans', []): @@ -100,7 +100,7 @@ keys.""" old_rec = self.get(name) if old_rec == None: self[name] = rec elif rec['timestamp'] > old_rec['timestamp']: - for key in old_rec.keys(): + for key in list(old_rec.keys()): if not key.startswith('_'): del old_rec[key] old_rec.update(rec) @@ -109,7 +109,7 @@ keys.""" We use it to determine if a record is stale. 
This method should be called whenever new GetSlivers() data comes in.""" self._min_timestamp = ts - for name, rec in self.items(): + for name, rec in list(self.items()): if rec['timestamp'] < ts: del self[name] def sync(self): @@ -120,7 +120,7 @@ It may be necessary in the future to do something smarter.""" # delete expired records now = time.time() - for name, rec in self.items(): + for name, rec in list(self.items()): if rec.get('expires', now) < now: del self[name] self._compute_effective_rspecs() @@ -138,7 +138,7 @@ It may be necessary in the future to do something smarter.""" if name not in self: logger.verbose("database: sync : ensure_destroy'ing %s"%name) account.get(name).ensure_destroyed() - for name, rec in self.iteritems(): + for name, rec in self.items(): # protect this; if anything fails for a given sliver # we still need the other ones to be handled try: @@ -179,7 +179,7 @@ It proceeds to handle dump requests forever.""" while True: db_lock.acquire() while not dump_requested: db_cond.wait() - db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL) + db_pickle = pickle.dumps(db, pickle.HIGHEST_PROTOCOL) dump_requested = False db_lock.release() try: @@ -190,7 +190,7 @@ It proceeds to handle dump requests forever.""" global db try: f = open(DB_FILE) - try: db = cPickle.load(f) + try: db = pickle.load(f) finally: f.close() except IOError: logger.log ("database: Could not load %s -- starting from a fresh database"%DB_FILE) diff --git a/doc/DocBookLocal.py b/doc/DocBookLocal.py index 62853a8..c697c3d 100755 --- a/doc/DocBookLocal.py +++ b/doc/DocBookLocal.py @@ -11,7 +11,7 @@ def api_methods(): for func in dir(api_calls): try: f = api_calls.__getattribute__(func) - if 'group' in f.__dict__.keys(): + if 'group' in list(f.__dict__.keys()): api_function_list += [api_calls.__getattribute__(func)] except: pass diff --git a/initscript.py b/initscript.py index d02fad4..392b66b 100644 --- a/initscript.py +++ b/initscript.py @@ -18,7 +18,7 @@ class Initscript: self.initscript = new_initscript code = self.initscript sliver_initscript = "/vservers/%s/etc/rc.d/init.d/vinit.slice" % self.name - if tools.replace_file_with_string(sliver_initscript, code, remove_if_empty=True, chmod=0755): + if tools.replace_file_with_string(sliver_initscript, code, remove_if_empty=True, chmod=0o755): if code: logger.log("Initscript: %s: Installed new initscript in %s" % (self.name, sliver_initscript)) if self.is_running(): @@ -57,7 +57,7 @@ class Initscript: # install in sliver with open(vinit_source) as f: code = f.read() - if tools.replace_file_with_string(vinit_script, code, chmod=0755): + if tools.replace_file_with_string(vinit_script, code, chmod=0o755): logger.log("Initscript: %s: installed generic vinit rc script" % self.name) # create symlink for runlevel 3 if not os.path.islink(enable_link): @@ -97,7 +97,7 @@ class Initscript: # install in sliver with open(vinit_source) as f: code = f.read() - if tools.replace_file_with_string(vinit_unit_file, code, chmod=0755): + if tools.replace_file_with_string(vinit_unit_file, code, chmod=0o755): logger.log("Initscript: %s: installed vinit.service unit file" % self.name) # create symlink for enabling this unit if not os.path.islink(enable_link): diff --git a/logger.py b/logger.py index 796064f..569d2e0 100644 --- a/logger.py +++ b/logger.py @@ -34,7 +34,7 @@ def log(msg, level=LOG_NODE): if level > LOG_LEVEL: return try: - fd = os.open(LOG_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600) + fd = os.open(LOG_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o600) if 
not msg.endswith('\n'): msg += '\n' os.write(fd, '%s: %s' % (time.asctime(time.gmtime()), msg)) diff --git a/net.py b/net.py index 23a13b6..884898e 100644 --- a/net.py +++ b/net.py @@ -59,7 +59,7 @@ def InitNodeLimit(data): # query running network interfaces devs = sioc.gifconf() - ips = dict(zip(devs.values(), devs.keys())) + ips = dict(list(zip(list(devs.values()), list(devs.keys())))) macs = {} for dev in devs: macs[sioc.gifhwaddr(dev).lower()] = dev @@ -68,7 +68,7 @@ def InitNodeLimit(data): # Get interface name preferably from MAC address, falling # back on IP address. hwaddr=interface['mac'] - if hwaddr <> None: hwaddr=hwaddr.lower() + if hwaddr != None: hwaddr=hwaddr.lower() if hwaddr in macs: dev = macs[interface['mac']] elif interface['ip'] in ips: @@ -126,7 +126,7 @@ def InitNAT(plc, data): # query running network interfaces devs = sioc.gifconf() - ips = dict(zip(devs.values(), devs.keys())) + ips = dict(list(zip(list(devs.values()), list(devs.keys())))) macs = {} for dev in devs: macs[sioc.gifhwaddr(dev).lower()] = dev @@ -136,7 +136,7 @@ def InitNAT(plc, data): # Get interface name preferably from MAC address, falling # back on IP address. hwaddr=interface['mac'] - if hwaddr <> None: hwaddr=hwaddr.lower() + if hwaddr != None: hwaddr=hwaddr.lower() if hwaddr in macs: dev = macs[interface['mac']] elif interface['ip'] in ips: diff --git a/nodemanager.py b/nodemanager.py index 78a6d1a..dfe6284 100755 --- a/nodemanager.py +++ b/nodemanager.py @@ -11,7 +11,7 @@ import optparse import time -import xmlrpclib +import xmlrpc.client import socket import os import sys @@ -198,11 +198,11 @@ class NodeManager: try: other_pid = tools.pid_file() if other_pid != None: - print """There might be another instance of the node manager running as pid {}. -If this is not the case, please remove the pid file {}. -- exiting""".format(other_pid, tools.PID_FILE) + print("""There might be another instance of the node manager running as pid {}. +If this is not the case, please remove the pid file {}. -- exiting""".format(other_pid, tools.PID_FILE)) return - except OSError, err: - print "Warning while writing PID file:", err + except OSError as err: + print("Warning while writing PID file:", err) # load modules self.loaded_modules = [] diff --git a/plcapi.py b/plcapi.py index acccda0..a03e9e0 100644 --- a/plcapi.py +++ b/plcapi.py @@ -28,7 +28,7 @@ class PLCAPI: if isinstance(auth, (tuple, list)): (self.node_id, self.key) = auth self.session = None - elif isinstance(auth, (str, unicode)): + elif isinstance(auth, str): self.node_id = self.key = None self.session = auth else: @@ -85,10 +85,10 @@ class PLCAPI: # Yes, the comments in the old implementation are # misleading. Keys of dicts are not included in the # hash. - values += canonicalize(arg.values()) + values += canonicalize(list(arg.values())) else: # We use unicode() instead of str(). - values.append(unicode(arg)) + values.append(str(arg)) return values diff --git a/plugins/codemux.py b/plugins/codemux.py index 0560fa8..7ad0039 100644 --- a/plugins/codemux.py +++ b/plugins/codemux.py @@ -40,7 +40,7 @@ def GetSlivers(data, config, plc = None): codemuxslices = {} # XXX Hack for planetflow - if slicesinconf.has_key("root"): + if "root" in slicesinconf: _writeconf = False else: _writeconf = True @@ -68,7 +68,7 @@ def GetSlivers(data, config, plc = None): # Check to see if sliver is running. 
If not, continue if slivermanager.is_running(sliver['name']): # Check if new or needs updating - if (sliver['name'] not in slicesinconf.keys()) \ + if (sliver['name'] not in list(slicesinconf.keys())) \ or (params not in slicesinconf.get(sliver['name'], [])): logger.log("codemux: Updating slice %s using %s" % \ (sliver['name'], params['host'])) @@ -107,7 +107,7 @@ def writeConf(slivers, conf = CODEMUXCONF): f.write("* root 1080 %s\n" % Config().PLC_API_HOST) # Sort items for like domains for mapping in slivers: - for (host, params) in mapping.iteritems(): + for (host, params) in mapping.items(): if params['slice'] == "root": continue f.write("%s %s %s %s\n" % (host, params['slice'], params['port'], params['ip'])) f.truncate() @@ -121,10 +121,10 @@ def writeConf(slivers, conf = CODEMUXCONF): def sortDomains(slivers): '''Given a dict of {slice: {domainname, port}}, return array of slivers with lower order domains first''' dnames = {} # {host: slice} - for (slice, params) in slivers.iteritems(): + for (slice, params) in slivers.items(): for mapping in params: dnames[mapping['host']] = {"slice":slice, "port": mapping['port'], "ip": mapping['ip']} - hosts = dnames.keys() + hosts = list(dnames.keys()) # sort by length hosts.sort(key=str.__len__) # longer first diff --git a/plugins/hostmap.py b/plugins/hostmap.py index 7f47afd..d73a6ce 100644 --- a/plugins/hostmap.py +++ b/plugins/hostmap.py @@ -6,7 +6,7 @@ import logger import os import curlwrapper import re -import xmlrpclib +import xmlrpc.client try: from hashlib import sha1 as sha except ImportError: diff --git a/plugins/interfaces.py b/plugins/interfaces.py index bc24b4e..fe303a3 100644 --- a/plugins/interfaces.py +++ b/plugins/interfaces.py @@ -5,7 +5,7 @@ Configure interfaces inside a container by pulling down files via URL. import logger import os import curlwrapper -import xmlrpclib +import xmlrpc.client try: from hashlib import sha1 as sha except ImportError: @@ -55,7 +55,7 @@ def GetSlivers(data, config=None, plc=None): url = mydict['url'] try: contents = curlwrapper.retrieve(url) - except xmlrpclib.ProtocolError as e: + except xmlrpc.client.ProtocolError as e: logger.log('interfaces (%s): failed to retrieve %s' % (slicename, url)) continue else: @@ -66,7 +66,7 @@ def GetSlivers(data, config=None, plc=None): logger.log('interfaces (%s): no DEVICE specified' % slicename) continue - for key, value in mydict.items(): + for key, value in list(mydict.items()): if key in ['bridge', 'vlan']: continue contents += '%s="%s"\n' % (key, value) diff --git a/plugins/privatebridge.py b/plugins/privatebridge.py index b77f881..7938c48 100644 --- a/plugins/privatebridge.py +++ b/plugins/privatebridge.py @@ -2,7 +2,7 @@ """ Private Bridge configurator. 
""" -import httplib +import http.client import os import select import shutil diff --git a/plugins/rawdisk.py b/plugins/rawdisk.py index 90f267a..eb3df0f 100644 --- a/plugins/rawdisk.py +++ b/plugins/rawdisk.py @@ -30,8 +30,7 @@ def get_unused_devices(): for i in os.listdir("/sys/block"): if not i.startswith("dm-"): continue - in_vg.extend(map(lambda x: x.replace("!", "/"), - os.listdir("/sys/block/%s/slaves" % i))) + in_vg.extend([x.replace("!", "/") for x in os.listdir("/sys/block/%s/slaves" % i)]) # Read the list of partitions with open("/proc/partitions") as partitions: pat = re.compile("\s+") @@ -73,7 +72,7 @@ def GetSlivers(data, config=None, plc=None): except: pass try: - os.makedirs(os.path.dirname(path), 0755) + os.makedirs(os.path.dirname(path), 0o755) except: pass os.mknod(path, st.st_mode, st.st_rdev) diff --git a/plugins/reservation.py b/plugins/reservation.py index b5728d9..dd422b8 100644 --- a/plugins/reservation.py +++ b/plugins/reservation.py @@ -100,7 +100,7 @@ class reservation: return ((int(time)+granularity/2)/granularity)*granularity def clear_timers (self): - for timer in self.timers.values(): + for timer in list(self.timers.values()): timer.cancel() self.timers={} @@ -137,7 +137,7 @@ class reservation: timer.start() def list_timers(self): - timestamps=self.timers.keys() + timestamps=list(self.timers.keys()) timestamps.sort() for timestamp in timestamps: logger.log('reservation: TIMER armed for %s'%reservation.time_printable(timestamp)) @@ -150,7 +150,7 @@ class reservation: @staticmethod def lease_printable (lease): - d=dict ( lease.iteritems()) + d=dict ( iter(lease.items())) d['from']=reservation.time_printable(lease['t_from']) d['until']=reservation.time_printable(lease['t_from']) s=[] diff --git a/plugins/sfagids.py b/plugins/sfagids.py index 21ea581..a3e1813 100644 --- a/plugins/sfagids.py +++ b/plugins/sfagids.py @@ -109,7 +109,7 @@ def install_trusted_certs(api): trusted_gid_names.append(relative_filename) gid_filename = trusted_certs_dir + os.sep + relative_filename if verbose: - print "Writing GID for %s as %s" % (gid.get_hrn(), gid_filename) + print("Writing GID for %s as %s" % (gid.get_hrn(), gid_filename)) gid.save_to_file(gid_filename, save_parents=True) # remove old certs @@ -117,7 +117,7 @@ def install_trusted_certs(api): for gid_name in all_gids_names: if gid_name not in trusted_gid_names: if verbose: - print "Removing old gid ", gid_name + print("Removing old gid ", gid_name) os.unlink(trusted_certs_dir + os.sep + gid_name) diff --git a/plugins/sliverauth.py b/plugins/sliverauth.py index ec65c72..503afa1 100644 --- a/plugins/sliverauth.py +++ b/plugins/sliverauth.py @@ -88,7 +88,7 @@ def manage_hmac (plc, sliver): if not hmac: # let python do its thing random.seed() - d = [random.choice(string.letters) for x in xrange(32)] + d = [random.choice(string.letters) for x in range(32)] hmac = "".join(d) SetSliverTag(plc, sliver['name'], 'hmac', hmac) logger.log("sliverauth: %s: setting hmac" % sliver['name']) @@ -96,7 +96,7 @@ def manage_hmac (plc, sliver): path = '/vservers/%s/etc/planetlab' % sliver['name'] if os.path.exists(path): keyfile = '%s/key' % path - if (tools.replace_file_with_string(keyfile, hmac, chmod=0400)): + if (tools.replace_file_with_string(keyfile, hmac, chmod=0o400)): logger.log ("sliverauth: (over)wrote hmac into %s " % keyfile) # create the key if needed and returns the key contents @@ -110,12 +110,12 @@ def generate_sshkey (sliver): dotssh=os.path.dirname(keyfile) # create dir if needed if not os.path.isdir (dotssh): - os.mkdir 
(dotssh, 0700) + os.mkdir (dotssh, 0o700) logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), dotssh ] ) if not os.path.isfile(pubfile): comment="%s@%s"%(sliver['name'], socket.gethostname()) logger.log_call( [ 'ssh-keygen', '-t', 'rsa', '-N', '', '-f', keyfile , '-C', comment] ) - os.chmod (keyfile, 0400) + os.chmod (keyfile, 0o400) logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), keyfile, pubfile ] ) with open(pubfile) as f: return f.read().strip() @@ -127,6 +127,6 @@ def manage_sshkey (plc, sliver): # if it's lost b/c e.g. the sliver was destroyed we cannot save the tags content ssh_key = generate_sshkey(sliver) old_tag = find_tag (sliver, 'ssh_key') - if ssh_key <> old_tag: + if ssh_key != old_tag: SetSliverTag(plc, sliver['name'], 'ssh_key', ssh_key) logger.log ("sliverauth: %s: setting ssh_key" % sliver['name']) diff --git a/plugins/specialaccounts.py b/plugins/specialaccounts.py index 718d283..ff984e6 100644 --- a/plugins/specialaccounts.py +++ b/plugins/specialaccounts.py @@ -56,9 +56,9 @@ def GetSlivers(data, conf = None, plc = None): logger.log("specialaccounts: keys file changed: %s" % auth_keys) # always set permissions properly - os.chmod(dot_ssh, 0700) + os.chmod(dot_ssh, 0o700) os.chown(dot_ssh, uid, gid) - os.chmod(auth_keys, 0600) + os.chmod(auth_keys, 0o600) os.chown(auth_keys, uid, gid) logger.log('specialaccounts: installed ssh keys for %s' % name) diff --git a/plugins/syndicate.py b/plugins/syndicate.py index e8ddccd..4a9d4de 100644 --- a/plugins/syndicate.py +++ b/plugins/syndicate.py @@ -2,7 +2,7 @@ """ Syndicate configurator. """ -import httplib +import http.client import os import shutil import tools @@ -21,7 +21,7 @@ def syndicate_op(op, mountpoint, syndicate_ip): logger.log("Syndicate: Http op %s on url %s to host %s" % (op, mountpoint, syndicate_ip)) try: - conn = httplib.HTTPSConnection(syndicate_ip, timeout=60) + conn = http.client.HTTPSConnection(syndicate_ip, timeout=60) conn.request(op, mountpoint) r1 = conn.getresponse() except: diff --git a/plugins/vsys.py b/plugins/vsys.py index 4e0a661..3818763 100644 --- a/plugins/vsys.py +++ b/plugins/vsys.py @@ -39,7 +39,7 @@ def GetSlivers(data, config=None, plc=None): # add to conf slices.append(sliver['name']) _restart = createVsysDir(sliver['name']) or _restart - if attribute['value'] in scripts.keys(): + if attribute['value'] in list(scripts.keys()): scripts[attribute['value']].append(sliver['name']) # Write the conf @@ -105,7 +105,7 @@ def writeAcls(currentscripts, oldscripts): # not the same as length of values of new scripts, # and length of non intersection along new scripts is not 0, # then dicts are different. 
- for (acl, oldslivers) in oldscripts.iteritems(): + for (acl, oldslivers) in oldscripts.items(): try: if (len(oldslivers) != len(currentscripts[acl])) or \ (len(set(oldslivers) - set(currentscripts[acl])) != 0): diff --git a/plugins/vsys_privs.py b/plugins/vsys_privs.py index b6e69e2..974bfd5 100755 --- a/plugins/vsys_privs.py +++ b/plugins/vsys_privs.py @@ -34,9 +34,9 @@ def GetSlivers(data, config=None, plc=None): tag = attribute['tagname'] value = attribute['value'] if tag.startswith('vsys_'): - if (privs.has_key(slice)): + if (slice in privs): slice_priv = privs[slice] - if (slice_priv.has_key(tag)): + if (tag in slice_priv): slice_priv[tag].append(value) else: slice_priv[tag]=[value] @@ -74,17 +74,17 @@ def read_privs(): return cur_privs def write_privs(cur_privs, privs): - for slice in privs.keys(): + for slice in list(privs.keys()): variables = privs[slice] slice_dir = os.path.join(VSYS_PRIV_DIR, slice) if (not os.path.exists(slice_dir)): os.mkdir(slice_dir) # Add values that do not exist - for k in variables.keys(): + for k in list(variables.keys()): v = variables[k] - if (cur_privs.has_key(slice) - and cur_privs[slice].has_key(k) + if (slice in cur_privs + and k in cur_privs[slice] and cur_privs[slice][k] == v): # The binding has not changed pass @@ -98,21 +98,21 @@ def write_privs(cur_privs, privs): # Remove files and directories # that are invalid - for slice in cur_privs.keys(): + for slice in list(cur_privs.keys()): variables = cur_privs[slice] slice_dir = os.path.join(VSYS_PRIV_DIR, slice) # Add values that do not exist - for k in variables.keys(): - if (privs.has_key(slice) - and cur_privs[slice].has_key(k)): + for k in list(variables.keys()): + if (slice in privs + and k in cur_privs[slice]): # ok, spare this tag - print "Sparing %s, %s "%(slice, k) + print("Sparing %s, %s "%(slice, k)) else: v_file = os.path.join(slice_dir, k) os.remove(v_file) - if (not privs.has_key(slice)): + if (slice not in privs): os.rmdir(slice_dir) diff --git a/safexmlrpc.py b/safexmlrpc.py index 2392c0d..71bf4d6 100644 --- a/safexmlrpc.py +++ b/safexmlrpc.py @@ -1,11 +1,11 @@ """Leverage curl to make XMLRPC requests that check the server's credentials.""" -import xmlrpclib +import xmlrpc.client import curlwrapper -class CertificateCheckingSafeTransport (xmlrpclib.Transport): +class CertificateCheckingSafeTransport (xmlrpc.client.Transport): def __init__(self, cacert, timeout): self.cacert = cacert @@ -19,11 +19,11 @@ class CertificateCheckingSafeTransport (xmlrpclib.Transport): cacert = self.cacert, postdata = request_body, timeout = self.timeout) - return xmlrpclib.loads(contents)[0] + return xmlrpc.client.loads(contents)[0] -class ServerProxy(xmlrpclib.ServerProxy): +class ServerProxy(xmlrpc.client.ServerProxy): def __init__(self, uri, cacert, timeout = 300, **kwds): - xmlrpclib.ServerProxy.__init__(self, uri, + xmlrpc.client.ServerProxy.__init__(self, uri, CertificateCheckingSafeTransport(cacert, timeout), **kwds) diff --git a/sliver_libvirt.py b/sliver_libvirt.py index cf6b0c2..cea8a39 100644 --- a/sliver_libvirt.py +++ b/sliver_libvirt.py @@ -170,7 +170,7 @@ class Sliver_Libvirt(Account): try: # create actually means start self.dom.create() - except Exception, e: + except Exception as e: # XXX smbaker: attempt to resolve slivers that are stuck in # "failed to allocate free veth". 
if "ailed to allocate free veth" in str(e): @@ -225,7 +225,7 @@ class Sliver_Libvirt(Account): # Btrfs support quota per volumes - if rec.has_key("rspec") and rec["rspec"].has_key("tags"): + if "rspec" in rec and "tags" in rec["rspec"]: if cgroups.get_cgroup_path(self.name) == None: # If configure is called before start, then the cgroups won't exist # yet. NM will eventually re-run configure on the next iteration. @@ -235,7 +235,7 @@ class Sliver_Libvirt(Account): else: tags = rec["rspec"]["tags"] # It will depend on the FS selection - if tags.has_key('disk_max'): + if 'disk_max' in tags: disk_max = tags['disk_max'] if disk_max == 0: # unlimited @@ -245,17 +245,17 @@ class Sliver_Libvirt(Account): pass # Memory allocation - if tags.has_key('memlock_hard'): + if 'memlock_hard' in tags: mem = str(int(tags['memlock_hard']) * 1024) # hard limit in bytes cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory") - if tags.has_key('memlock_soft'): + if 'memlock_soft' in tags: mem = str(int(tags['memlock_soft']) * 1024) # soft limit in bytes cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory") # CPU allocation # Only cpu_shares until figure out how to provide limits and guarantees # (RT_SCHED?) - if tags.has_key('cpu_share'): + if 'cpu_share' in tags: cpu_share = tags['cpu_share'] cgroups.write(self.name, 'cpu.shares', cpu_share) diff --git a/sliver_vs.py b/sliver_vs.py index ac2381a..7fd295a 100644 --- a/sliver_vs.py +++ b/sliver_vs.py @@ -40,7 +40,7 @@ KEEP_LIMIT = vserver.VC_LIM_KEEP # populate the sliver/vserver specific default allocations table, # which is used to look for slice attributes DEFAULT_ALLOCATION = {} -for rlimit in vserver.RLIMITS.keys(): +for rlimit in list(vserver.RLIMITS.keys()): rlim = rlimit.lower() DEFAULT_ALLOCATION["{}_min".format(rlim)] = KEEP_LIMIT DEFAULT_ALLOCATION["{}_soft".format(rlim)] = KEEP_LIMIT @@ -61,7 +61,7 @@ class Sliver_VS(vserver.VServer, Account, Initscript): vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager') Account.__init__ (self, name) Initscript.__init__ (self, name) - except Exception, err: + except Exception as err: if not isinstance(err, vserver.NoSuchVServer): # Probably a bad vserver or vserver configuration file logger.log_exc("sliver_vs:__init__ (first chance)", name=name) @@ -222,7 +222,7 @@ class Sliver_VS(vserver.VServer, Account, Initscript): # get/set the min/soft/hard values for all of the vserver # related RLIMITS. Note that vserver currently only # implements support for hard limits. 
- for limit in vserver.RLIMITS.keys(): + for limit in list(vserver.RLIMITS.keys()): type = limit.lower() minimum = self.rspec['{}_min'.format(type)] soft = self.rspec['{}_soft'.format(type)] @@ -241,14 +241,14 @@ class Sliver_VS(vserver.VServer, Account, Initscript): cpu_share = self.rspec['cpu_share'] count = 1 - for key in self.rspec.keys(): + for key in list(self.rspec.keys()): if key.find('sysctl.') == 0: sysctl = key.split('.') try: # /etc/vservers//sysctl// dirname = "/etc/vservers/{}/sysctl/{}".format(self.name, count) try: - os.makedirs(dirname, 0755) + os.makedirs(dirname, 0o755) except: pass with open("{}/setting".format(dirname), "w") as setting: @@ -259,7 +259,7 @@ class Sliver_VS(vserver.VServer, Account, Initscript): logger.log("sliver_vs: {}: writing {}={}" .format(self.name, key, self.rspec[key])) - except IOError, e: + except IOError as e: logger.log("sliver_vs: {}: could not set {}={}" .format(self.name, key, self.rspec[key])) logger.log("sliver_vs: {}: error = {}".format(self.name, e)) diff --git a/slivermanager.py b/slivermanager.py index 4a3c3eb..6db569a 100644 --- a/slivermanager.py +++ b/slivermanager.py @@ -132,7 +132,7 @@ def GetSlivers(data, config = None, plc=None, fullupdate=True): slivers.""" logger.verbose("slivermanager: Entering GetSlivers with fullupdate=%r"%fullupdate) - for key in data.keys(): + for key in list(data.keys()): logger.verbose('slivermanager: GetSlivers key : ' + key) node_id = None @@ -145,9 +145,9 @@ def GetSlivers(data, config = None, plc=None, fullupdate=True): except: logger.log_exc("slivermanager: GetSlivers failed to read /etc/planetlab/node_id") - if data.has_key('node_id') and data['node_id'] != node_id: return + if 'node_id' in data and data['node_id'] != node_id: return - if data.has_key('networks'): + if 'networks' in data: for network in data['networks']: if network['is_primary'] and network['bwlimit'] is not None: DEFAULT_ALLOCATION['net_max_rate'] = network['bwlimit'] / 1000 @@ -205,7 +205,7 @@ def GetSlivers(data, config = None, plc=None, fullupdate=True): # extract the implied rspec rspec = {} rec['rspec'] = rspec - for resname, default_amount in DEFAULT_ALLOCATION.iteritems(): + for resname, default_amount in DEFAULT_ALLOCATION.items(): try: t = type(default_amount) amount = t.__new__(t, attributes[resname]) @@ -213,7 +213,7 @@ def GetSlivers(data, config = None, plc=None, fullupdate=True): rspec[resname] = amount # add in sysctl attributes into the rspec - for key in attributes.keys(): + for key in list(attributes.keys()): if key.find("sysctl.") == 0: rspec[key] = attributes[key] @@ -233,7 +233,7 @@ def start(): # No default allocation values for LXC yet, think if its necessary given # that they are also default allocation values in this module if implementation == 'vs': - for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.iteritems(): + for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.items(): DEFAULT_ALLOCATION[resname]=default_amount account.register_class(sliver_class_to_register) diff --git a/systemd/nm.service b/systemd/nm.service index f84fc82..0c3fe56 100644 --- a/systemd/nm.service +++ b/systemd/nm.service @@ -10,7 +10,7 @@ After=network.target EnvironmentFile=/etc/sysconfig/nodemanager Type=forking PIDFile=/var/run/nodemanager.pid -ExecStart=/usr/bin/env python /usr/share/NodeManager/nodemanager.py -d $OPTIONS +ExecStart=/usr/bin/env python3 /usr/share/NodeManager/nodemanager.py -d $OPTIONS [Install] WantedBy=multi-user.target diff --git a/ticket.py b/ticket.py index c6dc4f7..1904b81 100644 --- 
a/ticket.py +++ b/ticket.py @@ -5,7 +5,7 @@ You must already have the key in the keyring. """ from subprocess import PIPE, Popen -from xmlrpclib import dumps, loads +from xmlrpc.client import dumps, loads GPG = '/usr/bin/gpg' diff --git a/tools.py b/tools.py index 546288f..c7b6256 100644 --- a/tools.py +++ b/tools.py @@ -62,11 +62,11 @@ def daemon(): os.setsid() if os.fork() != 0: os._exit(0) os.chdir('/') - os.umask(0022) + os.umask(0o022) devnull = os.open(os.devnull, os.O_RDWR) os.dup2(devnull, 0) # xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull - crashlog = os.open('/var/log/nodemanager.daemon', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644) + crashlog = os.open('/var/log/nodemanager.daemon', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0o644) os.dup2(crashlog, 1) os.dup2(crashlog, 2) @@ -112,7 +112,7 @@ The return value is the pid of the other running process, or None otherwise. handle.close() # check for a process with that pid by sending signal 0 try: os.kill(other_pid, 0) - except OSError, e: + except OSError as e: if e.errno == errno.ESRCH: other_pid = None # doesn't exist else: raise # who knows if other_pid == None: @@ -199,7 +199,7 @@ def root_context_arch(): class NMLock: def __init__(self, file): logger.log("tools: Lock {} initialized.".format(file), 2) - self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0600) + self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0o600) flags = fcntl.fcntl(self.fd, fcntl.F_GETFD) flags |= fcntl.FD_CLOEXEC fcntl.fcntl(self.fd, fcntl.F_SETFD, flags) @@ -433,7 +433,7 @@ def reboot_slivers(): logger.log("tools: REBOOT {}".format(domain.name()) ) else: raise Exception() - except Exception, e: + except Exception as e: logger.log("tools: FAILED to reboot {} ({})".format(domain.name(), e) ) logger.log("tools: Trying to DESTROY/CREATE {} instead...".format(domain.name()) ) try: @@ -445,7 +445,7 @@ def reboot_slivers(): if result==0: logger.log("tools: CREATED {}".format(domain.name()) ) else: logger.log("tools: FAILED in the CREATE call of {}".format(domain.name()) ) - except Exception, e: + except Exception as e: logger.log("tools: FAILED to DESTROY/CREATE {} ({})".format(domain.name(), e) ) ################################################### @@ -503,7 +503,7 @@ def remove_all_ipv6addr_hosts(slicename, node): ipv6candidatestrip = ipv6candidate.strip() valid = is_valid_ipv6(ipv6candidatestrip) if not valid: - print line, + print(line, end=' ') fileinput.close() logger.log("tools: REMOVED IPv6 address from /etc/hosts file of slice={}" .format(slicename) ) -- 2.43.0
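
Note on the conversion patterns above: the whole commit is a mechanical 2to3 pass, and every hunk is an instance of a small set of Python 2 -> Python 3 idiom changes. The snippet below is a minimal standalone sketch (not part of the commit; the dict, paths, and names are made up for illustration) collecting those idioms in one runnable place:

    # Python 3 equivalents of the Python 2 idioms removed throughout this patch.
    import os
    import xmlrpc.client                     # was: import xmlrpclib

    def demo(d):
        os.umask(0o022)                      # was: os.umask(0022) -- octal literals need the 0o prefix
        if 'key' in d:                       # was: d.has_key('key')
            names = sorted(d.keys())         # was: names = d.keys(); names.sort() -- dict views have no .sort()
        for name, value in d.items():        # was: d.iteritems()
            print(name, value)               # was: print name, value
        try:
            raise xmlrpc.client.Fault(100, 'example fault')
        except xmlrpc.client.Fault as err:   # was: except Fault, err:
            print('caught:', err)

    demo({'key': 1})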
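
A blind pass like this one also leaves behind breakage that 2to3 cannot see, and several instances are visible in the diff itself: sliverauth.py still uses string.letters (removed in Python 3), bwmon.py and database.py still open their pickle files in text mode, and logger.py still passes a str to os.write(). Below is a sketch of the follow-up fixes those call for (illustrative only, not in this commit; the file paths are placeholders):

    # Fixes a blind 2to3 run does not catch.
    import os
    import pickle
    import random
    import string

    # string.letters is gone in Python 3; string.ascii_letters replaces it.
    hmac = ''.join(random.choice(string.ascii_letters) for _ in range(32))

    # pickle requires binary-mode files in Python 3 ('wb'/'rb', not 'w' or a bare open()).
    with open('/tmp/demo.pickle', 'wb') as f:
        pickle.dump({'hmac': hmac}, f, pickle.HIGHEST_PROTOCOL)
    with open('/tmp/demo.pickle', 'rb') as f:
        data = pickle.load(f)

    # os.write() takes bytes in Python 3, so str messages must be encoded.
    fd = os.open('/tmp/demo.log', os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o600)
    os.write(fd, 'message\n'.encode())
    os.close(fd)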
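
The api.py hunks are the least mechanical part of the patch: the SimpleXMLRPCServer and SocketServer modules became xmlrpc.server and socketserver, so the mixin-based server class changes spelling. A minimal runnable rendition of that pattern, assuming an arbitrary port and method name rather than the ones api.py registers:

    # Threaded XML-RPC server: the Python 3 spelling of the api.py pattern.
    import socketserver
    import xmlrpc.server

    class APIServer(socketserver.ThreadingMixIn, xmlrpc.server.SimpleXMLRPCServer):
        allow_reuse_address = True

    if __name__ == '__main__':
        server = APIServer(('127.0.0.1', 8812), logRequests=False)
        server.register_function(lambda: 'pong', 'Ping')
        server.serve_forever()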