# autoconf compatible variables
datadir := /usr/share
bindir := /usr/bin
-initdir=/etc/rc.d/init.d
systemddir := /usr/lib/systemd/system
-# call with either WITH_SYSTEMD=true or WITH_INIT=true
-# otherwise we try to guess some reasonable default
-ifeq "$(WITH_INIT)$(WITH_SYSTEMD)" ""
-ifeq "$(wildcard $systemddir/*)" ""
-WITH_INIT=true
-else
-WITH_SYSTEMD=true
-endif
-endif
+PYTHON = python3
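+# override on the command line if needed, e.g. make PYTHON=/usr/bin/python3.6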
+
####################
all: forward_api_calls
- python setup.py build
+ $(PYTHON) setup.py build
forward_api_calls: forward_api_calls.c
$(CC) -Wall -Os -o $@ $?
#################### install
install: install-miscell install-startup
- python setup.py install \
+ $(PYTHON) setup.py install \
--install-purelib=$(DESTDIR)/$(datadir)/NodeManager \
- --install-platlib=$(DESTDIR)/$(datadir)/NodeManager \
- --install-scripts=$(DESTDIR)/$(bindir)
+ --install-platlib=$(DESTDIR)/$(datadir)/NodeManager
chmod +x $(DESTDIR)/$(datadir)/NodeManager/conf_files.py
+
# might be better in setup.py ?
# NOTE: the sliver-initscripts/ and sliver-systemd stuff, being, well, for slivers,
-# need to ship on all nodes regardless of WITH_INIT and WITH_SYSTEMD that
+# need to ship on all nodes regardless of the startup scheme that
# impacts how nodemanager itself gets started
install-miscell:
+ install -D -m 755 forward_api_calls $(DESTDIR)/$(bindir)/forward_api_calls
install -d -m 755 $(DESTDIR)/var/lib/nodemanager
- install -D -m 644 /dev/null $(DESTDIR)/etc/sysconfig/nodemanager
+ install -D -m 644 /dev/null $(DESTDIR)/etc/sysconfig/nodemanager
install -D -m 444 README $(DESTDIR)/$(datadir)/NodeManager/README
install -D -m 644 logrotate/nodemanager $(DESTDIR)/etc/logrotate.d/nodemanager
mkdir -p $(DESTDIR)/$(datadir)/NodeManager/sliver-initscripts
rsync -av sliver-systemd/ $(DESTDIR)/$(datadir)/NodeManager/sliver-systemd/
chmod 755 $(DESTDIR)/$(datadir)/NodeManager/sliver-systemd/
-# this now is for the startup of nodemanager itself
-ifneq "$(WITH_SYSTEMD)" ""
install-startup: install-systemd
-endif
-ifneq "$(WITH_INIT)" ""
-install-startup: install-init
-endif
-
-install-init:
- mkdir -p $(DESTDIR)$(initdir)
- chmod 755 initscripts/*
- rsync -av initscripts/ $(DESTDIR)$(initdir)/
install-systemd:
mkdir -p $(DESTDIR)/$(systemddir)
+	rsync -av systemd/ $(DESTDIR)/$(systemddir)/
#################### clean
clean:
- python setup.py clean
+ $(PYTHON) setup.py clean
-	rm -f forward_api_calls *.pyc build
+	rm -f forward_api_calls *.pyc ; rm -rf build
.PHONY: all install clean
-#################### debian-related
-# This is called from the build with the following variables set
-# (see build/Makefile and target_debian)
-# (.) RPMTARBALL
-# (.) RPMVERSION
-# (.) RPMRELEASE
-# (.) RPMNAME
-DEBVERSION=$(RPMVERSION).$(RPMRELEASE)
-DEBTARBALL=../$(RPMNAME)_$(DEBVERSION).orig.tar.bz2
-DATE=$(shell date -u +"%a, %d %b %Y %T")
-force:
-
-debian: DESTDIR=debian/tmp
-debian: forward_api_calls install debian/changelog debian.source debian.package
-
-debian/changelog: debian/changelog.in
- sed -e "s|@VERSION@|$(DEBVERSION)|" -e "s|@DATE@|$(DATE)|" debian/changelog.in > debian/changelog
-
-debian.source: force
- rsync -a $(RPMTARBALL) $(DEBTARBALL)
-
-debian.package:
- debuild --set-envvar PREFIX=/usr -uc -us -b
-
-debian.clean:
- $(MAKE) -f debian/rules clean
- rm -rf build/ MANIFEST ../*.tar.gz ../*.dsc ../*.build
- find . -name '*.pyc' -delete
-
################################################## devel-oriented
tags:
git ls-files | xargs etags
# and then just run
# $ make sync
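+# e.g. make sync NODE=vnode01.inria.fr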
-LOCAL_RSYNC_EXCLUDES := --exclude '*.pyc'
+LOCAL_RSYNC_EXCLUDES := --exclude '*.pyc'
RSYNC_EXCLUDES := --exclude .git --exclude .svn --exclude '*~' --exclude TAGS $(LOCAL_RSYNC_EXCLUDES)
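+# map make's -n (dry-run) flag onto rsync --dry-run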
RSYNC_COND_DRY_RUN := $(if $(findstring n,$(MAKEFLAGS)),--dry-run,)
RSYNC := rsync -e "ssh -i $(NODE).key.rsa" -a -v $(RSYNC_COND_DRY_RUN) $(RSYNC_EXCLUDES)
	@echo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
	@echo WARNING : this target might not be very reliable - use with care
	@echo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+$(RSYNC) $(LXC_EXCLUDES) --delete-excluded ./ $(NODEURL)/usr/share/NodeManager/
-# +$(RSYNC) ./initscripts/ $(NODEURL)/etc/init.d/
+$(RSYNC) ./systemd/ $(NODEURL)/usr/lib/systemd/system/
-$(SYNC_RESTART) && { ssh -i $(NODE).key.rsa root@$(NODE) service nm restart ; } ||:
endif
-# this is for vs only, we need to exclude the lxc stuff that otherwise messes up everything on node
-# xxx keep this in sync with setup.spec
-VS_EXCLUDES= --exclude sliver_libvirt.py --exclude sliver_lxc.py --exclude cgroups.py --exclude coresched_lxc.py --exclude privatebridge.py
-
-syncvs: $(NODE).key.rsa
-ifeq (,$(NODEURL))
- @echo "syncvs: You must define NODE on the command line"
- @echo " e.g. make sync NODE=vnode01.inria.fr"
- @exit 1
-else
- @echo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- @echo WARNING : this target might not be very reliable - use with care
- @echo xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
- +$(RSYNC) $(VS_EXCLUDES) --delete-excluded ./ $(NODEURL)/usr/share/NodeManager/
- +$(RSYNC) ./initscripts/ $(NODEURL)/etc/init.d/
-# +$(RSYNC) ./systemd/ $(NODEURL)/usr/lib/systemd/system/
- -$(SYNC_RESTART) && { ssh -i $(NODE).key.rsa root@$(NODE) service nm restart ; } ||:
-endif
-
-
### fetching the key
TESTMASTER ?= testmaster.onelab.eu
tools.write_file(auth_keys, lambda f: f.write(new_keys))
# set access permissions and ownership properly
- os.chmod(dot_ssh, 0700)
+ os.chmod(dot_ssh, 0o700)
os.chown(dot_ssh, uid, gid)
- os.chmod(auth_keys, 0600)
+ os.chmod(auth_keys, 0o600)
os.chown(auth_keys, uid, gid)
# set self.keys to new_keys only when all of the above ops succeed
with the forward_api_calls shell.
"""
-import SimpleXMLRPCServer
-import SocketServer
+import xmlrpc.server
+import socketserver
import errno
import os
import pwd
import socket
import struct
import threading
-import xmlrpclib
+import xmlrpc.client
import sys
import database
API_SERVER_PORT = 812
UNIX_ADDR = '/tmp/nodemanager.api'
-class APIRequestHandler(SimpleXMLRPCServer.SimpleXMLRPCRequestHandler):
+class APIRequestHandler(xmlrpc.server.SimpleXMLRPCRequestHandler):
# overriding _dispatch to achieve this effect is officially deprecated,
# but I can't figure out how to get access to .request without
# duplicating SimpleXMLRPCServer code here, which is more likely to
method_name = str(method_name_unicode)
try: method = api_method_dict[method_name]
except KeyError:
- api_method_list = api_method_dict.keys()
+ api_method_list = list(api_method_dict.keys())
api_method_list.sort()
- raise xmlrpclib.Fault(100, 'Invalid API method %s. Valid choices are %s' % \
+ raise xmlrpc.client.Fault(100, 'Invalid API method %s. Valid choices are %s' % \
(method_name, ', '.join(api_method_list)))
expected_nargs = nargs_dict[method_name]
if len(args) != expected_nargs:
- raise xmlrpclib.Fault(101, 'Invalid argument count: got %d, expecting %d.' % \
+ raise xmlrpc.client.Fault(101, 'Invalid argument count: got %d, expecting %d.' % \
(len(args), expected_nargs))
else:
# Figure out who's calling.
# Special case : the sfa component manager
if caller_name == PLC_SLICE_PREFIX+"_sfacm":
try: result = method(*args)
- except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
+ except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err)
# Anyone can call these functions
elif method_name in ('Help', 'Ticket', 'GetXIDs', 'GetSSHKeys'):
try: result = method(*args)
- except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
+ except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err)
else: # Execute anonymous call.
# Authenticate the caller if not in the above fncts.
if method_name == "GetRecord":
# only work on slivers or self. Sanity check.
if not (target_rec and target_rec['type'].startswith('sliver.')):
- raise xmlrpclib.Fault(102, \
+ raise xmlrpc.client.Fault(102, \
'Invalid argument: the first argument must be a sliver name.')
# only manipulate slivers who delegate you authority
if caller_name in (target_name, target_rec['delegations']):
try: result = method(target_rec, *args[1:])
- except Exception, err: raise xmlrpclib.Fault(104, 'Error in call: %s' %err)
+ except Exception as err: raise xmlrpc.client.Fault(104, 'Error in call: %s' %err)
else:
- raise xmlrpclib.Fault(108, '%s: Permission denied.' % caller_name)
+ raise xmlrpc.client.Fault(108, '%s: Permission denied.' % caller_name)
if result == None: result = 1
return result
-class APIServer_INET(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer): allow_reuse_address = True
+class APIServer_INET(socketserver.ThreadingMixIn, xmlrpc.server.SimpleXMLRPCServer): allow_reuse_address = True
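+# allow_reuse_address sets SO_REUSEADDR, so a restarted daemon can rebind
+# 127.0.0.1:812 without waiting for the old socket to leave TIME_WAIT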
class APIServer_UNIX(APIServer_INET): address_family = socket.AF_UNIX
serv1 = APIServer_INET(('127.0.0.1', API_SERVER_PORT), requestHandler=APIRequestHandler, logRequests=0)
tools.as_daemon_thread(serv1.serve_forever)
try: os.unlink(UNIX_ADDR)
- except OSError, e:
+ except OSError as e:
if e.errno != errno.ENOENT: raise
serv2 = APIServer_UNIX(UNIX_ADDR, requestHandler=APIRequestHandler, logRequests=0)
tools.as_daemon_thread(serv2.serve_forever)
- os.chmod(UNIX_ADDR, 0666)
+ os.chmod(UNIX_ADDR, 0o666)
with the forward_api_calls shell.
"""
-import SimpleXMLRPCServer
-import SocketServer
+import xmlrpc.server
+import socketserver
import errno
import os
import pwd
import socket
import struct
import threading
-import xmlrpclib
+import xmlrpc.client
import slivermanager
try:
def export(method):
def args():
# Inspect method. Remove self from the argument list.
- max_args = method.func_code.co_varnames[0:method.func_code.co_argcount]
- defaults = method.func_defaults
+ max_args = method.__code__.co_varnames[0:method.__code__.co_argcount]
+ defaults = method.__defaults__
if defaults is None:
defaults = ()
min_args = max_args[0:len(max_args) - len(defaults)]
@export_to_api(0)
def Help():
"""Get a list of functions currently supported by the Node Manager API"""
- names=api_method_dict.keys()
+ names=list(api_method_dict.keys())
names.sort()
return ''.join(['**** ' + api_method_dict[name].__name__ + '\n' + api_method_dict[name].__doc__ + '\n'
for name in names])
deliver_ticket(data)
logger.log('api_calls: Ticket delivered for %s' % name)
Create(database.db.get(name))
- except Exception, err:
- raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))
+ except Exception as err:
+ raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
accepts=[Parameter(str, 'A ticket returned from GetSlivers()')],
def AdminTicket(ticket):
"""Admin interface to create slivers based on ticket returned by GetSlivers()."""
try:
- data, = xmlrpclib.loads(ticket)[0]
+ data, = xmlrpc.client.loads(ticket)[0]
name = data['slivers'][0]['name']
if data != None:
deliver_ticket(data)
logger.log('api_calls: Admin Ticket delivered for %s' % name)
Create(database.db.get(name))
- except Exception, err:
- raise xmlrpclib.Fault(102, 'Ticket error: ' + str(err))
+ except Exception as err:
+ raise xmlrpc.client.Fault(102, 'Ticket error: ' + str(err))
@export_to_docbook(roles=['self'],
def GetSSHKeys():
"""Return an dictionary mapping slice names to SSH keys"""
keydict = {}
- for rec in database.db.itervalues():
+ for rec in database.db.values():
if 'keys' in rec:
keydict[rec['name']] = rec['keys']
return keydict
account.get(rec['name']).ensure_created(rec)
logger.log("api_calls: Create %s"%rec['name'])
else:
- raise Exception, "Only PLC can create non delegated slivers."
+ raise Exception("Only PLC can create non delegated slivers.")
@export_to_docbook(roles=['nm-controller', 'self'],
account.get(rec['name']).ensure_destroyed()
logger.log("api_calls: Destroy %s"%rec['name'])
else:
- raise Exception, "Only PLC can destroy non delegated slivers."
+ raise Exception("Only PLC can destroy non delegated slivers.")
@export_to_docbook(roles=['nm-controller', 'self'],
of loss of resources."""
rec = sliver_name
if not validate_loans(loans):
- raise xmlrpclib.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification')
+ raise xmlrpc.client.Fault(102, 'Invalid argument: the second argument must be a well-formed loan specification')
rec['_loans'] = loans
database.db.sync()
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Obtaining a node session key. Usually, the Boot
# Manager obtains it, then writes it to /etc/planetlab/session.
# Help
def usage():
- print "Usage: %s [OPTION]..." % sys.argv[0]
- print "Options:"
- print " -f, --config=FILE PLC configuration file (default: /etc/planetlab/plc_config)"
- print " -n, --node-id=FILE Node ID (or file)"
- print " -k, --key=FILE Node key (or file)"
- print " --help This message"
+ print("Usage: %s [OPTION]..." % sys.argv[0])
+ print("Options:")
+ print(" -f, --config=FILE PLC configuration file (default: /etc/planetlab/plc_config)")
+ print(" -n, --node-id=FILE Node ID (or file)")
+ print(" -k, --key=FILE Node key (or file)")
+ print(" --help This message")
sys.exit(1)
# Get options
"node=", "nodeid=", "node-id", "node_id",
"key=",
"help"])
- except getopt.GetoptError, err:
- print "Error: " + err.msg
+ except getopt.GetoptError as err:
+ print("Error: " + err.msg)
usage()
for (opt, optval) in opts:
plc = PLCAPI(config.plc_api_uri, config.cacert, session)
assert session == plc.GetSession()
- print session
+ print(session)
if __name__ == '__main__':
main()
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Average bandwidth monitoring script. Run periodically via NM db.sync to
# enforce a soft limit on daily bandwidth usage for each slice. If a
self.bytes = 0
self.i2bytes = 0
self.MaxRate = default_MaxRate
- self.MinRate = bwlimit.bwmin / 1000
+ self.MinRate = bwlimit.bwmin // 1000
self.Maxi2Rate = default_Maxi2Rate
- self.Mini2Rate = bwlimit.bwmin / 1000
+ self.Mini2Rate = bwlimit.bwmin // 1000
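+        # '//' keeps the kbit/s rates integral; under python3 '/' would yield floats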
self.MaxKByte = default_MaxKByte
self.ThreshKByte = int(.8 * self.MaxKByte)
self.Maxi2KByte = default_Maxi2KByte
self.capped = False
self.updateSliceTags(rspec)
- bwlimit.set(xid = self.xid, dev = dev_default,
- minrate = self.MinRate * 1000,
- maxrate = self.MaxRate * 1000,
- maxexemptrate = self.Maxi2Rate * 1000,
- minexemptrate = self.Mini2Rate * 1000,
- share = self.Share)
+ bwlimit.set(
+ xid=self.xid, dev=dev_default,
+ minrate=self.MinRate * 1000,
+ maxrate=self.MaxRate * 1000,
+ maxexemptrate=self.Maxi2Rate * 1000,
+ minexemptrate=self.Mini2Rate * 1000,
+ share=self.Share)
def __repr__(self):
return self.name
# Sanity check plus policy decision for MinRate:
# Minrate cant be greater than 25% of MaxRate or NodeCap.
- MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin / 1000))
+ MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin // 1000))
if MinRate > int(.25 * default_MaxRate):
MinRate = int(.25 * default_MaxRate)
if MinRate != self.MinRate:
self.MaxRate = MaxRate
logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))
- Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000))
+ Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin // 1000))
if Mini2Rate != self.Mini2Rate:
self.Mini2Rate = Mini2Rate
logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))
bytesused = usedbytes - self.bytes
timeused = int(time.time() - self.time)
        # Calculate the new rate, in bit/s
- new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
+ new_maxrate = int(((maxbyte - bytesused) * 8)
+ / (period - timeused))
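+        # illustrative numbers: with a ~1 GB daily cap (maxbyte=1e9) and 100 MB
+        # already used 6h into the period, ((1e9 - 1e8) * 8) / (86400 - 21600)
+        # leaves ~111 kbit/s for the rest of the day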
# Never go under MinRate
if new_maxrate < (self.MinRate * 1000):
new_maxrate = self.MinRate * 1000
i2bytesused = usedi2bytes - self.i2bytes
timeused = int(time.time() - self.time)
        # Calculate the new rate.
- new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
+ new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)
+                              / (period - timeused))
# Never go under MinRate
if new_maxi2rate < (self.Mini2Rate * 1000):
new_maxi2rate = self.Mini2Rate * 1000
if default_MaxRate == -1:
default_MaxRate = 1000000
- # xxx $Id$
+ # xxx $Id$
# with svn we used to have a trick to detect upgrades of this file
# this has gone with the move to git, without any noticeable effect on operations though
try:
# Since root is required for sanity, its not in the API/plc database, so pass {}
# to use defaults.
- if root_xid not in slices.keys():
+    if root_xid not in slices:
slices[root_xid] = Slice(root_xid, "root", {})
slices[root_xid].reset({}, {})
# Used by bwlimit. pass {} since there is no rspec (like above).
- if default_xid not in slices.keys():
+    if default_xid not in slices:
slices[default_xid] = Slice(default_xid, "default", {})
slices[default_xid].reset({}, {})
live = {}
# Get running slivers that should be on this node (from plc). {xid: name}
# db keys on name, bwmon keys on xid. db doesnt have xid either.
- for plcSliver in nmdbcopy.keys():
+    for plcSliver in nmdbcopy:
live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
- logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
- logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())
+ logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__())
+ logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__())
# Get actual running values from tc.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())
+ logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__())
# The dat file has HTBs for slices, but the HTBs aren't running
nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
# Reset tc counts.
for nohtbslice in nohtbslices:
- if live.has_key(nohtbslice):
+ if nohtbslice in live:
slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
else:
logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
for slicenodat in slicesnodat:
# But slice is running
- if live.has_key(slicenodat):
+ if slicenodat in live:
# init the slice. which means start accounting over since kernel
# htb was already there.
slices[slicenodat] = Slice(slicenodat,
for newslice in newslicesxids:
# Delegated slices dont have xids (which are uids) since they haven't been
# instantiated yet.
- if newslice != None and live[newslice].has_key('_rspec') == True:
+        if newslice is not None and '_rspec' in live[newslice]:
# Check to see if we recently deleted this slice.
- if live[newslice]['name'] not in deaddb.keys():
+            if live[newslice]['name'] not in deaddb:
logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
# _rspec is the computed rspec: NM retrieved data from PLC, computed loans
# and made a dict of computed values.
if deadxid == root_xid or deadxid == default_xid:
continue
logger.log("bwmon: removing dead slice %s " % deadxid)
- if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
+ if deadxid in slices and deadxid in kernelhtbs:
# add slice (by name) to deaddb
logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
del slices[deadxid]
- if kernelhtbs.has_key(deadxid):
+ if deadxid in kernelhtbs:
logger.verbose("bwmon: Removing HTB for %s." % deadxid)
bwlimit.off(deadxid, dev = dev_default)
# Clean up deaddb
- for deadslice in deaddb.keys():
+ for deadslice in list(deaddb.keys()):
if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
logger.log("bwmon: Removing dead slice %s from dat." \
% deaddb[deadslice]['slice'].name)
# Get actual running values from tc since we've added and removed buckets.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())
+ logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__())
# Update all byte limites on all slices
- for (xid, slice) in slices.iteritems():
+ for (xid, slice) in slices.items():
# Monitor only the specified slices
if xid == root_xid or xid == default_xid: continue
if names and name not in names:
# Update byte counts
slice.update(kernelhtbs[xid], live[xid]['_rspec'])
- logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(), DB_FILE))
+ logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE))
f = open(DB_FILE, "w")
pickle.dump((version, slices, deaddb), f)
f.close()
kernelhtbs = gethtbs(root_xid, default_xid)
if len(kernelhtbs):
logger.log("bwmon: Disabling all running HTBs.")
- for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)
+        for htb in kernelhtbs: bwlimit.off(htb, dev = dev_default)
lock = threading.Event()
import os, os.path
import pyinotify
import logger
+from functools import reduce
# Base dir for libvirt
BASE_DIR = '/sys/fs/cgroup'
def get_cgroups():
""" Returns the list of cgroups active at this moment on the node """
- return map(os.path.basename, get_cgroup_paths())
+ return list(map(os.path.basename, get_cgroup_paths()))
def write(name, key, value, subsystem="cpuset"):
""" Writes a value to the file key with the cgroup with name """
base_path = get_cgroup_path(name, subsystem)
with open(os.path.join(base_path, key), 'w') as f:
- print >>f, value
+ print(value, file=f)
logger.verbose("cgroups.write: overwrote {}".format(base_path))
def append(name, key, value, subsystem="cpuset"):
""" Appends a value to the file key with the cgroup with name """
base_path = get_cgroup_path(name, subsystem)
with open(os.path.join(base_path, key), 'a') as f:
- print >>f, value
+ print(value, file=f)
logger.verbose("cgroups.append: appended {}".format(base_path))
if __name__ == '__main__':
subsystems = 'blkio cpu cpu,cpuacct cpuacct cpuset devices freezer memory net_cls perf_event systemd'.split()
for subsystem in subsystems:
- print 'get_cgroup_path({}, {}) = {}'.\
- format(name, subsystem, get_cgroup_path(name, subsystem))
+ print('get_cgroup_path({}, {}) = {}'.\
+ format(name, subsystem, get_cgroup_path(name, subsystem)))
# print 'get_cgroup_paths = {}'.format(get_cgroup_paths(subsystem))
-#!/usr/bin/env python
+#!/usr/bin/env python3
-"""configuration files"""
+# pylint: disable=c0111
+
+"""
+update local configuration files from PLC
+"""
-import grp
import os
+import time
import pwd
-try:
- from hashlib import sha1 as sha
-except ImportError:
- from sha import sha
-import string
+import grp
+from hashlib import sha1 as sha
+import xmlrpc.client
import curlwrapper
import logger
import tools
-import xmlrpclib
from config import Config
# right after net
priority = 2
-class conf_files:
+class ConfFiles:
def __init__(self, noscripts=False):
self.config = Config()
self.noscripts = noscripts
def checksum(self, path):
try:
- f = open(path)
- try: return sha(f.read()).digest()
- finally: f.close()
- except IOError: return None
+ with open(path) as feed:
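+                # encode() so this digest matches sha(contents) computed on the bytes fetched from PLC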
+ return sha(feed.read().encode()).digest()
+ except IOError:
+ return None
def system(self, cmd):
if not self.noscripts and cmd:
else: return 0
def update_conf_file(self, cf_rec):
- if not cf_rec['enabled']: return
+ if not cf_rec['enabled']:
+ return
dest = cf_rec['dest']
err_cmd = cf_rec['error_cmd']
- mode = string.atoi(cf_rec['file_permissions'], base=8)
+ mode = int(cf_rec['file_permissions'], base=8)
try:
uid = pwd.getpwnam(cf_rec['file_owner'])[2]
except:
- logger.log('conf_files: cannot find user %s -- %s not updated'%(cf_rec['file_owner'], dest))
+ logger.log('conf_files: cannot find user %s -- %s not updated'
+ %(cf_rec['file_owner'], dest))
return
try:
gid = grp.getgrnam(cf_rec['file_group'])[2]
except:
- logger.log('conf_files: cannot find group %s -- %s not updated'%(cf_rec['file_group'], dest))
+ logger.log('conf_files: cannot find group %s -- %s not updated'
+ %(cf_rec['file_group'], dest))
return
url = 'https://%s/%s' % (self.config.PLC_BOOT_HOST, cf_rec['source'])
# set node_id at the end of the request - hacky
if tools.node_id():
- if url.find('?') >0: url += '&'
- else: url += '?'
+ if url.find('?') > 0:
+ url += '&'
+ else:
+ url += '?'
url += "node_id=%d"%tools.node_id()
else:
- logger.log('conf_files: %s -- WARNING, cannot add node_id to request'%dest)
+ logger.log('conf_files: %s -- WARNING, cannot add node_id to request'
+ % dest)
try:
logger.verbose("conf_files: retrieving URL=%s"%url)
contents = curlwrapper.retrieve(url, self.config.cacert)
- except xmlrpclib.ProtocolError as e:
+ except xmlrpc.client.ProtocolError as e:
logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url))
return
if not cf_rec['always_update'] and sha(contents).digest() == self.checksum(dest):
return
if self.system(cf_rec['preinstall_cmd']):
self.system(err_cmd)
- if not cf_rec['ignore_cmd_errors']: return
+ if not cf_rec['ignore_cmd_errors']:
+ return
logger.log('conf_files: installing file %s from %s' % (dest, url))
- try: os.makedirs(os.path.dirname(dest))
- except OSError: pass
- tools.write_file(dest, lambda f: f.write(contents), mode=mode, uidgid=(uid, gid))
- if self.system(cf_rec['postinstall_cmd']): self.system(err_cmd)
+ try:
+ os.makedirs(os.path.dirname(dest))
+ except OSError:
+ pass
+ tools.write_file(dest, lambda f: f.write(contents.decode()),
+ mode=mode, uidgid=(uid, gid))
+ if self.system(cf_rec['postinstall_cmd']):
+ self.system(err_cmd)
def run_once(self, data):
- if data.has_key("conf_files"):
- for f in data['conf_files']:
- try: self.update_conf_file(f)
- except: logger.log_exc("conf_files: failed to update conf_file")
+ if "conf_files" in data:
+ for file in data['conf_files']:
+ try:
+ self.update_conf_file(file)
+ except:
+ logger.log_exc("conf_files: failed to update conf_file")
else:
logger.log_missing_data("conf_files.run_once", 'conf_files')
-def start(): pass
+def start():
+ pass
-def GetSlivers(data, config = None, plc = None):
+
+def GetSlivers(data, config=None, plc=None):
logger.log("conf_files: Running.")
- cf = conf_files()
- cf.run_once(data)
+ instance = ConfFiles()
+ instance.run_once(data)
logger.log("conf_files: Done.")
-if __name__ == '__main__':
- import optparse
- parser = optparse.OptionParser()
- parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config', help='PLC configuration file')
- parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session', help='API session key (or file)')
- parser.add_option('--noscripts', action='store_true', dest='noscripts', default=False, help='Do not run pre- or post-install scripts')
- (options, args) = parser.parse_args()
+
+def main():
+ from argparse import ArgumentParser
+ from plcapi import PLCAPI
+
+ parser = ArgumentParser()
+ parser.add_argument('-f', '--config', action='store', dest='config',
+ default='/etc/planetlab/plc_config',
+ help='PLC configuration file')
+ parser.add_argument('-k', '--session', action='store', dest='session',
+ default='/etc/planetlab/session',
+ help='API session key (or file)')
+ parser.add_argument('--noscripts', action='store_true', dest='noscripts',
+ default=False,
+ help='Do not run pre- or post-install scripts')
+    parser.add_argument('--max-attempts', action='store', dest='max_attempts',
+                        default=10, type=int,
+                        help='Max number of attempts')
+    # type=int matters: argparse would otherwise hand back strings, breaking
+    # the attempts comparison and time.sleep() below; the 30s default for
+    # --period is an editorial assumption, the original defined none
+    parser.add_argument('--period', action='store', dest='period',
+                        default=30, type=int,
+                        help='Time in seconds to wait between attempts')
+ args = parser.parse_args()
# Load /etc/planetlab/plc_config
- config = Config(options.config)
+ config = Config(args.config)
# Load /etc/planetlab/session
- if os.path.exists(options.session):
- with open(options.session) as f:
- session = f.read().strip()
+ if os.path.exists(args.session):
+ with open(args.session) as feed:
+ session = feed.read().strip()
else:
- session = options.session
+ session = args.session
+
+ # loop until it succeeds once
+ # this is a change that comes with python3/fedora29 in late 2018,
+ # because although the conf_files service is defined to systemd
+ # as a dependency of the network, it triggers too early
+ # at a point where eth0 is not ready
# Initialize XML-RPC client
- from plcapi import PLCAPI
- plc = PLCAPI(config.plc_api_uri, config.cacert, auth = session)
+ attempts = 0
+ while True:
+ try:
+ plc = PLCAPI(config.plc_api_uri, config.cacert, auth=session)
+ data = plc.GetSlivers()
+ instance = ConfFiles(args.noscripts)
+ instance.run_once(data)
+ return 0
+ except Exception as exc:
+ logger.log_exc("Could not receive GetSlivers() from PLC")
+ attempts += 1
+ if attempts >= args.max_attempts:
+ return 1
+ logger.log("Waiting for {}s before trying again".format(args.period))
+ time.sleep(args.period)
- main = conf_files(options.noscripts)
- data = plc.GetSlivers()
- main.run_once(data)
+
+if __name__ == '__main__':
+ main()
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Parses the PLC configuration file /etc/planetlab/plc_config, which
# is bootstrapped by Boot Manager, but managed by us.
def __init__(self, file = "/etc/planetlab/plc_config"):
try:
- execfile(file, self.__dict__)
+ exec(compile(open(file).read(), file, 'exec'), self.__dict__)
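+            # py3 replacement for execfile(); passing the filename keeps tracebacks readable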
except:
- raise Exception, "Could not parse " + file
+ raise Exception("Could not parse " + file)
if int(self.PLC_API_PORT) == 443:
uri = "https://"
elif os.path.exists('/usr/boot/cacert.pem'):
self.cacert = '/usr/boot/cacert.pem'
else:
- raise Exception, "No boot server certificate bundle available"
+ raise Exception("No boot server certificate bundle available")
else:
uri = "http://"
self.cacert = None
if __name__ == '__main__':
from pprint import pprint
- for (k, v) in Config().__dict__.iteritems():
+ for (k, v) in Config().__dict__.items():
if k not in ['__builtins__']:
pprint ( (k, v), )
etc_shells.close()
if shell not in valid_shells:
etc_shells = open('/etc/shells', 'a')
- print >>etc_shells, shell
+ print(shell, file=etc_shells)
etc_shells.close()
import os
import os.path
import cgroups
+from functools import reduce
glo_coresched_simulate = False
joinpath = os.path.join
# allocate the cores to the slivers that have them reserved
# TODO: Need to sort this from biggest cpu_cores to smallest
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
# now check and see if any of our slices had the besteffort flag
# set
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
self.freezeUnits("freezer.state", freezeList)
def freezeUnits (self, var_name, freezeList):
- for (slicename, freeze) in freezeList.items():
+ for (slicename, freeze) in list(freezeList.items()):
try:
cgroup_path = cgroups.get_cgroup_path(slicename, 'freezer')
logger.verbose("CoreSched: setting freezer for {} to {} - path={} var={}"
break
if glo_coresched_simulate:
- print "F", cgroup
+ print("F", cgroup)
else:
with open(cgroup, "w") as f:
f.write(freeze)
cpus = default
if glo_coresched_simulate:
- print "R", cgroup + "/" + var_name, self.listToRange(cpus)
+ print("R", cgroup + "/" + var_name, self.listToRange(cpus))
else:
cgroups.write(cgroup, var_name, self.listToRange(cpus))
x = CoreSched()
- print "cgroups:", ",".join(x.get_cgroups())
+ print("cgroups:", ",".join(x.get_cgroups()))
- print "cpus:", x.listToRange(x.get_cpus())
- print "sibling map:"
+ print("cpus:", x.listToRange(x.get_cpus()))
+ print("sibling map:")
for item in x.get_cpus():
- print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]))
- print "mems:", x.listToRange(x.get_mems())
- print "cpu to memory map:"
+ print("mems:", x.listToRange(x.get_mems()))
+ print("cpu to memory map:")
for item in x.get_mems():
- print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]))
rspec_sl_test1 = {"cpu_cores": "1"}
rec_sl_test1 = {"_rspec": rspec_sl_test1}
# allocate the cores to the slivers that have them reserved
# TODO: Need to sort this from biggest cpu_cores to smallest
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
# now check and see if any of our slices had the besteffort flag
# set
- for name, rec in slivers.iteritems():
+ for name, rec in slivers.items():
rspec = rec["_rspec"]
cores = rspec.get(self.slice_attr_name, 0)
(cores, bestEffort) = self.decodeCoreSpec(cores)
cpus = default
if glo_coresched_simulate:
- print "R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus)
+ print("R", "/dev/cgroup/" + cgroup + "/" + var_name, self.listToRange(cpus))
else:
with opwn("/dev/cgroup/{}/{}".format(cgroup, var_name), "w") as f:
f.write( self.listToRange(cpus) + "\n" )
os.makedirs("/etc/vservers/.defaults/cgroup")
if glo_coresched_simulate:
- print "RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus)
+ print("RDEF", "/etc/vservers/.defaults/cgroup/" + var_name, self.listToRange(cpus))
else:
with open("/etc/vservers/.defaults/cgroup/{}".format(var_name), "w") as f:
f.write( self.listToRange(cpus) + "\n" )
x = CoreSched()
- print "cgroups:", ",".join(x.get_cgroups())
+ print("cgroups:", ",".join(x.get_cgroups()))
- print "cpus:", x.listToRange(x.get_cpus())
- print "sibling map:"
+ print("cpus:", x.listToRange(x.get_cpus()))
+ print("sibling map:")
for item in x.get_cpus():
- print " ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.cpu_siblings.get(item, [])]))
- print "mems:", x.listToRange(x.get_mems())
- print "cpu to memory map:"
+ print("mems:", x.listToRange(x.get_mems()))
+ print("cpu to memory map:")
for item in x.get_mems():
- print " ", item, ",".join([str(y) for y in x.mems_map.get(item, [])])
+ print(" ", item, ",".join([str(y) for y in x.mems_map.get(item, [])]))
rspec_sl_test1 = {"cpu_cores": "1"}
rec_sl_test1 = {"_rspec": rspec_sl_test1}
from subprocess import PIPE, Popen
from select import select
-import xmlrpclib
+import xmlrpc.client
import signal
import os
command += ('--connect-timeout', str(timeout))
command += (url, )
if verbose:
- print 'Invoking ', command
- if postdata: print 'with postdata=', postdata
+ print('Invoking ', command)
+ if postdata: print('with postdata=', postdata)
p = Sopen(command , stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
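+    # pipes are binary by default under python3: postdata must be bytes here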
if postdata: p.stdin.write(postdata)
p.stdin.close()
if rc != 0:
# when this triggers, the error sometimes doesn't get printed
logger.log ("curlwrapper: retrieve, got stderr <%s>"%err)
- raise xmlrpclib.ProtocolError(url, rc, err, postdata)
+ raise xmlrpc.client.ProtocolError(url, rc, err, postdata)
else:
return data
import sys
-import cPickle
+import pickle
import threading
import time
'net_i2_max_rate': 8,
'net_share': 1,
}
-LOANABLE_RESOURCES = MINIMUM_ALLOCATION.keys()
+LOANABLE_RESOURCES = list(MINIMUM_ALLOCATION.keys())
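+# each loan is a (target_slice, resource_name, amount) triple, e.g.
+# ('princeton_s1', 'net_share', 1) -- slice name illustrative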
DB_FILE = '/var/lib/nodemanager/database.pickle'
* and variable resid_rspec, which is the amount of resources the sliver
has after giving out loans but not receiving any."""
slivers = {}
- for name, rec in self.iteritems():
+ for name, rec in self.items():
if 'rspec' in rec:
rec['_rspec'] = rec['rspec'].copy()
slivers[name] = rec
- for rec in slivers.itervalues():
+ for rec in slivers.values():
eff_rspec = rec['_rspec']
resid_rspec = rec['rspec'].copy()
for target, resource_name, amount in rec.get('_loans', []):
if rec['timestamp'] < self._min_timestamp: return
name = rec['name']
old_rec = self.get(name)
- if old_rec == None: self[name] = rec
+        if old_rec is None:
+ self[name] = rec
elif rec['timestamp'] > old_rec['timestamp']:
- for key in old_rec.keys():
- if not key.startswith('_'): del old_rec[key]
+ for key in list(old_rec.keys()):
+ if not key.startswith('_'):
+ del old_rec[key]
old_rec.update(rec)
def set_min_timestamp(self, ts):
We use it to determine if a record is stale.
This method should be called whenever new GetSlivers() data comes in."""
self._min_timestamp = ts
- for name, rec in self.items():
+ for name, rec in list(self.items()):
if rec['timestamp'] < ts: del self[name]
def sync(self):
# delete expired records
now = time.time()
- for name, rec in self.items():
+ for name, rec in list(self.items()):
if rec.get('expires', now) < now: del self[name]
self._compute_effective_rspecs()
if name not in self:
logger.verbose("database: sync : ensure_destroy'ing %s"%name)
account.get(name).ensure_destroyed()
- for name, rec in self.iteritems():
+ for name, rec in self.items():
# protect this; if anything fails for a given sliver
# we still need the other ones to be handled
try:
while True:
db_lock.acquire()
while not dump_requested: db_cond.wait()
- db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL)
+ db_pickle = pickle.dumps(db, pickle.HIGHEST_PROTOCOL)
dump_requested = False
db_lock.release()
try:
- tools.write_file(DB_FILE, lambda f: f.write(db_pickle))
+ tools.write_file(
+ DB_FILE, lambda f: f.write(db_pickle), binary=True)
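+            # db_pickle is bytes, hence binary mode when writing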
logger.log_database(db)
except:
logger.log_exc("database.start: failed to pickle/dump")
global db
try:
-        f = open(DB_FILE)
+        f = open(DB_FILE, "rb")  # pickle.load needs a binary file under python3
- try: db = cPickle.load(f)
+ try: db = pickle.load(f)
finally: f.close()
except IOError:
logger.log ("database: Could not load %s -- starting from a fresh database"%DB_FILE)
+++ /dev/null
-nodemanager (@VERSION@) UNRELEASED; urgency=low
-
- * Initial release.
-
- -- Thierry Parmentelat <thierry.parmentelat@inria.fr> @DATE@ +0000
+++ /dev/null
-Source: nodemanager
-Maintainer: Thierry Parmentelat <Thierry.Parmentelat@inria.fr>
-Section: misc
-Priority: optional
-Standards-Version: 3.9.2
-# hopefully we do not need manifold at build-time
-Build-Depends: devscripts, debhelper (>=7.0.50~), debconf, dpatch, python-setuptools, make
-X-Python-Version: >= 2.7
-
-Package: nodemanager-lib
-Architecture: any
-# xxx sliceimage-%{slicefamily} not mentioned here yet
-Depends: python, python-curl, gnupg, pyplnet >= 4.3, plnode-utils
-Description: Generic nodemanager framework
-
-Package: nodemanager-lxc
-Architecture: any
-# xxx lxc-sliceimage not mentioned here yet
-Depends: libvirt, libvirt-python, openvswitch, python-inotify, nodemanager-lib,
-Description: Nodemanager variant for lxc-containers
-
+++ /dev/null
-# OK so this does not work and won't work properly until the whole module/packaging
-# business is reviewed from scratch even on rpm
-# we would ideally need to
-# . have the python code installed at the standard location
-# and imported using import nodemanager.<> or import nodemanager.plugins.<>
-# . review the plugin loading mechanism to probe the contents of plugins at run-time from this location
-# . remove as much as we can from Makefile and put it in setup.py
-# so this is way more than we are ready to invest in this module ...
-###
-# this for now needs to be kept in sync with the toplevel specfile
-# also the rpm packaging goes in /usr/share/NodeManager but
-# under debian it seems like setup.py does not honor --install-purelib and all this, so...
-usr/lib*/python*/dist-packages/account.*
-usr/lib*/python*/dist-packages/api.*
-usr/lib*/python*/dist-packages/api_calls.*
-usr/lib*/python*/dist-packages/bwmon.*
-usr/lib*/python*/dist-packages/conf_files.*
-usr/lib*/python*/dist-packages/config.*
-usr/lib*/python*/dist-packages/controller.*
-usr/lib*/python*/dist-packages/curlwrapper.*
-usr/lib*/python*/dist-packages/database.*
-usr/lib*/python*/dist-packages/initscript.*
-usr/lib*/python*/dist-packages/iptables.*
-usr/lib*/python*/dist-packages/logger.*
-usr/lib*/python*/dist-packages/net.*
-usr/lib*/python*/dist-packages/nodemanager.*
-usr/lib*/python*/dist-packages/plcapi.*
-usr/lib*/python*/dist-packages/safexmlrpc.*
-usr/lib*/python*/dist-packages/slivermanager.*
-usr/lib*/python*/dist-packages/ticket.*
-usr/lib*/python*/dist-packages/tools.*
-usr/lib*/python*/dist-packages/plugins/__init__.*
-usr/lib*/python*/dist-packages/plugins/codemux.*
-usr/lib*/python*/dist-packages/plugins/hostmap.*
-usr/lib*/python*/dist-packages/plugins/interfaces.*
-usr/lib*/python*/dist-packages/plugins/omf_resctl.*
-usr/lib*/python*/dist-packages/plugins/privatebridge.*
-usr/lib*/python*/dist-packages/plugins/rawdisk.*
-usr/lib*/python*/dist-packages/plugins/reservation.*
-usr/lib*/python*/dist-packages/plugins/sfagids.*
-usr/lib*/python*/dist-packages/plugins/sliverauth.*
-usr/lib*/python*/dist-packages/plugins/specialaccounts.*
-usr/lib*/python*/dist-packages/plugins/syndicate.*
-usr/lib*/python*/dist-packages/plugins/vsys.*
-usr/lib*/python*/dist-packages/plugins/vsys_privs.*
-usr/share/NodeManager/sliver-initscripts/
-usr/share/NodeManager/sliver-systemd/
-usr/bin/forward_api_calls
-# not quite sure if debian or ubuntu has / plans to have / systemd at some point ?
-etc/init.d/
-etc/logrotate.d/nodemanager
-var/lib/nodemanager/
-etc/sysconfig/nodemanager
+++ /dev/null
-#!/bin/bash
-set -x
-echo "No postinsatll script for nodemanager-lib yet"
+++ /dev/null
-# see also nodemanager-lib.install
-usr/lib*/python*/dist-packages/sliver_libvirt.*
-usr/lib*/python*/dist-packages/sliver_lxc.*
-usr/lib*/python*/dist-packages/cgroups.*
-usr/lib*/python*/dist-packages/coresched_lxc.*
+++ /dev/null
-#!/usr/bin/make -f
-# -*- makefile -*-
-
-%:
- dh $@ --with python2 --buildsystem=python_distutils
-
+++ /dev/null
-3.0 (quilt)
-#!/usr/bin/env python
+#!/usr/bin/env python3
# PATHS to be set by the build system
# this is in ..
for func in dir(api_calls):
try:
f = api_calls.__getattribute__(func)
- if 'group' in f.__dict__.keys():
+ if 'group' in f.__dict__:
api_function_list += [api_calls.__getattribute__(func)]
except:
pass
self.initscript = new_initscript
code = self.initscript
sliver_initscript = "/vservers/%s/etc/rc.d/init.d/vinit.slice" % self.name
- if tools.replace_file_with_string(sliver_initscript, code, remove_if_empty=True, chmod=0755):
+ if tools.replace_file_with_string(sliver_initscript, code, remove_if_empty=True, chmod=0o755):
if code:
logger.log("Initscript: %s: Installed new initscript in %s" % (self.name, sliver_initscript))
if self.is_running():
# install in sliver
with open(vinit_source) as f:
code = f.read()
- if tools.replace_file_with_string(vinit_script, code, chmod=0755):
+ if tools.replace_file_with_string(vinit_script, code, chmod=0o755):
logger.log("Initscript: %s: installed generic vinit rc script" % self.name)
# create symlink for runlevel 3
if not os.path.islink(enable_link):
# install in sliver
with open(vinit_source) as f:
code = f.read()
- if tools.replace_file_with_string(vinit_unit_file, code, chmod=0755):
+ if tools.replace_file_with_string(vinit_unit_file, code, chmod=0o755):
logger.log("Initscript: %s: installed vinit.service unit file" % self.name)
# create symlink for enabling this unit
if not os.path.islink(enable_link):
+++ /dev/null
-#!/bin/bash
-#
-# conf_files Updates node configuration files at startup
-#
-# chkconfig: 3 20 80
-# description: Updates node configuration files at startup
-#
-
-# Source function library.
-. /etc/init.d/functions
-
-case "$1" in
- start|restart|reload)
- shift
- action $"Updating node configuration files: " python /usr/share/NodeManager/conf_files.py $*
- ;;
- *)
- echo $"Usage: $0 {start|restart}"
- exit 1
-esac
-
-exit 0
+++ /dev/null
-#!/bin/bash
-#
-# fuse-pl Start FUSE support on PlanetLab
-#
-# chkconfig: 3 87 27
-# description: Start FUSE support on PlanetLab
-#
-
-# Source function library.
-. /etc/init.d/functions
-
-: ${UTIL_VSERVER_VARS:=`echo /usr/lib*/util-vserver/util-vserver-vars`}
-test -e "$UTIL_VSERVER_VARS" || {
- echo "Can not find util-vserver installation; aborting..."
- exit 1
-}
-. "$UTIL_VSERVER_VARS"
-
-
-PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
-MOUNTPOINT=/sys/fs/fuse/connections
-
-case "$1" in
- start|restart|reload)
- shift
- # stolen from the /etc/init.d/fuse that comes with the fuse source
- if ! grep -qw fuse /proc/filesystems; then
- echo -n "Loading fuse module"
- if ! modprobe fuse >/dev/null 2>&1; then
- echo " failed!"
- exit 0
- else
- echo "."
- fi
- fi
- if grep -qw fusectl /proc/filesystems && \
- ! grep -qw $MOUNTPOINT /proc/mounts; then
- echo -n "Mounting fuse control filesystem"
- if ! mount -t fusectl fusectl $MOUNTPOINT >/dev/null 2>&1; then
- echo " failed!"
- exit 1
- else
- echo "."
- fi
- fi
- # end stolen code
- #
- # To enable FUSE for existing slices, copy the FUSE device
- # node into the slice
- if [ ! -e /dev/fuse ]; then
- echo "No FUSE device! Exiting."
- exit -1
- fi
- for slice in `ls $__CONFDIR`; do
- cp -a /dev/fuse $__DEFAULT_VSERVERDIR/$slice/dev/ > /dev/null 2>&1
- done
- ;;
- stop)
- ;;
- *)
- echo $"Usage: $0 {start|restart|stop}"
- exit 1
-esac
-
-exit 0
+++ /dev/null
-#!/bin/bash
-#
-# nm Starts and stops Node Manager daemon
-#
-# chkconfig: 3 97 26
-# description: Starts and stops Node Manager daemon
-#
-
-# Source function library.
-. /etc/init.d/functions
-
-[ -f /etc/sysconfig/nodemanager ] && . /etc/sysconfig/nodemanager
-
-# Wait for libvirt to finish initializing
-sleep 10
-
-options=${OPTIONS-"-d"}
-# turn on verbosity
-verboseoptions=${DEBUGOPTIONS-"-v -d"}
-# debug mode is interactive, and has faster period
-# run in deamon mode with service nm restardebug -d
-debugoptions=${DEBUGOPTIONS-"-v -p 30 -r 15"}
-
-nodemanager=${NODEMANAGER-"python /usr/share/NodeManager/nodemanager.py"}
-prog="Node Manager"
-pidfile=${PIDFILE-/var/run/nodemanager.pid}
-
-RETVAL=0
-
-function start() {
- action $"Starting $prog: " daemon --pidfile=$pidfile --check=nodemanager $nodemanager "$@"
-}
-
-function stop() {
- action $"Stopping $prog: " killproc -p $pidfile nodemanager
-}
-
-case "$1" in
- start)
- start $options
- ;;
- stop)
- stop
- ;;
- status)
- status -p $pidfile nodemanager
- RETVAL=$?
- ;;
- restart|reload)
- shift
- stop
- start $options "$@"
- ;;
- condrestart)
- shift
- [ -f ${pidfile} ] && { stop; start $options "$@"; }
- ;;
- restartverbose)
- shift
- stop
- $nodemanager $verboseoptions "$@"
- ;;
- restartdebug)
- shift
- stop
- echo "Restarting with $debugoptions $@ .."
- $nodemanager $debugoptions "$@"
- ;;
- *)
- echo $"Usage: $0 {start|stop|status|restart|condrestart|restartdebug [-d]}"
- exit 1
- ;;
-esac
-
-exit $RETVAL
-#!/usr/bin/python -tt
+#!/usr/bin/python3 -tt
#
# Author: Daniel Hokka Zakrisson <daniel@hozac.com>
+"""
+A very simple logger that tries to be concurrency-safe.
+"""
-"""A very simple logger that tries to be concurrency-safe."""
+# pylint: disable=c0111
-import os, sys
+import sys
+import os
import time
import traceback
import subprocess
def set_level(level):
global LOG_LEVEL
- try:
- assert level in [LOG_NONE, LOG_NODE, LOG_VERBOSE]
+ if level in (LOG_NONE, LOG_NODE, LOG_VERBOSE):
LOG_LEVEL = level
- except:
- logger.log("Failed to set LOG_LEVEL to %s" % level)
+ else:
+ log("Failed to set LOG_LEVEL to %s" % level)
def verbose(msg):
log('(v) ' + msg, LOG_VERBOSE)
def log(msg, level=LOG_NODE):
- """Write <msg> to the log file if level >= current log level (default LOG_NODE)."""
+ """
+ Write <msg> to the log file if level >= current log level (default LOG_NODE).
+ """
if level > LOG_LEVEL:
return
try:
- fd = os.open(LOG_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
+ fd = os.open(LOG_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0o600)
if not msg.endswith('\n'):
msg += '\n'
- os.write(fd, '%s: %s' % (time.asctime(time.gmtime()), msg))
+ to_write = '%s: %s' % (time.asctime(time.gmtime()), msg)
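+        # os.write() takes bytes under python3, hence the encode()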
+ os.write(fd, to_write.encode())
os.close(fd)
except OSError:
sys.stderr.write(msg)
def add(self, c):
self.buffer += c
- if c=='\n':
+ if c == '\n':
self.flush()
def flush(self):
trigger=time.time()+timeout
result = False
try:
- child = subprocess.Popen(command, bufsize=1,
- stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
+ child = subprocess.Popen(
+ command, bufsize=1,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ close_fds=True,
+ universal_newlines=True)
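+        # universal_newlines=True keeps the pipes in text mode, so Buffer.add()
+        # below receives str rather than bytes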
buffer = Buffer()
while True:
# see if anything can be read within the poll interval
-"""network configuration"""
+"""
+network configuration
+"""
# system provided modules
-import os, string, time, socket
+import os
# PlanetLab system modules
import sioc, plnet
# local modules
import plnode.bwlimit as bwlimit
-import logger, iptables, tools
+import logger
+import iptables
+import tools
# we can't do anything without a network
-priority=1
+priority = 1
dev_default = tools.get_default_if()
# query running network interfaces
devs = sioc.gifconf()
-    ips = dict(zip(devs.values(), devs.keys()))
+    ips = {ip: dev for dev, ip in devs.items()}
macs = {}
for dev in devs:
macs[sioc.gifhwaddr(dev).lower()] = dev
# Get interface name preferably from MAC address, falling
# back on IP address.
hwaddr=interface['mac']
- if hwaddr <> None: hwaddr=hwaddr.lower()
+        if hwaddr is not None: hwaddr = hwaddr.lower()
if hwaddr in macs:
dev = macs[interface['mac']]
elif interface['ip'] in ips:
# query running network interfaces
devs = sioc.gifconf()
-    ips = dict(zip(devs.values(), devs.keys()))
+    ips = {ip: dev for dev, ip in devs.items()}
macs = {}
for dev in devs:
macs[sioc.gifhwaddr(dev).lower()] = dev
# Get interface name preferably from MAC address, falling
# back on IP address.
hwaddr=interface['mac']
- if hwaddr <> None: hwaddr=hwaddr.lower()
+        if hwaddr is not None: hwaddr = hwaddr.lower()
if hwaddr in macs:
dev = macs[interface['mac']]
elif interface['ip'] in ips:
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Useful information can be found at https://svn.planet-lab.org/wiki/NodeManager
#
# Copyright (C) 2008 The Trustees of Princeton University
-"""Node Manager"""
+"""
+Node Manager
+"""
-import optparse
import time
-import xmlrpclib
-import socket
import os
import sys
import glob
import pickle
import random
-import resource
+from argparse import ArgumentParser
import logger
import tools
def __init__ (self):
- parser = optparse.OptionParser()
- parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False,
- help='run daemonized')
- parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config',
- help='PLC configuration file')
- parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session',
- help='API session key (or file)')
- parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period,
- help='Polling interval (sec) - default {}'
- .format(NodeManager.default_period))
- parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random,
- help='Range for additional random polling interval (sec) -- default {}'
- .format(NodeManager.default_random))
- parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
- help='more verbose log')
- parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH,
- help='Path to plugins directory')
-
- # NOTE: BUG the 'help' for this parser.add_option() wont list plugins from the --path argument
- parser.add_option('-m', '--module', action='store', dest='user_module', default='', help='run a single module')
- (self.options, args) = parser.parse_args()
-
- if len(args) != 0:
- parser.print_help()
- sys.exit(1)
+ parser = ArgumentParser()
+ parser.add_argument(
+ '-d', '--daemon', action='store_true', dest='daemon',
+ default=False,
+ help='run daemonized')
+ parser.add_argument(
+ '-f', '--config', action='store', dest='config',
+ default='/etc/planetlab/plc_config',
+ help='PLC configuration file')
+ parser.add_argument(
+ '-k', '--session', action='store', dest='session',
+ default='/etc/planetlab/session',
+ help='API session key (or file)')
+ parser.add_argument(
+ '-p', '--period', action='store', dest='period',
+ default=NodeManager.default_period,
+ help='Polling interval (sec) - default {}'
+ .format(NodeManager.default_period))
+ parser.add_argument(
+ '-r', '--random', action='store', dest='random',
+ default=NodeManager.default_random,
+ help='Range for additional random polling interval (sec) -- default {}'
+ .format(NodeManager.default_random))
+ parser.add_argument(
+ '-v', '--verbose', action='store_true', dest='verbose',
+ default=False,
+ help='more verbose log')
+ parser.add_argument(
+ '-P', '--path', action='store', dest='path',
+ default=NodeManager.PLUGIN_PATH,
+ help='Path to plugins directory')
+
+ parser.add_argument(
+ '-m', '--module', action='store', dest='user_module',
+ default='',
+ help='run a single module')
+ self.options = parser.parse_args()
# determine the modules to be run
self.modules = NodeManager.core_modules
# Deal with plugins directory
if os.path.exists(self.options.path):
sys.path.append(self.options.path)
- plugins = [ os.path.split(os.path.splitext(x)[0])[1]
- for x in glob.glob( os.path.join(self.options.path, '*.py') )
- if not x.endswith("/__init__.py")
- ]
+ plugins = [
+ os.path.split(os.path.splitext(x)[0])[1]
+ for x in glob.glob( os.path.join(self.options.path, '*.py') )
+ if not x.endswith("/__init__.py")
+ ]
self.modules += plugins
if self.options.user_module:
assert self.options.user_module in self.modules
def GetSlivers(self, config, plc):
- """Retrieves GetSlivers at PLC and triggers callbacks defined in modules/plugins"""
+ """
+ Retrieve GetSlivers at PLC and trigger callbacks defined in modules/plugins
+ """
try:
logger.log("nodemanager: Syncing w/ PLC")
# retrieve GetSlivers from PLC
# use the magic 'default' slice to retrieve system-wide defaults
self.getPLCDefaults(data, config)
# tweak the 'vref' attribute from GetSliceFamily
- self.setSliversVref (data)
+ self.setSliversVref(data)
# dump it too, so it can be retrieved later in case of comm. failure
self.dumpSlivers(data)
# log it for debug purposes, no matter what verbose is
last_data = data
except:
logger.log_exc("nodemanager: failed in GetSlivers")
- # XXX So some modules can at least boostrap.
+            # XXX So some modules can at least bootstrap.
logger.log("nodemanager: Can't contact PLC to GetSlivers(). Continuing.")
data = {}
# for modules that request it though the 'persistent_data' property
last_data = self.loadSlivers()
+
# Invoke GetSlivers() functions from the callback modules
for module in self.loaded_modules:
logger.verbose('nodemanager: triggering {}.GetSlivers'.format(module.__name__))
for slice in data.get('slivers'):
if slice['name'] == config.PLC_SLICE_PREFIX + "_default":
attr_dict = {}
- for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value']
- if len(attr_dict):
- logger.verbose("nodemanager: Found default slice overrides.\n {}".format(attr_dict))
+ for attr in slice.get('attributes'):
+ attr_dict[attr['tagname']] = attr['value']
+ if attr_dict:
+ logger.verbose("nodemanager: Found default slice overrides.\n {}"
+ .format(attr_dict))
config.OVERRIDES = attr_dict
return
# NOTE: if an _default slice existed, it would have been found above and
# the routine would return. Thus, if we've gotten here, then no default
# slice is bound to this node.
- if 'OVERRIDES' in dir(config): del config.OVERRIDES
+ if 'OVERRIDES' in dir(config):
+ del config.OVERRIDES
def setSliversVref (self, data):
if att['tagname'] == 'vref':
att['value'] = slicefamily
continue
- sliver['attributes'].append({ 'tagname':'vref', 'value':slicefamily})
- except:
- logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",
- name=sliver['name'])
+ sliver['attributes'].append(
+ {'tagname': 'vref', 'value': slicefamily})
+ except Exception:
+ logger.log_exc(
+ "nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",
+ name=sliver['name'])
+
def dumpSlivers (self, slivers):
- f = open(NodeManager.DB_FILE, "w")
- logger.log ("nodemanager: saving successfully fetched GetSlivers in {}".format(NodeManager.DB_FILE))
- pickle.dump(slivers, f)
- f.close()
+ with open(NodeManager.DB_FILE, "wb") as feed:
+ logger.log ("nodemanager: saving successfully fetched GetSlivers in {}"
+ .format(NodeManager.DB_FILE))
+ pickle.dump(slivers, feed)
+
def loadSlivers (self):
try:
- f = open(NodeManager.DB_FILE, "r+")
- logger.log("nodemanager: restoring latest known GetSlivers from {}".format(NodeManager.DB_FILE))
- slivers = pickle.load(f)
- f.close()
+ with open(NodeManager.DB_FILE, "rb+") as feed:
+ logger.log("nodemanager: restoring latest known GetSlivers from {}"
+ .format(NodeManager.DB_FILE))
+ slivers = pickle.load(feed)
return slivers
except:
- logger.log("Could not restore GetSlivers from {}".format(NodeManager.DB_FILE))
+ logger.log("Could not restore GetSlivers from {}"
+ .format(NodeManager.DB_FILE))
return {}
def run(self):
tools.daemon()
# set log level
- if (self.options.verbose):
+ if self.options.verbose:
logger.set_level(logger.LOG_VERBOSE)
tools.init_signals()
try:
other_pid = tools.pid_file()
- if other_pid != None:
- print """There might be another instance of the node manager running as pid {}.
-If this is not the case, please remove the pid file {}. -- exiting""".format(other_pid, tools.PID_FILE)
+ if other_pid is not None:
+ print("""There might be another instance of the node manager running as pid {}.
+If this is not the case, please remove the pid file {}. -- exiting"""
+ .format(other_pid, tools.PID_FILE))
return
- except OSError, err:
- print "Warning while writing PID file:", err
+ except OSError as err:
+ print("Warning while writing PID file:", err)
# load modules
self.loaded_modules = []
try:
m = __import__(module)
logger.verbose("nodemanager: triggering {}.start".format(m.__name__))
- try: m.start()
- except: logger.log("WARNING: module {} did not start".format(m.__name__))
+ try:
+ m.start()
+ except Exception:
+ logger.log("WARNING: module {} did not start".format(m.__name__))
self.loaded_modules.append(m)
- except:
+ except Exception:
if module not in NodeManager.core_modules:
- logger.log_exc ("ERROR while loading module {} - skipped".format(module))
+ logger.log_exc("ERROR while loading module {} - skipped"
+ .format(module))
else:
logger.log("FATAL : failed to start core module {}".format(module))
sys.exit(1)
# sort on priority (lower first)
- def module_priority (m):
- return getattr(m, 'priority', NodeManager.default_priority)
+ def module_priority(module):
+ return getattr(module, 'priority', NodeManager.default_priority)
self.loaded_modules.sort(key=module_priority)
logger.log('ordered modules:')
for module in self.loaded_modules:
- logger.log ('{}: {}'.format(getattr(module, 'priority', NodeManager.default_priority),
- module.__name__))
+ logger.log('{}: {}'
+ .format(getattr(module, 'priority',
+ NodeManager.default_priority),
+ module.__name__))
# Load /etc/planetlab/session
if os.path.exists(self.options.session):
- with open(self.options.session) as f:
- session = f.read().strip()
+ with open(self.options.session) as feed:
+ session = feed.read().strip()
else:
session = None
#check auth
logger.log("nodemanager: Checking Auth.")
- while plc.check_authentication() != True:
+ while not plc.check_authentication():
try:
plc.update_session()
logger.log("nodemanager: Authentication Failure. Retrying")
- except Exception as e:
- logger.log("nodemanager: Retry Failed. ({}); Waiting..".format(e))
+ except Exception as exc:
+ logger.log("nodemanager: Retry Failed. ({}); Waiting.."
+ .format(exc))
time.sleep(iperiod)
logger.log("nodemanager: Authentication Succeeded!")
time.sleep(delay)
except SystemExit:
pass
- except:
+ except:
logger.log_exc("nodemanager: failed in run")
def run():
%define slicefamily %{pldistro}-%{distroname}-%{_arch}
%define name nodemanager-lib
-%define version 5.2
-%define taglevel 20
+%define version 7.0
+%define taglevel 0
%define release %{taglevel}%{?pldistro:.%{pldistro}}%{?date:.%{date}}
%define _unpackaged_files_terminate_build 0
##############################
-# use initscripts or systemd unit files to start installed services
-%if "%{distro}" == "Fedora" && "%{distrorelease}" >= "18"
+# use systemd unit files only to start installed services
%define make_options WITH_SYSTEMD=true
-%define initdir /usr/lib/systemd/system
+%define systemddir /usr/lib/systemd/system
%define build_lxc 1
-%else
-%define make_options WITH_INIT=true
-%define initdir %{_initrddir}
-%define build_vs 1
-%endif
##############################
Summary: PlanetLab Node Manager Library
#BuildArch: noarch
# make sure we can invoke systemctl in post install script
-%if "%{initdir}" != "%{_initrddir}"
Requires: systemd
-%endif
# Uses function decorators
-Requires: python >= 2.7
+Requires: python3
# connecting PLC
-Requires: python-pycurl
+Requires: python3-pycurl
# Signed tickets
Requires: gnupg
# sioc/plnet
##############################
%post
-########## traditional init
-%if "%{initdir}" == "%{_initrddir}"
-##########
-chkconfig --add conf_files
-chkconfig conf_files on
-chkconfig --add nm
-chkconfig nm on
-chkconfig --add fuse-pl
-chkconfig fuse-pl on
-if [ "$PL_BOOTCD" != "1" ] ; then
- service nm restart
- service fuse-pl restart
-fi
-##########
-%else
-########## systemd
systemctl enable nm.service
systemctl enable conf_files.service
# empty
systemctl restart nm.service
# systemctl restart fuse-pl.service
fi
-##########
-%endif
##############################
%preun
# 0 = erase, 1 = upgrade
-########## traditional init
-%if "%{initdir}" == "%{_initrddir}"
-##########
-if [ $1 -eq 0 ] ; then
- chkconfig fuse-pl off
- chkconfig --del fuse-pl
- chkconfig nm off
- chkconfig --del nm
- chkconfig conf_files off
- chkconfig --del conf_files
-fi
-##########
-%else
-########## systemd
if [ $1 -eq 0 ] ; then
# systemctl disable fuse-pl.service
systemctl disable conf_files.service
systemctl disable nm.service
fi
-##########
-%endif
##############################
%clean
%{_datadir}/NodeManager/sliver-initscripts/
%{_datadir}/NodeManager/sliver-systemd/
%{_bindir}/forward_api_calls
-%{initdir}/
+%{systemddir}/
%{_sysconfdir}/logrotate.d/nodemanager
/var/lib/nodemanager/
%config(noreplace) /etc/sysconfig/nodemanager
Group: System Environment/Daemons
# we use libvirt
Requires: libvirt
-Requires: libvirt-python
+Requires: python3-libvirt
# cgroups.py needs this
-Requires: python-inotify
+Requires: python3-inotify
# the common package for nodemanager
Requires: nodemanager-lib = %{version}
# the lxc-specific tools for using slice images
# old name, when all came as a single package with vserver wired in
Obsoletes: NodeManager
-# for nodeupdate
+# for nodeupdate
Provides: nodemanager
# our interface to the vserver patch
##############################
%changelog
-* Mon Jan 07 2019 Thierry <Parmentelat> - nodemanager-5.2-20
+* Mon Jan 07 2019 Thierry Parmentelat <thierry.parmentelat@inria.fr> - nodemanager-7.0-0
+- ported to python3
+- add a systemd dependency to network-online so the service won't start too early
+- only support systemd, removed init-oriented business
+- also removed debian-oriented business
+
+* Mon Jan 07 2019 Thierry Parmentelat <thierry.parmentelat@inria.fr> - nodemanager-5.2-20
- simply make conf_files.py executable, so that bootmanager can be py2/py3 compliant
* Sun Jul 10 2016 Thierry Parmentelat <thierry.parmentelat@sophia.inria.fr> - nodemanager-5.2-19
* Wed Oct 03 2007 Faiyaz Ahmed <faiyaza@cs.princeton.edu> .
- Switched to SVN.
-* Mon Nov 13 2006 Mark Huang <mlhuang@paris.CS.Princeton.EDU> -
+* Mon Nov 13 2006 Mark Huang <mlhuang@paris.CS.Princeton.EDU> -
- Initial build.
if isinstance(auth, (tuple, list)):
(self.node_id, self.key) = auth
self.session = None
- elif isinstance(auth, (str, unicode)):
+ elif isinstance(auth, str):
self.node_id = self.key = None
self.session = auth
else:
# Yes, the comments in the old implementation are
# misleading. Keys of dicts are not included in the
# hash.
- values += canonicalize(arg.values())
+ values += canonicalize(list(arg.values()))
else:
-            # We use unicode() instead of str().
+            # str() is fine here: under Python 3 all strings are unicode.
- values.append(unicode(arg))
+ values.append(str(arg))
return values
codemuxslices = {}
# XXX Hack for planetflow
- if slicesinconf.has_key("root"):
+ if "root" in slicesinconf:
_writeconf = False
else:
_writeconf = True
# Check to see if sliver is running. If not, continue
if slivermanager.is_running(sliver['name']):
# Check if new or needs updating
- if (sliver['name'] not in slicesinconf.keys()) \
+            if (sliver['name'] not in slicesinconf) \
or (params not in slicesinconf.get(sliver['name'], [])):
logger.log("codemux: Updating slice %s using %s" % \
(sliver['name'], params['host']))
f.write("* root 1080 %s\n" % Config().PLC_API_HOST)
# Sort items for like domains
for mapping in slivers:
- for (host, params) in mapping.iteritems():
+ for (host, params) in mapping.items():
if params['slice'] == "root": continue
f.write("%s %s %s %s\n" % (host, params['slice'], params['port'], params['ip']))
f.truncate()
def sortDomains(slivers):
'''Given a dict of {slice: {domainname, port}}, return array of slivers with lower order domains first'''
dnames = {} # {host: slice}
- for (slice, params) in slivers.iteritems():
+ for (slice, params) in slivers.items():
for mapping in params:
dnames[mapping['host']] = {"slice":slice, "port": mapping['port'], "ip": mapping['ip']}
- hosts = dnames.keys()
+ hosts = list(dnames.keys())
# sort by length
hosts.sort(key=str.__len__)
# longer first
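
The intent behind these comments (sort by length, longer first) is that more specific domains must be tried before their parents when walking the list. One way to express that ordering directly, with made-up hostnames:

    hosts = ["example.org", "a.example.org", "deep.a.example.org"]
    # longest names first, so 'deep.a.example.org' matches before 'example.org'
    hosts.sort(key=len, reverse=True)
    print(hosts)   # ['deep.a.example.org', 'a.example.org', 'example.org']
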
-#!/usr/bin/python
+#!/usr/bin/python3
""" DRL configurator. """
import os
import curlwrapper
import re
-import xmlrpclib
+import xmlrpc.client
try:
from hashlib import sha1 as sha
except ImportError:
import logger
import os
import curlwrapper
-import xmlrpclib
+import xmlrpc.client
try:
from hashlib import sha1 as sha
except ImportError:
url = mydict['url']
try:
contents = curlwrapper.retrieve(url)
- except xmlrpclib.ProtocolError as e:
+ except xmlrpc.client.ProtocolError as e:
logger.log('interfaces (%s): failed to retrieve %s' % (slicename, url))
continue
else:
logger.log('interfaces (%s): no DEVICE specified' % slicename)
continue
- for key, value in mydict.items():
+ for key, value in list(mydict.items()):
if key in ['bridge', 'vlan']:
continue
contents += '%s="%s"\n' % (key, value)
-#!/usr/bin/env python
+"""
+Private Bridge configurator.
+"""
-""" Private Bridge configurator. """
-
-import httplib
-import os
import select
-import shutil
import subprocess
import time
import tools
-from threading import Thread
import logger
-import tools
priority = 9
class OvsException (Exception) :
- def __init__ (self, message="no message"):
- self.message=message
- def __repr__ (self): return message
+ def __init__(self, message="no message"):
+ self.message = message
+ def __repr__(self):
+ return self.message
def start():
logger.log('private bridge plugin starting up...')
def log_call_read(command, timeout=logger.default_timeout_minutes*60, poll=1):
- message=" ".join(command)
+ message = " ".join(command)
logger.log("log_call: running command %s" % message)
logger.verbose("log_call: timeout=%r s" % timeout)
logger.verbose("log_call: poll=%r s" % poll)
- trigger=time.time()+timeout
+ trigger = time.time()+timeout
try:
- child = subprocess.Popen(command, bufsize=1,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
+ child = subprocess.Popen(
+ command, bufsize=1,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ close_fds=True,
+ universal_newlines=True)
stdout = ""
while True:
# see if anything can be read within the poll interval
- (r, w, x)=select.select([child.stdout], [], [], poll)
- if r: stdout = stdout + child.stdout.read(1)
+ (r, w, x) = select.select([child.stdout], [], [], poll)
+ if r:
+ stdout = stdout + child.stdout.read(1)
# is process over ?
returncode=child.poll()
# yes
return (returncode, stdout)
# child has failed
else:
- log("log_call:end command (%s) returned with code %d" %(message, returncode))
+ log("log_call:end command (%s) returned with code %d"
+ %(message, returncode))
return (returncode, stdout)
# no : still within timeout ?
if time.time() >= trigger:
child.terminate()
- logger.log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message, timeout))
+ logger.log("log_call:end terminating command (%s) - exceeded timeout %d s"
+ %(message, timeout))
return (-2, None)
break
except Exception as e:
return (-1, None)
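
For reference, a hypothetical call to the helper above; the command is illustrative, and the returned pair follows the (returncode, stdout) convention visible in the code:

    returncode, stdout = log_call_read(['ovs-vsctl', 'list-br'], timeout=60)
    if returncode == 0:
        bridges = stdout.split()
    else:
        logger.log("privatebridge: ovs-vsctl failed with code %r" % returncode)
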
### Thierry - 23 Sept 2014
-# regardless of this being shipped on lxc-only or on all nodes,
-# it is safer to check for the availability of the ovs-vsctl command and just print
+# regardless of this being shipped on lxc-only or on all nodes,
+# it is safer to check for the availability of the ovs-vsctl command and just print
# out a warning when it's not there, instead of a nasty traceback
def ovs_available ():
"return True if ovs-vsctl can be run"
if key:
args = args + ["options:key=" + str(key)]
- (returncode, stdout) = ovs_vsctl(args)
- if (returncode != 0): raise OvsException("add-port")
+ returncode, stdout = ovs_vsctl(args)
+ if (returncode != 0):
+ raise OvsException("add-port")
def ovs_delport(name, portname):
(returncode, stdout) = ovs_vsctl(["del-port", name, portname])
sliver_name = sliver['name']
# build a dict of attributes, because it's more convenient
- attributes={}
+ attributes = {}
for attribute in sliver['attributes']:
attributes[attribute['tagname']] = attribute['value']
logger.log("privatebridge: deleting unused bridge %s" % bridge_name)
ovs_delbridge(bridge_name)
-
-
-
-#!/usr/bin/python -tt
+#!/usr/bin/python3 -tt
# vim:set ts=4 sw=4 expandtab:
#
# NodeManager plugin to support mapping unused raw disks into a slice
for i in os.listdir("/sys/block"):
if not i.startswith("dm-"):
continue
- in_vg.extend(map(lambda x: x.replace("!", "/"),
- os.listdir("/sys/block/%s/slaves" % i)))
+        in_vg.extend([x.replace("!", "/")
+                      for x in os.listdir("/sys/block/%s/slaves" % i)])
# Read the list of partitions
with open("/proc/partitions") as partitions:
pat = re.compile("\s+")
except:
pass
try:
- os.makedirs(os.path.dirname(path), 0755)
+ os.makedirs(os.path.dirname(path), 0o755)
except:
pass
os.mknod(path, st.st_mode, st.st_rdev)
return ((int(time)+granularity/2)/granularity)*granularity
def clear_timers (self):
- for timer in self.timers.values():
+ for timer in list(self.timers.values()):
timer.cancel()
self.timers={}
timer.start()
def list_timers(self):
- timestamps=self.timers.keys()
+        timestamps = list(self.timers.keys())
timestamps.sort()
for timestamp in timestamps:
logger.log('reservation: TIMER armed for %s'%reservation.time_printable(timestamp))
@staticmethod
def lease_printable (lease):
- d=dict ( lease.iteritems())
+        d = dict(lease.items())
d['from']=reservation.time_printable(lease['t_from'])
-        d['until']=reservation.time_printable(lease['t_from'])
+        d['until'] = reservation.time_printable(lease['t_until'])
s=[]
-#!/usr/bin/python -tt
+#!/usr/bin/python3 -tt
# vim:set ts=4 sw=4 expandtab:
#
# NodeManager plugin for installing SFA GID's in slivers
trusted_gid_names.append(relative_filename)
gid_filename = trusted_certs_dir + os.sep + relative_filename
if verbose:
- print "Writing GID for %s as %s" % (gid.get_hrn(), gid_filename)
+ print("Writing GID for %s as %s" % (gid.get_hrn(), gid_filename))
gid.save_to_file(gid_filename, save_parents=True)
# remove old certs
for gid_name in all_gids_names:
if gid_name not in trusted_gid_names:
if verbose:
- print "Removing old gid ", gid_name
+ print("Removing old gid ", gid_name)
os.unlink(trusted_certs_dir + os.sep + gid_name)
-#!/usr/bin/python -tt
+#!/usr/bin/python3 -tt
# vim:set ts=4 sw=4 expandtab:
#
# NodeManager plugin for creating credentials in slivers
if not hmac:
# let python do its thing
random.seed()
- d = [random.choice(string.letters) for x in xrange(32)]
+        d = [random.choice(string.ascii_letters) for x in range(32)]
hmac = "".join(d)
SetSliverTag(plc, sliver['name'], 'hmac', hmac)
logger.log("sliverauth: %s: setting hmac" % sliver['name'])
path = '/vservers/%s/etc/planetlab' % sliver['name']
if os.path.exists(path):
keyfile = '%s/key' % path
- if (tools.replace_file_with_string(keyfile, hmac, chmod=0400)):
+ if (tools.replace_file_with_string(keyfile, hmac, chmod=0o400)):
logger.log ("sliverauth: (over)wrote hmac into %s " % keyfile)
# create the key if needed and returns the key contents
dotssh=os.path.dirname(keyfile)
# create dir if needed
if not os.path.isdir (dotssh):
- os.mkdir (dotssh, 0700)
+ os.mkdir (dotssh, 0o700)
logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), dotssh ] )
if not os.path.isfile(pubfile):
comment="%s@%s"%(sliver['name'], socket.gethostname())
logger.log_call( [ 'ssh-keygen', '-t', 'rsa', '-N', '', '-f', keyfile , '-C', comment] )
- os.chmod (keyfile, 0400)
+ os.chmod (keyfile, 0o400)
logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), keyfile, pubfile ] )
with open(pubfile) as f:
return f.read().strip()
# if it's lost b/c e.g. the sliver was destroyed we cannot save the tags content
ssh_key = generate_sshkey(sliver)
old_tag = find_tag (sliver, 'ssh_key')
- if ssh_key <> old_tag:
+ if ssh_key != old_tag:
SetSliverTag(plc, sliver['name'], 'ssh_key', ssh_key)
logger.log ("sliverauth: %s: setting ssh_key" % sliver['name'])
-#!/usr/bin/python -tt
+#!/usr/bin/python3 -tt
# vim:set ts=4 sw=4 expandtab:
#
#
logger.log("specialaccounts: keys file changed: %s" % auth_keys)
# always set permissions properly
- os.chmod(dot_ssh, 0700)
+ os.chmod(dot_ssh, 0o700)
os.chown(dot_ssh, uid, gid)
- os.chmod(auth_keys, 0600)
+ os.chmod(auth_keys, 0o600)
os.chown(auth_keys, uid, gid)
logger.log('specialaccounts: installed ssh keys for %s' % name)
-#!/usr/bin/python
+#!/usr/bin/python3
""" Syndicate configurator. """
-import httplib
+import http.client
import os
import shutil
import tools
logger.log("Syndicate: Http op %s on url %s to host %s" % (op, mountpoint, syndicate_ip))
try:
- conn = httplib.HTTPSConnection(syndicate_ip, timeout=60)
+ conn = http.client.HTTPSConnection(syndicate_ip, timeout=60)
conn.request(op, mountpoint)
r1 = conn.getresponse()
except:
# add to conf
slices.append(sliver['name'])
_restart = createVsysDir(sliver['name']) or _restart
- if attribute['value'] in scripts.keys():
+                    if attribute['value'] in scripts:
scripts[attribute['value']].append(sliver['name'])
# Write the conf
# not the same as length of values of new scripts,
# and length of non intersection along new scripts is not 0,
# then dicts are different.
- for (acl, oldslivers) in oldscripts.iteritems():
+ for (acl, oldslivers) in oldscripts.items():
try:
if (len(oldslivers) != len(currentscripts[acl])) or \
(len(set(oldslivers) - set(currentscripts[acl])) != 0):
tag = attribute['tagname']
value = attribute['value']
if tag.startswith('vsys_'):
- if (privs.has_key(slice)):
+ if (slice in privs):
slice_priv = privs[slice]
- if (slice_priv.has_key(tag)):
+ if (tag in slice_priv):
slice_priv[tag].append(value)
else:
slice_priv[tag]=[value]
return cur_privs
def write_privs(cur_privs, privs):
- for slice in privs.keys():
+ for slice in list(privs.keys()):
variables = privs[slice]
slice_dir = os.path.join(VSYS_PRIV_DIR, slice)
if (not os.path.exists(slice_dir)):
os.mkdir(slice_dir)
# Add values that do not exist
- for k in variables.keys():
+ for k in list(variables.keys()):
v = variables[k]
- if (cur_privs.has_key(slice)
- and cur_privs[slice].has_key(k)
+ if (slice in cur_privs
+ and k in cur_privs[slice]
and cur_privs[slice][k] == v):
# The binding has not changed
pass
# Remove files and directories
# that are invalid
- for slice in cur_privs.keys():
+ for slice in list(cur_privs.keys()):
variables = cur_privs[slice]
slice_dir = os.path.join(VSYS_PRIV_DIR, slice)
# Add values that do not exist
- for k in variables.keys():
- if (privs.has_key(slice)
- and cur_privs[slice].has_key(k)):
+ for k in list(variables.keys()):
+ if (slice in privs
+ and k in cur_privs[slice]):
# ok, spare this tag
- print "Sparing %s, %s "%(slice, k)
+ print("Sparing %s, %s "%(slice, k))
else:
v_file = os.path.join(slice_dir, k)
os.remove(v_file)
- if (not privs.has_key(slice)):
+ if (slice not in privs):
os.rmdir(slice_dir)
"""Leverage curl to make XMLRPC requests that check the server's credentials."""
-import xmlrpclib
+import xmlrpc.client
import curlwrapper
-class CertificateCheckingSafeTransport (xmlrpclib.Transport):
+class CertificateCheckingSafeTransport (xmlrpc.client.Transport):
def __init__(self, cacert, timeout):
self.cacert = cacert
cacert = self.cacert,
postdata = request_body,
timeout = self.timeout)
- return xmlrpclib.loads(contents)[0]
+ return xmlrpc.client.loads(contents)[0]
-class ServerProxy(xmlrpclib.ServerProxy):
+class ServerProxy(xmlrpc.client.ServerProxy):
def __init__(self, uri, cacert, timeout = 300, **kwds):
- xmlrpclib.ServerProxy.__init__(self, uri,
+ xmlrpc.client.ServerProxy.__init__(self, uri,
CertificateCheckingSafeTransport(cacert, timeout),
**kwds)
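
A hypothetical instantiation of this wrapper; the API URL and CA bundle path are illustrative, the real values come from the node configuration:

    plc = ServerProxy('https://plc.example.org/PLCAPI/',
                      cacert='/etc/planetlab/cacert.pem',
                      timeout=120)
    # XML-RPC calls made through plc are now verified against the CA bundle
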
-#!/usr/bin/python
+#!/usr/bin/python3
#
# Setup script for the Node Manager application
#
'plugins.vsys',
'plugins.vsys_privs',
'plugins.ipv6',
- 'plugins.update_ipv6addr_slivertag',
+ 'plugins.update_ipv6addr_slivertag',
# lxc
'sliver_libvirt',
'sliver_lxc',
# this plugin uses vserver for now
'plugins.drl',
],
- scripts = [
- 'forward_api_calls',
- ],
- packages =[
- ],
)
try:
# create actually means start
self.dom.create()
- except Exception, e:
+ except Exception as e:
# XXX smbaker: attempt to resolve slivers that are stuck in
# "failed to allocate free veth".
if "ailed to allocate free veth" in str(e):
# Btrfs support quota per volumes
- if rec.has_key("rspec") and rec["rspec"].has_key("tags"):
+ if "rspec" in rec and "tags" in rec["rspec"]:
if cgroups.get_cgroup_path(self.name) == None:
# If configure is called before start, then the cgroups won't exist
# yet. NM will eventually re-run configure on the next iteration.
else:
tags = rec["rspec"]["tags"]
# It will depend on the FS selection
- if tags.has_key('disk_max'):
+ if 'disk_max' in tags:
disk_max = tags['disk_max']
if disk_max == 0:
# unlimited
pass
# Memory allocation
- if tags.has_key('memlock_hard'):
+ if 'memlock_hard' in tags:
mem = str(int(tags['memlock_hard']) * 1024) # hard limit in bytes
cgroups.write(self.name, 'memory.limit_in_bytes', mem, subsystem="memory")
- if tags.has_key('memlock_soft'):
+ if 'memlock_soft' in tags:
mem = str(int(tags['memlock_soft']) * 1024) # soft limit in bytes
cgroups.write(self.name, 'memory.soft_limit_in_bytes', mem, subsystem="memory")
# CPU allocation
# Only cpu_shares until figure out how to provide limits and guarantees
# (RT_SCHED?)
- if tags.has_key('cpu_share'):
+ if 'cpu_share' in tags:
cpu_share = tags['cpu_share']
cgroups.write(self.name, 'cpu.shares', cpu_share)
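
cgroups.write is NodeManager's own helper; the underlying mechanism is simply writing a value into the slice's control file in the cgroup filesystem. A minimal sketch of that idea, assuming a cgroup v1 hierarchy mounted at /sys/fs/cgroup and an illustrative directory:

    import os

    def write_cgroup_param(cgroup_dir, param, value):
        # e.g. cgroup_dir='/sys/fs/cgroup/cpu/machine/somesliver.libvirt-lxc'
        #      param='cpu.shares', value='512'
        with open(os.path.join(cgroup_dir, param), 'w') as f:
            f.write(str(value))
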
.format(self.name)
command = plain.split()
logger.log_call(command, timeout=3)
-
-
+
+
@staticmethod
def create(name, rec=None):
'''
# this assumes the reference image is in its own subvolume
command = ['btrfs', 'subvolume', 'snapshot', refImgDir, containerDir]
if not logger.log_call(command, timeout=BTRFS_TIMEOUT):
- logger.log('sliver_lxc: ERROR Could not create BTRFS snapshot at', containerDir)
+ logger.log('sliver_lxc: ERROR Could not create BTRFS snapshot at {}'
+ .format(containerDir))
return
command = ['chmod', '755', containerDir]
logger.log_call(command)
# populate the sliver/vserver specific default allocations table,
# which is used to look for slice attributes
DEFAULT_ALLOCATION = {}
-for rlimit in vserver.RLIMITS.keys():
+for rlimit in list(vserver.RLIMITS.keys()):
rlim = rlimit.lower()
DEFAULT_ALLOCATION["{}_min".format(rlim)] = KEEP_LIMIT
DEFAULT_ALLOCATION["{}_soft".format(rlim)] = KEEP_LIMIT
vserver.VServer.__init__(self, name, logfile='/var/log/nodemanager')
Account.__init__ (self, name)
Initscript.__init__ (self, name)
- except Exception, err:
+ except Exception as err:
if not isinstance(err, vserver.NoSuchVServer):
# Probably a bad vserver or vserver configuration file
logger.log_exc("sliver_vs:__init__ (first chance)", name=name)
# get/set the min/soft/hard values for all of the vserver
# related RLIMITS. Note that vserver currently only
# implements support for hard limits.
- for limit in vserver.RLIMITS.keys():
+ for limit in list(vserver.RLIMITS.keys()):
type = limit.lower()
minimum = self.rspec['{}_min'.format(type)]
soft = self.rspec['{}_soft'.format(type)]
cpu_share = self.rspec['cpu_share']
count = 1
- for key in self.rspec.keys():
+ for key in list(self.rspec.keys()):
if key.find('sysctl.') == 0:
sysctl = key.split('.')
try:
# /etc/vservers/<guest>/sysctl/<id>/
dirname = "/etc/vservers/{}/sysctl/{}".format(self.name, count)
try:
- os.makedirs(dirname, 0755)
+ os.makedirs(dirname, 0o755)
except:
pass
with open("{}/setting".format(dirname), "w") as setting:
logger.log("sliver_vs: {}: writing {}={}"
.format(self.name, key, self.rspec[key]))
- except IOError, e:
+ except IOError as e:
logger.log("sliver_vs: {}: could not set {}={}"
.format(self.name, key, self.rspec[key]))
logger.log("sliver_vs: {}: error = {}".format(self.name, e))
slivers."""
logger.verbose("slivermanager: Entering GetSlivers with fullupdate=%r"%fullupdate)
- for key in data.keys():
+ for key in list(data.keys()):
logger.verbose('slivermanager: GetSlivers key : ' + key)
node_id = None
except:
logger.log_exc("slivermanager: GetSlivers failed to read /etc/planetlab/node_id")
- if data.has_key('node_id') and data['node_id'] != node_id: return
+    if 'node_id' in data and data['node_id'] != node_id:
+        return
- if data.has_key('networks'):
+ if 'networks' in data:
for network in data['networks']:
if network['is_primary'] and network['bwlimit'] is not None:
DEFAULT_ALLOCATION['net_max_rate'] = network['bwlimit'] / 1000
# extract the implied rspec
rspec = {}
rec['rspec'] = rspec
- for resname, default_amount in DEFAULT_ALLOCATION.iteritems():
+ for resname, default_amount in DEFAULT_ALLOCATION.items():
try:
t = type(default_amount)
amount = t.__new__(t, attributes[resname])
rspec[resname] = amount
# add in sysctl attributes into the rspec
- for key in attributes.keys():
+ for key in list(attributes.keys()):
if key.find("sysctl.") == 0:
rspec[key] = attributes[key]
# No default allocation values for LXC yet, think if its necessary given
# that they are also default allocation values in this module
if implementation == 'vs':
- for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.iteritems():
+ for resname, default_amount in sliver_vs.DEFAULT_ALLOCATION.items():
DEFAULT_ALLOCATION[resname]=default_amount
account.register_class(sliver_class_to_register)
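
The t.__new__(t, ...) idiom in the rspec loop above coerces each attribute's string value into the type of its default amount, so '1000' becomes the integer 1000 when the default is numeric. For the built-in types involved, plain construction behaves the same way; a self-contained sketch with made-up defaults:

    defaults = {'cpu_share': 1, 'vref': 'default'}   # illustrative defaults
    attributes = {'cpu_share': '4'}                  # attribute values arrive as strings

    rspec = {}
    for resname, default_amount in defaults.items():
        try:
            # coerce the string into the default's type, as the loop above does
            rspec[resname] = type(default_amount)(attributes[resname])
        except (KeyError, ValueError):
            rspec[resname] = default_amount
    print(rspec)   # {'cpu_share': 4, 'vref': 'default'}
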
[Unit]
Description=Fetch configuration files as defined by controlling MyPLC
Before=lxc-sliceimage.service
+After=network-online.target
+Wants=network-online.target
[Service]
Type=oneshot
-ExecStart=/usr/bin/env python /usr/share/NodeManager/conf_files.py
+ExecStart=/usr/bin/env python3 /usr/share/NodeManager/conf_files.py
[Install]
WantedBy=multi-user.target
EnvironmentFile=/etc/sysconfig/nodemanager
Type=forking
PIDFile=/var/run/nodemanager.pid
-ExecStart=/usr/bin/env python /usr/share/NodeManager/nodemanager.py -d $OPTIONS
+ExecStart=/usr/bin/env python3 /usr/share/NodeManager/nodemanager.py -d $OPTIONS
[Install]
WantedBy=multi-user.target
"""
from subprocess import PIPE, Popen
-from xmlrpclib import dumps, loads
+from xmlrpc.client import dumps, loads
GPG = '/usr/bin/gpg'
"""A few things that didn't seem to fit anywhere else."""
-import os, os.path
+import os
+import os.path
import pwd
import tempfile
import fcntl
PID_FILE = '/var/run/nodemanager.pid'
####################
+
+
def get_default_if():
interface = get_if_from_hwaddr(get_hwaddr_from_plnode())
- if not interface: interface = "eth0"
+ if not interface:
+ interface = "eth0"
return interface
+
def get_hwaddr_from_plnode():
try:
for line in open("/usr/boot/plnode.txt", 'r').readlines():
pass
return None
+
def get_if_from_hwaddr(hwaddr):
import sioc
devs = sioc.gifconf()
for dev in devs:
dev_hwaddr = sioc.gifhwaddr(dev)
- if dev_hwaddr == hwaddr: return dev
+ if dev_hwaddr == hwaddr:
+ return dev
return None
####################
# daemonizing
+
+
def as_daemon_thread(run):
- """Call function <run> with no arguments in its own thread."""
+ """
+ Call function <run> with no arguments in its own thread.
+ """
thr = threading.Thread(target=run)
thr.setDaemon(True)
thr.start()
+
def close_nonstandard_fds():
- """Close all open file descriptors other than 0, 1, and 2."""
+ """
+ Close all open file descriptors other than 0, 1, and 2.
+ """
_SC_OPEN_MAX = 4
for fd in range(3, os.sysconf(_SC_OPEN_MAX)):
- try: os.close(fd)
- except OSError: pass # most likely an fd that isn't open
+ try:
+ os.close(fd)
+ except OSError:
+ pass # most likely an fd that isn't open
# after http://www.erlenstar.demon.co.uk/unix/faq_2.html
+
+
def daemon():
- """Daemonize the current process."""
- if os.fork() != 0: os._exit(0)
+ """
+ Daemonize the current process.
+ """
+ if os.fork() != 0:
+ os._exit(0)
os.setsid()
- if os.fork() != 0: os._exit(0)
+ if os.fork() != 0:
+ os._exit(0)
os.chdir('/')
- os.umask(0022)
+ os.umask(0o022)
devnull = os.open(os.devnull, os.O_RDWR)
os.dup2(devnull, 0)
# xxx fixme - this is just to make sure that nothing gets stupidly lost - should use devnull
- crashlog = os.open('/var/log/nodemanager.daemon', os.O_RDWR | os.O_APPEND | os.O_CREAT, 0644)
+ crashlog = os.open('/var/log/nodemanager.daemon',
+ os.O_RDWR | os.O_APPEND | os.O_CREAT, 0o644)
os.dup2(crashlog, 1)
os.dup2(crashlog, 2)
+
def fork_as(su, function, *args):
"""
-fork(), cd / to avoid keeping unused directories open,
-close all nonstandard file descriptors (to avoid capturing open sockets),
-fork() again (to avoid zombies) and call <function>
-with arguments <args> in the grandchild process.
-If <su> is not None, set our group and user ids
- appropriately in the child process.
+ fork(), cd / to avoid keeping unused directories open, close all nonstandard
+ file descriptors (to avoid capturing open sockets), fork() again (to avoid
+ zombies) and call <function> with arguments <args> in the grandchild
+ process. If <su> is not None, set our group and user ids appropriately in
+ the child process.
"""
child_pid = os.fork()
if child_pid == 0:
os.setegid(pw_ent[3])
os.seteuid(pw_ent[2])
child_pid = os.fork()
- if child_pid == 0: function(*args)
+ if child_pid == 0:
+ function(*args)
except:
os.seteuid(os.getuid()) # undo su so we can write the log file
os.setegid(os.getgid())
logger.log_exc("tools: fork_as")
os._exit(0)
- else: os.waitpid(child_pid, 0)
+ else:
+ os.waitpid(child_pid, 0)
####################
# manage files
+
+
def pid_file():
"""
-We use a pid file to ensure that only one copy of NM is running at a given time.
-If successful, this function will write a pid file containing the pid of the current process.
-The return value is the pid of the other running process, or None otherwise.
+ We use a pid file to ensure that only one copy of NM is running at a given
+ time. If successful, this function will write a pid file containing the pid
+ of the current process. The return value is the pid of the other running
+ process, or None otherwise.
"""
other_pid = None
if os.access(PID_FILE, os.F_OK): # check for a pid file
other_pid = int(handle.read())
handle.close()
# check for a process with that pid by sending signal 0
- try: os.kill(other_pid, 0)
- except OSError, e:
- if e.errno == errno.ESRCH: other_pid = None # doesn't exist
- else: raise # who knows
+ try:
+ os.kill(other_pid, 0)
+ except OSError as e:
+ if e.errno == errno.ESRCH:
+ other_pid = None # doesn't exist
+ else:
+ raise # who knows
-    if other_pid == None:
+    if other_pid is None:
# write a new pid file
write_file(PID_FILE, lambda f: f.write(str(os.getpid())))
return other_pid
+
def write_file(filename, do_write, **kw_args):
"""
-Write file <filename> atomically by opening a temporary file,
-using <do_write> to write that file, and then renaming the temporary file.
+ Write file <filename> atomically by opening a temporary file,
+ using <do_write> to write that file, and then renaming the temporary file.
"""
shutil.move(write_temp_file(do_write, **kw_args), filename)
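
A hypothetical use of the helper; path, contents and mode are illustrative:

    # readers never observe a half-written file: the temp file is written
    # in full first, then moved over the target in a single rename
    write_file('/tmp/example.conf',
               lambda f: f.write("OPTIONS=-v\n"),
               mode=0o644)
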
-def write_temp_file(do_write, mode=None, uidgid=None):
+
+def write_temp_file(do_write, mode=None, uidgid=None, binary=False):
fd, temporary_filename = tempfile.mkstemp()
- if mode: os.chmod(temporary_filename, mode)
- if uidgid: os.chown(temporary_filename, *uidgid)
- f = os.fdopen(fd, 'w')
- try: do_write(f)
- finally: f.close()
+ if mode:
+ os.chmod(temporary_filename, mode)
+ if uidgid:
+ os.chown(temporary_filename, *uidgid)
+ open_mode = 'wb' if binary else 'w'
+ f = os.fdopen(fd, open_mode)
+ try:
+ do_write(f)
+ finally:
+ f.close()
return temporary_filename
-def replace_file_with_string (target, new_contents, chmod=None, remove_if_empty=False):
+
+def replace_file_with_string(target, new_contents,
+ chmod=None, remove_if_empty=False):
"""
-Replace a target file with a new contents
-checks for changes: does not do anything if previous state was already right
-can handle chmod if requested
-can also remove resulting file if contents are void, if requested
-performs atomically:
-writes in a tmp file, which is then renamed (from sliverauth originally)
-returns True if a change occurred, or the file is deleted
+    Replace a target file with new contents.
+
+    Checks for changes: does nothing if the previous state was already right.
+    Can handle chmod if requested.
+    Can also remove the resulting file if the contents are void, if requested.
+    Performs atomically: writes in a tmp file, which is then renamed
+    (from sliverauth originally).
+    Returns True if a change occurred, or if the file is deleted.
"""
try:
- with open(target) as f:
- current = f.read()
+ with open(target) as feed:
+ current = feed.read()
except:
current = ""
if current == new_contents:
# if turns out to be an empty string, and remove_if_empty is set,
# then make sure to trash the file if it exists
if remove_if_empty and not new_contents and os.path.isfile(target):
- logger.verbose("tools.replace_file_with_string: removing file {}".format(target))
- try: os.unlink(target)
- finally: return True
+ logger.verbose(
+ "tools.replace_file_with_string: removing file {}".format(target))
+ try:
+ os.unlink(target)
+ finally:
+ return True
return False
# overwrite target file: create a temp in the same directory
path = os.path.dirname(target) or '.'
fd, name = tempfile.mkstemp('', 'repl', path)
- os.write(fd, new_contents)
+ os.write(fd, new_contents.encode())
os.close(fd)
if os.path.exists(target):
os.unlink(target)
shutil.move(name, target)
- if chmod: os.chmod(target, chmod)
+ if chmod:
+ os.chmod(target, chmod)
return True
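
A hypothetical call, with an illustrative path and payload; the return value tells the caller whether anything actually changed on disk:

    changed = replace_file_with_string('/tmp/nodemanager.key',
                                       'secret-token\n',
                                       chmod=0o400)
    if changed:
        print("key file rewritten")
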
####################
# utilities functions to get (cached) information from the node
+
# get node_id from /etc/planetlab/node_id and cache it
_node_id = None
+
+
def node_id():
global _node_id
if _node_id is None:
_node_id = ""
return _node_id
+
_root_context_arch = None
+
+
def root_context_arch():
global _root_context_arch
if not _root_context_arch:
class NMLock:
def __init__(self, file):
logger.log("tools: Lock {} initialized.".format(file), 2)
- self.fd = os.open(file, os.O_RDWR|os.O_CREAT, 0600)
+ self.fd = os.open(file, os.O_RDWR | os.O_CREAT, 0o600)
flags = fcntl.fcntl(self.fd, fcntl.F_GETFD)
flags |= fcntl.FD_CLOEXEC
fcntl.fcntl(self.fd, fcntl.F_SETFD, flags)
+
def __del__(self):
os.close(self.fd)
+
def acquire(self):
logger.log("tools: Lock acquired.", 2)
fcntl.lockf(self.fd, fcntl.LOCK_SH)
+
def release(self):
logger.log("tools: Lock released.", 2)
fcntl.lockf(self.fd, fcntl.LOCK_UN)
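
A hypothetical use of NMLock, with an illustrative lock file path. Note that acquire() takes a shared (LOCK_SH) lock, so it serializes against exclusive lockers rather than against other shared holders:

    lock = NMLock('/var/run/nodemanager-example.lock')
    lock.acquire()
    try:
        pass   # ... critical section ...
    finally:
        lock.release()
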
# Utilities for getting the IP address of a LXC/Openvswitch slice. Do this by
# running ifconfig inside of the slice's context.
+
def get_sliver_process(slice_name, process_cmdline):
"""
Utility function to find a process inside of an LXC sliver. Returns
output = os.popen(cmd).readlines()
except:
# the slice couldn't be found
- logger.log("get_sliver_process: couldn't find slice {}".format(slice_name))
+ logger.log(
+ "get_sliver_process: couldn't find slice {}".format(slice_name))
return (None, None)
cgroup_fn = None
# Added by Guilherme Sperb Machado <gsm@machados.org>
###################################################
+
try:
import re
import socket
logger.log("Could not import 'sliver_lxc' or 'libvirt' or 'sliver_libvirt'.")
###################################################
+
def get_sliver_ifconfig(slice_name, device="eth0"):
"""
return the output of "ifconfig" run from inside the sliver.
setns.chcontext(path)
args = ["/sbin/ifconfig", device]
- sub = subprocess.Popen(args, stderr = subprocess.PIPE, stdout = subprocess.PIPE)
+ sub = subprocess.Popen(
+        args, stderr=subprocess.PIPE, stdout=subprocess.PIPE,
+        universal_newlines=True)
sub.wait()
if (sub.returncode != 0):
- logger.log("get_slice_ifconfig: error in ifconfig: {}".format(sub.stderr.read()))
+ logger.log("get_slice_ifconfig: error in ifconfig: {}".format(
+ sub.stderr.read()))
result = sub.stdout.read()
finally:
return result
+
def get_sliver_ip(slice_name):
ifconfig = get_sliver_ifconfig(slice_name)
if not ifconfig:
# Get the slice ipv6 address
# Only for LXC!
###################################################
+
+
def get_sliver_ipv6(slice_name):
ifconfig = get_sliver_ifconfig(slice_name)
if not ifconfig:
return None, None
# example: 'inet6 2001:67c:16dc:1302:5054:ff:fea7:7882 prefixlen 64 scopeid 0x0<global>'
- prog = re.compile(r'inet6\s+(.*)\s+prefixlen\s+(\d+)\s+scopeid\s+(.+)<global>')
+ prog = re.compile(
+ r'inet6\s+(.*)\s+prefixlen\s+(\d+)\s+scopeid\s+(.+)<global>')
for line in ifconfig.split("\n"):
search = prog.search(line)
if search:
###################################################
# Check if the address is a AF_INET6 family address
###################################################
+
+
def is_valid_ipv6(ipv6addr):
try:
socket.inet_pton(socket.AF_INET6, ipv6addr)
return False
return True
-### this returns the kind of virtualization on the node
+
+# this returns the kind of virtualization on the node
# either 'vs' or 'lxc'
# also caches it in /etc/planetlab/virt for next calls
# could be promoted to core nm if need be
virt_stamp = "/etc/planetlab/virt"
-def get_node_virt ():
+
+
+def get_node_virt():
try:
with open(virt_stamp) as f:
return f.read().strip()
pass
logger.log("Computing virt..")
try:
- virt = 'vs' if subprocess.call ([ 'vserver', '--help' ]) == 0 else 'lxc'
+ virt = 'vs' if subprocess.call(['vserver', '--help']) == 0 else 'lxc'
except:
- virt='lxc'
- with file(virt_stamp, "w") as f:
+ virt = 'lxc'
+ with open(virt_stamp, "w") as f:
f.write(virt)
return virt
-### this return True or False to indicate that systemctl is present on that box
+
+# this returns True or False to indicate that systemctl is present on that box
# cache result in memory as _has_systemctl
_has_systemctl = None
-def has_systemctl ():
+
+
+def has_systemctl():
global _has_systemctl
if _has_systemctl is None:
- _has_systemctl = (subprocess.call([ 'systemctl', '--help' ]) == 0)
+ _has_systemctl = (subprocess.call(['systemctl', '--help']) == 0)
return _has_systemctl
###################################################
# This method was developed to support the ipv6 plugin
# Only for LXC!
###################################################
+
+
def reboot_slivers():
type = 'sliver.LXC'
# connecting to the libvirtd
# set the flag VIR_DOMAIN_REBOOT_INITCTL, which uses "initctl"
result = domain.reboot(0x04)
if result == 0:
- logger.log("tools: REBOOT {}".format(domain.name()) )
+ logger.log("tools: REBOOT {}".format(domain.name()))
else:
raise Exception()
- except Exception, e:
- logger.log("tools: FAILED to reboot {} ({})".format(domain.name(), e) )
- logger.log("tools: Trying to DESTROY/CREATE {} instead...".format(domain.name()) )
+ except Exception as e:
+ logger.log("tools: FAILED to reboot {} ({})".format(
+ domain.name(), e))
+ logger.log(
+ "tools: Trying to DESTROY/CREATE {} instead...".format(domain.name()))
try:
result = domain.destroy()
- if result==0:
- logger.log("tools: DESTROYED {}".format(domain.name()) )
- else: logger.log("tools: FAILED in the DESTROY call of {}".format(domain.name()) )
+ if result == 0:
+ logger.log("tools: DESTROYED {}".format(domain.name()))
+ else:
+ logger.log(
+ "tools: FAILED in the DESTROY call of {}".format(domain.name()))
result = domain.create()
- if result==0:
- logger.log("tools: CREATED {}".format(domain.name()) )
- else: logger.log("tools: FAILED in the CREATE call of {}".format(domain.name()) )
- except Exception, e:
- logger.log("tools: FAILED to DESTROY/CREATE {} ({})".format(domain.name(), e) )
+ if result == 0:
+ logger.log("tools: CREATED {}".format(domain.name()))
+ else:
+ logger.log(
+ "tools: FAILED in the CREATE call of {}".format(domain.name()))
+ except Exception as e:
+ logger.log(
+ "tools: FAILED to DESTROY/CREATE {} ({})".format(domain.name(), e))
###################################################
# Author: Guilherme Sperb Machado <gsm@machados.org>
###################################################
# Get the /etc/hosts file path
###################################################
+
+
def get_hosts_file_path(slicename):
containerDir = os.path.join(sliver_lxc.Sliver_LXC.CON_BASE_DIR, slicename)
return os.path.join(containerDir, 'etc', 'hosts')
# If the parameter 'ipv6addr' is None, then search
# for any ipv6 address
###################################################
+
+
def search_ipv6addr_hosts(slicename, ipv6addr):
hostsFilePath = get_hosts_file_path(slicename)
found = False
# Removes all ipv6 addresses from the /etc/hosts
# file of a given slice
###################################################
+
+
def remove_all_ipv6addr_hosts(slicename, node):
hostsFilePath = get_hosts_file_path(slicename)
try:
for line in fileinput.input(r'{}'.format(hostsFilePath), inplace=True):
- search = re.search(r'^(.*)\s+({}|{})$'.format(node, 'localhost'), line)
+ search = re.search(
+ r'^(.*)\s+({}|{})$'.format(node, 'localhost'), line)
if search:
ipv6candidate = search.group(1)
ipv6candidatestrip = ipv6candidate.strip()
valid = is_valid_ipv6(ipv6candidatestrip)
if not valid:
- print line,
+                print(line, end='')
fileinput.close()
logger.log("tools: REMOVED IPv6 address from /etc/hosts file of slice={}"
- .format(slicename) )
+ .format(slicename))
except:
logger.log("tools: FAILED to remove the IPv6 address from /etc/hosts file of slice={}"
- .format(slicename) )
+ .format(slicename))
###################################################
# Author: Guilherme Sperb Machado <gsm@machados.org>
###################################################
# Adds an ipv6 address to the /etc/hosts file within a slice
###################################################
+
+
def add_ipv6addr_hosts_line(slicename, node, ipv6addr):
hostsFilePath = get_hosts_file_path(slicename)
- logger.log("tools: {}".format(hostsFilePath) )
+ logger.log("tools: {}".format(hostsFilePath))
# debugging purposes:
#string = "127.0.0.1\tlocalhost\n192.168.100.179\tmyplc-node1-vm.mgmt.local\n"
#string = "127.0.0.1\tlocalhost\n"
file.write(ipv6addr + " " + node + "\n")
file.close()
logger.log("tools: ADDED IPv6 address to /etc/hosts file of slice={}"
- .format(slicename) )
+ .format(slicename))
except:
logger.log("tools: FAILED to add the IPv6 address to /etc/hosts file of slice={}"
- .format(slicename) )
-
+ .format(slicename))
# how to run a command in a slice
# lxc: lxcsu slicename "command and its arguments"
# which, OK, is no big deal as long as the command is simple enough,
# but do not stretch it with arguments that have spaces or need quoting as that will become a nightmare
-def command_in_slice (slicename, argv):
+def command_in_slice(slicename, argv):
virt = get_node_virt()
if virt == 'vs':
- return [ 'vserver', slicename, 'exec', ] + argv
+ return ['vserver', slicename, 'exec', ] + argv
elif virt == 'lxc':
# wrap up argv in a single string for -c
- return [ 'lxcsu', slicename, ] + [ " ".join(argv) ]
+ return ['lxcsu', slicename, ] + [" ".join(argv)]
logger.log("command_in_slice: WARNING: could not find a valid virt")
return argv
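
A hypothetical invocation with a made-up slice name, showing the two shapes the wrapper can return:

    argv = command_in_slice('example_slice', ['ls', '-l', '/'])
    # lxc: ['lxcsu', 'example_slice', 'ls -l /']
    # vs:  ['vserver', 'example_slice', 'exec', 'ls', '-l', '/']
    logger.log_call(argv, timeout=30)
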
####################
-def init_signals ():
- def handler (signum, frame):
+
+
+def init_signals():
+ def handler(signum, frame):
logger.log("Received signal {} - exiting".format(signum))
os._exit(1)
signal.signal(signal.SIGHUP, handler)