From 22d40df4ed31c001fd58966640ed0c5079d486e6 Mon Sep 17 00:00:00 2001 From: Thierry Parmentelat Date: Wed, 2 Jun 2010 16:22:39 +0000 Subject: [PATCH] dummy/scaffolding plugin for reservations modules that set 'persistent_data' are provided with the latest known getslivers data instead of {} nodemanager now is a class bwmon.data renamed more consistently into /var/lib/nodemanager/bwmon.pickle cleaned up deprecated uses of Set --- NodeManager.spec | 4 +- bwmon.py | 30 ++-- database.py | 2 +- nodemanager.py | 385 +++++++++++++++++++++++------------------ plugins/codemux.py | 3 +- plugins/reservation.py | 36 ++++ plugins/vsys.py | 7 +- plugins/vsys_privs.py | 1 - 8 files changed, 271 insertions(+), 197 deletions(-) create mode 100644 plugins/reservation.py diff --git a/NodeManager.spec b/NodeManager.spec index 444706e..be2d049 100644 --- a/NodeManager.spec +++ b/NodeManager.spec @@ -77,8 +77,8 @@ install -D -m 644 logrotate/nodemanager $RPM_BUILD_ROOT/%{_sysconfdir}/logrotate %post # tmp - handle file renamings; old names are from 2.0-8 renamings=" -/var/lib/misc/bwmon.dat@/var/lib/nodemanager/bwmon.dat -/root/sliver_mgr_db.pickle@/var/lib/nodemanager/nodemanager.pickle +/var/lib/misc/bwmon.dat@/var/lib/nodemanager/bwmon.pickle +/root/sliver_mgr_db.pickle@/var/lib/nodemanager/database.pickle /var/log/getslivers.txt@/var/lib/nodemanager/getslivers.txt /var/log/nm@/var/log/nodemanager /var/log/nm.daemon@/var/log/nodemanager.daemon diff --git a/bwmon.py b/bwmon.py index 1d7081a..f18c710 100644 --- a/bwmon.py +++ b/bwmon.py @@ -31,8 +31,6 @@ import tools import bwlimit import database -from sets import Set - priority = 20 # Defaults @@ -41,7 +39,7 @@ DEBUG = False # Set ENABLE to False to setup buckets, but not limit. ENABLE = True -datafile = "/var/lib/nodemanager/bwmon.dat" +DB_FILE = "/var/lib/nodemanager/bwmon.pickle" try: sys.path.append("/etc/planetlab") @@ -483,10 +481,12 @@ def gethtbs(root_xid, default_xid): def sync(nmdbcopy): """ - Syncs tc, db, and bwmon.dat. Then, starts new slices, kills old ones, and updates byte accounts for each running slice. Sends emails and caps those that went over their limit. + Syncs tc, db, and bwmon.pickle. + Then, starts new slices, kills old ones, and updates byte accounts for each running slice. + Sends emails and caps those that went over their limit. """ # Defaults - global datafile, \ + global DB_FILE, \ period, \ default_MaxRate, \ default_Maxi2Rate, \ @@ -496,7 +496,7 @@ def sync(nmdbcopy): # All slices names = [] - # Incase the limits have changed. + # In case the limits have changed. 
default_MaxRate = int(bwlimit.get_bwcap() / 1000) default_Maxi2Rate = int(bwlimit.bwmax / 1000) @@ -505,13 +505,13 @@ def sync(nmdbcopy): default_MaxRate = 1000000 try: - f = open(datafile, "r+") - logger.log("bwmon: Loading %s" % datafile, 2) + f = open(DB_FILE, "r+") + logger.log("bwmon: Loading %s" % DB_FILE, 2) (version, slices, deaddb) = pickle.load(f) f.close() # Check version of data file if version != "$Id$": - logger.log("bwmon: Not using old version '%s' data file %s" % (version, datafile)) + logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE)) raise Exception except Exception: version = "$Id$" @@ -548,7 +548,7 @@ def sync(nmdbcopy): logger.log("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__(), 2) # The dat file has HTBs for slices, but the HTBs aren't running - nohtbslices = Set(slices.keys()) - Set(kernelhtbs.keys()) + nohtbslices = set(slices.keys()) - set(kernelhtbs.keys()) logger.log( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__(), 2) # Reset tc counts. for nohtbslice in nohtbslices: @@ -559,7 +559,7 @@ def sync(nmdbcopy): del slices[nohtbslice] # The dat file doesnt have HTB for the slice but kern has HTB - slicesnodat = Set(kernelhtbs.keys()) - Set(slices.keys()) + slicesnodat = set(kernelhtbs.keys()) - set(slices.keys()) logger.log( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__(), 2) for slicenodat in slicesnodat: # But slice is running @@ -572,7 +572,7 @@ def sync(nmdbcopy): # Get new slices. # Slices in GetSlivers but not running HTBs - newslicesxids = Set(live.keys()) - Set(kernelhtbs.keys()) + newslicesxids = set(live.keys()) - set(kernelhtbs.keys()) logger.log("bwmon: Found %s new slices" % newslicesxids.__len__(), 2) # Setup new slices @@ -613,7 +613,7 @@ def sync(nmdbcopy): # aren't instantiated by PLC into the dead dict until # recording period is over. This is to avoid the case where a slice is dynamically created # and destroyed then recreated to get around byte limits. 
- deadxids = Set(slices.keys()) - Set(live.keys()) + deadxids = set(slices.keys()) - set(live.keys()) logger.log("bwmon: Found %s dead slices" % (deadxids.__len__() - 2), 2) for deadxid in deadxids: if deadxid == root_xid or deadxid == default_xid: @@ -660,8 +660,8 @@ def sync(nmdbcopy): # Update byte counts slice.update(kernelhtbs[xid], live[xid]['_rspec']) - logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),datafile), 2) - f = open(datafile, "w") + logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE), 2) + f = open(DB_FILE, "w") pickle.dump((version, slices, deaddb), f) f.close() diff --git a/database.py b/database.py index eaf78d4..ba6fbb4 100644 --- a/database.py +++ b/database.py @@ -36,7 +36,7 @@ MINIMUM_ALLOCATION = {'cpu_pct': 0, } LOANABLE_RESOURCES = MINIMUM_ALLOCATION.keys() -DB_FILE = '/var/lib/nodemanager/nodemanager.pickle' +DB_FILE = '/var/lib/nodemanager/database.pickle' # database object and associated lock diff --git a/nodemanager.py b/nodemanager.py index ee328c9..386776f 100755 --- a/nodemanager.py +++ b/nodemanager.py @@ -20,6 +20,7 @@ import os import sys import resource import glob +import pickle import logger import tools @@ -28,186 +29,226 @@ from config import Config from plcapi import PLCAPI import random -id="$Id$" -savedargv = sys.argv[:] - -# NOTE: modules listed here should also be loaded in this order -# see the priority set in each module - lower comes first -known_modules=['net','conf_files', 'sm', 'bwmon'] - -plugin_path = "/usr/share/NodeManager/plugins" - -default_period=600 -default_random=301 - -parser = optparse.OptionParser() -parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False, help='run daemonized') -parser.add_option('-s', '--startup', action='store_true', dest='startup', default=False, help='run all sliver startup scripts') -parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config', help='PLC configuration file') -parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session', help='API session key (or file)') -parser.add_option('-p', '--period', action='store', dest='period', default=default_period, - help='Polling interval (sec) - default %d'%default_period) -parser.add_option('-r', '--random', action='store', dest='random', default=default_random, - help='Range for additional random polling interval (sec) -- default %d'%default_random) -parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='more verbose log') -parser.add_option('-P', '--path', action='store', dest='path', default=plugin_path, help='Path to plugins directory') - -# NOTE: BUG the 'help' for this parser.add_option() wont list plugins from the --path argument -parser.add_option('-m', '--module', action='store', dest='module', default='', help='run a single module among '+' '.join(known_modules)) -(options, args) = parser.parse_args() - -# Deal with plugins directory -if os.path.exists(options.path): - sys.path.append(options.path) - plugins = [ os.path.split(os.path.splitext(x)[0])[1] for x in glob.glob( os.path.join(options.path,'*.py') ) ] - known_modules += plugins - -modules = [] - -def GetSlivers(config, plc): - '''Run call backs defined in modules''' - try: - logger.log("nodemanager: Syncing w/ PLC") - # retrieve GetSlivers from PLC - data = plc.GetSlivers() - # use the magic 'default' slice to retrieve system-wide defaults - getPLCDefaults(data, config) - # tweak the 
'vref' attribute from GetSliceFamily - setSliversVref (data) - # always dump it for debug purposes - # used to be done only in verbose; very helpful though, and tedious to obtain, - # so let's dump this unconditionnally - logger.log_slivers(data) - logger.verbose("nodemanager: Sync w/ PLC done") - except: - logger.log_exc("nodemanager: failed in GetSlivers") - # XXX So some modules can at least boostrap. - logger.log("nodemanager: Can't contact PLC to GetSlivers(). Continuing.") - data = {} - # Invoke GetSlivers() functions from the callback modules - for module in modules: - logger.verbose('trigerring GetSlivers callback for module %s'%module.__name__) - try: - callback = getattr(module, 'GetSlivers') - callback(data, config, plc) + +class NodeManager: + + id="$Id$" + + PLUGIN_PATH = "/usr/share/NodeManager/plugins" + + DB_FILE = "/var/lib/nodemanager/getslivers.pickle" + + # the modules in this directory that need to be run + # NOTE: modules listed here will also be loaded in this order + # once loaded, they get re-ordered after their priority (lower comes first) + # for determining the runtime order + core_modules=['net','conf_files', 'sm', 'bwmon'] + + default_period=600 + default_random=301 + default_priority=100 + + def __init__ (self): + + parser = optparse.OptionParser() + parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False, help='run daemonized') + parser.add_option('-s', '--startup', action='store_true', dest='startup', default=False, help='run all sliver startup scripts') + parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config', help='PLC configuration file') + parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session', help='API session key (or file)') + parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period, + help='Polling interval (sec) - default %d'%NodeManager.default_period) + parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random, + help='Range for additional random polling interval (sec) -- default %d'%NodeManager.default_random) + parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='more verbose log') + parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH, help='Path to plugins directory') + + # NOTE: BUG the 'help' for this parser.add_option() wont list plugins from the --path argument + parser.add_option('-m', '--module', action='store', dest='user_module', default='', help='run a single module') + (self.options, args) = parser.parse_args() + + if len(args) != 0: + parser.print_help() + sys.exit(1) + + # determine the modules to be run + self.modules = NodeManager.core_modules + # Deal with plugins directory + if os.path.exists(self.options.path): + sys.path.append(self.options.path) + plugins = [ os.path.split(os.path.splitext(x)[0])[1] for x in glob.glob( os.path.join(self.options.path,'*.py') ) ] + self.modules += plugins + if self.options.user_module: + assert self.options.user_module in self.modules + self.modules=[self.options.user_module] + logger.verbose('nodemanager: Running single module %s'%self.options.user_module) + + + def GetSlivers(self, config, plc): + """Run call backs defined in modules""" + try: + logger.log("nodemanager: Syncing w/ PLC") + # retrieve GetSlivers from PLC + data = plc.GetSlivers() + # use the magic 'default' slice to retrieve 
system-wide defaults + self.getPLCDefaults(data, config) + # tweak the 'vref' attribute from GetSliceFamily + self.setSliversVref (data) + # log it for debug purposes, no matter what verbose is + logger.log_slivers(data) + # dump it too, so it can be retrieved later in case of comm. failure + self.dumpSlivers(data) + logger.verbose("nodemanager: Sync w/ PLC done") + last_data=data except: - logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module) - - -def getPLCDefaults(data, config): - ''' - Get PLC wide defaults from _default system slice. Adds them to config class. - ''' - for slice in data.get('slivers'): - if slice['name'] == config.PLC_SLICE_PREFIX+"_default": - attr_dict = {} - for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value'] - if len(attr_dict): - logger.verbose("nodemanager: Found default slice overrides.\n %s" % attr_dict) - config.OVERRIDES = attr_dict - return - # NOTE: if an _default slice existed, it would have been found above and - # the routine would return. Thus, if we've gotten here, then no default - # slice is bound to this node. - if 'OVERRIDES' in dir(config): del config.OVERRIDES - - -def setSliversVref (data): - ''' - Tweak the 'vref' attribute in all slivers based on the 'GetSliceFamily' key - ''' - # GetSlivers exposes the result of GetSliceFamily() as an separate key in data - # It is safe to override the attributes with this, as this method has the right logic - for sliver in data.get('slivers'): + logger.log_exc("nodemanager: failed in GetSlivers") + # XXX So some modules can at least boostrap. + logger.log("nodemanager: Can't contact PLC to GetSlivers(). Continuing.") + data = {} + # for modules that request it though the 'persistent_data' property + last_data=self.loadSlivers() + # Invoke GetSlivers() functions from the callback modules + for module in self.loaded_modules: + logger.verbose('triggering GetSlivers callback for module %s'%module.__name__) + try: + callback = getattr(module, 'GetSlivers') + module_data=data + if getattr(module,'persistent_data',False): + module_data=last_data + callback(data, config, plc) + except: + logger.log_exc("nodemanager: GetSlivers failed to run callback for module %r"%module) + + + def getPLCDefaults(self, data, config): + """ + Get PLC wide defaults from _default system slice. Adds them to config class. + """ + for slice in data.get('slivers'): + if slice['name'] == config.PLC_SLICE_PREFIX+"_default": + attr_dict = {} + for attr in slice.get('attributes'): attr_dict[attr['tagname']] = attr['value'] + if len(attr_dict): + logger.verbose("nodemanager: Found default slice overrides.\n %s" % attr_dict) + config.OVERRIDES = attr_dict + return + # NOTE: if an _default slice existed, it would have been found above and + # the routine would return. Thus, if we've gotten here, then no default + # slice is bound to this node. 
+ if 'OVERRIDES' in dir(config): del config.OVERRIDES + + + def setSliversVref (self, data): + """ + Tweak the 'vref' attribute in all slivers based on the 'GetSliceFamily' key + """ + # GetSlivers exposes the result of GetSliceFamily() as an separate key in data + # It is safe to override the attributes with this, as this method has the right logic + for sliver in data.get('slivers'): + try: + slicefamily=sliver.get('GetSliceFamily') + for att in sliver['attributes']: + if att['tagname']=='vref': + att['value']=slicefamily + continue + sliver['attributes'].append({ 'tagname':'vref','value':slicefamily}) + except: + logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",name=sliver['name']) + + def dumpSlivers (self, slivers): + f = open(NodeManager.DB_FILE, "w") + logger.log ("nodemanager: saving successfully fetched GetSlivers in %s" % NodeManager.DB_FILE) + pickle.dump(slivers, f) + f.close() + + def loadSlivers (self): try: - slicefamily=sliver.get('GetSliceFamily') - for att in sliver['attributes']: - if att['tagname']=='vref': - att['value']=slicefamily - continue - sliver['attributes'].append({ 'tagname':'vref','value':slicefamily}) + f = open(NodeManager.DB_FILE, "r+") + logger.log("nodemanager: restoring latest known GetSlivers from %s" % NodeManager.DB_FILE) + slivers = pickle.load(f) + f.close() + return slivers except: - logger.log_exc("nodemanager: Could not overwrite 'vref' attribute from 'GetSliceFamily'",name=sliver['name']) + logger.log("Could not restore GetSlivers from %s" % NodeManager.DB_FILE) + return {} - -def run(): - try: - if options.daemon: tools.daemon() - - # set log level - if (options.verbose): - logger.set_level(logger.LOG_VERBOSE) - - # Load /etc/planetlab/plc_config - config = Config(options.config) - + def run(self): try: - other_pid = tools.pid_file() - if other_pid != None: - print """There might be another instance of the node manager running as pid %d. 
If this is not the case, please remove the pid file %s""" % (other_pid, tools.PID_FILE) - return - except OSError, err: - print "Warning while writing PID file:", err - - # Load and start modules - if options.module: - assert options.module in known_modules - running_modules=[options.module] - logger.verbose('nodemanager: Running single module %s'%options.module) - else: - running_modules=known_modules - for module in running_modules: - try: - m = __import__(module) - m.start(options, config) - modules.append(m) - except ImportError, err: - print "Warning while loading module %s:" % module, err - - default_priority=100 - # sort on priority (lower first) - def sort_module_priority (m1,m2): - return getattr(m1,'priority',default_priority) - getattr(m2,'priority',default_priority) - modules.sort(sort_module_priority) - - logger.log('ordered modules:') - for module in modules: logger.log ('%s: %s'%(getattr(module,'priority',default_priority),module.__name__)) - - # Load /etc/planetlab/session - if os.path.exists(options.session): - session = file(options.session).read().strip() - else: - session = None - - # Initialize XML-RPC client - iperiod=int(options.period) - irandom=int(options.random) - plc = PLCAPI(config.plc_api_uri, config.cacert, session, timeout=iperiod/2) - - #check auth - logger.log("nodemanager: Checking Auth.") - while plc.check_authentication() != True: + if self.options.daemon: tools.daemon() + + # set log level + if (self.options.verbose): + logger.set_level(logger.LOG_VERBOSE) + + # Load /etc/planetlab/plc_config + config = Config(self.options.config) + try: - plc.update_session() - logger.log("nodemanager: Authentication Failure. Retrying") - except Exception,e: - logger.log("nodemanager: Retry Failed. (%r); Waiting.."%e) - time.sleep(iperiod) - logger.log("nodemanager: Authentication Succeeded!") - - - while True: - # Main nodemanager Loop - logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod,irandom)) - GetSlivers(config, plc) - delay=iperiod + random.randrange(0,irandom) - logger.log('nodemanager: mainloop - sleeping for %d s'%delay) - time.sleep(delay) - except: logger.log_exc("nodemanager: failed in run") - + other_pid = tools.pid_file() + if other_pid != None: + print """There might be another instance of the node manager running as pid %d. 
If this is not the case, please remove the pid file %s""" % (other_pid, tools.PID_FILE) + return + except OSError, err: + print "Warning while writing PID file:", err + + # load modules + self.loaded_modules = [] + for module in self.modules: + try: + m = __import__(module) + m.start(self.options, config) + self.loaded_modules.append(m) + except ImportError, err: + print "Warning while loading module %s:" % module, err + + # sort on priority (lower first) + def sort_module_priority (m1,m2): + return getattr(m1,'priority',NodeManager.default_priority) - getattr(m2,'priority',NodeManager.default_priority) + self.loaded_modules.sort(sort_module_priority) + + logger.log('ordered modules:') + for module in self.loaded_modules: + logger.log ('%s: %s'%(getattr(module,'priority',NodeManager.default_priority),module.__name__)) + + # Load /etc/planetlab/session + if os.path.exists(self.options.session): + session = file(self.options.session).read().strip() + else: + session = None + + + # get random periods + iperiod=int(self.options.period) + irandom=int(self.options.random) + + # Initialize XML-RPC client + plc = PLCAPI(config.plc_api_uri, config.cacert, session, timeout=iperiod/2) + + #check auth + logger.log("nodemanager: Checking Auth.") + while plc.check_authentication() != True: + try: + plc.update_session() + logger.log("nodemanager: Authentication Failure. Retrying") + except Exception,e: + logger.log("nodemanager: Retry Failed. (%r); Waiting.."%e) + time.sleep(iperiod) + logger.log("nodemanager: Authentication Succeeded!") + + + while True: + # Main nodemanager Loop + logger.log('nodemanager: mainloop - calling GetSlivers - period=%d random=%d'%(iperiod,irandom)) + self.GetSlivers(config, plc) + delay=iperiod + random.randrange(0,irandom) + logger.log('nodemanager: mainloop - sleeping for %d s'%delay) + time.sleep(delay) + except: logger.log_exc("nodemanager: failed in run") +def run(): + logger.log("======================================== Entering nodemanager.py "+NodeManager.id) + NodeManager().run() + if __name__ == '__main__': - logger.log("======================================== Entering nodemanager.py "+id) run() else: # This is for debugging purposes. 
Open a copy of Python and import nm diff --git a/plugins/codemux.py b/plugins/codemux.py index 14f2b4b..ef8634f 100644 --- a/plugins/codemux.py +++ b/plugins/codemux.py @@ -6,7 +6,6 @@ import logger import os import vserver -from sets import Set from config import Config CODEMUXCONF="/etc/codemux/codemux.conf" @@ -73,7 +72,7 @@ def GetSlivers(data, config, plc = None): pass # Remove slices from conf that no longer have the attribute - for deadslice in Set(slicesinconf.keys()) - Set(codemuxslices.keys()): + for deadslice in set(slicesinconf.keys()) - set(codemuxslices.keys()): # XXX Hack for root slice if deadslice != "root": logger.log("codemux: Removing %s" % deadslice) diff --git a/plugins/reservation.py b/plugins/reservation.py new file mode 100644 index 0000000..21e3e55 --- /dev/null +++ b/plugins/reservation.py @@ -0,0 +1,36 @@ +# $Id$ +# $URL$ +# +# NodeManager plugin - first step of handling omf_controlled slices + +""" +Overwrites the 'resctl' tag of slivers controlled by OMF so sm.py does the right thing +""" + +import logger + +priority = 45 +# this instructs nodemanager that we want to use the latest known data when the plc link is down +persistent_data = True + +def start(options, conf): + logger.log("reservation: plugin starting up...") + +def GetSlivers(data, conf = None, plc = None): + + if 'reservation_policy' not in data: + logger.log_missing_data("reservation.GetSlivers",'reservation_policy') + return + reservation_policy=data['reservation_policy'] + + if 'leases' not in data: + logger.log_missing_data("reservation.GetSlivers",'leases') + return + + if reservation_policy in ['lease_or_idle','lease_or_shared']: + logger.log( 'reservation.GetSlivers - scaffolding...') + elif reservation_policy == 'none': + return + else: + logger.log("reservation: ignoring -- unexpected value for reservation_policy %r"%reservation_policy) + return diff --git a/plugins/vsys.py b/plugins/vsys.py index 5c9b753..1315371 100644 --- a/plugins/vsys.py +++ b/plugins/vsys.py @@ -5,7 +5,6 @@ import logger import os -from sets import Set VSYSCONF="/etc/vsys.conf" VSYSBKEND="/vsys" @@ -71,7 +70,7 @@ def touchAcls(): acls.append(file.replace(".acl", "")) else: scripts.append(file) - for new in (Set(scripts) - Set(acls)): + for new in (set(scripts) - set(acls)): logger.log("vsys: Found new script %s. Writing empty acl." % new) f = open("%s/%s.acl" %(VSYSBKEND, new), "w") f.write("\n") @@ -90,7 +89,7 @@ def writeAcls(currentscripts, oldscripts): # then dicts are different. for (acl, oldslivers) in oldscripts.iteritems(): if (len(oldslivers) != len(currentscripts[acl])) or \ - (len(Set(oldslivers) - Set(currentscripts[acl])) != 0): + (len(set(oldslivers) - set(currentscripts[acl])) != 0): _restartvsys = True logger.log("vsys: Updating %s.acl w/ slices %s" % (acl, currentscripts[acl])) f = open("%s/%s.acl" % (VSYSBKEND, acl), "w") @@ -123,7 +122,7 @@ def writeConf(slivers, oldslivers): # and the non intersection of both arrays has length 0, # then the arrays are identical. if (len(slivers) != len(oldslivers)) or \ - (len(Set(oldslivers) - Set(slivers)) != 0): + (len(set(oldslivers) - set(slivers)) != 0): logger.log("vsys: Updating %s" % VSYSCONF) f = open(VSYSCONF,"w") for sliver in slivers: diff --git a/plugins/vsys_privs.py b/plugins/vsys_privs.py index 1a3e4b2..c3b632b 100755 --- a/plugins/vsys_privs.py +++ b/plugins/vsys_privs.py @@ -9,7 +9,6 @@ node for the reference of vsys scripts. import logger import os -from sets import Set VSYS_PRIV_DIR = "/etc/planetlab/vsys-attributes" -- 2.43.0
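
For plugin authors, the headline change above is the `persistent_data` hook: a plugin that sets it is handed the last successfully fetched GetSlivers data (re-read from /var/lib/nodemanager/getslivers.pickle) rather than `{}` when the PLC call fails. The sketch below shows a plugin opting in; it is modeled on plugins/reservation.py from this patch, and the module name, priority value, and log strings are illustrative only, not part of the patch.

```python
# example_cache_aware.py -- illustrative plugin, not shipped by this patch.
# Drop it under /usr/share/NodeManager/plugins/ for nodemanager to load it.

import logger

# run order among modules: lower runs first, 100 is the nodemanager default
priority = 50

# opt in: when GetSlivers() cannot be fetched from PLC, nodemanager keeps a
# pickled copy of the last successful fetch and offers it to modules that
# set this flag, instead of the empty dict
persistent_data = True

def start(options, conf):
    logger.log("example_cache_aware: plugin starting up...")

def GetSlivers(data, config=None, plc=None):
    # 'slivers' may be missing if there is no cached copy yet either
    if 'slivers' not in data:
        logger.log_missing_data("example_cache_aware.GetSlivers", 'slivers')
        return
    for sliver in data['slivers']:
        logger.verbose("example_cache_aware: handling sliver %s" % sliver['name'])
```

On the core side, NodeManager.GetSlivers() falls back to loadSlivers() when the PLC call raises, and uses getattr(module, 'persistent_data', False) to pick, per module, between the fresh data and that cached copy (module_data).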