import copy
import threading
-import bwlimitlxc as bwlimit
+import plnode.bwlimit as bwlimit
+
import logger
import tools
import database
if (runningrates['maxrate'] != new_maxrate) or \
(runningrates['minrate'] != self.MinRate * 1000) or \
(runningrates['maxexemptrate'] != new_maxi2rate) or \
- (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
+ ('minexemptrate' in runningrates and runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
(runningrates['share'] != self.Share):
# Apply parameters
bwlimit.set(xid = self.xid, dev = dev_default,
period, \
default_MaxRate, \
default_Maxi2Rate, \
- default_MaxKByte,\
- default_Maxi2KByte,\
+ default_MaxKByte, \
+ default_Maxi2KByte, \
default_Share, \
dev_default
if default_MaxRate == -1:
default_MaxRate = 1000000
+ # xxx $Id$
+ # With svn we used to have a trick to detect upgrades of this file;
+ # that went away with the move to git, with no noticeable effect on operations though.
try:
f = open(DB_FILE, "r+")
logger.verbose("bwmon: Loading %s" % DB_FILE)
# Since root is required for sanity, it's not in the API/plc database, so pass {}
# to use defaults.
- if root_xid not in slices.keys():
+ if root_xid not in list(slices.keys()):
slices[root_xid] = Slice(root_xid, "root", {})
slices[root_xid].reset({}, {})
# Used by bwlimit. pass {} since there is no rspec (like above).
- if default_xid not in slices.keys():
+ if default_xid not in list(slices.keys()):
slices[default_xid] = Slice(default_xid, "default", {})
slices[default_xid].reset({}, {})
live = {}
# Get running slivers that should be on this node (from plc). {xid: name}
# db keys on name, bwmon keys on xid. db doesn't have xid either.
- for plcSliver in nmdbcopy.keys():
+ for plcSliver in list(nmdbcopy.keys()):
live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
- logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
- logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())
+ logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__())
+ logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__())
# Get actual running values from tc.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())
+ logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__())
# The dat file has HTBs for slices, but the HTBs aren't running
nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
# Reset tc counts.
for nohtbslice in nohtbslices:
- if live.has_key(nohtbslice):
+ if nohtbslice in live:
slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
else:
logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
for slicenodat in slicesnodat:
# But slice is running
- if live.has_key(slicenodat):
+ if slicenodat in live:
# init the slice. which means start accounting over since kernel
# htb was already there.
slices[slicenodat] = Slice(slicenodat,
for newslice in newslicesxids:
# Delegated slices don't have xids (which are uids) since they haven't been
# instantiated yet.
- if newslice != None and live[newslice].has_key('_rspec') == True:
+ if newslice != None and ('_rspec' in live[newslice]) == True:
# Check to see if we recently deleted this slice.
- if live[newslice]['name'] not in deaddb.keys():
+ if live[newslice]['name'] not in list(deaddb.keys()):
logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
# _rspec is the computed rspec: NM retrieved data from PLC, computed loans
# and made a dict of computed values.
if deadxid == root_xid or deadxid == default_xid:
continue
logger.log("bwmon: removing dead slice %s " % deadxid)
- if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
+ if deadxid in slices and deadxid in kernelhtbs:
# add slice (by name) to deaddb
logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
del slices[deadxid]
- if kernelhtbs.has_key(deadxid):
+ if deadxid in kernelhtbs:
logger.verbose("bwmon: Removing HTB for %s." % deadxid)
bwlimit.off(deadxid, dev = dev_default)
# Clean up deaddb
- for deadslice in deaddb.keys():
+ for deadslice in list(deaddb.keys()):
if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
logger.log("bwmon: Removing dead slice %s from dat." \
% deaddb[deadslice]['slice'].name)
# Get actual running values from tc since we've added and removed buckets.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())
+ logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__())
# Update all byte limits on all slices
- for (xid, slice) in slices.iteritems():
+ for (xid, slice) in slices.items():
# Monitor only the specified slices
if xid == root_xid or xid == default_xid: continue
if names and name not in names:
# Update byte counts
slice.update(kernelhtbs[xid], live[xid]['_rspec'])
- logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE))
+ logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE))
f = open(DB_FILE, "w")
pickle.dump((version, slices, deaddb), f)
f.close()
kernelhtbs = gethtbs(root_xid, default_xid)
if len(kernelhtbs):
logger.log("bwmon: Disabling all running HTBs.")
- for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)
+ for htb in list(kernelhtbs.keys()): bwlimit.off(htb, dev = dev_default)
lock = threading.Event()