self.MinRate = MinRate
logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))
- MaxRate = int(rspec.get('net_max_rate', bwlimit.get_bwcap() / 1000))
+ MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
if MaxRate != self.MaxRate:
self.MaxRate = MaxRate
logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))
self.Mini2Rate = Mini2Rate
logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))
- Maxi2Rate = int(rspec.get('net_i2_max_rate', bwlimit.bwmax / 1000))
+ Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
if Maxi2Rate != self.Maxi2Rate:
self.Maxi2Rate = Maxi2Rate
logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))
Begin a new recording period. Remove caps by restoring limits
to their default values.
"""
-
# Query Node Manager for max rate overrides
self.updateSliceAttributes(rspec)
exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period.
Recalculate every time module runs.
"""
-
+
+ # Copy self.Min* and self.*share values for comparison later.
+ runningMinRate = self.MinRate
+ runningMini2Rate = self.Mini2Rate
+ runningshare = self.Share
+ runningsharei2 = self.Sharei2
+
# Query Node Manager for max rate overrides
self.updateSliceAttributes(rspec)
maxbyte = self.MaxKByte * 1024
bytesused = usedbytes - self.bytes
timeused = int(time.time() - self.time)
- # Calcuate new rate.
+ # Calculate new rate, in bit/s.
new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
# Never go under MinRate
if new_maxrate < (self.MinRate * 1000):
new_maxi2rate = self.Maxi2Rate * 1000
self.capped += False
- # Apply parameters
- bwlimit.set(xid = self.xid,
+ # Check running values against newly calculated values so as not to run tc
+ # unnecessarily
+ if (runningmaxrate != new_maxrate) or \
+ (runningMinRate != self.MinRate) or \
+ (runningmaxi2rate != new_maxi2rate) or \
+ (runningMini2Rate != self.Mini2Rate) or \
+ (runningshare != self.Share):
+ # Apply parameters
+ bwlimit.set(xid = self.xid,
minrate = self.MinRate * 1000,
maxrate = new_maxrate,
minexemptrate = self.Mini2Rate * 1000,
# Incase default isn't set yet.
if default_MaxRate == -1:
- default_MaxRate = 1000000
+ default_MaxRate = 10000000
try:
f = open(datafile, "r+")
pickle.dump((version, slices, deaddb), f)
f.close()
+
+def getDefaults(nmdbcopy):
+ '''
+ Get defaults from default slice's slice attributes.
+ '''
+ status = True
+ # default slice
+ dfltslice = nmdbcopy.get(PLC_SLICE_PREFIX+"_default")
+ if dfltslice:
+ if dfltslice['rspec']['net_max_rate'] == -1:
+ allOff()
+ status = False
+ return status
+
+
+def allOff():
+ """
+ Turn off all slice HTBs
+ """
+ # Get/set special slice IDs
+ root_xid = bwlimit.get_xid("root")
+ default_xid = bwlimit.get_xid("default")
+ kernelhtbs = gethtbs(root_xid, default_xid)
+ if len(kernelhtbs):
+ logger.log("bwlimit: Disabling all running HTBs.")
+ for htb in kernelhtbs.keys(): bwlimit.off(htb)
+
+
lock = threading.Event()
def run():
- """When run as a thread, wait for event, lock db, deep copy it, release it, run bwmon.GetSlivers(), then go back to waiting."""
+ """
+ When run as a thread, wait for event, lock db, deep copy it, release it,
+ run bwmon.GetSlivers(), then go back to waiting.
+ """
logger.log("bwmon: Thread started", 2)
while True:
lock.wait()
database.db_lock.acquire()
nmdbcopy = copy.deepcopy(database.db)
database.db_lock.release()
- try: sync(nmdbcopy)
+ try:
+ if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev eth0")) > 0:
+ # class show to check if net:InitNodeLimit:bwlimit.init has run.
+ sync(nmdbcopy)
+ else: logger.log("bwmon: BW limits DISABLED.")
except: logger.log_exc()
lock.clear()