#!/usr/bin/python
#
+# $Id$
+# $URL$
+#
# Average bandwidth monitoring script. Run periodically via NM db.sync to
# enforce a soft limit on daily bandwidth usage for each slice. If a
# slice is found to have transmitted 80% of its daily byte limit usage,
# Faiyaz Ahmed <faiyaza@cs.princeton.edu>
# Copyright (C) 2004-2008 The Trustees of Princeton University
#
-# $Id$
-#
import os
import sys
import time
import pickle
import socket
-import logger
import copy
import threading
-import tools
+import logger
+import tools
import bwlimit
import database
from sets import Set
+# NM plugin scheduling priority — presumably controls ordering among plugins;
+# TODO confirm against the NM plugin loader.
+priority = 20
+
# Defaults
# Set DEBUG to True if you don't want to send emails
DEBUG = False
seconds_per_day = 24 * 60 * 60
bits_per_byte = 8
+# NOTE(review): captured once at import time from the node's default
+# interface — confirm this is stable for the daemon's lifetime.
+dev_default = tools.get_default_if()
# Burst to line rate (or node cap). Set by NM. in KBit/s
-default_MaxRate = int(bwlimit.get_bwcap() / 1000)
+default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
# 5.4 Gbyte per day max allowed transfered per recording period
-default_MaxKByte = 5662310
+# 5.4 GB per day is approximately 512 kB/s sustained for 24 hrs ("approximately"
+# because the original math was wrong, but it's better to keep a higher byte
+# total and keep people happy than to correct the problem and upset them).
+# default_MaxKByte = 5662310
+
+# -- 6/1/09
+# llp wants to double these, so we use the following
+# 1mbit * 24hrs * 60mins * 60secs = bits/day
+# 1000000 * 24 * 60 * 60 / (1024 * 8)
+default_MaxKByte = 10546875
+
# 16.4 Gbyte per day max allowed transfered per recording period to I2
-default_Maxi2KByte = 17196646
+# default_Maxi2KByte = 17196646
+
+# -- 6/1/09
+# 3 Mb/s for 24 hrs a day (30.17 GB): 3000000 * 24 * 60 * 60 / (1024 * 8)
+default_Maxi2KByte = 31640625
+
# Default share quanta
default_Share = 1
self.emailed = False
self.capped = False
- self.updateSliceAttributes(rspec)
+ self.updateSliceTags(rspec)
bwlimit.set(xid = self.xid,
minrate = self.MinRate * 1000,
maxrate = self.MaxRate * 1000,
    def __repr__(self):
        # Identify the slice by its human-readable name in logs and debugging.
        return self.name
- def updateSliceAttributes(self, rspec):
+ def updateSliceTags(self, rspec):
'''
Use respects from GetSlivers to PLC to populate slice object. Also
do some sanity checking.
to their default values.
"""
# Cache share for later comparison
- runningrates.get('share', 1) = self.Share
+ self.Share = runningrates.get('share', 1)
# Query Node Manager for max rate overrides
- self.updateSliceAttributes(rspec)
+ self.updateSliceTags(rspec)
# Reset baseline time
self.time = time.time()
(self.name,
bwlimit.format_tc_rate(maxrate),
bwlimit.format_tc_rate(maxi2rate)), 1)
- bwlimit.set(xid = self.xid,
+ bwlimit.set(xid = self.xid, dev = dev_default,
minrate = self.MinRate * 1000,
maxrate = self.MaxRate * 1000,
maxexemptrate = self.Maxi2Rate * 1000,
runningrates['share'] = self.Share
# Query Node Manager for max rate overrides
- self.updateSliceAttributes(rspec)
+ self.updateSliceTags(rspec)
usedbytes = runningrates['usedbytes']
usedi2bytes = runningrates['usedi2bytes']
if newslice != None and live[newslice].has_key('_rspec') == True:
# Check to see if we recently deleted this slice.
if live[newslice]['name'] not in deaddb.keys():
- logger.log( "bwmon: New Slice %s" % live[newslice]['name'] )
+ logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
# _rspec is the computed rspec: NM retrieved data from PLC, computed loans
# and made a dict of computed values.
slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
pickle.dump((version, slices, deaddb), f)
f.close()
+# Doesn't use the generic default-interface handling because this runs as its
+# own thread; changing the config variable would have no effect here, since
+# this module's GetSlivers() below is a no-op (pass).
+def getDefaults(nmdbcopy):
+    '''
+    Get defaults from default slice's slice attributes.
+    Returns True when bandwidth monitoring should proceed; returns False
+    (after shutting off all HTBs) when the default slice's net_max_rate
+    is -1, i.e. limits are disabled node-wide.
+    '''
+    status = True
+    # default slice: keyed by "<PLC_SLICE_PREFIX>_default"; PLC_SLICE_PREFIX
+    # is defined elsewhere in this module.
+    dfltslice = nmdbcopy.get(PLC_SLICE_PREFIX+"_default")
+    if dfltslice:
+        if dfltslice['rspec']['net_max_rate'] == -1:
+            allOff()
+            status = False
+    return status
+
+
+def allOff():
+    """
+    Turn off all slice HTBs (kernel traffic-shaping classes) currently running.
+    """
+    # Get/set special slice IDs
+    root_xid = bwlimit.get_xid("root")
+    default_xid = bwlimit.get_xid("default")
+    # gethtbs() is defined elsewhere in this module; presumably returns the
+    # HTBs installed in the kernel, keyed by xid — TODO confirm.
+    kernelhtbs = gethtbs(root_xid, default_xid)
+    if len(kernelhtbs):
+        logger.log("bwmon: Disabling all running HTBs.")
+        for htb in kernelhtbs.keys(): bwlimit.off(htb)
+
+
# Event used by GetSlivers()/NM to wake this thread up for a sync pass.
lock = threading.Event()
def run():
-    """When run as a thread, wait for event, lock db, deep copy it, release it, run bwmon.GetSlivers(), then go back to waiting."""
+    """
+    When run as a thread, wait for event, lock db, deep copy it, release it,
+    run bwmon.GetSlivers(), then go back to waiting.
+    """
    logger.log("bwmon: Thread started", 2)
    while True:
        lock.wait()
+        # Deep-copy the NM database under its lock so we operate on a
+        # consistent snapshot without holding the lock during the sync.
        database.db_lock.acquire()
        nmdbcopy = copy.deepcopy(database.db)
        database.db_lock.release()
-        try: sync(nmdbcopy)
-        except: logger.log_exc()
+        try:
+            if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
+                # class show to check if net:InitNodeLimit:bwlimit.init has run.
+                sync(nmdbcopy)
+            else: logger.log("bwmon: BW limits DISABLED.")
+        except: logger.log_exc("bwmon failed")
        lock.clear()
def start(*args):
    # Launch run() as a daemon thread so bwmon never blocks NM shutdown.
    tools.as_daemon_thread(run)
def GetSlivers(*args):
+    # Dummy hook: the real sync work is driven by run() via the lock event.
+    logger.verbose ("bwmon: triggering dummy GetSlivers")
    pass