X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=bwmon.py;h=bc9191f37734c95181e34159e0f315102446d854;hb=f1c6c10681e05ee283340297167fd1b19f166d5c;hp=85bc2a9976578df62d5af28f44b293fff0ba4e4f;hpb=ebf549a8b15266472e94a7cbdfac60d5fc782411;p=nodemanager.git

diff --git a/bwmon.py b/bwmon.py
index 85bc2a9..bc9191f 100644
--- a/bwmon.py
+++ b/bwmon.py
@@ -1,21 +1,20 @@
 #!/usr/bin/python
 #
-# Average bandwidth monitoring script. Run periodically via cron(8) to
+# Average bandwidth monitoring script. Run periodically via NM db.sync to
 # enforce a soft limit on daily bandwidth usage for each slice. If a
-# slice is found to have exceeded its daily bandwidth usage when the
-# script is run, its instantaneous rate will be capped at the desired
-# average rate. Thus, in the worst case, a slice will only be able to
-# send a little more than twice its average daily limit.
+# slice is found to have transmitted 80% of its daily byte limit,
+# its instantaneous rate will be capped at the bytes remaining in the limit
+# over the time remaining in the recording period.
 #
 # Two separate limits are enforced, one for destinations exempt from
-# the node bandwidth cap, and the other for all other destinations.
+# the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
 #
 # Mark Huang 
 # Andy Bavier 
 # Faiyaz Ahmed 
-# Copyright (C) 2004-2006 The Trustees of Princeton University
+# Copyright (C) 2004-2008 The Trustees of Princeton University
 #
-# $Id: bwmon.py,v 1.22 2007/06/26 18:00:46 faiyaza Exp $
+# $Id$
 #
 
 import os
@@ -33,26 +32,22 @@ import database
 
 from sets import Set
 
+# Defaults
+debug = False
+verbose = False
+datafile = "/var/lib/misc/bwmon.dat"
+
 try:
     sys.path.append("/etc/planetlab")
     from plc_config import *
 except:
-    logger.log("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found")
-    PLC_NAME = "PlanetLab"
-    PLC_SLICE_PREFIX = "pl"
-    PLC_MAIL_SUPPORT_ADDRESS = "support@planet-lab.org"
-    PLC_MAIL_SLICE_ADDRESS = "SLICE@slices.planet-lab.org"
+    logger.log("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found", 2)
+    logger.log("bwmon: Running in DEBUG mode. Logging to file and not emailing.", 1)
 
 # Constants
 seconds_per_day = 24 * 60 * 60
 bits_per_byte = 8
 
-# Defaults
-debug = False
-verbose = False
-datafile = "/var/lib/misc/bwmon.dat"
-#nm = None
-
 # Burst to line rate (or node cap). Set by NM. in KBit/s
 default_MaxRate = int(bwlimit.get_bwcap() / 1000)
 default_Maxi2Rate = int(bwlimit.bwmax / 1000)
@@ -62,10 +57,8 @@ default_Mini2Rate = 0
 # 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
 # 5.4 Gbyte per day max allowed transfered per recording period
 default_MaxKByte = 5662310
-default_ThreshKByte = int(.8 * default_MaxKByte)
 # 16.4 Gbyte per day max allowed transfered per recording period to I2
 default_Maxi2KByte = 17196646
-default_Threshi2KByte = int(.8 * default_Maxi2KByte)
 
 # Default share quanta
 default_Share = 1
@@ -130,13 +123,14 @@ def format_period(seconds):
     return "%.0f seconds" % seconds
 
 def slicemail(slice, subject, body):
+    '''
+    Front end to sendmail. Sends email to slice alias with given subject and body.
+    '''
+
     sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % PLC_MAIL_SUPPORT_ADDRESS, "w")
 
-    # PLC has a separate list for pl_mom messages
-    if PLC_MAIL_SUPPORT_ADDRESS == "support@planet-lab.org":
-        to = ["pl-mom@planet-lab.org"]
-    else:
-        to = [PLC_MAIL_SUPPORT_ADDRESS]
+    # Parsed from MyPLC config
+    to = [PLC_MAIL_MOM_LIST_ADDRESS]
 
     if slice is not None and slice != "root":
         to.append(PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))
@@ -173,13 +167,15 @@ class Slice:
     time - beginning of recording period in UNIX seconds
     bytes - low bandwidth bytes transmitted at the beginning of the recording period
     i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
-    ByteMax - total volume of data allowed
-    ByteThresh - After thresh, cap node to (maxbyte - bytes)/(time left in period)
-    ExemptByteMax - Same as above, but for i2.
-    ExemptByteThresh - i2 ByteThresh
-    maxrate - max_rate slice attribute.
-    maxexemptrate - max_exempt_rate slice attribute.
-    self.emailed = did we email during this recording period
+    MaxKByte - total volume of data allowed
+    ThreshKByte - After thresh, cap node to (MaxKByte - bytes)/(time left in period)
+    Maxi2KByte - same as MaxKByte, but for i2
+    Threshi2KByte - same as ThreshKByte, but for i2
+    MaxRate - max_rate slice attribute.
+    Maxi2Rate - max_exempt_rate slice attribute.
+    Share - Used by Sirius to loan min rates
+    Sharei2 - Used by Sirius to loan min rates for i2
+    self.emailed - did slice receive email during this recording period
 
     """
 
@@ -194,12 +190,13 @@ class Slice:
         self.Maxi2Rate = default_Maxi2Rate
         self.Mini2Rate = default_Mini2Rate
         self.MaxKByte = default_MaxKByte
-        self.ThreshKByte = default_ThreshKByte
+        self.ThreshKByte = (.8 * self.MaxKByte)
         self.Maxi2KByte = default_Maxi2KByte
-        self.Threshi2KByte = default_Threshi2KByte
+        self.Threshi2KByte = (.8 * self.Maxi2KByte)
         self.Share = default_Share
         self.Sharei2 = default_Share
         self.emailed = False
+        self.capped = False
 
         self.updateSliceAttributes(rspec)
         bwlimit.set(xid = self.xid,
@@ -213,7 +210,10 @@ class Slice:
         return self.name
 
     def updateSliceAttributes(self, rspec):
-        # Get attributes
+        '''
+        Use the rspec from NM's GetSlivers call to PLC to populate the slice
+        object. Also do some sanity checking.
+        '''
 
         # Sanity check plus policy decision for MinRate:
         # Minrate cant be greater than 25% of MaxRate or NodeCap.
@@ -249,12 +249,12 @@ class Slice:
             self.Maxi2KByte = Maxi2KByte
             logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))
 
-        ThreshKByte = int(rspec.get('net_thresh_kbyte', default_ThreshKByte))
+        ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
         if ThreshKByte != self.ThreshKByte:
             self.ThreshKByte = ThreshKByte
             logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))
 
-        Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', default_Threshi2KByte))
+        Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
         if Threshi2KByte != self.Threshi2KByte:
             self.Threshi2KByte = Threshi2KByte
             logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))
@@ -288,14 +288,16 @@ class Slice:
         # Reset email
         self.emailed = False
 
-        maxrate = self.MaxRate * 1000 
-        maxi2rate = self.Maxi2Rate * 1000 
+        # Reset flag
+        self.capped = False
 
         # Reset rates.
+        maxrate = self.MaxRate * 1000
+        maxi2rate = self.Maxi2Rate * 1000
         if (self.MaxRate != runningmaxrate) or (self.Maxi2Rate != runningmaxi2rate):
             logger.log("bwmon: %s reset to %s/%s" % \
                   (self.name,
                    bwlimit.format_tc_rate(maxrate),
-                   bwlimit.format_tc_rate(maxi2rate)))
+                   bwlimit.format_tc_rate(maxi2rate)), 1)
             bwlimit.set(xid = self.xid, 
                 minrate = self.MinRate * 1000, 
                 maxrate = self.MaxRate * 1000, 
@@ -303,92 +305,121 @@ class Slice:
                 minexemptrate = self.Mini2Rate * 1000,
                 share = self.Share)
 
-    def update(self, runningmaxrate, runningmaxi2rate, usedbytes, usedi2bytes, rspec):
+    def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
         """
-        Update byte counts and check if byte limits have been
-        exceeded. 
+        Notify the slice it's being capped.
         """
-
-        # Query Node Manager for max rate overrides
-        self.updateSliceAttributes(rspec)    
-
-        # Prepare message parameters from the template
+        # Prepare message parameters from the template
         message = ""
         params = {'slice': self.name, 'hostname': socket.gethostname(),
                   'since': time.asctime(time.gmtime(self.time)) + " GMT",
                   'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
                   'date': time.asctime(time.gmtime()) + " GMT",
-                  'period': format_period(period)} 
+                  'period': format_period(period)}
+
+        if new_maxrate != self.MaxRate:
+            # Format template parameters for low bandwidth message
+            params['class'] = "low bandwidth"
+            params['bytes'] = format_bytes(usedbytes - self.bytes)
+            params['limit'] = format_bytes(self.MaxKByte * 1024)
+            params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
+
+            # Cap low bandwidth burst rate
+            message += template % params
+            logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
+
+        if new_maxexemptrate != self.Maxi2Rate:
+            # Format template parameters for high bandwidth message
+            params['class'] = "high bandwidth"
+            params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
+            params['limit'] = format_bytes(self.Maxi2KByte * 1024)
+            params['new_maxexemptrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
+
+            message += template % params
+            logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxexemptrate)s/s " % params)
+
+        # Notify slice
+        if message and self.emailed == False:
+            subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
+            if debug:
+                logger.log("bwmon: "+ subject)
+                logger.log("bwmon: "+ message + (footer % params))
+            else:
+                self.emailed = True
+                slicemail(self.name, subject, message + (footer % params))
+
+
+    def update(self, runningmaxrate, runningmaxi2rate, usedbytes, usedi2bytes, runningshare, rspec):
+        """
+        Update byte counts and check if byte thresholds have been
+        exceeded. If exceeded, cap the rate to the bytes remaining in the
+        limit over the time remaining in the recording period.
+        Recalculated every time this module runs.
+        """
+
+        # Query Node Manager for max rate overrides
+        self.updateSliceAttributes(rspec)
+
         # Check shares for Sirius loans.
+        if runningshare != self.Share:
+            logger.log("bwmon: Updating share to %s" % self.Share)
+            bwlimit.set(xid = self.xid, 
+                minrate = self.MinRate * 1000, 
+                maxrate = self.MaxRate * 1000, 
+                maxexemptrate = self.Maxi2Rate * 1000,
+                minexemptrate = self.Mini2Rate * 1000,
+                share = self.Share)
+
+        # Prepare message parameters from the template
+        #message = ""
+        #params = {'slice': self.name, 'hostname': socket.gethostname(),
+        #          'since': time.asctime(time.gmtime(self.time)) + " GMT",
+        #          'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
+        #          'date': time.asctime(time.gmtime()) + " GMT",
+        #          'period': format_period(period)} 
+
+        # Check limits.
         if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
-            if verbose:
-                logger.log("bwmon: %s over thresh %s" \
-                  % (self.name, format_bytes(self.ThreshKByte * 1024)))
             sum = self.bytes + (self.ThreshKByte * 1024)
             maxbyte = self.MaxKByte * 1024
             bytesused = usedbytes - self.bytes
             timeused = int(time.time() - self.time)
+            # Calculate new rate.
             new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
+            # Never go under MinRate
            if new_maxrate < (self.MinRate * 1000):
                 new_maxrate = self.MinRate * 1000
+            # State information.  I'm capped.
+            self.capped = True
         else:
-            new_maxrate = self.MaxRate * 1000 
-
-        # Format template parameters for low bandwidth message
-        params['class'] = "low bandwidth"
-        params['bytes'] = format_bytes(usedbytes - self.bytes)
-        params['limit'] = format_bytes(self.MaxKByte * 1024)
-        params['thresh'] = format_bytes(self.ThreshKByte * 1024)
-        params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
-
-        if verbose:
-            logger.log("bwmon: %(slice)s %(class)s " \
-                  "%(bytes)s of %(limit)s max %(thresh)s thresh (%(new_maxrate)s/s maxrate)" % \
-                  params)
-
-        # Cap low bandwidth burst rate
-        if new_maxrate != runningmaxrate:
-            message += template % params
-            logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
-
+            # Sanity Check
+            new_maxrate = self.MaxRate * 1000
+            self.capped = False
+
+        if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
             maxi2byte = self.Maxi2KByte * 1024
             i2bytesused = usedi2bytes - self.i2bytes
             timeused = int(time.time() - self.time)
+            # Calculate new rate.
             new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
+            # Never go under Mini2Rate
             if new_maxi2rate < (self.Mini2Rate * 1000):
                 new_maxi2rate = self.Mini2Rate * 1000
+            # State information.  I'm capped.
+            self.capped = True
         else:
+            # Sanity
             new_maxi2rate = self.Maxi2Rate * 1000
-
-        # Format template parameters for high bandwidth message
-        params['class'] = "high bandwidth"
-        params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
-        params['limit'] = format_bytes(self.Maxi2KByte * 1024)
-        params['new_maxexemptrate'] = bwlimit.format_tc_rate(new_maxi2rate)
-
-        if verbose:
-            logger.log("bwmon: %(slice)s %(class)s " \
-                  "%(bytes)s of %(limit)s (%(new_maxrate)s/s maxrate)" % params)
-
-        # Cap high bandwidth burst rate
-        if new_maxi2rate != runningmaxi2rate:
-            message += template % params
-            logger.log("bwmon: %(slice)s %(class)s capped at %(new_maxexemptrate)s/s" % params)
+            self.capped = False
 
         # Apply parameters
         if new_maxrate != runningmaxrate or new_maxi2rate != runningmaxi2rate:
             bwlimit.set(xid = self.xid, maxrate = new_maxrate, maxexemptrate = new_maxi2rate)
 
         # Notify slice
-        if message and self.emailed == False:
-            subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
-            if debug:
-                logger.log("bwmon: "+ subject)
-                logger.log("bwmon: "+ message + (footer % params))
-            else:
-                self.emailed = True
-                slicemail(self.name, subject, message + (footer % params))
+        if self.capped == True and self.emailed == False:
+            self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
+
 
 def gethtbs(root_xid, default_xid):
     """
@@ -404,14 +435,12 @@ def gethtbs(root_xid, default_xid):
 
         name = bwlimit.get_slice(xid)
-
-
         if (name is None) \
         and (xid != root_xid) \
         and (xid != default_xid):
             # Orphaned (not associated with a slice) class
             name = "%d?" % xid
-            logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
+            logger.log("bwmon: Found orphaned HTB %s. Removing." %name, 1)
             bwlimit.off(xid)
 
         livehtbs[xid] = {'share': share,
@@ -454,16 +483,17 @@ def sync(nmdbcopy):
 
     try:
         f = open(datafile, "r+")
-        logger.log("bwmon: Loading %s" % datafile)
-        (version, slices) = pickle.load(f)
+        logger.log("bwmon: Loading %s" % datafile, 2)
+        (version, slices, deaddb) = pickle.load(f)
         f.close()
         # Check version of data file
-        if version != "$Id: bwmon.py,v 1.22 2007/06/26 18:00:46 faiyaza Exp $":
+        if version != "$Id$":
             logger.log("bwmon: Not using old version '%s' data file %s" % (version, datafile))
             raise Exception
     except Exception:
-        version = "$Id: bwmon.py,v 1.22 2007/06/26 18:00:46 faiyaza Exp $"
+        version = "$Id$"
         slices = {}
+        deaddb = {}
 
     # Get/set special slice IDs
     root_xid = bwlimit.get_xid("root")
@@ -486,54 +516,108 @@ def sync(nmdbcopy):
     for plcSliver in nmdbcopy.keys():
         live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
 
-    logger.log("bwmon: Found %s instantiated slices" % live.keys().__len__())
-    logger.log("bwmon: Found %s slices in dat file" % slices.values().__len__())
+    logger.log("bwmon: Found %s instantiated slices" % live.keys().__len__(), 2)
+    logger.log("bwmon: Found %s slices in dat file" % slices.values().__len__(), 2)
 
     # Get actual running values from tc.
     # Update slice totals and bandwidth. {xid: {values}}
-    livehtbs = gethtbs(root_xid, default_xid)
-    logger.log("bwmon: Found %s running HTBs" % livehtbs.keys().__len__())
+    kernelhtbs = gethtbs(root_xid, default_xid)
+    logger.log("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__(), 2)
+
+    # The dat file has HTBs for slices, but the HTBs aren't running
+    nohtbslices = Set(slices.keys()) - Set(kernelhtbs.keys())
+    logger.log( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__(), 2)
+    # Reset tc counts.
+    for nohtbslice in nohtbslices:
+        if live.has_key(nohtbslice):
+            slices[nohtbslice].reset( 0, 0, 0, 0, live[nohtbslice]['_rspec'] )
+        else:
+            logger.log("bwmon: Removing abandoned slice %s from dat." % nohtbslice)
+            del slices[nohtbslice]
+
+    # The dat file doesn't have an HTB for the slice, but the kernel does
+    slicesnodat = Set(kernelhtbs.keys()) - Set(slices.keys())
+    logger.log( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__(), 2)
+    for slicenodat in slicesnodat:
+        # But slice is running
+        if live.has_key(slicenodat):
+            # Init the slice, which means start accounting over, since the
+            # kernel HTB was already there.
+            slices[slicenodat] = Slice(slicenodat,
                live[slicenodat]['name'],
                live[slicenodat]['_rspec'])
 
     # Get new slices.
-    # live.xids - runing(slices).xids = new.xids
-    #newslicesxids = Set(live.keys()) - Set(slices.keys())
-    newslicesxids = Set(live.keys()) - Set(livehtbs.keys())
-    logger.log("bwmon: Found %s new slices" % newslicesxids.__len__())
-
-    # Incase we upgraded nm and need to keep track of already running htbs 
-    norecxids =  Set(livehtbs.keys()) - Set(slices.keys())
-    logger.log("bwmon: Found %s slices that have htbs but not in dat." % norecxids.__len__())
-    newslicesxids.update(norecxids)
-
+    # Slices in GetSlivers but not running HTBs
+    newslicesxids = Set(live.keys()) - Set(kernelhtbs.keys())
+    logger.log("bwmon: Found %s new slices" % newslicesxids.__len__(), 2)
+
     # Setup new slices
     for newslice in newslicesxids:
         # Delegated slices dont have xids (which are uids) since they haven't been
         # instantiated yet.
         if newslice != None and live[newslice].has_key('_rspec') == True:
-            logger.log("bwmon: New Slice %s" % live[newslice]['name'])
-            # _rspec is the computed rspec:  NM retrieved data from PLC, computed loans
-            # and made a dict of computed values.
-            slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
-            slices[newslice].reset(0, 0, 0, 0, live[newslice]['_rspec'])
+            # Check to see if we recently deleted this slice.
+            if live[newslice]['name'] not in deaddb.keys():
+                logger.log( "bwmon: New Slice %s" % live[newslice]['name'] )
+                # _rspec is the computed rspec:  NM retrieved data from PLC, computed loans
+                # and made a dict of computed values.
+                slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
+                slices[newslice].reset( 0, 0, 0, 0, live[newslice]['_rspec'] )
+            # Double check that the dead slice's time in deaddb is within the 24hr recording period.
+            elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
+                deadslice = deaddb[live[newslice]['name']]
+                logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
+                slices[newslice] = deadslice['slice']
+                slices[newslice].xid = newslice
+                # Start the HTB
+                slices[newslice].reset(deadslice['slice'].MaxRate,
+                    deadslice['slice'].Maxi2Rate,
+                    deadslice['htb']['usedbytes'],
+                    deadslice['htb']['usedi2bytes'],
+                    live[newslice]['_rspec'])
+                # Bring up to date
+                slices[newslice].update(deadslice['slice'].MaxRate,
+                    deadslice['slice'].Maxi2Rate,
+                    deadslice['htb']['usedbytes'],
+                    deadslice['htb']['usedi2bytes'],
+                    deadslice['htb']['share'],
+                    live[newslice]['_rspec'])
+                # Since the slice has been reinitialized, remove it from the dead database.
+                del deaddb[deadslice['slice'].name]
         else:
-            logger.log("bwmon Slice %s doesn't have xid.  Must be delegated.  Skipping." % live[newslice]['name'])
-
-    # Delete dead slices.
-    # First delete dead slices that exist in the pickle file, but
-    # aren't instantiated by PLC.
-    dead = Set(slices.keys()) - Set(live.keys())
-    logger.log("bwmon: Found %s dead slices" % (dead.__len__() - 2))
-    for xid in dead:
-        if xid == root_xid or xid == default_xid:
+            logger.log("bwmon: Slice %s doesn't have xid.  Skipping." % live[newslice]['name'])
+
+    # Move dead slices that exist in the pickle file, but
+    # aren't instantiated by PLC, into the dead dict until
+    # the recording period is over. This is to avoid the case where a slice is dynamically created
+    # and destroyed, then recreated to get around byte limits.
+    deadxids = Set(slices.keys()) - Set(live.keys())
+    logger.log("bwmon: Found %s dead slices" % (deadxids.__len__() - 2), 2)
+    for deadxid in deadxids:
+        if deadxid == root_xid or deadxid == default_xid:
            continue
-        logger.log("bwmon: removing dead slice %s " % xid)
-        if slices.has_key(xid): del slices[xid]
-        if livehtbs.has_key(xid): bwlimit.off(xid)
+        logger.log("bwmon: Removing dead slice %s" % deadxid)
+        if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
+            # Add slice (by name) to deaddb
+            logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
+            deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
+            del slices[deadxid]
+        if kernelhtbs.has_key(deadxid):
+            logger.log("bwmon: Removing HTB for %s." % deadxid, 2)
+            bwlimit.off(deadxid)
+
+    # Clean up deaddb
+    for deadslice in deaddb.keys():
+        if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
+            logger.log("bwmon: Removing dead slice %s from dat." \
+                        % deaddb[deadslice]['slice'].name)
+            del deaddb[deadslice]
 
     # Get actual running values from tc since we've added and removed buckets.
     # Update slice totals and bandwidth. {xid: {values}}
-    livehtbs = gethtbs(root_xid, default_xid)
-    logger.log("bwmon: now %s running HTBs" % livehtbs.keys().__len__())
+    kernelhtbs = gethtbs(root_xid, default_xid)
+    logger.log("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__(), 2)
 
     for (xid, slice) in slices.iteritems():
         # Monitor only the specified slices
@@ -542,38 +626,39 @@ def sync(nmdbcopy):
             continue
 
         if (time.time() >= (slice.time + period)) or \
-           (livehtbs[xid]['usedbytes'] < slice.bytes) or \
-           (livehtbs[xid]['usedi2bytes'] < slice.i2bytes):
+           (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
+           (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
            # Reset to defaults every 24 hours or if it appears
            # that the byte counters have overflowed (or, more
            # likely, the node was restarted or the HTB buckets
           # were re-initialized).
- slice.reset(livehtbs[xid]['maxrate'], \ - livehtbs[xid]['maxexemptrate'], \ - livehtbs[xid]['usedbytes'], \ - livehtbs[xid]['usedi2bytes'], \ + slice.reset(kernelhtbs[xid]['maxrate'], \ + kernelhtbs[xid]['maxexemptrate'], \ + kernelhtbs[xid]['usedbytes'], \ + kernelhtbs[xid]['usedi2bytes'], \ live[xid]['_rspec']) else: - if debug: logger.log("bwmon: Updating slice %s" % slice.name) + logger.log("bwmon: Updating slice %s" % slice.name, 2) # Update byte counts - slice.update(livehtbs[xid]['maxrate'], \ - livehtbs[xid]['maxexemptrate'], \ - livehtbs[xid]['usedbytes'], \ - livehtbs[xid]['usedi2bytes'], \ + slice.update(kernelhtbs[xid]['maxrate'], \ + kernelhtbs[xid]['maxexemptrate'], \ + kernelhtbs[xid]['usedbytes'], \ + kernelhtbs[xid]['usedi2bytes'], \ + kernelhtbs[xid]['share'], live[xid]['_rspec']) - - logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),datafile)) + + logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),datafile), 2) f = open(datafile, "w") - pickle.dump((version, slices), f) + pickle.dump((version, slices, deaddb), f) f.close() lock = threading.Event() def run(): """When run as a thread, wait for event, lock db, deep copy it, release it, run bwmon.GetSlivers(), then go back to waiting.""" - if debug: logger.log("bwmon: Thread started") + logger.log("bwmon: Thread started", 2) while True: lock.wait() - if debug: logger.log("bwmon: Event received. Running.") + logger.log("bwmon: Event received. Running.", 2) database.db_lock.acquire() nmdbcopy = copy.deepcopy(database.db) database.db_lock.release()
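
For reference, the capping arithmetic that the new Slice.update() applies can be reproduced standalone. The following is a minimal sketch, not part of the patch: recalc_cap is a hypothetical helper, and the example plugs in this file's defaults (24-hour period, 5662310 KByte budget, 80% threshold) plus an assumed 8 kbit/s MinRate.

    # Standalone sketch of the byte-budget capping math in Slice.update().
    # recalc_cap is a hypothetical name; values mirror this file's defaults.

    period = 24 * 60 * 60          # recording period, in seconds

    def recalc_cap(max_kbyte, thresh_kbyte, min_rate_kbit,
                   bytes_at_start, used_bytes, time_at_start, now):
        """Return the new cap in bits/s, or None if still under threshold."""
        if used_bytes < bytes_at_start + thresh_kbyte * 1024:
            return None                       # under the 80% threshold
        bytes_used = used_bytes - bytes_at_start
        time_used = int(now - time_at_start)
        remaining_bytes = max_kbyte * 1024 - bytes_used
        # Spread the remaining byte budget over the rest of the period.
        new_maxrate = int((remaining_bytes * 8) / (period - time_used))
        # Never go under the slice's minimum rate (kbit/s -> bits/s).
        return max(new_maxrate, min_rate_kbit * 1000)

    # A slice that burns 80% of the 5.4 GB budget in the first 6 hours
    # is capped to roughly 143 kbit/s for the remaining 18 hours.
    print(recalc_cap(5662310, int(.8 * 5662310), 8,
                     0, int(.8 * 5662310) * 1024, 0, 6 * 60 * 60))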
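The other behavioral change in sync() is the deaddb: when a slice disappears, its accounting state is parked under its name, and a slice recreated within the same recording period inherits the old byte counts, so delete-and-recreate cannot evade the limit. A minimal sketch under the same caveat (hypothetical names; the patch stores full Slice objects plus kernel HTB counters, not plain dicts):

    # Sketch of the deaddb bookkeeping added to sync().
    import time

    period = 24 * 60 * 60
    deaddb = {}                  # slice name -> {'slice': ..., 'htb': ...}

    def park_dead_slice(name, slice_state, htb_counters):
        # Slice vanished from PLC: keep its totals until its period ends.
        deaddb[name] = {'slice': slice_state, 'htb': htb_counters}

    def revive_or_new(name, now=None):
        # Return the parked state if the slice reappears inside its old
        # recording period; otherwise None, i.e. start accounting from zero.
        now = now or time.time()
        dead = deaddb.get(name)
        if dead and now <= dead['slice']['time'] + period:
            del deaddb[name]
            return dead
        return None

    # Example: park a slice one hour into its period, then recreate it.
    park_dead_slice("pl_test", {'time': time.time() - 3600}, {'usedbytes': 123456})
    print(revive_or_new("pl_test"))    # parked state, counters preserved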