X-Git-Url: http://git.onelab.eu/?p=nodemanager.git;a=blobdiff_plain;f=bwmon.py;h=691d4b3e10d8f9b1b6c169c3295199d9fc37fbbe;hp=476ef6daebc151780aed229660670be885d80a92;hb=48a73b18fd7daed13c645c1adeddb57b560e7a2d;hpb=7b8fc390afd0349706c45c3ae970770cdf9dceae

diff --git a/bwmon.py b/bwmon.py
index 476ef6d..691d4b3 100644
--- a/bwmon.py
+++ b/bwmon.py
@@ -520,35 +520,35 @@ def sync(nmdbcopy):
 
     # Since root is required for sanity, its not in the API/plc database, so pass {}
     # to use defaults.
-    if root_xid not in slices.keys():
+    if root_xid not in list(slices.keys()):
         slices[root_xid] = Slice(root_xid, "root", {})
         slices[root_xid].reset({}, {})
 
     # Used by bwlimit. pass {} since there is no rspec (like above).
-    if default_xid not in slices.keys():
+    if default_xid not in list(slices.keys()):
         slices[default_xid] = Slice(default_xid, "default", {})
         slices[default_xid].reset({}, {})
 
     live = {}
     # Get running slivers that should be on this node (from plc). {xid: name}
     # db keys on name, bwmon keys on xid. db doesnt have xid either.
-    for plcSliver in nmdbcopy.keys():
+    for plcSliver in list(nmdbcopy.keys()):
         live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
 
-    logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
-    logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())
+    logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__())
+    logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__())
 
     # Get actual running values from tc.
     # Update slice totals and bandwidth. {xid: {values}}
     kernelhtbs = gethtbs(root_xid, default_xid)
-    logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())
+    logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__())
 
     # The dat file has HTBs for slices, but the HTBs aren't running
     nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
     logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
     # Reset tc counts.
     for nohtbslice in nohtbslices:
-        if live.has_key(nohtbslice):
+        if nohtbslice in live:
             slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
         else:
             logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
@@ -559,7 +559,7 @@ def sync(nmdbcopy):
     logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
     for slicenodat in slicesnodat:
         # But slice is running
-        if live.has_key(slicenodat):
+        if slicenodat in live:
             # init the slice. which means start accounting over since kernel
             # htb was already there.
             slices[slicenodat] = Slice(slicenodat,
@@ -575,9 +575,9 @@ def sync(nmdbcopy):
     for newslice in newslicesxids:
         # Delegated slices dont have xids (which are uids) since they haven't been
         # instantiated yet.
-        if newslice != None and live[newslice].has_key('_rspec') == True:
+        if newslice != None and ('_rspec' in live[newslice]) == True:
             # Check to see if we recently deleted this slice.
-            if live[newslice]['name'] not in deaddb.keys():
+            if live[newslice]['name'] not in list(deaddb.keys()):
                 logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
                 # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
                 # and made a dict of computed values.
@@ -615,17 +615,17 @@ def sync(nmdbcopy):
         if deadxid == root_xid or deadxid == default_xid:
             continue
         logger.log("bwmon: removing dead slice %s " % deadxid)
-        if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
+        if deadxid in slices and deadxid in kernelhtbs:
             # add slice (by name) to deaddb
             logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
             deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
             del slices[deadxid]
-        if kernelhtbs.has_key(deadxid):
+        if deadxid in kernelhtbs:
            logger.verbose("bwmon: Removing HTB for %s." % deadxid)
            bwlimit.off(deadxid, dev = dev_default)
 
     # Clean up deaddb
-    for deadslice in deaddb.keys():
+    for deadslice in list(deaddb.keys()):
         if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
             logger.log("bwmon: Removing dead slice %s from dat." \
                         % deaddb[deadslice]['slice'].name)
@@ -634,10 +634,10 @@ def sync(nmdbcopy):
     # Get actual running values from tc since we've added and removed buckets.
     # Update slice totals and bandwidth. {xid: {values}}
     kernelhtbs = gethtbs(root_xid, default_xid)
-    logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())
+    logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__())
 
     # Update all byte limites on all slices
-    for (xid, slice) in slices.iteritems():
+    for (xid, slice) in slices.items():
         # Monitor only the specified slices
         if xid == root_xid or xid == default_xid: continue
         if names and name not in names:
@@ -656,7 +656,7 @@ def sync(nmdbcopy):
             # Update byte counts
             slice.update(kernelhtbs[xid], live[xid]['_rspec'])
 
-    logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(), DB_FILE))
+    logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE))
     f = open(DB_FILE, "w")
     pickle.dump((version, slices, deaddb), f)
     f.close()
@@ -687,7 +687,7 @@ def allOff():
     kernelhtbs = gethtbs(root_xid, default_xid)
     if len(kernelhtbs):
         logger.log("bwmon: Disabling all running HTBs.")
-        for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)
+        for htb in list(kernelhtbs.keys()): bwlimit.off(htb, dev = dev_default)
 
 
 lock = threading.Event()
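
The hunks above all apply the same Python 2 to Python 3 dictionary-API migration: has_key() becomes the in operator, iteritems() becomes items(), and keys()/values() are wrapped in list() where a real list is still needed (for example when the dict is mutated while being iterated). The standalone sketch below illustrates those idioms; the dictionaries and values in it are made-up placeholders, not the real bwmon state.

    # Sketch of the dict-API changes performed by the diff above.
    # 'slices' and 'live' here are throwaway examples, not bwmon data.
    slices = {1: "root", 2: "default"}
    live = {1: {"_rspec": {}}, 3: {"_rspec": {}}}

    # Python 2: slices.has_key(xid)   ->  Python 3: xid in slices
    assert 1 in slices

    # Python 2: slices.iteritems()    ->  Python 3: slices.items()
    for xid, name in slices.items():
        print(xid, name)

    # Python 3 keys()/values() return views; wrap them in list() only when
    # a real list is needed, e.g. when deleting entries while iterating.
    for xid in list(slices.keys()):
        if xid not in live:
            del slices[xid]

    # len(d) is the idiomatic spelling of the list(d.keys()).__len__()
    # pattern kept by the converted logging calls.
    print("bwmon: found %s slices" % len(slices))

Note that the conversion keeps the list(d.keys()).__len__() logging calls verbatim; len(d) would be the shorter equivalent, but the diff restricts itself to the mechanical Python 3 changes.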