3 # Average bandwidth monitoring script. Run periodically via NM db.sync to
4 # enforce a soft limit on daily bandwidth usage for each slice. If a
5 # slice is found to have transmitted 80% of its daily byte limit usage,
# its instantaneous rate will be capped at the bytes remaining in the limit
7 # over the time remaining in the recording period.
9 # Two separate limits are enforced, one for destinations exempt from
10 # the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
12 # Mark Huang <mlhuang@cs.princeton.edu>
13 # Andy Bavier <acb@cs.princeton.edu>
14 # Faiyaz Ahmed <faiyaza@cs.princeton.edu>
15 # Copyright (C) 2004-2008 The Trustees of Princeton University
30 from config import Config
# Set DEBUG to True if you don't want to send emails
# Set ENABLE to False to setup buckets, but not limit.

# On-disk pickle persisting per-slice accounting state between runs.
DB_FILE = "/var/lib/nodemanager/bwmon.pickle"

# Length of the recording (accounting) period, in seconds.
seconds_per_day = 24 * 60 * 60

# Interface the HTB buckets live on: the node's default interface.
dev_default = tools.get_default_if()

# Burst to line rate (or node cap). Set by NM. in KBit/s
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
# 5.4 Gbyte per day max allowed transferred per recording period
# 5.4 Gbytes per day is approx 512k/s for 24hrs (approx because original math was wrong
# but its better to keep a higher byte total and keep people happy than correct
# the problem and piss people off.
# default_MaxKByte = 5662310

# llp wants to double these, so we use the following
# 1mbit * 24hrs * 60mins * 60secs = bits/day
# 1000000 * 24 * 60 * 60 / (1024 * 8)
default_MaxKByte = 10546875

# 16.4 Gbyte per day max allowed transferred per recording period to I2
# default_Maxi2KByte = 17196646

# 3Mb/s for 24hrs a day (30.17 gigs)
default_Maxi2KByte = 31640625

# Default share quanta
# NOTE(review): default_Share is referenced throughout this file but its
# definition is missing from this view — confirm it is defined near here.
period = 1 * seconds_per_day
79 The slice %(slice)s has transmitted more than %(bytes)s from
80 %(hostname)s to %(class)s destinations
83 Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
86 Please reduce the average %(class)s transmission rate
87 of the slice to %(limit)s per %(period)s.
93 %(date)s %(hostname)s bwcap %(slice)s
def format_bytes(bytes, si = True):
    """
    Format a byte count as a human-readable string.

    bytes -- number of bytes to format
    si    -- if True, use SI units (1 kB = 1000 bytes); otherwise use
             binary units (1 KiB = 1024 bytes)
    """
    if si:
        kilo = 1000.
    else:
        # Officially, a kibibyte
        kilo = 1024.

    if bytes >= (kilo * kilo * kilo):
        return "%.1f GB" % (bytes / (kilo * kilo * kilo))
    elif bytes >= (kilo * kilo):
        # Fixed: the MB threshold was hard-coded to 1000000, which was
        # inconsistent with the kilo base used for the division below
        # (values just under 1024**2 printed as "<1.0 MB" when si=False).
        return "%.1f MB" % (bytes / (kilo * kilo))
    elif bytes >= kilo:
        return "%.1f KB" % (bytes / kilo)
    else:
        return "%.0f bytes" % bytes
def format_period(seconds):
    """
    Format a time period in seconds as a human-readable string.

    Exact day/hour lengths return the bare unit name ("day", "hour");
    other values are scaled to the largest sensible unit.
    """
    if seconds == (24 * 60 * 60):
        # Fixed: the equality branches had no return values in the
        # broken conditional chain; restored the unit-name returns.
        return "day"
    elif seconds == (60 * 60):
        return "hour"
    elif seconds > (24 * 60 * 60):
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    elif seconds > (60 * 60):
        return "%.1f hours" % (seconds / 60. / 60.)
    elif seconds > 60:
        return "%.1f minutes" % (seconds / 60.)
    else:
        return "%.0f seconds" % seconds
def slicemail(slice, subject, body):
    """
    Front end to sendmail. Sends email to slice alias with given subject and body.

    NOTE(review): lines are missing from this view (the message-body write
    and pipe close); comments below cover only the visible statements.
    """
    # Pipe the message to the local sendmail binary: -t takes recipients
    # from the message headers, -N never suppresses delivery status
    # notifications, -f sets the envelope sender.
    sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % config.PLC_MAIL_SUPPORT_ADDRESS, "w")
    # Parsed from MyPLC config
    to = [config.PLC_MAIL_MOM_LIST_ADDRESS]
    # Also mail the slice's support alias, unless this is the root pseudo-slice.
    if slice is not None and slice != "root":
        to.append(config.PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))
    # Header fields substituted into the RFC-2822 message below.
    header = {'from': "%s Support <%s>" % (config.PLC_NAME,
                                           config.PLC_MAIL_SUPPORT_ADDRESS),
              'version': sys.version.split(" ")[0],
    Content-type: text/plain
    X-Mailer: Python/%(version)s
    """.lstrip() % header)
172 Stores the last recorded bandwidth parameters of a slice.
174 xid - slice context/VServer ID
176 time - beginning of recording period in UNIX seconds
177 bytes - low bandwidth bytes transmitted at the beginning of the recording period
178 i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
179 MaxKByte - total volume of data allowed
180 ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period)
181 Maxi2KByte - same as MaxKByte, but for i2
Threshi2KByte - same as ThreshKByte, but for i2
183 MaxRate - max_rate slice attribute.
184 Maxi2Rate - max_exempt_rate slice attribute.
185 Share - Used by Sirius to loan min rates
186 Sharei2 - Used by Sirius to loan min rates for i2
187 self.emailed - did slice recv email during this recording period
def __init__(self, xid, name, rspec):
    """
    Initialize a slice's limits to the node-wide defaults, apply any
    per-slice overrides from its rspec, and install the HTB caps.

    NOTE(review): some assignments (e.g. self.xid/self.name/baseline
    counters) and the end of the bwlimit.set() call are missing from
    this view.
    """
    # Start from node defaults; rates are in kbit/s, volumes in KByte.
    self.MaxRate = default_MaxRate
    self.MinRate = bwlimit.bwmin / 1000
    self.Maxi2Rate = default_Maxi2Rate
    self.Mini2Rate = bwlimit.bwmin / 1000
    self.MaxKByte = default_MaxKByte
    # Cap/notify threshold: 80% of the daily byte allowance.
    self.ThreshKByte = int(.8 * self.MaxKByte)
    self.Maxi2KByte = default_Maxi2KByte
    self.Threshi2KByte = int(.8 * self.Maxi2KByte)
    self.Share = default_Share
    self.Sharei2 = default_Share
    # Let slice tags from PLC override the defaults set above.
    self.updateSliceTags(rspec)
    # Install the limits in the kernel; bwlimit takes bit/s, hence * 1000.
    bwlimit.set(xid = self.xid, dev = dev_default,
        minrate = self.MinRate * 1000,
        maxrate = self.MaxRate * 1000,
        maxexemptrate = self.Maxi2Rate * 1000,
        minexemptrate = self.Mini2Rate * 1000,
def updateSliceTags(self, rspec):
    """
    Use slice tags (rspec) from GetSlivers to populate this slice
    object, with some sanity checking.  Each limit falls back to the
    node-wide default when the tag is absent; every change is logged.
    """
    # Sanity check plus policy decision for MinRate:
    # MinRate can't be greater than 25% of MaxRate or NodeCap.
    MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin / 1000))
    if MinRate > int(.25 * default_MaxRate):
        MinRate = int(.25 * default_MaxRate)
    if MinRate != self.MinRate:
        self.MinRate = MinRate
        logger.log("bwmon: Updating %s: Min Rate = %s" % (self.name, self.MinRate))

    MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
    if MaxRate != self.MaxRate:
        self.MaxRate = MaxRate
        logger.log("bwmon: Updating %s: Max Rate = %s" % (self.name, self.MaxRate))

    Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000))
    if Mini2Rate != self.Mini2Rate:
        self.Mini2Rate = Mini2Rate
        logger.log("bwmon: Updating %s: Min i2 Rate = %s" % (self.name, self.Mini2Rate))

    Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
    if Maxi2Rate != self.Maxi2Rate:
        self.Maxi2Rate = Maxi2Rate
        logger.log("bwmon: Updating %s: Max i2 Rate = %s" % (self.name, self.Maxi2Rate))

    MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
    if MaxKByte != self.MaxKByte:
        self.MaxKByte = MaxKByte
        logger.log("bwmon: Updating %s: Max KByte lim = %s" % (self.name, self.MaxKByte))

    Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
    if Maxi2KByte != self.Maxi2KByte:
        self.Maxi2KByte = Maxi2KByte
        logger.log("bwmon: Updating %s: Max i2 KByte = %s" % (self.name, self.Maxi2KByte))

    # Thresholds default to 80% of the (possibly just-updated) byte limits.
    ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
    if ThreshKByte != self.ThreshKByte:
        self.ThreshKByte = ThreshKByte
        logger.log("bwmon: Updating %s: Thresh KByte = %s" % (self.name, self.ThreshKByte))

    Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
    if Threshi2KByte != self.Threshi2KByte:
        self.Threshi2KByte = Threshi2KByte
        logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" % (self.name, self.Threshi2KByte))

    Share = int(rspec.get('net_share', default_Share))
    if Share != self.Share:
        # Fixed: the new Share value was logged but never stored.
        self.Share = Share
        logger.log("bwmon: Updating %s: Net Share = %s" % (self.name, self.Share))

    Sharei2 = int(rspec.get('net_i2_share', default_Share))
    if Sharei2 != self.Sharei2:
        self.Sharei2 = Sharei2
        # Fixed: the log line read the non-existent attribute self.i2Share,
        # which raised AttributeError whenever the i2 share changed.
        logger.log("bwmon: Updating %s: Net i2 Share = %s" % (self.name, self.Sharei2))
def reset(self, runningrates, rspec):
    """
    Begin a new recording period. Remove caps by restoring limits
    to their default values.

    runningrates -- dict of rates/counters currently installed in the
                    kernel (as returned by gethtbs for this xid)
    rspec        -- computed slice tags from GetSlivers
    """
    # Cache share for later comparison
    self.Share = runningrates.get('share', 1)

    # Query Node Manager for max rate overrides
    self.updateSliceTags(rspec)

    # Reset baseline time
    self.time = time.time()

    # Reset baseline byte counts
    self.bytes = runningrates.get('usedbytes', 0)
    self.i2bytes = runningrates.get('usedi2bytes', 0)

    # Default (uncapped) rates, converted from kbit/s to bit/s.
    maxrate = self.MaxRate * 1000
    minrate = self.MinRate * 1000
    maxi2rate = self.Maxi2Rate * 1000
    mini2rate = self.Mini2Rate * 1000

    # Only touch tc when something actually differs from what is running.
    # Fixed: the minrate comparison previously read the 'maxrate' key,
    # so a changed minrate alone never triggered a re-set.
    if (maxrate != runningrates.get('maxrate', 0)) or \
       (minrate != runningrates.get('minrate', 0)) or \
       (maxi2rate != runningrates.get('maxexemptrate', 0)) or \
       (mini2rate != runningrates.get('minexemptrate', 0)) or \
       (self.Share != runningrates.get('share', 0)):
        logger.log("bwmon: %s reset to %s/%s" % \
            (self.name,
             bwlimit.format_tc_rate(maxrate),
             bwlimit.format_tc_rate(maxi2rate)))
        # NOTE(review): the trailing share argument is reconstructed from a
        # line missing in this view — confirm against bwlimit.set's signature.
        bwlimit.set(xid = self.xid, dev = dev_default,
            minrate = self.MinRate * 1000,
            maxrate = self.MaxRate * 1000,
            maxexemptrate = self.Maxi2Rate * 1000,
            minexemptrate = self.Mini2Rate * 1000,
            share = self.Share)
def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
    """
    Notify the slice it's being capped.

    new_maxrate       -- newly computed low-bandwidth cap, in bit/s
    new_maxexemptrate -- newly computed exempt (i2) cap, in bit/s
    usedbytes         -- current low-bandwidth byte counter
    usedi2bytes       -- current exempt byte counter

    NOTE(review): the initialization of 'message' and the statement
    marking the slice as emailed are missing from this view.
    """
    # Prepare message parameters from the template
    params = {'slice': self.name, 'hostname': socket.gethostname(),
              'since': time.asctime(time.gmtime(self.time)) + " GMT",
              'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
              'date': time.asctime(time.gmtime()) + " GMT",
              'period': format_period(period)}

    # A computed rate below the configured maximum means this class was capped.
    if new_maxrate != (self.MaxRate * 1000):
        # Format template parameters for low bandwidth message
        params['class'] = "low bandwidth"
        params['bytes'] = format_bytes(usedbytes - self.bytes)
        params['limit'] = format_bytes(self.MaxKByte * 1024)
        params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
        # Cap low bandwidth burst rate
        message += template % params
        logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)

    # Same again for the exempt (i2) class.
    if new_maxexemptrate != (self.Maxi2Rate * 1000):
        # Format template parameters for high bandwidth message
        params['class'] = "high bandwidth"
        params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
        params['limit'] = format_bytes(self.Maxi2KByte * 1024)
        params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
        message += template % params
        logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)

    # Only mail the slice once per recording period.
    if self.emailed == False:
        subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
        # NOTE(review): the DEBUG branch selecting log-only vs. email
        # appears to be missing from this view.
        logger.log("bwmon: "+ subject)
        logger.log("bwmon: "+ message + (footer % params))
        logger.log("bwmon: Emailing %s" % self.name)
        slicemail(self.name, subject, message + (footer % params))
def update(self, runningrates, rspec):
    """
    Update byte counts and check if byte thresholds have been
    exceeded. If exceeded, cap to remaining bytes in limit over the time
    remaining in the period.  Recalculated every time the module runs.
    """
    # cache share for later comparison
    # NOTE(review): this writes self.Share INTO runningrates, which makes
    # the 'share' comparison below always equal — confirm this is intended
    # (reset() does the opposite: reads share out of runningrates).
    runningrates['share'] = self.Share

    # Query Node Manager for max rate overrides
    self.updateSliceTags(rspec)

    # Current kernel counters for this slice's HTB classes.
    usedbytes = runningrates['usedbytes']
    usedi2bytes = runningrates['usedi2bytes']

    # Low-bandwidth class: past the threshold for this recording period?
    if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
        sum = self.bytes + (self.ThreshKByte * 1024)
        maxbyte = self.MaxKByte * 1024
        bytesused = usedbytes - self.bytes
        timeused = int(time.time() - self.time)
        # Calculate new rate, in bit/s: bytes remaining over time remaining.
        new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
        # Never go under MinRate
        if new_maxrate < (self.MinRate * 1000):
            new_maxrate = self.MinRate * 1000
        # State information. I'm capped.
        # NOTE(review): the capped-flag assignment and the 'else' branch
        # restoring the default cap are missing from this view.
        new_maxrate = self.MaxRate * 1000

    # Exempt (i2) class: same computation against the i2 threshold.
    if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
        maxi2byte = self.Maxi2KByte * 1024
        i2bytesused = usedi2bytes - self.i2bytes
        timeused = int(time.time() - self.time)
        new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
        # Never go under MinRate
        if new_maxi2rate < (self.Mini2Rate * 1000):
            new_maxi2rate = self.Mini2Rate * 1000
        # State information. I'm capped.
        # NOTE(review): capped-flag assignment and 'else' branch missing
        # from this view.
        new_maxi2rate = self.Maxi2Rate * 1000

    # Check running values against newly calculated values so as not to run tc
    # unnecessarily.
    if (runningrates['maxrate'] != new_maxrate) or \
       (runningrates['minrate'] != self.MinRate * 1000) or \
       (runningrates['maxexemptrate'] != new_maxi2rate) or \
       (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
       (runningrates['share'] != self.Share):
        # Apply the (possibly reduced) caps in the kernel.
        bwlimit.set(xid = self.xid, dev = dev_default,
            minrate = self.MinRate * 1000,
            maxrate = new_maxrate,
            minexemptrate = self.Mini2Rate * 1000,
            maxexemptrate = new_maxi2rate,

    # Notify the slice when a cap was applied during this pass.
    if self.capped == True:
        self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
def gethtbs(root_xid, default_xid):
    """
    Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
    Turn off HTBs without names.

    NOTE(review): the result-dict initialization, the head of the tuple
    unpack, the orphan-check condition and the return statement are
    missing from this view.
    """
    # One params tuple per HTB class currently installed on the device.
    for params in bwlimit.get(dev = dev_default):
        minexemptrate, maxexemptrate,
        usedbytes, usedi2bytes) = params

        # Map the class id back to a slice name, if any.
        name = bwlimit.get_slice(xid)
        and (xid != root_xid) \
        and (xid != default_xid):
            # Orphaned (not associated with a slice) class
            logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
            bwlimit.off(xid, dev = dev_default)

        livehtbs[xid] = {'share': share,
                         'maxexemptrate': maxexemptrate,
                         'minexemptrate': minexemptrate,
                         'usedbytes': usedbytes,
                         'usedi2bytes': usedi2bytes}
"""
Syncs tc, db, and bwmon.pickle.
Then, starts new slices, kills old ones, and updates byte accounts for each running slice.
Sends emails and caps those that went over their limit.
"""
# NOTE(review): the enclosing 'def sync(nmdbcopy):' line and a number of
# statements (several 'else'/'continue' lines, 'live = {}', the version
# reset, file closes) are missing from this view; comments below annotate
# only the visible code.

# In case the limits have changed.
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)

# Incase default isn't set yet.
if default_MaxRate == -1:
    default_MaxRate = 1000000

# Load persisted accounting state from the previous run.
f = open(DB_FILE, "r+")
logger.verbose("bwmon: Loading %s" % DB_FILE)
(version, slices, deaddb) = pickle.load(f)
# Check version of data file
if version != "$Id$":
    logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE))

# Get/set special slice IDs
root_xid = bwlimit.get_xid("root")
default_xid = bwlimit.get_xid("default")

# Since root is required for sanity, its not in the API/plc database, so pass {}
if root_xid not in slices.keys():
    slices[root_xid] = Slice(root_xid, "root", {})
    slices[root_xid].reset({}, {})

# Used by bwlimit. pass {} since there is no rspec (like above).
if default_xid not in slices.keys():
    slices[default_xid] = Slice(default_xid, "default", {})
    slices[default_xid].reset({}, {})

# Get running slivers that should be on this node (from plc). {xid: name}
# db keys on name, bwmon keys on xid. db doesnt have xid either.
for plcSliver in nmdbcopy.keys():
    live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]

logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())

# Get actual running values from tc.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())

# The dat file has HTBs for slices, but the HTBs aren't running
nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
for nohtbslice in nohtbslices:
    if live.has_key(nohtbslice):
        slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
        # NOTE(review): the 'else' introducing this removal path is
        # missing from this view — removal should apply to slices that
        # are NOT live.
        logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
        del slices[nohtbslice]

# The dat file doesnt have HTB for the slice but kern has HTB
slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
for slicenodat in slicesnodat:
    # But slice is running
    if live.has_key(slicenodat):
        # init the slice. which means start accounting over since kernel
        # htb was already there.
        slices[slicenodat] = Slice(slicenodat,
            live[slicenodat]['name'],
            live[slicenodat]['_rspec'])

# Slices in GetSlivers but not running HTBs
newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())

for newslice in newslicesxids:
    # Delegated slices dont have xids (which are uids) since they haven't been
    # instantiated yet.
    if newslice != None and live[newslice].has_key('_rspec') == True:
        # Check to see if we recently deleted this slice.
        if live[newslice]['name'] not in deaddb.keys():
            logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
            # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
            # and made a dict of computed values.
            slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
            slices[newslice].reset( {}, live[newslice]['_rspec'] )
        # Double check time for dead slice in deaddb is within 24hr recording period.
        elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
            deadslice = deaddb[live[newslice]['name']]
            logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
            slices[newslice] = deadslice['slice']
            slices[newslice].xid = newslice
            # Resume accounting where the deleted slice left off.
            newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
                       "minrate": deadslice['slice'].MinRate * 1000,
                       "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
                       # NOTE(review): '* 1000' on a byte counter looks
                       # suspicious (the other counters are not scaled) —
                       # confirm against bwlimit's accounting units.
                       "usedbytes": deadslice['htb']['usedbytes'] * 1000,
                       "usedi2bytes": deadslice['htb']['usedi2bytes'],
                       "share":deadslice['htb']['share']}
            slices[newslice].reset(newvals, live[newslice]['_rspec'])
            slices[newslice].update(newvals, live[newslice]['_rspec'])
            # Since the slice has been reinitialed, remove from dead database.
            del deaddb[deadslice['slice'].name]
    # NOTE(review): the 'else' guarding this log line (slice without an
    # xid) is missing from this view.
    logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name'])

# Move dead slices that exist in the pickle file, but
# aren't instantiated by PLC into the dead dict until
# recording period is over. This is to avoid the case where a slice is dynamically created
# and destroyed then recreated to get around byte limits.
deadxids = set(slices.keys()) - set(live.keys())
logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
for deadxid in deadxids:
    if deadxid == root_xid or deadxid == default_xid:
        # NOTE(review): the 'continue' skipping the special slices is
        # missing from this view.
    logger.log("bwmon: removing dead slice %s " % deadxid)
    if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
        # add slice (by name) to deaddb
        logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
        deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
    if kernelhtbs.has_key(deadxid):
        logger.verbose("bwmon: Removing HTB for %s." % deadxid)
        bwlimit.off(deadxid, dev = dev_default)

# Expire deaddb entries whose recording period has lapsed.
for deadslice in deaddb.keys():
    if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
        logger.log("bwmon: Removing dead slice %s from dat." \
            % deaddb[deadslice]['slice'].name)
        del deaddb[deadslice]

# Get actual running values from tc since we've added and removed buckets.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())

# Update all byte limites on all slices
for (xid, slice) in slices.iteritems():
    # Monitor only the specified slices
    if xid == root_xid or xid == default_xid: continue
    # NOTE(review): 'names' and 'name' are not defined anywhere in this
    # view, and the statement under this filter is missing.
    if names and name not in names:
    if (time.time() >= (slice.time + period)) or \
       (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
       (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
        # Reset to defaults every 24 hours or if it appears
        # that the byte counters have overflowed (or, more
        # likely, the node was restarted or the HTB buckets
        # were re-initialized).
        slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
        # NOTE(review): the 'else' separating reset from the normal
        # per-pass update is missing from this view.
        logger.verbose("bwmon: Updating slice %s" % slice.name)
        slice.update(kernelhtbs[xid], live[xid]['_rspec'])

# Persist state for the next run.
logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE))
f = open(DB_FILE, "w")
pickle.dump((version, slices, deaddb), f)
# doesnt use generic default interface because this runs as its own thread.
# changing the config variable will not have an effect since GetSlivers: pass
def getDefaults(nmdbcopy):
    """
    Get defaults from default slice's slice attributes.

    NOTE(review): the body of the -1 check and the return statements are
    missing from this view.
    """
    # Look up the PLC-wide default slice (PLC_SLICE_PREFIX + "_default")
    # in the copied NM database.
    dfltslice = nmdbcopy.get(Config().PLC_SLICE_PREFIX+"_default")
    # NOTE(review): -1 appears to mean "no node-wide cap configured"
    # (cf. the default_MaxRate == -1 check in sync()) — confirm.
    if dfltslice['rspec']['net_max_rate'] == -1:
# NOTE(review): the enclosing 'def' line for this function is missing
# from this view.
"""
Turn off all slice HTBs
"""
# Get/set special slice IDs
root_xid = bwlimit.get_xid("root")
default_xid = bwlimit.get_xid("default")
# Everything gethtbs reports (it already excludes/removes orphans).
kernelhtbs = gethtbs(root_xid, default_xid)
logger.log("bwmon: Disabling all running HTBs.")
for htb in kernelhtbs.keys(): bwlimit.off(htb, dev = dev_default)
# Event used to wake the bwmon thread from GetSlivers.
lock = threading.Event()

# NOTE(review): the 'def run():' line, the while/wait loop structure and
# the call to sync() are missing from this view.
"""
When run as a thread, wait for event, lock db, deep copy it, release it,
run bwmon.GetSlivers(), then go back to waiting.
"""
logger.verbose("bwmon: Thread started")
logger.verbose("bwmon: Event received. Running.")
# Snapshot the NM database under its lock so the (slow) sync work can
# proceed on a consistent copy without holding the lock.
database.db_lock.acquire()
nmdbcopy = copy.deepcopy(database.db)
database.db_lock.release()
if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
    # class show to check if net:InitNodeLimit:bwlimit.init has run.
else: logger.log("bwmon: BW limits DISABLED.")
except: logger.log_exc("bwmon failed")
711 tools.as_daemon_thread(run)
713 def GetSlivers(*args):
714 logger.verbose ("bwmon: triggering dummy GetSlivers")