6 # Average bandwidth monitoring script. Run periodically via NM db.sync to
7 # enforce a soft limit on daily bandwidth usage for each slice. If a
8 # slice is found to have transmitted 80% of its daily byte limit usage,
9 # its instantaneous rate will be capped at the bytes remaining in the limit
10 # over the time remaining in the recording period.
12 # Two separate limits are enforced, one for destinations exempt from
13 # the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
15 # Mark Huang <mlhuang@cs.princeton.edu>
16 # Andy Bavier <acb@cs.princeton.edu>
17 # Faiyaz Ahmed <faiyaza@cs.princeton.edu>
18 # Copyright (C) 2004-2008 The Trustees of Princeton University
37 # Set DEBUG to True if you don't want to send emails
39 # Set ENABLE to False to setup buckets, but not limit.
# Path of the pickled state database that survives NM restarts.
DB_FILE = "/var/lib/nodemanager/bwmon.pickle"

# Pull PLC_* configuration names (mail addresses, site name, slice
# prefix, ...) into module scope.
sys.path.append("/etc/planetlab")
from plc_config import *
# NOTE(review): the two log lines below read like the except-branch
# fallback for a missing plc_config (see the DEBUG comments in the file
# header) — confirm against the full file.
logger.verbose("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found")
logger.log("bwmon: Running in DEBUG mode. Logging to file and not emailing.")

# Constants
seconds_per_day = 24 * 60 * 60

# Interface carrying the node bandwidth cap; set by NM.
dev_default = tools.get_default_if()
# Burst to line rate (or node cap). Set by NM. in KBit/s
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
# 5.4 Gbyte per day max allowed transferred per recording period
# 5.4 Gbytes per day is approx 512k/s for 24hrs (approx because original math was wrong
# but its better to keep a higher byte total and keep people happy than correct
# the problem and piss people off.
# default_MaxKByte = 5662310

# llp wants to double these, so we use the following
# 1mbit * 24hrs * 60mins * 60secs = bits/day
# 1000000 * 24 * 60 * 60 / (1024 * 8)
default_MaxKByte = 10546875

# 16.4 Gbyte per day max allowed transferred per recording period to I2
# default_Maxi2KByte = 17196646

# 3Mb/s for 24hrs a day (30.17 gigs)
default_Maxi2KByte = 31640625

# Default share quanta

# Recording period: byte budgets above apply per this many seconds.
period = 1 * seconds_per_day
89 The slice %(slice)s has transmitted more than %(bytes)s from
90 %(hostname)s to %(class)s destinations
93 Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
96 Please reduce the average %(class)s transmission rate
97 of the slice to %(limit)s per %(period)s.
103 %(date)s %(hostname)s bwcap %(slice)s
def format_bytes(bytes, si = True):
    """
    Formats bytes into a string.

    bytes -- byte count to format
    si    -- when True use decimal units (1 KB = 1000 bytes), otherwise
             binary units (1 KiB = 1024 bytes)

    Fix: the MB/KB thresholds were hard-coded decimal (1000000 / 1000)
    even when si was False, disagreeing with the kilo-based divisors.
    Thresholds now follow the selected kilo factor consistently; the
    si=True output is unchanged.
    """
    if si:
        kilo = 1000.
    else:
        # Officially, a kibibyte
        kilo = 1024.
    if bytes >= (kilo * kilo * kilo):
        return "%.1f GB" % (bytes / (kilo * kilo * kilo))
    elif bytes >= (kilo * kilo):
        return "%.1f MB" % (bytes / (kilo * kilo))
    elif bytes >= kilo:
        return "%.1f KB" % (bytes / kilo)
    else:
        return "%.0f bytes" % bytes
def format_period(seconds):
    """
    Formats a period in seconds into a string.

    Exactly one day/hour is reported in the singular ("day", "hour") so
    the notification template reads naturally ("per day"); other values
    get a numeric quantity.

    Fix: the visible equality branches had no return statements,
    making the function fall through incorrectly; restored.
    """
    day = 24 * 60 * 60
    hour = 60 * 60
    if seconds == day:
        return "day"
    elif seconds == hour:
        return "hour"
    elif seconds > day:
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    elif seconds > hour:
        return "%.1f hours" % (seconds / 60. / 60.)
    elif seconds > 60:
        return "%.1f minutes" % (seconds / 60.)
    else:
        return "%.0f seconds" % seconds
def slicemail(slice, subject, body):
    """
    Front end to sendmail. Sends email to slice alias with given subject and body.

    slice   -- slice name; None or "root" mails only the support list
    subject -- subject line for the message
    body    -- plain-text message body
    """
    # Hand the message to the local MTA. -N never suppresses delivery
    # status notifications; -f sets the envelope sender; -t takes the
    # recipient list from the To: header written below.
    sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % PLC_MAIL_SUPPORT_ADDRESS, "w")
    # Parsed from MyPLC config
    to = [PLC_MAIL_MOM_LIST_ADDRESS]
    # Also notify the slice's mail alias, except for the root pseudo-slice.
    if slice is not None and slice != "root":
        to.append(PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))
    # Values interpolated into the RFC-822 header template below.
    header = {'from': "%s Support <%s>" % (PLC_NAME, PLC_MAIL_SUPPORT_ADDRESS),
        'version': sys.version.split(" ")[0],
Content-type: text/plain
X-Mailer: Python/%(version)s
""".lstrip() % header)
181 Stores the last recorded bandwidth parameters of a slice.
183 xid - slice context/VServer ID
185 time - beginning of recording period in UNIX seconds
186 bytes - low bandwidth bytes transmitted at the beginning of the recording period
187 i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
188 MaxKByte - total volume of data allowed
189 ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period)
190 Maxi2KByte - same as MaxKByte, but for i2
191 Threshi2KByte - same as ThreshKByte, but for i2
192 MaxRate - max_rate slice attribute.
193 Maxi2Rate - max_exempt_rate slice attribute.
194 Share - Used by Sirius to loan min rates
195 Sharei2 - Used by Sirius to loan min rates for i2
196 self.emailed - did slice recv email during this recording period
def __init__(self, xid, name, rspec):
    """
    Initialize the accounting record for one slice and apply its
    initial tc limits.

    xid   -- slice context/VServer ID
    name  -- slice name
    rspec -- slice tag dict from GetSlivers (may override defaults)
    """
    self.xid = xid
    self.name = name
    # Baseline recording-period state; reset() establishes real values.
    self.time = 0
    self.bytes = 0
    self.i2bytes = 0
    # Defaults until overridden by slice tags in updateSliceTags below.
    self.MaxRate = default_MaxRate
    self.MinRate = bwlimit.bwmin / 1000
    self.Maxi2Rate = default_Maxi2Rate
    self.Mini2Rate = bwlimit.bwmin / 1000
    self.MaxKByte = default_MaxKByte
    # Threshold defaults to 80% of the byte limit.
    self.ThreshKByte = int(.8 * self.MaxKByte)
    self.Maxi2KByte = default_Maxi2KByte
    self.Threshi2KByte = int(.8 * self.Maxi2KByte)
    self.Share = default_Share
    self.Sharei2 = default_Share
    # Per-period notification/cap state (used by update()/notify()).
    self.emailed = False
    self.capped = False

    # Pick up any per-slice overrides from the rspec.
    self.updateSliceTags(rspec)
    # Apply limits. dev added for consistency with reset()'s bwlimit.set call.
    bwlimit.set(xid = self.xid, dev = dev_default,
        minrate = self.MinRate * 1000,
        maxrate = self.MaxRate * 1000,
        maxexemptrate = self.Maxi2Rate * 1000,
        minexemptrate = self.Mini2Rate * 1000,
        share = self.Share)
def updateSliceTags(self, rspec):
    """
    Use rspecs from GetSlivers to PLC to populate slice object. Also
    do some sanity checking.

    rspec -- slice tag dict; missing keys fall back to module defaults.

    Fixes: the net_share branch logged the "new" value without storing
    it, and the net_i2_share log referenced a nonexistent attribute
    (self.i2Share), raising AttributeError whenever that tag changed.
    """
    # Sanity check plus policy decision for MinRate:
    # Minrate can't be greater than 25% of MaxRate or NodeCap.
    MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin / 1000))
    if MinRate > int(.25 * default_MaxRate):
        MinRate = int(.25 * default_MaxRate)
    if MinRate != self.MinRate:
        self.MinRate = MinRate
        logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))

    MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
    if MaxRate != self.MaxRate:
        self.MaxRate = MaxRate
        logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))

    Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000))
    if Mini2Rate != self.Mini2Rate:
        self.Mini2Rate = Mini2Rate
        logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))

    Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
    if Maxi2Rate != self.Maxi2Rate:
        self.Maxi2Rate = Maxi2Rate
        logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))

    MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
    if MaxKByte != self.MaxKByte:
        self.MaxKByte = MaxKByte
        logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte))

    Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
    if Maxi2KByte != self.Maxi2KByte:
        self.Maxi2KByte = Maxi2KByte
        logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))

    # Thresholds default to 80% of the corresponding byte limit.
    ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
    if ThreshKByte != self.ThreshKByte:
        self.ThreshKByte = ThreshKByte
        logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))

    Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
    if Threshi2KByte != self.Threshi2KByte:
        self.Threshi2KByte = Threshi2KByte
        logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))

    Share = int(rspec.get('net_share', default_Share))
    if Share != self.Share:
        # Fix: store the new share before logging it.
        self.Share = Share
        logger.log("bwmon: Updating %s: Net Share = %s" %(self.name, self.Share))

    Sharei2 = int(rspec.get('net_i2_share', default_Share))
    if Sharei2 != self.Sharei2:
        self.Sharei2 = Sharei2
        # Fix: was self.i2Share, an attribute that does not exist.
        logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.Sharei2))
def reset(self, runningrates, rspec):
    """
    Begin a new recording period. Remove caps by restoring limits
    to their default values.

    runningrates -- dict of current tc values for this slice (from gethtbs);
                    may be empty for a brand-new slice
    rspec        -- slice tag dict from GetSlivers

    Fix: the minrate comparison below read runningrates['maxrate']
    instead of runningrates['minrate'], so a minrate-only change was
    mis-detected.
    """
    # Cache share for later comparison
    self.Share = runningrates.get('share', 1)

    # Query Node Manager for max rate overrides
    self.updateSliceTags(rspec)

    # Reset baseline time
    self.time = time.time()

    # Reset baseline byte counts
    self.bytes = runningrates.get('usedbytes', 0)
    self.i2bytes = runningrates.get('usedi2bytes', 0)

    # New period: clear the notification and cap flags.
    self.emailed = False
    self.capped = False

    # Rates in bit/s (stored values are kbit/s).
    maxrate = self.MaxRate * 1000
    minrate = self.MinRate * 1000
    maxi2rate = self.Maxi2Rate * 1000
    mini2rate = self.Mini2Rate * 1000

    # Only invoke tc when something actually changed.
    if (maxrate != runningrates.get('maxrate', 0)) or \
       (minrate != runningrates.get('minrate', 0)) or \
       (maxi2rate != runningrates.get('maxexemptrate', 0)) or \
       (mini2rate != runningrates.get('minexemptrate', 0)) or \
       (self.Share != runningrates.get('share', 0)):
        logger.log("bwmon: %s reset to %s/%s" % \
            (self.name,
             bwlimit.format_tc_rate(maxrate),
             bwlimit.format_tc_rate(maxi2rate)))
        bwlimit.set(xid = self.xid, dev = dev_default,
            minrate = self.MinRate * 1000,
            maxrate = self.MaxRate * 1000,
            maxexemptrate = self.Maxi2Rate * 1000,
            minexemptrate = self.Mini2Rate * 1000,
            share = self.Share)
def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
    """
    Notify the slice it's being capped.

    new_maxrate       -- new low-bandwidth cap in bit/s
    new_maxexemptrate -- new exempt (I2) cap in bit/s
    usedbytes         -- total low-bandwidth bytes reported by tc
    usedi2bytes       -- total exempt bytes reported by tc

    Fix: `message` was accumulated with += without being initialized,
    raising NameError on first use; restored the empty-string init.
    """
    # Accumulates one templated section per capped traffic class.
    message = ""
    # Prepare message parameters from the template
    params = {'slice': self.name, 'hostname': socket.gethostname(),
              'since': time.asctime(time.gmtime(self.time)) + " GMT",
              'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
              'date': time.asctime(time.gmtime()) + " GMT",
              'period': format_period(period)}

    if new_maxrate != (self.MaxRate * 1000):
        # Format template parameters for low bandwidth message
        params['class'] = "low bandwidth"
        params['bytes'] = format_bytes(usedbytes - self.bytes)
        params['limit'] = format_bytes(self.MaxKByte * 1024)
        params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
        # Cap low bandwidth burst rate
        message += template % params
        logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)

    if new_maxexemptrate != (self.Maxi2Rate * 1000):
        # Format template parameters for high bandwidth message
        params['class'] = "high bandwidth"
        params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
        params['limit'] = format_bytes(self.Maxi2KByte * 1024)
        params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
        message += template % params
        logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)

    # Email the slice at most once per recording period.
    if self.emailed == False:
        subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
        if DEBUG:
            # DEBUG mode logs instead of emailing (see file header).
            logger.log("bwmon: "+ subject)
            logger.log("bwmon: "+ message + (footer % params))
        else:
            self.emailed = True
            logger.log("bwmon: Emailing %s" % self.name)
            slicemail(self.name, subject, message + (footer % params))
def update(self, runningrates, rspec):
    """
    Update byte counts and check if byte thresholds have been
    exceeded. If exceeded, cap to remaining bytes in limit over
    remaining time in period. Recalculate every time module runs.

    runningrates -- dict of current tc values for this slice (from gethtbs)
    rspec        -- slice tag dict from GetSlivers

    Fixes: removed the unused local `sum` (shadowed the builtin);
    restored setting self.capped in the capping branches — it is tested
    at the bottom of this method but was never assigned in the reviewed
    code.
    """
    # cache share for later comparison
    # NOTE(review): this copies the stored share into runningrates, so
    # the share comparison below can only fire if updateSliceTags()
    # changes self.Share — confirm this is the intended semantics.
    runningrates['share'] = self.Share

    # Query Node Manager for max rate overrides
    self.updateSliceTags(rspec)

    usedbytes = runningrates['usedbytes']
    usedi2bytes = runningrates['usedi2bytes']

    # Check the low-bandwidth byte budget.
    if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
        maxbyte = self.MaxKByte * 1024
        bytesused = usedbytes - self.bytes
        timeused = int(time.time() - self.time)
        # Calculate new rate in bit/s: bytes remaining in the budget
        # spread over the time remaining in the recording period.
        new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
        # Never go under MinRate
        if new_maxrate < (self.MinRate * 1000):
            new_maxrate = self.MinRate * 1000
        # State information. I'm capped.
        self.capped = True
    else:
        # Under threshold: run at the full configured rate.
        new_maxrate = self.MaxRate * 1000

    # Check the exempt (I2) byte budget.
    if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
        maxi2byte = self.Maxi2KByte * 1024
        i2bytesused = usedi2bytes - self.i2bytes
        timeused = int(time.time() - self.time)
        new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
        # Never go under MinRate
        if new_maxi2rate < (self.Mini2Rate * 1000):
            new_maxi2rate = self.Mini2Rate * 1000
        # State information. I'm capped.
        self.capped = True
    else:
        new_maxi2rate = self.Maxi2Rate * 1000

    # Check running values against newly calculated values so as not to run tc
    # unnecessarily.
    if (runningrates['maxrate'] != new_maxrate) or \
       (runningrates['minrate'] != self.MinRate * 1000) or \
       (runningrates['maxexemptrate'] != new_maxi2rate) or \
       (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
       (runningrates['share'] != self.Share):
        bwlimit.set(xid = self.xid, dev = dev_default,
            minrate = self.MinRate * 1000,
            maxrate = new_maxrate,
            minexemptrate = self.Mini2Rate * 1000,
            maxexemptrate = new_maxi2rate,
            share = self.Share)

    # Tell the slice it has been capped (at most one email per period;
    # notify() enforces that via self.emailed).
    if self.capped == True:
        self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
def gethtbs(root_xid, default_xid):
    """
    Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
    Turn off HTBs without names.

    root_xid    -- xid of the root pseudo-slice (never treated as orphan)
    default_xid -- xid of the default pseudo-slice (never treated as orphan)

    Fix: the reviewed code neither initialized the accumulator nor
    returned it, though callers use the returned dict; restored.
    """
    livehtbs = {}
    for params in bwlimit.get():
        (xid, share,
         minrate, maxrate,
         minexemptrate, maxexemptrate,
         usedbytes, usedi2bytes) = params

        name = bwlimit.get_slice(xid)

        if (name is None) \
           and (xid != root_xid) \
           and (xid != default_xid):
            # Orphaned (not associated with a slice) class
            name = "%d?" % xid
            logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
            bwlimit.off(xid)

        livehtbs[xid] = {'share': share,
            'minrate': minrate,
            'maxrate': maxrate,
            'maxexemptrate': maxexemptrate,
            'minexemptrate': minexemptrate,
            'usedbytes': usedbytes,
            'name': name,
            'usedi2bytes': usedi2bytes}

    return livehtbs
def sync(nmdbcopy):
    """
    Syncs tc, db, and bwmon.pickle.
    Then, starts new slices, kills old ones, and updates byte accounts for each running slice.
    Sends emails and caps those that went over their limit.

    nmdbcopy -- deep copy of the NM database: {sliver name: data dict}

    Fixes: the pickle file handles are now closed after use, and a
    missing/stale state file falls back to a fresh state instead of
    propagating an exception out of the monitor thread.
    """
    # These module-level defaults are reassigned below.
    global default_MaxRate, default_Maxi2Rate

    # Optional slice-name filter; empty means monitor every slice.
    names = []

    # In case the limits have changed.
    default_MaxRate = int(bwlimit.get_bwcap() / 1000)
    default_Maxi2Rate = int(bwlimit.bwmax / 1000)

    # In case default isn't set yet.
    if default_MaxRate == -1:
        default_MaxRate = 1000000

    try:
        f = open(DB_FILE, "r+")
        logger.verbose("bwmon: Loading %s" % DB_FILE)
        (version, slices, deaddb) = pickle.load(f)
        f.close()
        # Check version of data file
        if version != "$Id$":
            logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE))
            raise Exception
    except:
        # Missing, unreadable or stale state: start accounting afresh.
        version = "$Id$"
        slices = {}
        deaddb = {}

    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")

    # Since root is required for sanity, its not in the API/plc database, so pass {}
    if root_xid not in slices.keys():
        slices[root_xid] = Slice(root_xid, "root", {})
        slices[root_xid].reset({}, {})

    # Used by bwlimit. pass {} since there is no rspec (like above).
    if default_xid not in slices.keys():
        slices[default_xid] = Slice(default_xid, "default", {})
        slices[default_xid].reset({}, {})

    live = {}
    # Get running slivers that should be on this node (from plc). {xid: name}
    # db keys on name, bwmon keys on xid. db doesnt have xid either.
    for plcSliver in nmdbcopy.keys():
        live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]

    logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
    logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())

    # Get actual running values from tc.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())

    # The dat file has HTBs for slices, but the HTBs aren't running
    nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
    logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
    # Live slices get their accounting reset; stale entries are dropped.
    for nohtbslice in nohtbslices:
        if live.has_key(nohtbslice):
            slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
        else:
            logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
            del slices[nohtbslice]

    # The dat file doesnt have HTB for the slice but kern has HTB
    slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
    logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
    for slicenodat in slicesnodat:
        # But slice is running
        if live.has_key(slicenodat):
            # init the slice. which means start accounting over since kernel
            # htb was already there.
            slices[slicenodat] = Slice(slicenodat,
                live[slicenodat]['name'],
                live[slicenodat]['_rspec'])

    # Slices in GetSlivers but not running HTBs
    newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
    logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())

    for newslice in newslicesxids:
        # Delegated slices dont have xids (which are uids) since they haven't been
        # created yet.
        if newslice != None and live[newslice].has_key('_rspec') == True:
            # Check to see if we recently deleted this slice.
            if live[newslice]['name'] not in deaddb.keys():
                logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
                # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
                # and made a dict of computed values.
                slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
                slices[newslice].reset( {}, live[newslice]['_rspec'] )
            # Double check time for dead slice in deaddb is within 24hr recording period.
            elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
                deadslice = deaddb[live[newslice]['name']]
                logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
                slices[newslice] = deadslice['slice']
                slices[newslice].xid = newslice
                # Resume accounting from the saved totals.
                newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
                    "minrate": deadslice['slice'].MinRate * 1000,
                    "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
                    "usedbytes": deadslice['htb']['usedbytes'] * 1000,
                    "usedi2bytes": deadslice['htb']['usedi2bytes'],
                    "share":deadslice['htb']['share']}
                slices[newslice].reset(newvals, live[newslice]['_rspec'])
                slices[newslice].update(newvals, live[newslice]['_rspec'])
                # Since the slice has been reinitialed, remove from dead database.
                del deaddb[deadslice['slice'].name]
        else:
            logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name'])

    # Move dead slices that exist in the pickle file, but
    # aren't instantiated by PLC into the dead dict until
    # recording period is over. This is to avoid the case where a slice is dynamically created
    # and destroyed then recreated to get around byte limits.
    deadxids = set(slices.keys()) - set(live.keys())
    # root and default are always in deadxids; hence the "- 2".
    logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
    for deadxid in deadxids:
        if deadxid == root_xid or deadxid == default_xid:
            continue
        logger.log("bwmon: removing dead slice %s " % deadxid)
        if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
            # add slice (by name) to deaddb
            logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
            deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
            del slices[deadxid]
        if kernelhtbs.has_key(deadxid):
            logger.verbose("bwmon: Removing HTB for %s." % deadxid)
            bwlimit.off(deadxid)

    # Prune deaddb entries whose recording period has lapsed.
    for deadslice in deaddb.keys():
        if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
            logger.log("bwmon: Removing dead slice %s from dat." \
                % deaddb[deadslice]['slice'].name)
            del deaddb[deadslice]

    # Get actual running values from tc since we've added and removed buckets.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())

    # Update all byte limits on all slices
    for (xid, slice) in slices.iteritems():
        # Monitor only the specified slices
        if xid == root_xid or xid == default_xid: continue
        # names is always empty above, so this filter is currently inert.
        if names and name not in names:
            continue

        if (time.time() >= (slice.time + period)) or \
           (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
           (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
            # Reset to defaults every 24 hours or if it appears
            # that the byte counters have overflowed (or, more
            # likely, the node was restarted or the HTB buckets
            # were re-initialized).
            slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
        elif ENABLE:
            logger.verbose("bwmon: Updating slice %s" % slice.name)
            slice.update(kernelhtbs[xid], live[xid]['_rspec'])

    logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE))
    f = open(DB_FILE, "w")
    pickle.dump((version, slices, deaddb), f)
    f.close()
# doesnt use generic default interface because this runs as its own thread.
# changing the config variable will not have an effect since GetSlivers: pass
def getDefaults(nmdbcopy):
    """
    Get defaults from default slice's slice attributes.

    nmdbcopy -- deep copy of the NM database, keyed by sliver name

    Returns True when bandwidth limiting should stay enabled, False
    when the default slice's net_max_rate of -1 disables it.

    Fixes: guard against the default slice being absent from nmdbcopy
    (nmdbcopy.get() returns None → TypeError on subscription), and
    return a status for the caller, which branches on this function's
    result.
    """
    status = True
    # default slice
    dfltslice = nmdbcopy.get(PLC_SLICE_PREFIX+"_default")
    if dfltslice is not None and dfltslice['rspec']['net_max_rate'] == -1:
        status = False
    return status
    """
    Turn off all slice HTBs
    """
    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")
    # Snapshot the running HTB classes reported by tc.
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.log("bwmon: Disabling all running HTBs.")
    # Disable each class by xid.
    for htb in kernelhtbs.keys(): bwlimit.off(htb)
697 lock = threading.Event()
    """
    When run as a thread, wait for event, lock db, deep copy it, release it,
    run bwmon.GetSlivers(), then go back to waiting.
    """
    logger.verbose("bwmon: Thread started")
    # Signalled (via the module-level lock Event) when new sliver data
    # is available.
    logger.verbose("bwmon: Event received. Running.")
    # Deep-copy the NM database under its lock so the slow sync work
    # below operates on a consistent snapshot without holding the lock.
    database.db_lock.acquire()
    nmdbcopy = copy.deepcopy(database.db)
    database.db_lock.release()
    # Only enforce limits when the default slice enables them and the
    # bwlimit HTB classes exist on the default device.
    if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
        # class show to check if net:InitNodeLimit:bwlimit.init has run.
    else: logger.log("bwmon: BW limits DISABLED.")
    # Any failure is logged; the thread keeps running.
    except: logger.log_exc("bwmon failed")
719 tools.as_daemon_thread(run)
721 def GetSlivers(*args):
722 logger.verbose ("bwmon: triggering dummy GetSlivers")