6 # Average bandwidth monitoring script. Run periodically via NM db.sync to
7 # enforce a soft limit on daily bandwidth usage for each slice. If a
8 # slice is found to have transmitted 80% of its daily byte limit usage,
9 # its instantaneous rate will be capped at the bytes remaining in the limit
10 # over the time remaining in the recording period.
12 # Two separate limits are enforced, one for destinations exempt from
13 # the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
15 # Mark Huang <mlhuang@cs.princeton.edu>
16 # Andy Bavier <acb@cs.princeton.edu>
17 # Faiyaz Ahmed <faiyaza@cs.princeton.edu>
18 # Copyright (C) 2004-2008 The Trustees of Princeton University
# Set DEBUG to True if you don't want to send emails
DEBUG = False
# Set ENABLE to False to setup buckets, but not limit.
ENABLE = True

# Persistent state (slice accounting + dead-slice db) across NM restarts.
datafile = "/var/lib/misc/bwmon.dat"

try:
    # Pull in PLC_* configuration constants (mail addresses, site name, ...).
    sys.path.append("/etc/planetlab")
    from plc_config import *
except:
    # No MyPLC config on this node: fall back to DEBUG mode (log, don't email).
    DEBUG = True
    logger.log("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found", 2)
    logger.log("bwmon: Running in DEBUG mode. Logging to file and not emailing.", 1)
# Length of one recording period.
seconds_per_day = 24 * 60 * 60

# Defaults
dev_default = tools.get_default_if()
# Burst to line rate (or node cap). Set by NM. in KBit/s
default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)
# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
# 5.4 Gbyte per day max allowed transferred per recording period
# 5.4 Gbytes per day is approx 512k/s for 24hrs (approx because original math was wrong
# but it's better to keep a higher byte total and keep people happy than correct
# the problem and piss people off.
# default_MaxKByte = 5662310

# llp wants to double these, so we use the following
# 1mbit * 24hrs * 60mins * 60secs = bits/day
# 1000000 * 24 * 60 * 60 / (1024 * 8)
default_MaxKByte = 10546875

# 16.4 Gbyte per day max allowed transferred per recording period to I2
# default_Maxi2KByte = 17196646

# 3Mb/s for 24hrs a day (30.17 gigs)
default_Maxi2KByte = 31640625

# Default share quanta
default_Share = 1

# Recording period in seconds; byte accounting resets on this boundary.
period = 1 * seconds_per_day
91 The slice %(slice)s has transmitted more than %(bytes)s from
92 %(hostname)s to %(class)s destinations
95 Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
98 Please reduce the average %(class)s transmission rate
99 of the slice to %(limit)s per %(period)s.
105 %(date)s %(hostname)s bwcap %(slice)s
def format_bytes(bytes, si = True):
    """
    Formats bytes into a human-readable string.

    bytes -- byte count to format
    si    -- True: SI units (kilo = 1000); False: binary units (kibi = 1024)
    """
    if si:
        kilo = 1000.
    else:
        # Officially, a kibibyte
        kilo = 1024.

    if bytes >= (kilo * kilo * kilo):
        return "%.1f GB" % (bytes / (kilo * kilo * kilo))
    # BUG FIX: threshold was a hard-coded 1000000, which disagreed with the
    # kibi-based divisor when si=False; use kilo*kilo so threshold and divisor
    # always use the same unit base.
    elif bytes >= (kilo * kilo):
        return "%.1f MB" % (bytes / (kilo * kilo))
    elif bytes >= kilo:
        return "%.1f KB" % (bytes / kilo)
    else:
        return "%.0f bytes" % bytes
def format_period(seconds):
    """
    Formats a period in seconds into a human-readable string.

    Exact day/hour get singular forms; otherwise the largest fitting
    unit is used with one decimal place.
    """
    if seconds == (24 * 60 * 60):
        return "1 day"
    elif seconds == (60 * 60):
        return "1 hour"
    elif seconds > (24 * 60 * 60):
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    elif seconds > (60 * 60):
        return "%.1f hours" % (seconds / 60. / 60.)
    elif seconds > 60:
        return "%.1f minutes" % (seconds / 60.)
    else:
        return "%.0f seconds" % seconds
def slicemail(slice, subject, body):
    """
    Front end to sendmail. Sends email to slice alias with given subject and body.

    slice   -- slice name, or None/"root" to mail only the support list
    subject -- message subject line
    body    -- message body text

    NOTE(review): reconstructed from a mangled source; header layout assumed
    to be standard RFC-822 key/value lines -- confirm against deployed copy.
    """
    # -N never suppresses delivery status notifications; sender is PLC support.
    sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % PLC_MAIL_SUPPORT_ADDRESS, "w")

    # Parsed from MyPLC config
    to = [PLC_MAIL_MOM_LIST_ADDRESS]

    # Also notify the slice's alias, except for root.
    if slice is not None and slice != "root":
        to.append(PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))

    header = {'from': "%s Support <%s>" % (PLC_NAME, PLC_MAIL_SUPPORT_ADDRESS),
              'to': ", ".join(to),
              'version': sys.version.split(" ")[0],
              'subject': subject}

    # Write headers
    sendmail.write("""
Content-type: text/plain
From: %(from)s
Reply-To: %(from)s
To: %(to)s
X-Mailer: Python/%(version)s
Subject: %(subject)s

""".lstrip() % header)

    # Write body
    sendmail.write(body)
    sendmail.close()
183 Stores the last recorded bandwidth parameters of a slice.
185 xid - slice context/VServer ID
187 time - beginning of recording period in UNIX seconds
188 bytes - low bandwidth bytes transmitted at the beginning of the recording period
189 i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
190 MaxKByte - total volume of data allowed
191 ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period)
192 Maxi2KByte - same as MaxKByte, but for i2
193 Threshi2KByte - same as ThreshKByte, but for i2
194 MaxRate - max_rate slice attribute.
195 Maxi2Rate - max_exempt_rate slice attribute.
196 Share - Used by Sirius to loan min rates
197 Sharei2 - Used by Sirius to loan min rates for i2
198 self.emailed - did slice recv email during this recording period
202 def __init__(self, xid, name, rspec):
208 self.MaxRate = default_MaxRate
209 self.MinRate = bwlimit.bwmin / 1000
210 self.Maxi2Rate = default_Maxi2Rate
211 self.Mini2Rate = bwlimit.bwmin / 1000
212 self.MaxKByte = default_MaxKByte
213 self.ThreshKByte = int(.8 * self.MaxKByte)
214 self.Maxi2KByte = default_Maxi2KByte
215 self.Threshi2KByte = int(.8 * self.Maxi2KByte)
216 self.Share = default_Share
217 self.Sharei2 = default_Share
221 self.updateSliceTags(rspec)
222 bwlimit.set(xid = self.xid,
223 minrate = self.MinRate * 1000,
224 maxrate = self.MaxRate * 1000,
225 maxexemptrate = self.Maxi2Rate * 1000,
226 minexemptrate = self.Mini2Rate * 1000,
232 def updateSliceTags(self, rspec):
234 Use respects from GetSlivers to PLC to populate slice object. Also
235 do some sanity checking.
238 # Sanity check plus policy decision for MinRate:
239 # Minrate cant be greater than 25% of MaxRate or NodeCap.
240 MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin / 1000))
241 if MinRate > int(.25 * default_MaxRate):
242 MinRate = int(.25 * default_MaxRate)
243 if MinRate != self.MinRate:
244 self.MinRate = MinRate
245 logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))
247 MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
248 if MaxRate != self.MaxRate:
249 self.MaxRate = MaxRate
250 logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))
252 Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000))
253 if Mini2Rate != self.Mini2Rate:
254 self.Mini2Rate = Mini2Rate
255 logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))
257 Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
258 if Maxi2Rate != self.Maxi2Rate:
259 self.Maxi2Rate = Maxi2Rate
260 logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))
262 MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
263 if MaxKByte != self.MaxKByte:
264 self.MaxKByte = MaxKByte
265 logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte))
267 Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
268 if Maxi2KByte != self.Maxi2KByte:
269 self.Maxi2KByte = Maxi2KByte
270 logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))
272 ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
273 if ThreshKByte != self.ThreshKByte:
274 self.ThreshKByte = ThreshKByte
275 logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))
277 Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
278 if Threshi2KByte != self.Threshi2KByte:
279 self.Threshi2KByte = Threshi2KByte
280 logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))
282 Share = int(rspec.get('net_share', default_Share))
283 if Share != self.Share:
285 logger.log("bwmon: Updating %s: Net Share = %s" %(self.name, self.Share))
287 Sharei2 = int(rspec.get('net_i2_share', default_Share))
288 if Sharei2 != self.Sharei2:
289 self.Sharei2 = Sharei2
290 logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.i2Share))
293 def reset(self, runningrates, rspec):
295 Begin a new recording period. Remove caps by restoring limits
296 to their default values.
298 # Cache share for later comparison
299 self.Share = runningrates.get('share', 1)
301 # Query Node Manager for max rate overrides
302 self.updateSliceTags(rspec)
304 # Reset baseline time
305 self.time = time.time()
307 # Reset baseline byte coutns
308 self.bytes = runningrates.get('usedbytes', 0)
309 self.i2bytes = runningrates.get('usedi2bytes', 0)
316 maxrate = self.MaxRate * 1000
317 minrate = self.MinRate * 1000
318 maxi2rate = self.Maxi2Rate * 1000
319 mini2rate = self.Mini2Rate * 1000
321 if (maxrate != runningrates.get('maxrate', 0)) or \
322 (minrate != runningrates.get('maxrate', 0)) or \
323 (maxi2rate != runningrates.get('maxexemptrate', 0)) or \
324 (mini2rate != runningrates.get('minexemptrate', 0)) or \
325 (self.Share != runningrates.get('share', 0)):
326 logger.log("bwmon: %s reset to %s/%s" % \
328 bwlimit.format_tc_rate(maxrate),
329 bwlimit.format_tc_rate(maxi2rate)), 1)
330 bwlimit.set(xid = self.xid, dev = dev_default,
331 minrate = self.MinRate * 1000,
332 maxrate = self.MaxRate * 1000,
333 maxexemptrate = self.Maxi2Rate * 1000,
334 minexemptrate = self.Mini2Rate * 1000,
337 def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
339 Notify the slice it's being capped.
341 # Prepare message parameters from the template
343 params = {'slice': self.name, 'hostname': socket.gethostname(),
344 'since': time.asctime(time.gmtime(self.time)) + " GMT",
345 'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
346 'date': time.asctime(time.gmtime()) + " GMT",
347 'period': format_period(period)}
349 if new_maxrate != (self.MaxRate * 1000):
350 # Format template parameters for low bandwidth message
351 params['class'] = "low bandwidth"
352 params['bytes'] = format_bytes(usedbytes - self.bytes)
353 params['limit'] = format_bytes(self.MaxKByte * 1024)
354 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
356 # Cap low bandwidth burst rate
357 message += template % params
358 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
360 if new_maxexemptrate != (self.Maxi2Rate * 1000):
361 # Format template parameters for high bandwidth message
362 params['class'] = "high bandwidth"
363 params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
364 params['limit'] = format_bytes(self.Maxi2KByte * 1024)
365 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
367 message += template % params
368 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
371 if self.emailed == False:
372 subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
374 logger.log("bwmon: "+ subject)
375 logger.log("bwmon: "+ message + (footer % params))
378 logger.log("bwmon: Emailing %s" % self.name)
379 slicemail(self.name, subject, message + (footer % params))
382 def update(self, runningrates, rspec):
384 Update byte counts and check if byte thresholds have been
385 exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period.
386 Recalculate every time module runs.
388 # cache share for later comparison
389 runningrates['share'] = self.Share
391 # Query Node Manager for max rate overrides
392 self.updateSliceTags(rspec)
394 usedbytes = runningrates['usedbytes']
395 usedi2bytes = runningrates['usedi2bytes']
398 if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
399 sum = self.bytes + (self.ThreshKByte * 1024)
400 maxbyte = self.MaxKByte * 1024
401 bytesused = usedbytes - self.bytes
402 timeused = int(time.time() - self.time)
403 # Calcuate new rate. in bit/s
404 new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
405 # Never go under MinRate
406 if new_maxrate < (self.MinRate * 1000):
407 new_maxrate = self.MinRate * 1000
408 # State information. I'm capped.
412 new_maxrate = self.MaxRate * 1000
415 if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
416 maxi2byte = self.Maxi2KByte * 1024
417 i2bytesused = usedi2bytes - self.i2bytes
418 timeused = int(time.time() - self.time)
420 new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
421 # Never go under MinRate
422 if new_maxi2rate < (self.Mini2Rate * 1000):
423 new_maxi2rate = self.Mini2Rate * 1000
424 # State information. I'm capped.
428 new_maxi2rate = self.Maxi2Rate * 1000
431 # Check running values against newly calculated values so as not to run tc
433 if (runningrates['maxrate'] != new_maxrate) or \
434 (runningrates['minrate'] != self.MinRate * 1000) or \
435 (runningrates['maxexemptrate'] != new_maxi2rate) or \
436 (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
437 (runningrates['share'] != self.Share):
439 bwlimit.set(xid = self.xid,
440 minrate = self.MinRate * 1000,
441 maxrate = new_maxrate,
442 minexemptrate = self.Mini2Rate * 1000,
443 maxexemptrate = new_maxi2rate,
447 if self.capped == True:
448 self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
def gethtbs(root_xid, default_xid):
    """
    Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
    Turn off HTBs without names.

    root_xid    -- xid of the root context (never treated as orphaned)
    default_xid -- xid of the default class (never treated as orphaned)
    """
    livehtbs = {}
    for params in bwlimit.get():
        (xid, share,
         minrate, maxrate,
         minexemptrate, maxexemptrate,
         usedbytes, usedi2bytes) = params

        name = bwlimit.get_slice(xid)

        if (name is None) \
        and (xid != root_xid) \
        and (xid != default_xid):
            # Orphaned (not associated with a slice) class
            name = "%d?" % xid
            logger.log("bwmon: Found orphaned HTB %s. Removing." %name, 1)
            bwlimit.off(xid)

        livehtbs[xid] = {'share': share,
            'minrate': minrate,
            'maxrate': maxrate,
            'maxexemptrate': maxexemptrate,
            'minexemptrate': minexemptrate,
            'usedbytes': usedbytes,
            'name': name,
            'usedi2bytes': usedi2bytes}

    return livehtbs
def sync(nmdbcopy):
    """
    Syncs tc, db, and bwmon.dat. Then, starts new slices, kills old ones, and updates byte accounts for each running slice. Sends emails and caps those that went over their limit.

    nmdbcopy -- deep copy of the NM database, keyed by slice name.
    """
    # Refresh module-level defaults from the current node cap.
    # BUG FIX: without `global`, these assignments create locals and the
    # refreshed limits would never reach Slice(), which reads the globals.
    global default_MaxRate, default_Maxi2Rate

    # Optional list of slice names to restrict monitoring to (empty = all).
    names = []

    # Incase the limits have changed.
    default_MaxRate = int(bwlimit.get_bwcap() / 1000)
    default_Maxi2Rate = int(bwlimit.bwmax / 1000)

    # Incase default isn't set yet.
    if default_MaxRate == -1:
        default_MaxRate = 1000000

    try:
        f = open(datafile, "r+")
        logger.log("bwmon: Loading %s" % datafile, 2)
        (version, slices, deaddb) = pickle.load(f)
        f.close()
        # Check version of data file
        if version != "$Id$":
            logger.log("bwmon: Not using old version '%s' data file %s" % (version, datafile))
            raise Exception
    except Exception:
        # Missing, unreadable or stale dat file: start accounting from scratch.
        version = "$Id$"
        slices = {}
        deaddb = {}

    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")

    # Since root is required for sanity, its not in the API/plc database, so pass {}
    if root_xid not in slices.keys():
        slices[root_xid] = Slice(root_xid, "root", {})
        slices[root_xid].reset({}, {})

    # Used by bwlimit. pass {} since there is no rspec (like above).
    if default_xid not in slices.keys():
        slices[default_xid] = Slice(default_xid, "default", {})
        slices[default_xid].reset({}, {})

    live = {}
    # Get running slivers that should be on this node (from plc). {xid: name}
    # db keys on name, bwmon keys on xid. db doesnt have xid either.
    for plcSliver in nmdbcopy.keys():
        live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]

    logger.log("bwmon: Found %s instantiated slices" % live.keys().__len__(), 2)
    logger.log("bwmon: Found %s slices in dat file" % slices.values().__len__(), 2)

    # Get actual running values from tc.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.log("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__(), 2)

    # The dat file has HTBs for slices, but the HTBs aren't running
    nohtbslices = Set(slices.keys()) - Set(kernelhtbs.keys())
    logger.log( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__(), 2)
    # Reset accounting for those that are still live; drop the rest.
    for nohtbslice in nohtbslices:
        if live.has_key(nohtbslice):
            slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
        else:
            logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
            del slices[nohtbslice]

    # The dat file doesnt have HTB for the slice but kern has HTB
    slicesnodat = Set(kernelhtbs.keys()) - Set(slices.keys())
    logger.log( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__(), 2)
    for slicenodat in slicesnodat:
        # But slice is running
        if live.has_key(slicenodat):
            # init the slice. which means start accounting over since kernel
            # htb was already there.
            slices[slicenodat] = Slice(slicenodat,
                live[slicenodat]['name'],
                live[slicenodat]['_rspec'])

    # Slices in GetSlivers but not running HTBs
    newslicesxids = Set(live.keys()) - Set(kernelhtbs.keys())
    logger.log("bwmon: Found %s new slices" % newslicesxids.__len__(), 2)

    # Set up new slices.
    for newslice in newslicesxids:
        # Delegated slices dont have xids (which are uids) since they haven't been
        # created yet.
        if newslice != None and live[newslice].has_key('_rspec') == True:
            # Check to see if we recently deleted this slice.
            if live[newslice]['name'] not in deaddb.keys():
                logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
                # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
                # and made a dict of computed values.
                slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
                slices[newslice].reset( {}, live[newslice]['_rspec'] )
            # Double check time for dead slice in deaddb is within 24hr recording period.
            elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
                deadslice = deaddb[live[newslice]['name']]
                logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
                slices[newslice] = deadslice['slice']
                slices[newslice].xid = newslice
                # Resume accounting where the dead slice left off.
                newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
                    "minrate": deadslice['slice'].MinRate * 1000,
                    "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
                    "usedbytes": deadslice['htb']['usedbytes'] * 1000,
                    "usedi2bytes": deadslice['htb']['usedi2bytes'],
                    "share":deadslice['htb']['share']}
                slices[newslice].reset(newvals, live[newslice]['_rspec'])
                slices[newslice].update(newvals, live[newslice]['_rspec'])
                # Since the slice has been reinitialed, remove from dead database.
                del deaddb[deadslice['slice'].name]
        else:
            logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name'])

    # Move dead slices that exist in the pickle file, but
    # aren't instantiated by PLC into the dead dict until
    # recording period is over. This is to avoid the case where a slice is dynamically created
    # and destroyed then recreated to get around byte limits.
    deadxids = Set(slices.keys()) - Set(live.keys())
    logger.log("bwmon: Found %s dead slices" % (deadxids.__len__() - 2), 2)
    for deadxid in deadxids:
        if deadxid == root_xid or deadxid == default_xid:
            continue
        logger.log("bwmon: removing dead slice %s " % deadxid)
        if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
            # add slice (by name) to deaddb
            logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
            deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
            del slices[deadxid]
        if kernelhtbs.has_key(deadxid):
            logger.log("bwmon: Removing HTB for %s." % deadxid, 2)
            bwlimit.off(deadxid)

    # Expire entries from deaddb whose recording period has elapsed.
    for deadslice in deaddb.keys():
        if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
            logger.log("bwmon: Removing dead slice %s from dat." \
                % deaddb[deadslice]['slice'].name)
            del deaddb[deadslice]

    # Get actual running values from tc since we've added and removed buckets.
    # Update slice totals and bandwidth. {xid: {values}}
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.log("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__(), 2)

    # Update all byte limites on all slices
    for (xid, slice) in slices.iteritems():
        # Monitor only the specified slices
        if xid == root_xid or xid == default_xid: continue
        # BUG FIX: was `name not in names` with `name` undefined in this scope.
        if names and slice.name not in names:
            continue

        if (time.time() >= (slice.time + period)) or \
           (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
           (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
            # Reset to defaults every 24 hours or if it appears
            # that the byte counters have overflowed (or, more
            # likely, the node was restarted or the HTB buckets
            # were re-initialized).
            slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
        elif ENABLE:
            logger.log("bwmon: Updating slice %s" % slice.name, 2)
            slice.update(kernelhtbs[xid], live[xid]['_rspec'])

    logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),datafile), 2)
    f = open(datafile, "w")
    pickle.dump((version, slices, deaddb), f)
    f.close()
# doesnt use generic default interface because this runs as its own thread.
# changing the config variable will not have an effect since GetSlivers: pass
def getDefaults(nmdbcopy):
    """
    Get defaults from default slice's slice attributes.

    Returns False (and disables all HTBs) when the default slice's
    net_max_rate is -1, signalling that limits are administratively off.
    """
    status = True
    # The site-wide default slice carries node-level settings.
    dfltslice = nmdbcopy.get(PLC_SLICE_PREFIX+"_default")
    if dfltslice:
        if dfltslice['rspec']['net_max_rate'] == -1:
            allOff()
            status = False
    return status
def allOff():
    """
    Turn off all slice HTBs
    """
    # Get/set special slice IDs
    root_xid = bwlimit.get_xid("root")
    default_xid = bwlimit.get_xid("default")
    kernelhtbs = gethtbs(root_xid, default_xid)
    logger.log("bwmon: Disabling all running HTBs.")
    for htb in kernelhtbs.keys(): bwlimit.off(htb)
# Event used by GetSlivers() to wake the monitoring thread.
lock = threading.Event()
def run():
    """
    When run as a thread, wait for event, lock db, deep copy it, release it,
    run bwmon.GetSlivers(), then go back to waiting.
    """
    logger.log("bwmon: Thread started", 2)
    while True:
        lock.wait()
        logger.log("bwmon: Event received. Running.", 2)
        # Snapshot the NM database under its lock; work on the copy.
        database.db_lock.acquire()
        nmdbcopy = copy.deepcopy(database.db)
        database.db_lock.release()
        try:
            if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
                # class show to check if net:InitNodeLimit:bwlimit.init has run.
                sync(nmdbcopy)
            else: logger.log("bwmon: BW limits DISABLED.")
        except: logger.log_exc("bwmon failed")
        lock.clear()
def start(*args):
    """NM plugin hook: launch run() as a daemon thread (args unused)."""
    tools.as_daemon_thread(run)
def GetSlivers(*args):
    """NM plugin hook: wake the bwmon thread (args unused)."""
    logger.verbose ("bwmon: triggering dummy GetSlivers")
    # run() blocks on this event; setting it triggers one sync pass.
    lock.set()