3 # Average bandwidth monitoring script. Run periodically via NM db.sync to
4 # enforce a soft limit on daily bandwidth usage for each slice. If a
5 # slice is found to have transmitted 80% of its daily byte limit usage,
6 # its instantaneous rate will be capped at the bytes remaining in the limit
7 # over the time remaining in the recording period.
9 # Two separate limits are enforced, one for destinations exempt from
10 # the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
12 # Mark Huang <mlhuang@cs.princeton.edu>
13 # Andy Bavier <acb@cs.princeton.edu>
14 # Faiyaz Ahmed <faiyaza@cs.princeton.edu>
15 # Copyright (C) 2004-2008 The Trustees of Princeton University
36 # Set DEBUG to True if you don't want to send emails
38 # Set ENABLE to False to setup buckets, but not limit.
41 datafile = "/var/lib/misc/bwmon.dat"
44 sys.path.append("/etc/planetlab")
45 from plc_config import *
48 logger.log("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found", 2)
49 logger.log("bwmon: Running in DEBUG mode. Logging to file and not emailing.", 1)
52 seconds_per_day = 24 * 60 * 60
# Interface carrying the node bandwidth cap; the default rates below are
# derived from this device's cap.
55 dev_default = tools.get_default_if()
56 # Burst to line rate (or node cap). Set by NM. in KBit/s
57 default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
58 default_Maxi2Rate = int(bwlimit.bwmax / 1000)
59 # 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
60 # 5.4 Gbyte per day max allowed transferred per recording period
61 # 5.4 Gbytes per day is approx 512k/s for 24hrs (approx because original math was wrong
62 # but it's better to keep a higher byte total and keep people happy than correct
63 # the problem and piss people off.
64 # default_MaxKByte = 5662310
67 # llp wants to double these, so we use the following
68 # 1mbit * 24hrs * 60mins * 60secs = bits/day
69 # 1000000 * 24 * 60 * 60 / (1024 * 8)
70 default_MaxKByte = 10546875
72 # 16.4 Gbyte per day max allowed transferred per recording period to I2
73 # default_Maxi2KByte = 17196646
76 # 3Mb/s for 24hrs a day (30.17 gigs)
77 default_Maxi2KByte = 31640625
79 # Default share quanta
# Length of one accounting/recording period: one day.
83 period = 1 * seconds_per_day
88 The slice %(slice)s has transmitted more than %(bytes)s from
89 %(hostname)s to %(class)s destinations
92 Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
95 Please reduce the average %(class)s transmission rate
96 of the slice to %(limit)s per %(period)s.
102 %(date)s %(hostname)s bwcap %(slice)s
def format_bytes(bytes, si = True):
    """
    Format a byte count as a human-readable string.

    bytes -- number of bytes to format
    si    -- if True, use SI units (1 KB = 1000 bytes); if False, use
             binary units (1 KB = 1024 bytes, officially a kibibyte)
    """
    if si:
        kilo = 1000.
    else:
        # Officially, a kibibyte
        kilo = 1024.

    if bytes >= (kilo * kilo * kilo):
        return "%.1f GB" % (bytes / (kilo * kilo * kilo))
    elif bytes >= (kilo * kilo):
        # BUG FIX: the MB threshold was a literal 1000000, which disagreed
        # with the kilo base whenever si=False (binary units).
        return "%.1f MB" % (bytes / (kilo * kilo))
    elif bytes >= kilo:
        return "%.1f KB" % (bytes / kilo)
    else:
        return "%.0f bytes" % bytes
def format_period(seconds):
    """
    Format a period given in seconds as a human-readable string.

    Exact multiples of one hour/day collapse to the bare unit name
    ("hour", "day") so messages read "per day" rather than "per 1.0 days".
    """
    # Named thresholds instead of repeated 24 * 60 * 60 arithmetic.
    day = 24 * 60 * 60
    hour = 60 * 60

    if seconds == day:
        return "day"
    elif seconds == hour:
        return "hour"
    elif seconds > day:
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    elif seconds > hour:
        return "%.1f hours" % (seconds / 60. / 60.)
    elif seconds > 60:
        return "%.1f minutes" % (seconds / 60.)
    else:
        return "%.0f seconds" % seconds
142 def slicemail(slice, subject, body):
144 Front end to sendmail. Sends email to slice alias with given subject and body.
# NOTE(review): the docstring delimiters and the lines that write the
# composed message to the sendmail pipe (and close it) are not visible in
# this excerpt; comments below cover only what is shown.
# Pipe into sendmail: -N never suppresses delivery-status notifications,
# -t takes recipients from the headers, and the envelope sender is the
# PLC support address.
147 sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % PLC_MAIL_SUPPORT_ADDRESS, "w")
149 # Parsed from MyPLC config
# Every mail goes to the MOM list; the slice alias is added as well
# unless the "slice" is root (or None).
150 to = [PLC_MAIL_MOM_LIST_ADDRESS]
152 if slice is not None and slice != "root":
153 to.append(PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))
# Header fields interpolated into the RFC-822 message template below.
155 header = {'from': "%s Support <%s>" % (PLC_NAME, PLC_MAIL_SUPPORT_ADDRESS),
157 'version': sys.version.split(" ")[0],
163 Content-type: text/plain
167 X-Mailer: Python/%(version)s
170 """.lstrip() % header)
180 Stores the last recorded bandwidth parameters of a slice.
182 xid - slice context/VServer ID
184 time - beginning of recording period in UNIX seconds
185 bytes - low bandwidth bytes transmitted at the beginning of the recording period
186 i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
187 MaxKByte - total volume of data allowed
188 ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period)
189 Maxi2KByte - same as MaxKByte, but for i2
190 Threshi2Kbyte - same as Threshi2KByte, but for i2
191 MaxRate - max_rate slice attribute.
192 Maxi2Rate - max_exempt_rate slice attribute.
193 Share - Used by Sirius to loan min rates
194 Sharei2 - Used by Sirius to loan min rates for i2
195 self.emailed - did slice recv email during this recording period
199 def __init__(self, xid, name, rspec):
# Seed all limits from the module-level defaults before applying the
# slice's own attributes via updateSliceTags().  Rates are in KBit/s,
# byte limits in KBytes.
205 self.MaxRate = default_MaxRate
206 self.MinRate = bwlimit.bwmin / 1000
207 self.Maxi2Rate = default_Maxi2Rate
208 self.Mini2Rate = bwlimit.bwmin / 1000
209 self.MaxKByte = default_MaxKByte
# Soft-limit threshold: capping kicks in at 80% of the byte limit.
210 self.ThreshKByte = int(.8 * self.MaxKByte)
211 self.Maxi2KByte = default_Maxi2KByte
212 self.Threshi2KByte = int(.8 * self.Maxi2KByte)
213 self.Share = default_Share
214 self.Sharei2 = default_Share
# Apply per-slice overrides from the rspec, then program the kernel HTB.
# bwlimit.set() takes bit/s, hence the * 1000 on the KBit/s values.
218 self.updateSliceTags(rspec)
219 bwlimit.set(xid = self.xid,
220 minrate = self.MinRate * 1000,
221 maxrate = self.MaxRate * 1000,
222 maxexemptrate = self.Maxi2Rate * 1000,
223 minexemptrate = self.Mini2Rate * 1000,
# NOTE(review): the closing argument(s) of this bwlimit.set() call
# (likely share=) are not visible in this excerpt.
def updateSliceTags(self, rspec):
    """
    Use the rspec from GetSlivers (PLC) to populate this slice object,
    applying sanity checks along the way.  Each limit is stored and
    logged only when it actually changes.
    """
    # Sanity check plus policy decision for MinRate:
    # MinRate can't be greater than 25% of MaxRate or NodeCap.
    MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin / 1000))
    if MinRate > int(.25 * default_MaxRate):
        MinRate = int(.25 * default_MaxRate)
    if MinRate != self.MinRate:
        self.MinRate = MinRate
        logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))

    MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
    if MaxRate != self.MaxRate:
        self.MaxRate = MaxRate
        logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))

    Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000))
    if Mini2Rate != self.Mini2Rate:
        self.Mini2Rate = Mini2Rate
        logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))

    Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
    if Maxi2Rate != self.Maxi2Rate:
        self.Maxi2Rate = Maxi2Rate
        logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))

    MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
    if MaxKByte != self.MaxKByte:
        self.MaxKByte = MaxKByte
        logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte))

    Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
    if Maxi2KByte != self.Maxi2KByte:
        self.Maxi2KByte = Maxi2KByte
        logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))

    # Thresholds default to 80% of the (possibly just-updated) byte limits.
    ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
    if ThreshKByte != self.ThreshKByte:
        self.ThreshKByte = ThreshKByte
        logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))

    Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
    if Threshi2KByte != self.Threshi2KByte:
        self.Threshi2KByte = Threshi2KByte
        logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))

    Share = int(rspec.get('net_share', default_Share))
    if Share != self.Share:
        # BUG FIX: the assignment was missing, so a changed net_share was
        # logged but never stored (and re-logged on every pass); every
        # other tag above follows the store-then-log pattern.
        self.Share = Share
        logger.log("bwmon: Updating %s: Net Share = %s" %(self.name, self.Share))

    Sharei2 = int(rspec.get('net_i2_share', default_Share))
    if Sharei2 != self.Sharei2:
        self.Sharei2 = Sharei2
        # BUG FIX: the log line read self.i2Share, an attribute that does
        # not exist on this class (AttributeError whenever an i2 share
        # update was logged); use self.Sharei2.
        logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.Sharei2))
def reset(self, runningrates, rspec):
    """
    Begin a new recording period.  Remove caps by restoring limits
    to their default values and re-baseline the byte counters.

    runningrates -- dict of live tc values for this slice's HTB
                    (may be empty for a fresh slice)
    rspec        -- computed rspec from GetSlivers for tag overrides
    """
    # Cache share for later comparison
    self.Share = runningrates.get('share', 1)

    # Query Node Manager for max rate overrides
    self.updateSliceTags(rspec)

    # Reset baseline time
    self.time = time.time()

    # Reset baseline byte counts
    self.bytes = runningrates.get('usedbytes', 0)
    self.i2bytes = runningrates.get('usedi2bytes', 0)

    # New recording period: the slice may be capped and emailed again.
    # NOTE(review): these two resets were not visible in the reviewed
    # excerpt; confirm against notify()/update(), which read them.
    self.emailed = False
    self.capped = False

    # Reset rates (bwlimit wants bit/s; these are KBit/s).
    maxrate = self.MaxRate * 1000
    minrate = self.MinRate * 1000
    maxi2rate = self.Maxi2Rate * 1000
    mini2rate = self.Mini2Rate * 1000

    # Only touch tc if something actually differs from what is running.
    # BUG FIX: the minrate comparison previously read
    # runningrates.get('maxrate', 0), so a drifted min rate alone never
    # triggered a reset of the kernel HTB.
    if (maxrate != runningrates.get('maxrate', 0)) or \
       (minrate != runningrates.get('minrate', 0)) or \
       (maxi2rate != runningrates.get('maxexemptrate', 0)) or \
       (mini2rate != runningrates.get('minexemptrate', 0)) or \
       (self.Share != runningrates.get('share', 0)):
        logger.log("bwmon: %s reset to %s/%s" % \
            (self.name,
             bwlimit.format_tc_rate(maxrate),
             bwlimit.format_tc_rate(maxi2rate)), 1)
        bwlimit.set(xid = self.xid, dev = dev_default,
            minrate = self.MinRate * 1000,
            maxrate = self.MaxRate * 1000,
            maxexemptrate = self.Maxi2Rate * 1000,
            minexemptrate = self.Mini2Rate * 1000,
            share = self.Share)
334 def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
336 Notify the slice it's being capped.
# NOTE(review): the docstring delimiters and the initialization of
# `message` are not visible in this excerpt; comments below cover only
# what is shown.
338 # Prepare message parameters from the template
340 params = {'slice': self.name, 'hostname': socket.gethostname(),
341 'since': time.asctime(time.gmtime(self.time)) + " GMT",
342 'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
343 'date': time.asctime(time.gmtime()) + " GMT",
344 'period': format_period(period)}
# A new rate differing from the slice's configured MaxRate means the
# low-bandwidth class was capped; append the templated paragraph.
346 if new_maxrate != (self.MaxRate * 1000):
347 # Format template parameters for low bandwidth message
348 params['class'] = "low bandwidth"
349 params['bytes'] = format_bytes(usedbytes - self.bytes)
350 params['limit'] = format_bytes(self.MaxKByte * 1024)
351 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
353 # Cap low bandwidth burst rate
354 message += template % params
355 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
# Same again for the exempt (Internet2) class.
357 if new_maxexemptrate != (self.Maxi2Rate * 1000):
358 # Format template parameters for high bandwidth message
359 params['class'] = "high bandwidth"
360 params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
361 params['limit'] = format_bytes(self.Maxi2KByte * 1024)
362 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
364 message += template % params
365 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
# Only email once per recording period; self.emailed tracks "did slice
# recv email during this recording period" (see class docstring).
368 if self.emailed == False:
369 subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
371 logger.log("bwmon: "+ subject)
372 logger.log("bwmon: "+ message + (footer % params))
375 logger.log("bwmon: Emailing %s" % self.name)
376 slicemail(self.name, subject, message + (footer % params))
379 def update(self, runningrates, rspec):
381 Update byte counts and check if byte thresholds have been
382 exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period.
383 Recalculate every time module runs.
385 # cache share for later comparison
# NOTE(review): this writes self.Share INTO runningrates rather than
# reading the measured value from it, despite the comment above --
# confirm intent.
386 runningrates['share'] = self.Share
388 # Query Node Manager for max rate overrides
389 self.updateSliceTags(rspec)
391 usedbytes = runningrates['usedbytes']
392 usedi2bytes = runningrates['usedi2bytes']
# Low-bandwidth class: once 80% (ThreshKByte) of the limit is consumed,
# spread the remaining bytes over the remaining time in the period.
395 if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
# NOTE(review): `sum` is never used afterwards in the visible code and
# shadows the builtin -- looks like dead code.
396 sum = self.bytes + (self.ThreshKByte * 1024)
397 maxbyte = self.MaxKByte * 1024
398 bytesused = usedbytes - self.bytes
399 timeused = int(time.time() - self.time)
400 # Calculate new rate. in bit/s
# NOTE(review): if timeused >= period this divides by zero or goes
# negative; presumably reset() always runs first -- confirm.
401 new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
402 # Never go under MinRate
403 if new_maxrate < (self.MinRate * 1000):
404 new_maxrate = self.MinRate * 1000
405 # State information. I'm capped.
409 new_maxrate = self.MaxRate * 1000
# Same computation for the exempt (Internet2) class.
412 if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
413 maxi2byte = self.Maxi2KByte * 1024
414 i2bytesused = usedi2bytes - self.i2bytes
415 timeused = int(time.time() - self.time)
417 new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
418 # Never go under MinRate
419 if new_maxi2rate < (self.Mini2Rate * 1000):
420 new_maxi2rate = self.Mini2Rate * 1000
421 # State information. I'm capped.
425 new_maxi2rate = self.Maxi2Rate * 1000
428 # Check running values against newly calculated values so as not to run tc
430 if (runningrates['maxrate'] != new_maxrate) or \
431 (runningrates['minrate'] != self.MinRate * 1000) or \
432 (runningrates['maxexemptrate'] != new_maxi2rate) or \
433 (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
434 (runningrates['share'] != self.Share):
436 bwlimit.set(xid = self.xid,
437 minrate = self.MinRate * 1000,
438 maxrate = new_maxrate,
439 minexemptrate = self.Mini2Rate * 1000,
440 maxexemptrate = new_maxi2rate,
# Notify the slice (and operators) when a cap was applied this pass.
444 if self.capped == True:
445 self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
448 def gethtbs(root_xid, default_xid):
450 Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
451 Turn off HTBs without names.
# NOTE(review): the accumulator initialization, the start of the tuple
# unpacking, and the bwlimit.off() call for orphans are not visible in
# this excerpt.
454 for params in bwlimit.get():
457 minexemptrate, maxexemptrate,
458 usedbytes, usedi2bytes) = params
# Map the running HTB back to a slice name; unnamed HTBs that are not
# the root/default classes are treated as orphans and removed.
460 name = bwlimit.get_slice(xid)
463 and (xid != root_xid) \
464 and (xid != default_xid):
465 # Orphaned (not associated with a slice) class
467 logger.log("bwmon: Found orphaned HTB %s. Removing." %name, 1)
# Record the live rates and byte counters keyed by xid.
470 livehtbs[xid] = {'share': share,
473 'maxexemptrate': maxexemptrate,
474 'minexemptrate': minexemptrate,
475 'usedbytes': usedbytes,
477 'usedi2bytes': usedi2bytes}
# NOTE(review): the enclosing def line (presumably `def sync(nmdbcopy):`),
# the docstring delimiters, and a number of interior lines are not visible
# in this excerpt; the code lines below are reproduced untouched.
483 Syncs tc, db, and bwmon.dat. Then, starts new slices, kills old ones, and updates byte accounts for each running slice. Sends emails and caps those that went over their limit.
496 # In case the limits have changed.
497 default_MaxRate = int(bwlimit.get_bwcap() / 1000)
498 default_Maxi2Rate = int(bwlimit.bwmax / 1000)
500 # In case default isn't set yet.
501 if default_MaxRate == -1:
502 default_MaxRate = 1000000
# Load persisted accounting state (slice totals plus recently-deleted
# slices) from the dat file.
505 f = open(datafile, "r+")
506 logger.log("bwmon: Loading %s" % datafile, 2)
507 (version, slices, deaddb) = pickle.load(f)
509 # Check version of data file
510 if version != "$Id$":
511 logger.log("bwmon: Not using old version '%s' data file %s" % (version, datafile))
518 # Get/set special slice IDs
519 root_xid = bwlimit.get_xid("root")
520 default_xid = bwlimit.get_xid("default")
522 # Since root is required for sanity, its not in the API/plc database, so pass {}
524 if root_xid not in slices.keys():
525 slices[root_xid] = Slice(root_xid, "root", {})
526 slices[root_xid].reset({}, {})
528 # Used by bwlimit. pass {} since there is no rspec (like above).
529 if default_xid not in slices.keys():
530 slices[default_xid] = Slice(default_xid, "default", {})
531 slices[default_xid].reset({}, {})
534 # Get running slivers that should be on this node (from plc). {xid: name}
535 # db keys on name, bwmon keys on xid. db doesnt have xid either.
536 for plcSliver in nmdbcopy.keys():
537 live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
539 logger.log("bwmon: Found %s instantiated slices" % live.keys().__len__(), 2)
540 logger.log("bwmon: Found %s slices in dat file" % slices.values().__len__(), 2)
542 # Get actual running values from tc.
543 # Update slice totals and bandwidth. {xid: {values}}
544 kernelhtbs = gethtbs(root_xid, default_xid)
545 logger.log("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__(), 2)
547 # The dat file has HTBs for slices, but the HTBs aren't running
548 nohtbslices = Set(slices.keys()) - Set(kernelhtbs.keys())
549 logger.log( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__(), 2)
# Slices known to the dat file but with no kernel HTB: restart their
# accounting if still live, otherwise drop them from the dat file.
551 for nohtbslice in nohtbslices:
552 if live.has_key(nohtbslice):
553 slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
555 logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
556 del slices[nohtbslice]
558 # The dat file doesnt have HTB for the slice but kern has HTB
559 slicesnodat = Set(kernelhtbs.keys()) - Set(slices.keys())
560 logger.log( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__(), 2)
561 for slicenodat in slicesnodat:
562 # But slice is running
563 if live.has_key(slicenodat):
564 # init the slice. which means start accounting over since kernel
565 # htb was already there.
566 slices[slicenodat] = Slice(slicenodat,
567 live[slicenodat]['name'],
568 live[slicenodat]['_rspec'])
571 # Slices in GetSlivers but not running HTBs
572 newslicesxids = Set(live.keys()) - Set(kernelhtbs.keys())
573 logger.log("bwmon: Found %s new slices" % newslicesxids.__len__(), 2)
576 for newslice in newslicesxids:
577 # Delegated slices dont have xids (which are uids) since they haven't been
579 if newslice != None and live[newslice].has_key('_rspec') == True:
580 # Check to see if we recently deleted this slice.
581 if live[newslice]['name'] not in deaddb.keys():
582 logger.log( "bwmon: New Slice %s" % live[newslice]['name'] )
583 # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
584 # and made a dict of computed values.
585 slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
586 slices[newslice].reset( {}, live[newslice]['_rspec'] )
587 # Double check time for dead slice in deaddb is within 24hr recording period.
588 elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
589 deadslice = deaddb[live[newslice]['name']]
590 logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
591 slices[newslice] = deadslice['slice']
592 slices[newslice].xid = newslice
# Restore the rates/counters the slice had when it was deleted so it
# cannot evade its byte limit by being destroyed and recreated.
594 newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
595 "minrate": deadslice['slice'].MinRate * 1000,
596 "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
# NOTE(review): usedbytes is scaled by * 1000 while usedi2bytes is not --
# looks inconsistent for a byte counter; confirm intent.
597 "usedbytes": deadslice['htb']['usedbytes'] * 1000,
598 "usedi2bytes": deadslice['htb']['usedi2bytes'],
599 "share":deadslice['htb']['share']}
600 slices[newslice].reset(newvals, live[newslice]['_rspec'])
602 slices[newslice].update(newvals, live[newslice]['_rspec'])
603 # Since the slice has been reinitialized, remove from dead database.
604 del deaddb[deadslice['slice'].name]
607 logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name'])
609 # Move dead slices that exist in the pickle file, but
610 # aren't instantiated by PLC into the dead dict until
611 # recording period is over. This is to avoid the case where a slice is dynamically created
612 # and destroyed then recreated to get around byte limits.
613 deadxids = Set(slices.keys()) - Set(live.keys())
# The "- 2" discounts the always-present root and default xids.
614 logger.log("bwmon: Found %s dead slices" % (deadxids.__len__() - 2), 2)
615 for deadxid in deadxids:
616 if deadxid == root_xid or deadxid == default_xid:
618 logger.log("bwmon: removing dead slice %s " % deadxid)
619 if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
620 # add slice (by name) to deaddb
621 logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
622 deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
624 if kernelhtbs.has_key(deadxid):
625 logger.log("bwmon: Removing HTB for %s." % deadxid, 2)
# Expire deaddb entries whose recording period has elapsed.
629 for deadslice in deaddb.keys():
630 if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
631 logger.log("bwmon: Removing dead slice %s from dat." \
632 % deaddb[deadslice]['slice'].name)
633 del deaddb[deadslice]
635 # Get actual running values from tc since we've added and removed buckets.
636 # Update slice totals and bandwidth. {xid: {values}}
637 kernelhtbs = gethtbs(root_xid, default_xid)
638 logger.log("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__(), 2)
640 # Update all byte limits on all slices
641 for (xid, slice) in slices.iteritems():
642 # Monitor only the specified slices
643 if xid == root_xid or xid == default_xid: continue
# NOTE(review): `names` and `name` are not defined anywhere visible in
# this function -- this filter looks like dead/stale code; verify.
644 if names and name not in names:
647 if (time.time() >= (slice.time + period)) or \
648 (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
649 (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
650 # Reset to defaults every 24 hours or if it appears
651 # that the byte counters have overflowed (or, more
652 # likely, the node was restarted or the HTB buckets
653 # were re-initialized).
654 slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
656 logger.log("bwmon: Updating slice %s" % slice.name, 2)
658 slice.update(kernelhtbs[xid], live[xid]['_rspec'])
# Persist the updated accounting state for the next run.
660 logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),datafile), 2)
661 f = open(datafile, "w")
662 pickle.dump((version, slices, deaddb), f)
665 # doesnt use generic default interface because this runs as its own thread.
666 # changing the config variable will not have an effect since GetSlivers: pass
667 def getDefaults(nmdbcopy):
669 Get defaults from default slice's slice attributes.
# NOTE(review): the docstring delimiters, the return statements, and the
# handling of a missing default slice are not visible in this excerpt.
673 dfltslice = nmdbcopy.get(PLC_SLICE_PREFIX+"_default")
# A net_max_rate of -1 on the default slice appears to be a sentinel
# (presumably "bandwidth limits disabled") -- confirm against the
# branch that follows in the full source.
675 if dfltslice['rspec']['net_max_rate'] == -1:
683 Turn off all slice HTBs
# NOTE(review): the enclosing def line and the docstring delimiters are
# not visible in this excerpt.
685 # Get/set special slice IDs
686 root_xid = bwlimit.get_xid("root")
687 default_xid = bwlimit.get_xid("default")
# Turn off every HTB currently reported by tc via gethtbs().
688 kernelhtbs = gethtbs(root_xid, default_xid)
690 logger.log("bwlimit: Disabling all running HTBs.")
691 for htb in kernelhtbs.keys(): bwlimit.off(htb)
# Event used to wake the monitor thread; set elsewhere to trigger a pass.
694 lock = threading.Event()
697 When run as a thread, wait for event, lock db, deep copy it, release it,
698 run bwmon.GetSlivers(), then go back to waiting.
# NOTE(review): the def line, the loop/try structure, and the
# lock.wait()/lock.clear() calls are not visible in this excerpt.
700 logger.log("bwmon: Thread started", 2)
703 logger.log("bwmon: Event received. Running.", 2)
# Snapshot the NM database under its lock so the sync pass can work on a
# consistent copy without holding the lock for its whole duration.
704 database.db_lock.acquire()
705 nmdbcopy = copy.deepcopy(database.db)
706 database.db_lock.release()
# Only enforce limits when defaults are configured and bwlimit.init has
# already created tc classes on the default device.
708 if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
709 # class show to check if net:InitNodeLimit:bwlimit.init has run.
711 else: logger.log("bwmon: BW limits DISABLED.")
# Bare except keeps the monitor thread alive; the traceback is logged.
712 except: logger.log_exc()
716 tools.as_daemon_thread(run)
718 def GetSlivers(*args):