3 # Average bandwidth monitoring script. Run periodically via NM db.sync to
4 # enforce a soft limit on daily bandwidth usage for each slice. If a
5 # slice is found to have transmitted 80% of its daily byte limit usage,
6 # its instantaneous rate will be capped at the bytes remaining in the limit
7 # over the time remaining in the recording period.
9 # Two separate limits are enforced, one for destinations exempt from
10 # the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
12 # Mark Huang <mlhuang@cs.princeton.edu>
13 # Andy Bavier <acb@cs.princeton.edu>
14 # Faiyaz Ahmed <faiyaza@cs.princeton.edu>
15 # Copyright (C) 2004-2008 The Trustees of Princeton University
26 import plnode.bwlimit as bwlimit
31 from config import Config
36 # Set DEBUG to True if you don't want to send emails
38 # Set ENABLE to False to setup buckets, but not limit.
41 DB_FILE = "/var/lib/nodemanager/bwmon.pickle"
44 seconds_per_day = 24 * 60 * 60
47 dev_default = tools.get_default_if()
48 # Burst to line rate (or node cap). Set by NM. in KBit/s
49 default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
50 default_Maxi2Rate = int(bwlimit.bwmax / 1000)
51 # 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
52 # 5.4 Gbyte per day max allowed transferred per recording period
53 # 5.4 Gbytes per day is approx 512k/s for 24hrs (approx because the original math was wrong,
54 # but it's better to keep a higher byte total and keep people happy than to correct
55 # the problem and piss people off.
56 # default_MaxKByte = 5662310
59 # llp wants to double these, so we use the following
60 # 1mbit * 24hrs * 60mins * 60secs = bits/day
61 # 1000000 * 24 * 60 * 60 / (1024 * 8)
62 default_MaxKByte = 10546875
64 # 16.4 Gbyte per day max allowed transferred per recording period to I2
65 # default_Maxi2KByte = 17196646
68 # 3Mb/s for 24hrs a day (30.17 gigs)
69 default_Maxi2KByte = 31640625
71 # Default share quanta
75 period = 1 * seconds_per_day
80 The slice %(slice)s has transmitted more than %(bytes)s from
81 %(hostname)s to %(class)s destinations
84 Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
87 Please reduce the average %(class)s transmission rate
88 of the slice to %(limit)s per %(period)s.
94 %(date)s %(hostname)s bwcap %(slice)s
97 def format_bytes(bytes, si = True):
99 Formats bytes into a string
104 # Officially, a kibibyte
107 if bytes >= (kilo * kilo * kilo):
108 return "%.1f GB" % (bytes / (kilo * kilo * kilo))
109 elif bytes >= 1000000:
110 return "%.1f MB" % (bytes / (kilo * kilo))
112 return "%.1f KB" % (bytes / kilo)
114 return "%.0f bytes" % bytes
116 def format_period(seconds):
118 Formats a period in seconds into a string
121 if seconds == (24 * 60 * 60):
123 elif seconds == (60 * 60):
125 elif seconds > (24 * 60 * 60):
126 return "%.1f days" % (seconds / 24. / 60. / 60.)
127 elif seconds > (60 * 60):
128 return "%.1f hours" % (seconds / 60. / 60.)
130 return "%.1f minutes" % (seconds / 60.)
132 return "%.0f seconds" % seconds
134 def slicemail(slice, subject, body):
136 Front end to sendmail. Sends email to slice alias with given subject and body.
139 sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % config.PLC_MAIL_SUPPORT_ADDRESS, "w")
141 # Parsed from MyPLC config
142 to = [config.PLC_MAIL_MOM_LIST_ADDRESS]
144 if slice is not None and slice != "root":
145 to.append(config.PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))
147 header = {'from': "%s Support <%s>" % (config.PLC_NAME,
148 config.PLC_MAIL_SUPPORT_ADDRESS),
150 'version': sys.version.split(" ")[0],
156 Content-type: text/plain
160 X-Mailer: Python/%(version)s
163 """.lstrip() % header)
173 Stores the last recorded bandwidth parameters of a slice.
175 xid - slice context/VServer ID
177 time - beginning of recording period in UNIX seconds
178 bytes - low bandwidth bytes transmitted at the beginning of the recording period
179 i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
180 MaxKByte - total volume of data allowed
181 ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period)
182 Maxi2KByte - same as MaxKByte, but for i2
183 Threshi2Kbyte - same as Threshi2KByte, but for i2
184 MaxRate - max_rate slice attribute.
185 Maxi2Rate - max_exempt_rate slice attribute.
186 Share - Used by Sirius to loan min rates
187 Sharei2 - Used by Sirius to loan min rates for i2
188 self.emailed - did slice recv email during this recording period
192 def __init__(self, xid, name, rspec):
198 self.MaxRate = default_MaxRate
199 self.MinRate = bwlimit.bwmin // 1000
200 self.Maxi2Rate = default_Maxi2Rate
201 self.Mini2Rate = bwlimit.bwmin // 1000
202 self.MaxKByte = default_MaxKByte
203 self.ThreshKByte = int(.8 * self.MaxKByte)
204 self.Maxi2KByte = default_Maxi2KByte
205 self.Threshi2KByte = int(.8 * self.Maxi2KByte)
206 self.Share = default_Share
207 self.Sharei2 = default_Share
211 self.updateSliceTags(rspec)
213 xid=self.xid, dev=dev_default,
214 minrate=self.MinRate * 1000,
215 maxrate=self.MaxRate * 1000,
216 maxexemptrate=self.Maxi2Rate * 1000,
217 minexemptrate=self.Mini2Rate * 1000,
223 def updateSliceTags(self, rspec):
225 Use rspecs from GetSlivers to PLC to populate slice object. Also
226 do some sanity checking.
229 # Sanity check plus policy decision for MinRate:
230 # Minrate cant be greater than 25% of MaxRate or NodeCap.
231 MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin // 1000))
232 if MinRate > int(.25 * default_MaxRate):
233 MinRate = int(.25 * default_MaxRate)
234 if MinRate != self.MinRate:
235 self.MinRate = MinRate
236 logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))
238 MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
239 if MaxRate != self.MaxRate:
240 self.MaxRate = MaxRate
241 logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))
243 Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin // 1000))
244 if Mini2Rate != self.Mini2Rate:
245 self.Mini2Rate = Mini2Rate
246 logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))
248 Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
249 if Maxi2Rate != self.Maxi2Rate:
250 self.Maxi2Rate = Maxi2Rate
251 logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))
253 MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
254 if MaxKByte != self.MaxKByte:
255 self.MaxKByte = MaxKByte
256 logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte))
258 Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
259 if Maxi2KByte != self.Maxi2KByte:
260 self.Maxi2KByte = Maxi2KByte
261 logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))
263 ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
264 if ThreshKByte != self.ThreshKByte:
265 self.ThreshKByte = ThreshKByte
266 logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))
268 Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
269 if Threshi2KByte != self.Threshi2KByte:
270 self.Threshi2KByte = Threshi2KByte
271 logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))
273 Share = int(rspec.get('net_share', default_Share))
274 if Share != self.Share:
276 logger.log("bwmon: Updating %s: Net Share = %s" %(self.name, self.Share))
278 Sharei2 = int(rspec.get('net_i2_share', default_Share))
279 if Sharei2 != self.Sharei2:
280 self.Sharei2 = Sharei2
281 logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.i2Share))
284 def reset(self, runningrates, rspec):
286 Begin a new recording period. Remove caps by restoring limits
287 to their default values.
289 # Cache share for later comparison
290 self.Share = runningrates.get('share', 1)
292 # Query Node Manager for max rate overrides
293 self.updateSliceTags(rspec)
295 # Reset baseline time
296 self.time = time.time()
298 # Reset baseline byte counts
299 self.bytes = runningrates.get('usedbytes', 0)
300 self.i2bytes = runningrates.get('usedi2bytes', 0)
307 maxrate = self.MaxRate * 1000
308 minrate = self.MinRate * 1000
309 maxi2rate = self.Maxi2Rate * 1000
310 mini2rate = self.Mini2Rate * 1000
312 if (maxrate != runningrates.get('maxrate', 0)) or \
313 (minrate != runningrates.get('maxrate', 0)) or \
314 (maxi2rate != runningrates.get('maxexemptrate', 0)) or \
315 (mini2rate != runningrates.get('minexemptrate', 0)) or \
316 (self.Share != runningrates.get('share', 0)):
317 logger.log("bwmon: %s reset to %s/%s" % \
319 bwlimit.format_tc_rate(maxrate),
320 bwlimit.format_tc_rate(maxi2rate)))
321 bwlimit.set(xid = self.xid, dev = dev_default,
322 minrate = self.MinRate * 1000,
323 maxrate = self.MaxRate * 1000,
324 maxexemptrate = self.Maxi2Rate * 1000,
325 minexemptrate = self.Mini2Rate * 1000,
328 def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
330 Notify the slice it's being capped.
332 # Prepare message parameters from the template
334 params = {'slice': self.name, 'hostname': socket.gethostname(),
335 'since': time.asctime(time.gmtime(self.time)) + " GMT",
336 'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
337 'date': time.asctime(time.gmtime()) + " GMT",
338 'period': format_period(period)}
340 if new_maxrate != (self.MaxRate * 1000):
341 # Format template parameters for low bandwidth message
342 params['class'] = "low bandwidth"
343 params['bytes'] = format_bytes(usedbytes - self.bytes)
344 params['limit'] = format_bytes(self.MaxKByte * 1024)
345 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
347 # Cap low bandwidth burst rate
348 message += template % params
349 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
351 if new_maxexemptrate != (self.Maxi2Rate * 1000):
352 # Format template parameters for high bandwidth message
353 params['class'] = "high bandwidth"
354 params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
355 params['limit'] = format_bytes(self.Maxi2KByte * 1024)
356 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
358 message += template % params
359 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
362 if self.emailed == False:
363 subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
365 logger.log("bwmon: "+ subject)
366 logger.log("bwmon: "+ message + (footer % params))
369 logger.log("bwmon: Emailing %s" % self.name)
370 slicemail(self.name, subject, message + (footer % params))
373 def update(self, runningrates, rspec):
375 Update byte counts and check if byte thresholds have been
376 exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period.
377 Recalculate every time module runs.
379 # cache share for later comparison
380 runningrates['share'] = self.Share
382 # Query Node Manager for max rate overrides
383 self.updateSliceTags(rspec)
385 usedbytes = runningrates['usedbytes']
386 usedi2bytes = runningrates['usedi2bytes']
389 if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
390 sum = self.bytes + (self.ThreshKByte * 1024)
391 maxbyte = self.MaxKByte * 1024
392 bytesused = usedbytes - self.bytes
393 timeused = int(time.time() - self.time)
394 # Calculate new rate, in bit/s
395 new_maxrate = int(((maxbyte - bytesused) * 8)
396 / (period - timeused))
397 # Never go under MinRate
398 if new_maxrate < (self.MinRate * 1000):
399 new_maxrate = self.MinRate * 1000
400 # State information. I'm capped.
404 new_maxrate = self.MaxRate * 1000
407 if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
408 maxi2byte = self.Maxi2KByte * 1024
409 i2bytesused = usedi2bytes - self.i2bytes
410 timeused = int(time.time() - self.time)
412 new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)
413 /(period - timeused))
414 # Never go under MinRate
415 if new_maxi2rate < (self.Mini2Rate * 1000):
416 new_maxi2rate = self.Mini2Rate * 1000
417 # State information. I'm capped.
421 new_maxi2rate = self.Maxi2Rate * 1000
424 # Check running values against newly calculated values so as not to run tc
426 if (runningrates['maxrate'] != new_maxrate) or \
427 (runningrates['minrate'] != self.MinRate * 1000) or \
428 (runningrates['maxexemptrate'] != new_maxi2rate) or \
429 ('minexemptrate' in runningrates and runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
430 (runningrates['share'] != self.Share):
432 bwlimit.set(xid = self.xid, dev = dev_default,
433 minrate = self.MinRate * 1000,
434 maxrate = new_maxrate,
435 minexemptrate = self.Mini2Rate * 1000,
436 maxexemptrate = new_maxi2rate,
440 if self.capped == True:
441 self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
444 def gethtbs(root_xid, default_xid):
446 Return dict {xid: {*rates}} of running htbs as reported by tc that have names.
447 Turn off HTBs without names.
450 for params in bwlimit.get(dev = dev_default):
453 minexemptrate, maxexemptrate,
454 usedbytes, usedi2bytes) = params
456 name = bwlimit.get_slice(xid)
459 and (xid != root_xid) \
460 and (xid != default_xid):
461 # Orphaned (not associated with a slice) class
463 logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
464 bwlimit.off(xid, dev = dev_default)
466 livehtbs[xid] = {'share': share,
469 'maxexemptrate': maxexemptrate,
470 'minexemptrate': minexemptrate,
471 'usedbytes': usedbytes,
473 'usedi2bytes': usedi2bytes}
479 Syncs tc, db, and bwmon.pickle.
480 Then, starts new slices, kills old ones, and updates byte accounts for each running slice.
481 Sends emails and caps those that went over their limit.
489 default_Maxi2KByte, \
495 # In case the limits have changed.
496 default_MaxRate = int(bwlimit.get_bwcap(dev_default) / 1000)
497 default_Maxi2Rate = int(bwlimit.bwmax / 1000)
499 # In case the default isn't set yet.
500 if default_MaxRate == -1:
501 default_MaxRate = 1000000
504 # with svn we used to have a trick to detect upgrades of this file
505 # this has gone with the move to git, without any noticeable effect on operations though
507 f = open(DB_FILE, "r+")
508 logger.verbose("bwmon: Loading %s" % DB_FILE)
509 (version, slices, deaddb) = pickle.load(f)
511 # Check version of data file
512 if version != "$Id$":
513 logger.log("bwmon: Not using old version '%s' data file %s" % (version, DB_FILE))
520 # Get/set special slice IDs
521 root_xid = bwlimit.get_xid("root")
522 default_xid = bwlimit.get_xid("default")
524 # Since root is required for sanity, it's not in the API/plc database, so pass {}
526 if root_xid not in list(slices.keys()):
527 slices[root_xid] = Slice(root_xid, "root", {})
528 slices[root_xid].reset({}, {})
530 # Used by bwlimit. pass {} since there is no rspec (like above).
531 if default_xid not in list(slices.keys()):
532 slices[default_xid] = Slice(default_xid, "default", {})
533 slices[default_xid].reset({}, {})
536 # Get running slivers that should be on this node (from plc). {xid: name}
537 # db keys on name, bwmon keys on xid. db doesnt have xid either.
538 for plcSliver in list(nmdbcopy.keys()):
539 live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
541 logger.verbose("bwmon: Found %s instantiated slices" % list(live.keys()).__len__())
542 logger.verbose("bwmon: Found %s slices in dat file" % list(slices.values()).__len__())
544 # Get actual running values from tc.
545 # Update slice totals and bandwidth. {xid: {values}}
546 kernelhtbs = gethtbs(root_xid, default_xid)
547 logger.verbose("bwmon: Found %s running HTBs" % list(kernelhtbs.keys()).__len__())
549 # The dat file has HTBs for slices, but the HTBs aren't running
550 nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
551 logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
553 for nohtbslice in nohtbslices:
554 if nohtbslice in live:
555 slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
557 logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
558 del slices[nohtbslice]
560 # The dat file doesnt have HTB for the slice but kern has HTB
561 slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
562 logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
563 for slicenodat in slicesnodat:
564 # But slice is running
565 if slicenodat in live:
566 # init the slice. which means start accounting over since kernel
567 # htb was already there.
568 slices[slicenodat] = Slice(slicenodat,
569 live[slicenodat]['name'],
570 live[slicenodat]['_rspec'])
573 # Slices in GetSlivers but not running HTBs
574 newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
575 logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())
578 for newslice in newslicesxids:
579 # Delegated slices don't have xids (which are uids) since they haven't been
581 if newslice != None and ('_rspec' in live[newslice]) == True:
582 # Check to see if we recently deleted this slice.
583 if live[newslice]['name'] not in list(deaddb.keys()):
584 logger.log( "bwmon: new slice %s" % live[newslice]['name'] )
585 # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
586 # and made a dict of computed values.
587 slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
588 slices[newslice].reset( {}, live[newslice]['_rspec'] )
589 # Double check time for dead slice in deaddb is within 24hr recording period.
590 elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
591 deadslice = deaddb[live[newslice]['name']]
592 logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
593 slices[newslice] = deadslice['slice']
594 slices[newslice].xid = newslice
596 newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
597 "minrate": deadslice['slice'].MinRate * 1000,
598 "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
599 "usedbytes": deadslice['htb']['usedbytes'] * 1000,
600 "usedi2bytes": deadslice['htb']['usedi2bytes'],
601 "share":deadslice['htb']['share']}
602 slices[newslice].reset(newvals, live[newslice]['_rspec'])
604 slices[newslice].update(newvals, live[newslice]['_rspec'])
605 # Since the slice has been reinitialized, remove from dead database.
606 del deaddb[deadslice['slice'].name]
609 logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name'])
611 # Move dead slices that exist in the pickle file, but
612 # aren't instantiated by PLC into the dead dict until
613 # recording period is over. This is to avoid the case where a slice is dynamically created
614 # and destroyed then recreated to get around byte limits.
615 deadxids = set(slices.keys()) - set(live.keys())
616 logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
617 for deadxid in deadxids:
618 if deadxid == root_xid or deadxid == default_xid:
620 logger.log("bwmon: removing dead slice %s " % deadxid)
621 if deadxid in slices and deadxid in kernelhtbs:
622 # add slice (by name) to deaddb
623 logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
624 deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
626 if deadxid in kernelhtbs:
627 logger.verbose("bwmon: Removing HTB for %s." % deadxid)
628 bwlimit.off(deadxid, dev = dev_default)
631 for deadslice in list(deaddb.keys()):
632 if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
633 logger.log("bwmon: Removing dead slice %s from dat." \
634 % deaddb[deadslice]['slice'].name)
635 del deaddb[deadslice]
637 # Get actual running values from tc since we've added and removed buckets.
638 # Update slice totals and bandwidth. {xid: {values}}
639 kernelhtbs = gethtbs(root_xid, default_xid)
640 logger.verbose("bwmon: now %s running HTBs" % list(kernelhtbs.keys()).__len__())
642 # Update all byte limits on all slices
643 for (xid, slice) in slices.items():
644 # Monitor only the specified slices
645 if xid == root_xid or xid == default_xid: continue
646 if names and name not in names:
649 if (time.time() >= (slice.time + period)) or \
650 (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
651 (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
652 # Reset to defaults every 24 hours or if it appears
653 # that the byte counters have overflowed (or, more
654 # likely, the node was restarted or the HTB buckets
655 # were re-initialized).
656 slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
658 logger.verbose("bwmon: Updating slice %s" % slice.name)
660 slice.update(kernelhtbs[xid], live[xid]['_rspec'])
662 logger.verbose("bwmon: Saving %s slices in %s" % (list(slices.keys()).__len__(), DB_FILE))
663 f = open(DB_FILE, "w")
664 pickle.dump((version, slices, deaddb), f)
667 # doesnt use generic default interface because this runs as its own thread.
668 # changing the config variable will not have an effect since GetSlivers: pass
669 def getDefaults(nmdbcopy):
671 Get defaults from default slice's slice attributes.
675 dfltslice = nmdbcopy.get(Config().PLC_SLICE_PREFIX+"_default")
677 if dfltslice['rspec']['net_max_rate'] == -1:
685 Turn off all slice HTBs
687 # Get/set special slice IDs
688 root_xid = bwlimit.get_xid("root")
689 default_xid = bwlimit.get_xid("default")
690 kernelhtbs = gethtbs(root_xid, default_xid)
692 logger.log("bwmon: Disabling all running HTBs.")
693 for htb in list(kernelhtbs.keys()): bwlimit.off(htb, dev = dev_default)
696 lock = threading.Event()
699 When run as a thread, wait for event, lock db, deep copy it, release it,
700 run bwmon.GetSlivers(), then go back to waiting.
702 logger.verbose("bwmon: Thread started")
705 logger.verbose("bwmon: Event received. Running.")
706 database.db_lock.acquire()
707 nmdbcopy = copy.deepcopy(database.db)
708 database.db_lock.release()
710 if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev %s" % dev_default)) > 0:
711 # class show to check if net:InitNodeLimit:bwlimit.init has run.
713 else: logger.log("bwmon: BW limits DISABLED.")
714 except: logger.log_exc("bwmon failed")
718 tools.as_daemon_thread(run)
720 def GetSlivers(*args):
721 logger.verbose ("bwmon: triggering dummy GetSlivers")