3 # Average bandwidth monitoring script. Run periodically via NM db.sync to
4 # enforce a soft limit on daily bandwidth usage for each slice. If a
5 # slice is found to have transmitted 80% of its daily byte limit usage,
# 6 # its instantaneous rate will be capped at the bytes remaining in the limit
7 # over the time remaining in the recording period.
9 # Two separate limits are enforced, one for destinations exempt from
10 # the node bandwidth cap (i.e. Internet2), and the other for all other destinations.
12 # Mark Huang <mlhuang@cs.princeton.edu>
13 # Andy Bavier <acb@cs.princeton.edu>
14 # Faiyaz Ahmed <faiyaza@cs.princeton.edu>
15 # Copyright (C) 2004-2008 The Trustees of Princeton University
36 # Set DEBUG to True if you don't want to send emails
38 # Set ENABLE to False to setup buckets, but not limit.
41 datafile = "/var/lib/misc/bwmon.dat"
44 sys.path.append("/etc/planetlab")
45 from plc_config import *
48 logger.log("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found", 2)
49 logger.log("bwmon: Running in DEBUG mode. Logging to file and not emailing.", 1)
# Length of one accounting (recording) period, in seconds.
seconds_per_day = 24 * 60 * 60

# Burst to line rate (or node cap). Set by NM. in KBit/s
default_MaxRate = int(bwlimit.get_bwcap() / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)

# 5.4 Gbyte per day. 5.4 * 1024 k * 1024M * 1024G
# 5.4 Gbyte per day max allowed transferred per recording period
# 5.4 Gbytes per day is approx 512k/s for 24hrs (approx because original math was wrong
# but it's better to keep a higher byte total and keep people happy than correct
# the problem and piss people off.
# default_MaxKByte = 5662310

# llp wants to double these, so we use the following
# 1mbit * 24hrs * 60mins * 60secs = bits/day
# 1000000 * 24 * 60 * 60 / (1024 * 8)
default_MaxKByte = 10546875

# 16.4 Gbyte per day max allowed transferred per recording period to I2
# default_Maxi2KByte = 17196646

# 3Mb/s for 24hrs a day (30.17 gigs)
default_Maxi2KByte = 31640625

# Default share quanta
# NOTE(review): the default_Share assignment this comment describes is not
# visible in this chunk.

# One recording period = one day.
period = 1 * seconds_per_day
87 The slice %(slice)s has transmitted more than %(bytes)s from
88 %(hostname)s to %(class)s destinations
91 Its maximum %(class)s burst rate will be capped at %(new_maxrate)s/s
94 Please reduce the average %(class)s transmission rate
95 of the slice to %(limit)s per %(period)s.
101 %(date)s %(hostname)s bwcap %(slice)s
def format_bytes(bytes, si = True):
    """
    Formats bytes into a human-readable string ("2.0 GB", "1.5 KB", ...).

    bytes -- byte count (int or float)
    si    -- if True, use SI units (kilo = 1000.); if False, use binary
             units (kilo = 1024., i.e. kibibytes)
    """
    # NB: parameter is named 'bytes' (shadows the builtin) — kept for
    # interface compatibility with existing callers.
    if si:
        kilo = 1000.
    else:
        # Officially, a kibibyte
        kilo = 1024.

    if bytes >= (kilo * kilo * kilo):
        return "%.1f GB" % (bytes / (kilo * kilo * kilo))
    elif bytes >= 1000000:
        # The MB threshold is a literal 1e6 regardless of si; only the
        # divisor honors the unit base.
        return "%.1f MB" % (bytes / (kilo * kilo))
    elif bytes >= 1000:
        return "%.1f KB" % (bytes / kilo)
    else:
        return "%.0f bytes" % bytes
def format_period(seconds):
    """
    Formats a period in seconds into a human-readable string.

    Exact multiples of one day/hour return "day"/"hour" so message
    templates read naturally ("per day"); other values are scaled to
    the largest sensible unit.
    """
    if seconds == (24 * 60 * 60):
        return "day"
    elif seconds == (60 * 60):
        return "hour"
    elif seconds > (24 * 60 * 60):
        return "%.1f days" % (seconds / 24. / 60. / 60.)
    elif seconds > (60 * 60):
        return "%.1f hours" % (seconds / 60. / 60.)
    elif seconds > (60):
        return "%.1f minutes" % (seconds / 60.)
    else:
        return "%.0f seconds" % seconds
def slicemail(slice, subject, body):
    """
    Front end to sendmail. Sends email to slice alias with given subject and body.

    slice   -- slice name, or None/"root" to mail only the operators' list
    subject -- message subject line
    body    -- message body text
    """
    # Pipe directly into sendmail; -N never suppresses delivery status
    # notifications, -t takes recipients from the headers we write below.
    sendmail = os.popen("/usr/sbin/sendmail -N never -t -f%s" % PLC_MAIL_SUPPORT_ADDRESS, "w")

    # Parsed from MyPLC config
    to = [PLC_MAIL_MOM_LIST_ADDRESS]

    if slice is not None and slice != "root":
        to.append(PLC_MAIL_SLICE_ADDRESS.replace("SLICE", slice))

    header = {'from': "%s Support <%s>" % (PLC_NAME, PLC_MAIL_SUPPORT_ADDRESS),
              'to': ", ".join(to),
              'version': sys.version.split(" ")[0],
              'subject': subject}

    # Write headers
    sendmail.write(
"""
Content-type: text/plain
From: %(from)s
Reply-To: %(from)s
To: %(to)s
X-Mailer: Python/%(version)s
Subject: %(subject)s

""".lstrip() % header)

    # Write body
    sendmail.write(body)
    # Done
    sendmail.close()
179 Stores the last recorded bandwidth parameters of a slice.
181 xid - slice context/VServer ID
183 time - beginning of recording period in UNIX seconds
184 bytes - low bandwidth bytes transmitted at the beginning of the recording period
185 i2bytes - high bandwidth bytes transmitted at the beginning of the recording period (for I2 -F)
186 MaxKByte - total volume of data allowed
187 ThreshKbyte - After thresh, cap node to (maxkbyte - bytes)/(time left in period)
188 Maxi2KByte - same as MaxKByte, but for i2
189 Threshi2KByte - same as ThreshKByte, but for i2
190 MaxRate - max_rate slice attribute.
191 Maxi2Rate - max_exempt_rate slice attribute.
192 Share - Used by Sirius to loan min rates
193 Sharei2 - Used by Sirius to loan min rates for i2
194 self.emailed - did slice recv email during this recording period
198 def __init__(self, xid, name, rspec):
204 self.MaxRate = default_MaxRate
205 self.MinRate = bwlimit.bwmin / 1000
206 self.Maxi2Rate = default_Maxi2Rate
207 self.Mini2Rate = bwlimit.bwmin / 1000
208 self.MaxKByte = default_MaxKByte
209 self.ThreshKByte = int(.8 * self.MaxKByte)
210 self.Maxi2KByte = default_Maxi2KByte
211 self.Threshi2KByte = int(.8 * self.Maxi2KByte)
212 self.Share = default_Share
213 self.Sharei2 = default_Share
217 self.updateSliceTags(rspec)
218 bwlimit.set(xid = self.xid,
219 minrate = self.MinRate * 1000,
220 maxrate = self.MaxRate * 1000,
221 maxexemptrate = self.Maxi2Rate * 1000,
222 minexemptrate = self.Mini2Rate * 1000,
228 def updateSliceTags(self, rspec):
230 Use respects from GetSlivers to PLC to populate slice object. Also
231 do some sanity checking.
234 # Sanity check plus policy decision for MinRate:
235 # Minrate cant be greater than 25% of MaxRate or NodeCap.
236 MinRate = int(rspec.get("net_min_rate", bwlimit.bwmin / 1000))
237 if MinRate > int(.25 * default_MaxRate):
238 MinRate = int(.25 * default_MaxRate)
239 if MinRate != self.MinRate:
240 self.MinRate = MinRate
241 logger.log("bwmon: Updating %s: Min Rate = %s" %(self.name, self.MinRate))
243 MaxRate = int(rspec.get('net_max_rate', default_MaxRate))
244 if MaxRate != self.MaxRate:
245 self.MaxRate = MaxRate
246 logger.log("bwmon: Updating %s: Max Rate = %s" %(self.name, self.MaxRate))
248 Mini2Rate = int(rspec.get('net_i2_min_rate', bwlimit.bwmin / 1000))
249 if Mini2Rate != self.Mini2Rate:
250 self.Mini2Rate = Mini2Rate
251 logger.log("bwmon: Updating %s: Min i2 Rate = %s" %(self.name, self.Mini2Rate))
253 Maxi2Rate = int(rspec.get('net_i2_max_rate', default_Maxi2Rate))
254 if Maxi2Rate != self.Maxi2Rate:
255 self.Maxi2Rate = Maxi2Rate
256 logger.log("bwmon: Updating %s: Max i2 Rate = %s" %(self.name, self.Maxi2Rate))
258 MaxKByte = int(rspec.get('net_max_kbyte', default_MaxKByte))
259 if MaxKByte != self.MaxKByte:
260 self.MaxKByte = MaxKByte
261 logger.log("bwmon: Updating %s: Max KByte lim = %s" %(self.name, self.MaxKByte))
263 Maxi2KByte = int(rspec.get('net_i2_max_kbyte', default_Maxi2KByte))
264 if Maxi2KByte != self.Maxi2KByte:
265 self.Maxi2KByte = Maxi2KByte
266 logger.log("bwmon: Updating %s: Max i2 KByte = %s" %(self.name, self.Maxi2KByte))
268 ThreshKByte = int(rspec.get('net_thresh_kbyte', (MaxKByte * .8)))
269 if ThreshKByte != self.ThreshKByte:
270 self.ThreshKByte = ThreshKByte
271 logger.log("bwmon: Updating %s: Thresh KByte = %s" %(self.name, self.ThreshKByte))
273 Threshi2KByte = int(rspec.get('net_i2_thresh_kbyte', (Maxi2KByte * .8)))
274 if Threshi2KByte != self.Threshi2KByte:
275 self.Threshi2KByte = Threshi2KByte
276 logger.log("bwmon: Updating %s: i2 Thresh KByte = %s" %(self.name, self.Threshi2KByte))
278 Share = int(rspec.get('net_share', default_Share))
279 if Share != self.Share:
281 logger.log("bwmon: Updating %s: Net Share = %s" %(self.name, self.Share))
283 Sharei2 = int(rspec.get('net_i2_share', default_Share))
284 if Sharei2 != self.Sharei2:
285 self.Sharei2 = Sharei2
286 logger.log("bwmon: Updating %s: Net i2 Share = %s" %(self.name, self.i2Share))
289 def reset(self, runningrates, rspec):
291 Begin a new recording period. Remove caps by restoring limits
292 to their default values.
294 # Cache share for later comparison
295 self.Share = runningrates.get('share', 1)
297 # Query Node Manager for max rate overrides
298 self.updateSliceTags(rspec)
300 # Reset baseline time
301 self.time = time.time()
303 # Reset baseline byte coutns
304 self.bytes = runningrates.get('usedbytes', 0)
305 self.i2bytes = runningrates.get('usedi2bytes', 0)
312 maxrate = self.MaxRate * 1000
313 minrate = self.MinRate * 1000
314 maxi2rate = self.Maxi2Rate * 1000
315 mini2rate = self.Mini2Rate * 1000
317 if (maxrate != runningrates.get('maxrate', 0)) or \
318 (minrate != runningrates.get('maxrate', 0)) or \
319 (maxi2rate != runningrates.get('maxexemptrate', 0)) or \
320 (mini2rate != runningrates.get('minexemptrate', 0)) or \
321 (self.Share != runningrates.get('share', 0)):
322 logger.log("bwmon: %s reset to %s/%s" % \
324 bwlimit.format_tc_rate(maxrate),
325 bwlimit.format_tc_rate(maxi2rate)), 1)
326 bwlimit.set(xid = self.xid,
327 minrate = self.MinRate * 1000,
328 maxrate = self.MaxRate * 1000,
329 maxexemptrate = self.Maxi2Rate * 1000,
330 minexemptrate = self.Mini2Rate * 1000,
333 def notify(self, new_maxrate, new_maxexemptrate, usedbytes, usedi2bytes):
335 Notify the slice it's being capped.
337 # Prepare message parameters from the template
339 params = {'slice': self.name, 'hostname': socket.gethostname(),
340 'since': time.asctime(time.gmtime(self.time)) + " GMT",
341 'until': time.asctime(time.gmtime(self.time + period)) + " GMT",
342 'date': time.asctime(time.gmtime()) + " GMT",
343 'period': format_period(period)}
345 if new_maxrate != (self.MaxRate * 1000):
346 # Format template parameters for low bandwidth message
347 params['class'] = "low bandwidth"
348 params['bytes'] = format_bytes(usedbytes - self.bytes)
349 params['limit'] = format_bytes(self.MaxKByte * 1024)
350 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxrate)
352 # Cap low bandwidth burst rate
353 message += template % params
354 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
356 if new_maxexemptrate != (self.Maxi2Rate * 1000):
357 # Format template parameters for high bandwidth message
358 params['class'] = "high bandwidth"
359 params['bytes'] = format_bytes(usedi2bytes - self.i2bytes)
360 params['limit'] = format_bytes(self.Maxi2KByte * 1024)
361 params['new_maxrate'] = bwlimit.format_tc_rate(new_maxexemptrate)
363 message += template % params
364 logger.log("bwmon: ** %(slice)s %(class)s capped at %(new_maxrate)s/s " % params)
367 if self.emailed == False:
368 subject = "pl_mom capped bandwidth of slice %(slice)s on %(hostname)s" % params
370 logger.log("bwmon: "+ subject)
371 logger.log("bwmon: "+ message + (footer % params))
374 logger.log("bwmon: Emailing %s" % self.name)
375 slicemail(self.name, subject, message + (footer % params))
378 def update(self, runningrates, rspec):
380 Update byte counts and check if byte thresholds have been
381 exceeded. If exceeded, cap to remaining bytes in limit over remaining time in period.
382 Recalculate every time module runs.
384 # cache share for later comparison
385 runningrates['share'] = self.Share
387 # Query Node Manager for max rate overrides
388 self.updateSliceTags(rspec)
390 usedbytes = runningrates['usedbytes']
391 usedi2bytes = runningrates['usedi2bytes']
394 if usedbytes >= (self.bytes + (self.ThreshKByte * 1024)):
395 sum = self.bytes + (self.ThreshKByte * 1024)
396 maxbyte = self.MaxKByte * 1024
397 bytesused = usedbytes - self.bytes
398 timeused = int(time.time() - self.time)
399 # Calcuate new rate. in bit/s
400 new_maxrate = int(((maxbyte - bytesused) * 8)/(period - timeused))
401 # Never go under MinRate
402 if new_maxrate < (self.MinRate * 1000):
403 new_maxrate = self.MinRate * 1000
404 # State information. I'm capped.
408 new_maxrate = self.MaxRate * 1000
411 if usedi2bytes >= (self.i2bytes + (self.Threshi2KByte * 1024)):
412 maxi2byte = self.Maxi2KByte * 1024
413 i2bytesused = usedi2bytes - self.i2bytes
414 timeused = int(time.time() - self.time)
416 new_maxi2rate = int(((maxi2byte - i2bytesused) * 8)/(period - timeused))
417 # Never go under MinRate
418 if new_maxi2rate < (self.Mini2Rate * 1000):
419 new_maxi2rate = self.Mini2Rate * 1000
420 # State information. I'm capped.
424 new_maxi2rate = self.Maxi2Rate * 1000
427 # Check running values against newly calculated values so as not to run tc
429 if (runningrates['maxrate'] != new_maxrate) or \
430 (runningrates['minrate'] != self.MinRate * 1000) or \
431 (runningrates['maxexemptrate'] != new_maxi2rate) or \
432 (runningrates['minexemptrate'] != self.Mini2Rate * 1000) or \
433 (runningrates['share'] != self.Share):
435 bwlimit.set(xid = self.xid,
436 minrate = self.MinRate * 1000,
437 maxrate = new_maxrate,
438 minexemptrate = self.Mini2Rate * 1000,
439 maxexemptrate = new_maxi2rate,
443 if self.capped == True:
444 self.notify(new_maxrate, new_maxi2rate, usedbytes, usedi2bytes)
def gethtbs(root_xid, default_xid):
    """
    Return dict {xid: {*rates}} of running htbs as reported by tc that
    have names. Turn off HTBs without names (orphans), except for the
    root and default classes.
    """
    livehtbs = {}
    for params in bwlimit.get():
        (xid, share,
         minrate, maxrate,
         minexemptrate, maxexemptrate,
         usedbytes, usedi2bytes) = params

        name = bwlimit.get_slice(xid)

        if (name is None) \
        and (xid != root_xid) \
        and (xid != default_xid):
            # Orphaned (not associated with a slice) class
            name = "%d?" % xid
            logger.log("bwmon: Found orphaned HTB %s. Removing." %name, 1)
            bwlimit.off(xid)

        livehtbs[xid] = {'share': share,
                         'minrate': minrate,
                         'maxrate': maxrate,
                         'maxexemptrate': maxexemptrate,
                         'minexemptrate': minexemptrate,
                         'usedbytes': usedbytes,
                         'name': name,
                         'usedi2bytes': usedi2bytes}

    return livehtbs
'''
Syncs tc, db, and bwmon.dat. Then, starts new slices, kills old ones, and updates byte accounts for each running slice. Sends emails and caps those that went over their limit.
'''
# NOTE(review): the enclosing 'def sync(...)' line is not visible in this
# chunk; statements below form the function body.

# In case the limits have changed.
default_MaxRate = int(bwlimit.get_bwcap() / 1000)
default_Maxi2Rate = int(bwlimit.bwmax / 1000)

# In case default isn't set yet.
if default_MaxRate == -1:
    default_MaxRate = 1000000

# Load persisted state (version tag, per-slice Slice objects, dead-slice db).
# NOTE(review): presumably wrapped in try/except in the original — the
# surrounding lines are not visible here.
f = open(datafile, "r+")
logger.log("bwmon: Loading %s" % datafile, 2)
(version, slices, deaddb) = pickle.load(f)
# Check version of data file
if version != "$Id$":
    logger.log("bwmon: Not using old version '%s' data file %s" % (version, datafile))

# Get/set special slice IDs
root_xid = bwlimit.get_xid("root")
default_xid = bwlimit.get_xid("default")

# Since root is required for sanity, its not in the API/plc database, so pass {}
if root_xid not in slices.keys():
    slices[root_xid] = Slice(root_xid, "root", {})
    slices[root_xid].reset({}, {})

# Used by bwlimit. pass {} since there is no rspec (like above).
if default_xid not in slices.keys():
    slices[default_xid] = Slice(default_xid, "default", {})
    slices[default_xid].reset({}, {})

# Get running slivers that should be on this node (from plc). {xid: name}
# db keys on name, bwmon keys on xid. db doesn't have xid either.
for plcSliver in nmdbcopy.keys():
    live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]

logger.log("bwmon: Found %s instantiated slices" % live.keys().__len__(), 2)
logger.log("bwmon: Found %s slices in dat file" % slices.values().__len__(), 2)

# Get actual running values from tc.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
logger.log("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__(), 2)

# The dat file has HTBs for slices, but the HTBs aren't running
nohtbslices = Set(slices.keys()) - Set(kernelhtbs.keys())
logger.log( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__(), 2)
for nohtbslice in nohtbslices:
    if live.has_key(nohtbslice):
        slices[nohtbslice].reset( {}, live[nohtbslice]['_rspec'] )
    # NOTE(review): the two lines below presumably sit under an 'else'
    # (branch line not visible in this chunk).
    logger.log("bwmon: Removing abondoned slice %s from dat." % nohtbslice)
    del slices[nohtbslice]

# The dat file doesn't have HTB for the slice but kern has HTB
slicesnodat = Set(kernelhtbs.keys()) - Set(slices.keys())
logger.log( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__(), 2)
for slicenodat in slicesnodat:
    # But slice is running
    if live.has_key(slicenodat):
        # init the slice. which means start accounting over since kernel
        # htb was already there.
        slices[slicenodat] = Slice(slicenodat,
                                   live[slicenodat]['name'],
                                   live[slicenodat]['_rspec'])

# Slices in GetSlivers but not running HTBs
newslicesxids = Set(live.keys()) - Set(kernelhtbs.keys())
logger.log("bwmon: Found %s new slices" % newslicesxids.__len__(), 2)

for newslice in newslicesxids:
    # Delegated slices dont have xids (which are uids) since they haven't been
    if newslice != None and live[newslice].has_key('_rspec') == True:
        # Check to see if we recently deleted this slice.
        if live[newslice]['name'] not in deaddb.keys():
            logger.log( "bwmon: New Slice %s" % live[newslice]['name'] )
            # _rspec is the computed rspec: NM retrieved data from PLC, computed loans
            # and made a dict of computed values.
            slices[newslice] = Slice(newslice, live[newslice]['name'], live[newslice]['_rspec'])
            slices[newslice].reset( {}, live[newslice]['_rspec'] )
        # Double check time for dead slice in deaddb is within 24hr recording period.
        elif (time.time() <= (deaddb[live[newslice]['name']]['slice'].time + period)):
            deadslice = deaddb[live[newslice]['name']]
            logger.log("bwmon: Reinstantiating deleted slice %s" % live[newslice]['name'])
            slices[newslice] = deadslice['slice']
            slices[newslice].xid = newslice
            # Restart accounting from the dead slice's saved totals.
            # NOTE(review): '* 1000' on usedbytes looks suspicious (the
            # other byte fields are not scaled) — verify units.
            newvals = {"maxrate": deadslice['slice'].MaxRate * 1000,
                       "minrate": deadslice['slice'].MinRate * 1000,
                       "maxexemptrate": deadslice['slice'].Maxi2Rate * 1000,
                       "usedbytes": deadslice['htb']['usedbytes'] * 1000,
                       "usedi2bytes": deadslice['htb']['usedi2bytes'],
                       "share":deadslice['htb']['share']}
            slices[newslice].reset(newvals, live[newslice]['_rspec'])
            slices[newslice].update(newvals, live[newslice]['_rspec'])
            # Since the slice has been reinitialed, remove from dead database.
            del deaddb[deadslice['slice'].name]
    # NOTE(review): the line below presumably sits under an 'else' for
    # slices without an xid/_rspec (branch line not visible in this chunk).
    logger.log("bwmon: Slice %s doesn't have xid. Skipping." % live[newslice]['name'])

# Move dead slices that exist in the pickle file, but
# aren't instantiated by PLC into the dead dict until
# recording period is over. This is to avoid the case where a slice is dynamically created
# and destroyed then recreated to get around byte limits.
deadxids = Set(slices.keys()) - Set(live.keys())
# (-2 accounts for the always-present root and default entries.)
logger.log("bwmon: Found %s dead slices" % (deadxids.__len__() - 2), 2)
for deadxid in deadxids:
    if deadxid == root_xid or deadxid == default_xid:
        # NOTE(review): a 'continue' presumably follows here (line not
        # visible in this chunk) — root/default must never be reaped.
    logger.log("bwmon: removing dead slice %s " % deadxid)
    if slices.has_key(deadxid) and kernelhtbs.has_key(deadxid):
        # add slice (by name) to deaddb
        logger.log("bwmon: Saving bandwidth totals for %s." % slices[deadxid].name)
        deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
    if kernelhtbs.has_key(deadxid):
        logger.log("bwmon: Removing HTB for %s." % deadxid, 2)

# Expire deaddb entries whose recording period has elapsed.
for deadslice in deaddb.keys():
    if (time.time() >= (deaddb[deadslice]['slice'].time + period)):
        logger.log("bwmon: Removing dead slice %s from dat." \
                   % deaddb[deadslice]['slice'].name)
        del deaddb[deadslice]

# Get actual running values from tc since we've added and removed buckets.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
logger.log("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__(), 2)

# Update all byte limits on all slices
for (xid, slice) in slices.iteritems():
    # Monitor only the specified slices
    if xid == root_xid or xid == default_xid: continue
    if names and name not in names:
        # NOTE(review): a 'continue' presumably follows here; also 'name'
        # appears undefined in this scope — likely slice.name. Verify.
    if (time.time() >= (slice.time + period)) or \
       (kernelhtbs[xid]['usedbytes'] < slice.bytes) or \
       (kernelhtbs[xid]['usedi2bytes'] < slice.i2bytes):
        # Reset to defaults every 24 hours or if it appears
        # that the byte counters have overflowed (or, more
        # likely, the node was restarted or the HTB buckets
        # were re-initialized).
        slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
    # NOTE(review): the update below presumably sits under an else
    # (branch line not visible in this chunk).
    logger.log("bwmon: Updating slice %s" % slice.name, 2)
    slice.update(kernelhtbs[xid], live[xid]['_rspec'])

# Persist state for the next run.
logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),datafile), 2)
f = open(datafile, "w")
pickle.dump((version, slices, deaddb), f)
# doesn't use generic default interface because this runs as its own thread.
# changing the config variable will not have an effect since GetSlivers: pass
def getDefaults(nmdbcopy):
    '''
    Get defaults from default slice's slice attributes.
    '''
    # Site-wide defaults are published on the "<PLC_SLICE_PREFIX>_default" slice.
    dfltslice = nmdbcopy.get(PLC_SLICE_PREFIX+"_default")
    # NOTE(review): -1 presumably means "no node-wide cap"; the branch body
    # is not visible in this chunk — confirm against the full source.
    if dfltslice['rspec']['net_max_rate'] == -1:
'''
Turn off all slice HTBs
'''
# NOTE(review): the enclosing 'def' line is not visible in this chunk;
# the statements below disable every running HTB found by gethtbs().

# Get/set special slice IDs
root_xid = bwlimit.get_xid("root")
default_xid = bwlimit.get_xid("default")
kernelhtbs = gethtbs(root_xid, default_xid)

logger.log("bwlimit: Disabling all running HTBs.")
for htb in kernelhtbs.keys(): bwlimit.off(htb)
# Event used to wake the monitor thread (set by GetSlivers).
lock = threading.Event()

'''
When run as a thread, wait for event, lock db, deep copy it, release it,
run bwmon.GetSlivers(), then go back to waiting.
'''
# NOTE(review): the enclosing 'def run():' and its while/try scaffolding
# are not visible in this chunk; the statements below are the thread's
# main loop body.
logger.log("bwmon: Thread started", 2)
logger.log("bwmon: Event received. Running.", 2)
# Snapshot the NM database under its lock so the sync can work on a
# consistent copy without holding the lock.
database.db_lock.acquire()
nmdbcopy = copy.deepcopy(database.db)
database.db_lock.release()
if getDefaults(nmdbcopy) and len(bwlimit.tc("class show dev eth0")) > 0:
    # class show to check if net:InitNodeLimit:bwlimit.init has run.
else: logger.log("bwmon: BW limits DISABLED.")
except: logger.log_exc()
# Launch the monitor loop as a daemon thread (body of start()).
tools.as_daemon_thread(run)
717 def GetSlivers(*args):