from plc_config import *
except:
DEBUG = True
- logger.log("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found", 2)
- logger.log("bwmon: Running in DEBUG mode. Logging to file and not emailing.", 1)
+ logger.verbose("bwmon: Warning: Configuration file /etc/planetlab/plc_config.py not found")
+ logger.log("bwmon: Running in DEBUG mode. Logging to file and not emailing.")
# Constants
seconds_per_day = 24 * 60 * 60
(mini2rate != runningrates.get('minexemptrate', 0)) or \
(self.Share != runningrates.get('share', 0)):
logger.log("bwmon: %s reset to %s/%s" % \
- (self.name,
- bwlimit.format_tc_rate(maxrate),
- bwlimit.format_tc_rate(maxi2rate)), 1)
+ (self.name,
+ bwlimit.format_tc_rate(maxrate),
+ bwlimit.format_tc_rate(maxi2rate)))
bwlimit.set(xid = self.xid, dev = dev_default,
minrate = self.MinRate * 1000,
maxrate = self.MaxRate * 1000,
and (xid != default_xid):
# Orphaned (not associated with a slice) class
name = "%d?" % xid
- logger.log("bwmon: Found orphaned HTB %s. Removing." %name, 1)
+ logger.log("bwmon: Found orphaned HTB %s. Removing." %name)
bwlimit.off(xid)
livehtbs[xid] = {'share': share,
try:
f = open(DB_FILE, "r+")
- logger.log("bwmon: Loading %s" % DB_FILE, 2)
+ logger.verbose("bwmon: Loading %s" % DB_FILE)
(version, slices, deaddb) = pickle.load(f)
f.close()
# Check version of data file
for plcSliver in nmdbcopy.keys():
live[bwlimit.get_xid(plcSliver)] = nmdbcopy[plcSliver]
- logger.log("bwmon: Found %s instantiated slices" % live.keys().__len__(), 2)
- logger.log("bwmon: Found %s slices in dat file" % slices.values().__len__(), 2)
+ logger.verbose("bwmon: Found %s instantiated slices" % live.keys().__len__())
+ logger.verbose("bwmon: Found %s slices in dat file" % slices.values().__len__())
# Get actual running values from tc.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.log("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__(), 2)
+ logger.verbose("bwmon: Found %s running HTBs" % kernelhtbs.keys().__len__())
# The dat file has HTBs for slices, but the HTBs aren't running
nohtbslices = set(slices.keys()) - set(kernelhtbs.keys())
- logger.log( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__(), 2)
+ logger.verbose( "bwmon: Found %s slices in dat but not running." % nohtbslices.__len__())
# Reset tc counts.
for nohtbslice in nohtbslices:
if live.has_key(nohtbslice):
# The dat file doesn't have an HTB for the slice but the kernel has an HTB
slicesnodat = set(kernelhtbs.keys()) - set(slices.keys())
- logger.log( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__(), 2)
+ logger.verbose( "bwmon: Found %s slices with HTBs but not in dat" % slicesnodat.__len__())
for slicenodat in slicesnodat:
# But slice is running
if live.has_key(slicenodat):
# Get new slices.
# Slices in GetSlivers but not running HTBs
newslicesxids = set(live.keys()) - set(kernelhtbs.keys())
- logger.log("bwmon: Found %s new slices" % newslicesxids.__len__(), 2)
+ logger.verbose("bwmon: Found %s new slices" % newslicesxids.__len__())
# Setup new slices
for newslice in newslicesxids:
# recording period is over. This is to avoid the case where a slice is dynamically created
# and destroyed then recreated to get around byte limits.
deadxids = set(slices.keys()) - set(live.keys())
- logger.log("bwmon: Found %s dead slices" % (deadxids.__len__() - 2), 2)
+ logger.verbose("bwmon: Found %s dead slices" % (deadxids.__len__() - 2))
for deadxid in deadxids:
if deadxid == root_xid or deadxid == default_xid:
continue
deaddb[slices[deadxid].name] = {'slice': slices[deadxid], 'htb': kernelhtbs[deadxid]}
del slices[deadxid]
if kernelhtbs.has_key(deadxid):
- logger.log("bwmon: Removing HTB for %s." % deadxid, 2)
+ logger.verbose("bwmon: Removing HTB for %s." % deadxid)
bwlimit.off(deadxid)
# Clean up deaddb
# Get actual running values from tc since we've added and removed buckets.
# Update slice totals and bandwidth. {xid: {values}}
kernelhtbs = gethtbs(root_xid, default_xid)
- logger.log("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__(), 2)
+ logger.verbose("bwmon: now %s running HTBs" % kernelhtbs.keys().__len__())
# Update all byte limits on all slices
for (xid, slice) in slices.iteritems():
# were re-initialized).
slice.reset(kernelhtbs[xid], live[xid]['_rspec'])
elif ENABLE:
- logger.log("bwmon: Updating slice %s" % slice.name, 2)
+ logger.verbose("bwmon: Updating slice %s" % slice.name)
# Update byte counts
slice.update(kernelhtbs[xid], live[xid]['_rspec'])
- logger.log("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE), 2)
+ logger.verbose("bwmon: Saving %s slices in %s" % (slices.keys().__len__(),DB_FILE))
f = open(DB_FILE, "w")
pickle.dump((version, slices, deaddb), f)
f.close()
When run as a thread, wait for event, lock db, deep copy it, release it,
run bwmon.GetSlivers(), then go back to waiting.
"""
- logger.log("bwmon: Thread started", 2)
+ logger.verbose("bwmon: Thread started")
while True:
lock.wait()
- logger.log("bwmon: Event received. Running.", 2)
+ logger.verbose("bwmon: Event received. Running.")
database.db_lock.acquire()
nmdbcopy = copy.deepcopy(database.db)
database.db_lock.release()
# time out in seconds - avoid hanging subprocesses - default is 5 minutes
default_timeout_minutes=5
+# returns a bool that is True when everything goes fine and the retcode is 0
def log_call(command,timeout=default_timeout_minutes*60,poll=1):
message=" ".join(command)
log("log_call: running command %s" % message)
verbose("log_call: timeout=%r s" % timeout)
verbose("log_call: poll=%r s" % poll)
trigger=time.time()+timeout
+ result = False
try:
child = subprocess.Popen(command, bufsize=1,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
buffer.flush()
# child is done and return 0
if returncode == 0:
- log("log_call: command completed (%s)" % message)
+ log("log_call:end command (%s) completed" % message)
+ result=True
break
# child has failed
else:
- log("log_call: command return=%d (%s)" %(returncode,message))
- raise Exception("log_call: failed with returncode %d"%returncode)
+ log("log_call:end command (%s) returned with code %d" %(message,returncode))
+ break
# no : still within timeout ?
if time.time() >= trigger:
buffer.flush()
child.terminate()
- raise Exception("log_call: terminated command - exceeded timeout %d s"%timeout)
+ log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message,timeout))
+ break
except: log_exc("failed to run command %s" % message)
-
+ return result
def __init__ (self):
parser = optparse.OptionParser()
- parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False, help='run daemonized')
- parser.add_option('-s', '--startup', action='store_true', dest='startup', default=False, help='run all sliver startup scripts')
- parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config', help='PLC configuration file')
- parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session', help='API session key (or file)')
+ parser.add_option('-d', '--daemon', action='store_true', dest='daemon', default=False,
+ help='run daemonized')
+ parser.add_option('-s', '--startup', action='store_true', dest='startup', default=False,
+ help='run all sliver startup scripts')
+ parser.add_option('-f', '--config', action='store', dest='config', default='/etc/planetlab/plc_config',
+ help='PLC configuration file')
+ parser.add_option('-k', '--session', action='store', dest='session', default='/etc/planetlab/session',
+ help='API session key (or file)')
parser.add_option('-p', '--period', action='store', dest='period', default=NodeManager.default_period,
help='Polling interval (sec) - default %d'%NodeManager.default_period)
parser.add_option('-r', '--random', action='store', dest='random', default=NodeManager.default_random,
help='Range for additional random polling interval (sec) -- default %d'%NodeManager.default_random)
- parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='more verbose log')
- parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH, help='Path to plugins directory')
+ parser.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False,
+ help='more verbose log')
+ parser.add_option('-P', '--path', action='store', dest='path', default=NodeManager.PLUGIN_PATH,
+ help='Path to plugins directory')
# NOTE: BUG the 'help' for this parser.add_option() wont list plugins from the --path argument
parser.add_option('-m', '--module', action='store', dest='user_module', default='', help='run a single module')