2 """A very simple logger that tries to be concurrency-safe."""
# Files written by this module.
LOG_FILE = '/var/log/nodemanager'                      # main append-only log written by log()
LOG_SLIVERS = '/var/lib/nodemanager/getslivers.txt'    # snapshot of the raw GetSlivers data (see log_slivers)
LOG_DATABASE = '/var/lib/nodemanager/database.txt'     # snapshot of the raw node database (see log_database)
# basically define 3 levels
# default is to log a reasonable amount of stuff for when running on operational nodes
# (body of set_level - its def header is elided in this excerpt)
# only the three known levels are accepted
assert level in [LOG_NONE,LOG_NODE,LOG_VERBOSE]
# NOTE(review): 'logger.log' is suspicious from inside this very module --
# presumably this should be a plain call to log(); verify against the full file.
logger.log("Failed to set LOG_LEVEL to %s"%level)
30 log('(v) '+msg,LOG_VERBOSE)
def log(msg,level=LOG_NODE):
    """Write <msg> to the log file if level >= current log level (default LOG_NODE)."""
    # messages above the configured verbosity are dropped
    if (level > LOG_LEVEL):
    # append mode, creating the file with mode 0600 if needed
    fd = os.open(LOG_FILE, os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
    if not msg.endswith('\n'): msg += '\n'
    # timestamps are in GMT; a single os.write on an O_APPEND fd presumably
    # keeps concurrent writers from interleaving mid-message
    os.write(fd, '%s: %s' % (time.asctime(time.gmtime()), msg))
    # NOTE(review): no os.close(fd) is visible in this excerpt -- confirm the
    # descriptor is closed in the full file, otherwise this leaks an fd per call.
def log_exc(msg="",name=None):
    """Log the traceback resulting from an exception.

    When <name> is given it tags the log line; the two calls below are
    presumably the branches of an if name:/else: elided in this excerpt.
    """
    log("%s: EXCEPTION caught <%s> \n %s" %(name, msg, traceback.format_exc()))
    log("EXCEPTION caught <%s> \n %s" %(msg, traceback.format_exc()))
52 ########## snapshot data to a file
53 # for some reason the various modules are still triggered even when the
54 # data from PLC cannot be reached
# we show this message instead of the exception stack in this case
def log_missing_data (msg,key):
    """Record that <key> was absent from the fetched data; logged and otherwise ignored."""
    text = "%s: could not find the %s key in data (PLC connection down?) - IGNORED" % (msg, key)
    log(text)
def log_data_in_file (data, file, message="",level=LOG_NODE):
    """Pretty-print a snapshot of <data> into <file>, honouring the log level.

    <message> is an optional free-form tag written at the top of the file.
    """
    # same verbosity gate as log()
    if (level > LOG_LEVEL):
    # timestamp of the snapshot, in local time
    now=time.strftime("Last update: %Y.%m.%d at %H:%M:%S %Z", time.localtime())
    # 'f' is the open file object -- the open() call is elided in this excerpt
    if message: f.write('Message:'+message+'\n')
    # indent=2 keeps the dump readable for nested structures
    pp=pprint.PrettyPrinter(stream=f,indent=2)
    # NOTE(review): 'Owerwrote' is a typo ('Overwrote') in a runtime string -- left as-is here
    verbose("logger:.log_data_in_file Owerwrote %s"%file)
    log_exc('logger.log_data_in_file failed - file=%s - message=%r'%(file,message))
def log_slivers (data):
    """Snapshot the raw GetSlivers <data> into LOG_SLIVERS."""
    target = LOG_SLIVERS
    log_data_in_file(data, target, "raw GetSlivers")
def log_database (db):
    """Snapshot the raw node database <db> into LOG_DATABASE."""
    snapshot_path = LOG_DATABASE
    log_data_in_file(db, snapshot_path, "raw database")
80 #################### child processes
# stream the child's output instead of blocking until the process returns;
# waiting for completion makes debugging of hanging children hard
# (fragments of a logger-file helper class - its class header is elided in this excerpt)
def __init__ (self,message='log_call: '):
    # presumably stores <message> as the log-line prefix and initializes an
    # empty character buffer -- the assignments are elided; confirm in the full file
# flush the accumulated buffer to the log whenever a newline arrives
if c=='\n': self.flush()
# (body of flush) emit whatever has been buffered, prefixed with self.message
log (self.message + self.buffer)
# time out in seconds - avoid hanging subprocesses - default is 5 minutes
# (stored in minutes; log_call converts to seconds in its default argument)
default_timeout_minutes=5
# returns a bool that is True when everything goes fine and the retcod is 0
def log_call(command,timeout=default_timeout_minutes*60,poll=1):
    """Run <command> (a list of args), logging its output as it appears.

    <timeout> -- seconds after which the child is terminated
    <poll>    -- seconds to wait for output on each select() round
    Several structural lines (try/while) are elided in this excerpt.
    """
    message=" ".join(command)
    log("log_call: running command %s" % message)
    verbose("log_call: timeout=%r s" % timeout)
    verbose("log_call: poll=%r s" % poll)
    # absolute deadline for the whole run
    trigger=time.time()+timeout
    # stderr is merged into stdout so a single pipe carries all output
    child = subprocess.Popen(command, bufsize=1,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
    # see if anything can be read within the poll interval
    (r,w,x)=select.select([child.stdout],[],[],poll)
    # NOTE(review): reading a single byte per loop iteration is very slow for chatty children
    if r: buffer.add(child.stdout.read(1))
    # non-blocking check: None while the child is still running
    returncode=child.poll()
    if returncode != None:   # NOTE(review): prefer 'is not None'
        # child is done and return 0
        log("log_call:end command (%s) completed" % message)
        log("log_call:end command (%s) returned with code %d" %(message,returncode))
    # no : still within timeout ?
    if time.time() >= trigger:
        log("log_call:end terminating command (%s) - exceeded timeout %d s"%(message,timeout))
    except: log_exc("failed to run command %s" % message)