X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Futil%2Fsfalogging.py;h=a8f88f5cc445ed46ed97611cb17bfc2534a988f2;hb=00440a7c7e509301cb49db90ff15abc0f479aaf3;hp=ccb140094644238b6a97db8fe379572d8c973683;hpb=0cf0d31c313a366e3f272f830bdb4f2a7308e11f;p=sfa.git

diff --git a/sfa/util/sfalogging.py b/sfa/util/sfalogging.py
old mode 100755
new mode 100644
index ccb14009..a8f88f5c
--- a/sfa/util/sfalogging.py
+++ b/sfa/util/sfalogging.py
@@ -1,113 +1,188 @@
 #!/usr/bin/python
-import os
-import traceback
-import logging, logging.handlers
-
-class SfaLogging:
-    def __init__ (self,logfile,name=None,level=logging.INFO):
-        # default is to locate logger name from the logfile
-        if not name:
-            name=os.path.basename(logfile)
-        self.logger=logging.getLogger(name)
-        self.logger.setLevel(level)
-        try:
-            handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=5)
-        except IOError:
-            # This is usually a permissions error becaue the file is
-            # owned by root, but httpd is trying to access it.
-            tmplogfile=os.getenv("TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile)
-            handler=logging.handlers.RotatingFileHandler(tmplogfile,maxBytes=1000000, backupCount=5)
-        handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))
-        self.logger.addHandler(handler)
-
-    def wrap(fun):
-        def wrapped(self,msg,*args,**kwds):
-            native=getattr(self.logger,fun.__name__)
-            return native(msg,*args,**kwds)
-        #wrapped.__doc__=native.__doc__
-        return wrapped
-
-    def setLevel(self,level):
-        self.logger.setLevel(level)
-
-    @wrap
-    def critical(): pass
-    @wrap
-    def error(): pass
-    @wrap
-    def warning(): pass
-    @wrap
-    def info(): pass
-    @wrap
-    def debug(): pass
-
-    # logs an exception - use in an except statement
-    def log_exc(self,message):
-        self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
-        self.error("%s END TRACEBACK"%message)
-
-    def log_exc_critical(self,message):
-        self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))
-        self.critical("%s END TRACEBACK"%message)
-
-    # for investigation purposes, can be placed anywhere
-    def log_stack(self,message):
-        to_log="".join(traceback.format_stack())
-        self.debug("%s BEG STACK"%message+"\n"+to_log)
-        self.debug("%s END STACK"%message)
+"""
+A reroutable logger that can handle deep tracebacks
 
-sfa_logger=SfaLogging(logfile='/var/log/sfa.log')
-sfa_import_logger=SfaLogging(logfile='/var/log/sfa_import.log')
+Requirements:
+* for legacy, we want all our code to just do:
 
-########################################
-import time
+    from sfa.util.sfalogging import logger
+    ...
+    logger.info('blabla')
 
-def profile(callable):
-    """
-    Prints the runtime of the specified callable. Use as a decorator, e.g.,
+* depending on whether the code runs (a) inside the server,
+  (b) as part of sfa-import, or (c) as part of the sfi CLI,
+  we want these messages to be directed in different places
 
-    @profile
-    def foo(...):
-        ...
+* also because troubleshooting is very painful, we need a better way
+  to report stacks when an exception occurs.
 
-    Or, equivalently,
+Implementation:
 
-    def foo(...):
-        ...
-    foo = profile(foo)
+* we use a single unique logger name 'sfa' (wrt getLogger()),
+  and provide an auxiliary function `init_logger()` that
+  accepts for its `context` parameter one of:
+  `server`, `import`, `cli` or `console`
+  It will then reconfigure the 'sfa' logger to do the right thing
 
-    Or inline:
+* also we create our own subclass of loggers, and install it
+  with logging.setLoggerClass(), so we can add our own customized
+  `log_exc()` method
 
-    result = profile(foo)(...)
+"""
+
+# pylint: disable=c0111, c0103, w1201
+
+from __future__ import print_function
+
+import os
+import os.path
+import sys
+import traceback
+import logging
+import logging.handlers
+import logging.config
+
+# so that users of this module don't need to import logging
+from logging import (CRITICAL, ERROR, WARNING, INFO, DEBUG)
+
+
+class SfaLogger(logging.getLoggerClass()):
     """
+    a rewrite of the old _SfaLogger class, which was way too cumbersome;
+    we keep the same interface as much as possible though
+    """
+
+    # shorthand to avoid having to import logging all over the place
+    def setLevelDebug(self):
+        self.setLevel(DEBUG)
+
+    def debugEnabled(self):
+        return self.getEffectiveLevel() == logging.DEBUG
+
+    # define a verbose option with something like
+    # parser.add_option("-v", "--verbose", action="count",
+    #                   dest="verbose", default=0)
+    # and pass the corresponding options.verbose to this method to adjust level
+    def setLevelFromOptVerbose(self, verbose):
+        if verbose == 0:
+            self.setLevel(logging.WARNING)
+        elif verbose == 1:
+            self.setLevel(logging.INFO)
+        elif verbose >= 2:
+            self.setLevel(logging.DEBUG)
+
+    # in case some other code needs a boolean
+    @staticmethod
+    def getBoolVerboseFromOpt(verbose):
+        return verbose >= 1
+
+    @staticmethod
+    def getBoolDebugFromOpt(verbose):
+        return verbose >= 2
+
+    def log_exc(self, message, limit=100):
+        """
+        the standard logger has an exception() method, but it will
+        dump the stack only between the frames
+        (1) that does `raise` and (2) the one that does `except`
+
+        log_exc() has a limit argument that makes it possible to see deeper than that
+
+        use limit=None to get the same behaviour as exception()
+        """
+        self.error("%s BEG TRACEBACK" % message + "\n" +
+                   traceback.format_exc(limit=limit).strip("\n"))
+        self.error("%s END TRACEBACK" % message)
 
-    def wrapper(*args, **kwds):
-        start = time.time()
-        result = callable(*args, **kwds)
-        end = time.time()
-        args = map(str, args)
-        args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.items()]
-        sfa_logger.debug("%s (%s): %.02f s" % (callable.__name__, ", ".join(args), end - start))
-        return result
-
-    return wrapper
-
-if __name__ == '__main__':
-    print 'testing sfalogging into logger.log'
-    global sfa_logger
-    sfa_logger=SfaLogging('logger.log')
-    sfa_logger.critical("logger.critical")
-    sfa_logger.error("logger.error")
-    sfa_logger.warning("logger.warning")
-    sfa_logger.info("logger.info")
-    sfa_logger.debug("logger.debug")
-    sfa_logger.setLevel(logging.DEBUG)
-    sfa_logger.debug("logger.debug again")
-
-    @profile
-    def sleep(seconds = 1):
-        time.sleep(seconds)
-
-    sleep(1)
+    # for investigation purposes, can be placed anywhere
+    def log_stack(self, message, limit=100):
+        to_log = "".join(traceback.format_stack(limit=limit))
+        self.info("%s BEG STACK" % message + "\n" + to_log)
+        self.info("%s END STACK" % message)
+
+    def enable_console(self):
+        formatter = logging.Formatter("%(message)s")
+        handler = logging.StreamHandler(sys.stdout)
+        handler.setFormatter(formatter)
+        self.addHandler(handler)
+
+
+# install our class as the default
+logging.setLoggerClass(SfaLogger)
+
+
+# configure
+# the dict built by logging_config() is what init_logger() below
+# hands over to dictConfig(); the context argument selects which
+# handler ('file' or 'stdout') the 'sfa' logger gets attached to,
+# so keep 'handlers' and 'loggers' consistent
+def logging_config(context):
+    if context == 'server':
+        handlername = 'file'
+        filename = '/var/log/sfa.log'
+        level = 'INFO'
+    elif context == 'import':
+        handlername = 'file'
+        filename = '/var/log/sfa-import.log'
+        level = 'INFO'
+    elif context == 'cli':
+        handlername = 'file'
+        filename = os.path.expanduser("~/.sfi.log")
+        level = 'DEBUG'
+    elif context == 'console':
+        handlername = 'stdout'
+        filename = 'ignored'
+        level = 'INFO'
+    else:
+        print("Cannot configure logging - exiting")
+        exit(1)
+
+    return {
+        'version': 1,
+        # IMPORTANT: we may be imported by something else, so:
+        'disable_existing_loggers': False,
+        'formatters': {
+            'standard': {
+                'datefmt': '%m-%d %H:%M:%S',
+                'format': ('%(asctime)s %(levelname)s '
+                           '%(filename)s:%(lineno)d %(message)s'),
+            },
+        },
+        'handlers': {
+            'file': {
+                'filename': filename,
+                'level': level,
+                # not using RotatingFileHandler for this first version
+                'class': 'logging.FileHandler',
+                'formatter': 'standard',
+            },
+            'stdout': {
+                'level': level,
+                'class': 'logging.StreamHandler',
+                'formatter': 'standard',
+            },
+        },
+        'loggers': {
+            'sfa': {
+                'handlers': [handlername],
+                'level': level,
+                'propagate': False,
+            },
+        },
+    }
+
+
+logger = logging.getLogger('sfa')
+
+
+def init_logger(context):
+    logging.config.dictConfig(logging_config(context))
+
+
+# default configuration, for user processes that never call init_logger()
+# themselves, like the miscellaneous testers and other certificate
+# probing/dumping utilities
+init_logger('console')
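
Below is a minimal usage sketch of the reworked module, for illustration only: it assumes the patch above is applied and that sfa.util.sfalogging is importable; the argparse handling and the main() wrapper are hypothetical and not part of this change.

    # usage sketch (not part of the patch): exercise the reworked 'sfa' logger
    from argparse import ArgumentParser

    from sfa.util.sfalogging import logger, init_logger


    def main():
        parser = ArgumentParser()
        # counting flag, as suggested in the setLevelFromOptVerbose() comment
        parser.add_argument("-v", "--verbose", action="count", default=0)
        options = parser.parse_args()

        # reroute the 'sfa' logger for the CLI context (it then writes to ~/.sfi.log)
        init_logger('cli')
        # 0, 1 or 2 occurrences of -v map onto WARNING, INFO or DEBUG
        logger.setLevelFromOptVerbose(options.verbose)

        logger.info("starting up")
        try:
            1 / 0
        except ZeroDivisionError:
            # logs the traceback through log_exc(), which defaults to limit=100 frames
            logger.log_exc("unexpected failure")


    if __name__ == '__main__':
        main()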
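
The configuration helper can also be inspected on its own; the checks below merely restate what logging_config() is defined to return for the 'server' context in this patch, again assuming the module is importable.

    # sanity check of logging_config() for the 'server' context (illustrative)
    from sfa.util.sfalogging import logging_config

    conf = logging_config('server')
    # the 'sfa' logger is wired to the 'file' handler only
    assert conf['loggers']['sfa']['handlers'] == ['file']
    # which writes to /var/log/sfa.log at level INFO with the 'standard' formatter
    assert conf['handlers']['file']['filename'] == '/var/log/sfa.log'
    assert conf['handlers']['file']['level'] == 'INFO'
    assert conf['handlers']['file']['formatter'] == 'standard'
    # loggers set up by other packages are left untouched by dictConfig
    assert conf['disable_existing_loggers'] is False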