X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=sfa%2Futil%2Fsfalogging.py;h=9afc334b4d94e4bcf991a3b14857f4363f0192d7;hb=0a187639a1d00549ccc45fe0436fa972955207fe;hp=f812517c326141f26feaaf4063edea526e7fb7da;hpb=3eea82897aba845da0d12c1ba56012e599f58853;p=sfa.git diff --git a/sfa/util/sfalogging.py b/sfa/util/sfalogging.py old mode 100755 new mode 100644 index f812517c..9afc334b --- a/sfa/util/sfalogging.py +++ b/sfa/util/sfalogging.py @@ -1,140 +1,197 @@ -#!/usr/bin/python +#!/usr/bin/env python3 -import os, sys -import traceback -import logging, logging.handlers - -CRITICAL=logging.CRITICAL -ERROR=logging.ERROR -WARNING=logging.WARNING -INFO=logging.INFO -DEBUG=logging.DEBUG - -# a logger that can handle tracebacks -class _SfaLogger: - def __init__ (self,logfile=None,loggername=None,level=logging.INFO): - # default is to locate loggername from the logfile if avail. - if not logfile: - #loggername='console' - #handler=logging.StreamHandler() - #handler.setFormatter(logging.Formatter("%(levelname)s %(message)s")) - logfile = "/var/log/sfa.log" - - if not loggername: - loggername=os.path.basename(logfile) - try: - handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=5) - except IOError: - # This is usually a permissions error becaue the file is - # owned by root, but httpd is trying to access it. - tmplogfile=os.getenv("TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile) - handler=logging.handlers.RotatingFileHandler(tmplogfile,maxBytes=1000000, backupCount=5) - handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")) - self.logger=logging.getLogger(loggername) - self.logger.setLevel(level) - self.logger.addHandler(handler) - self.loggername=loggername - - def setLevel(self,level): - self.logger.setLevel(level) +""" +A reroutable logger that can handle deep tracebacks - # shorthand to avoid having to import logging all over the place - def setLevelDebug(self): - self.logger.setLevel(logging.DEBUG) +Requirements: - # define a verbose option with s/t like - # parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0) - # and pass the coresponding options.verbose to this method to adjust level - def setLevelFromOptVerbose(self,verbose): - if verbose==0: - self.logger.setLevel(logging.WARNING) - elif verbose==1: - self.logger.setLevel(logging.INFO) - elif verbose==2: - self.logger.setLevel(logging.DEBUG) - - #################### - def info(self, msg): - self.logger.info(msg) - - def debug(self, msg): - self.logger.debug(msg) - - def warn(self, msg): - self.logger.warn(msg) - - def error(self, msg): - self.logger.error(msg) - - def critical(self, msg): - self.logger.critical(msg) - - # logs an exception - use in an except statement - def log_exc(self,message): - self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n")) - self.error("%s END TRACEBACK"%message) - - def log_exc_critical(self,message): - self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n")) - self.critical("%s END TRACEBACK"%message) - - # for investigation purposes, can be placed anywhere - def log_stack(self,message): - to_log="".join(traceback.format_stack()) - self.debug("%s BEG STACK"%message+"\n"+to_log) - self.debug("%s END STACK"%message) +* for legacy, we want all our code to just do: + + from sfa.util.sfalogging import logger + ... 
+ logger.info('blabla') + +* depending on whether the code runs (a) inside the server, + (b) as part of sfa-import, or (c) as part of the sfi CLI, + we want these messages to be directed in different places + +* also because troubleshooting is very painful, we need a better way + to report stacks when an exception occurs. + +Implementation: + +* we use a single unique logger name 'sfa' (wrt getLogger()), + and provide an auxiliary function `init_logger()` that + accepts for its `context` parameter one of : + `server`, `import` `sfi` or `console` + It will then reconfigure the 'sfa' logger to do the right thing + +* also we create our own subclass of loggers, and install it + with logging.setLoggerClass(), so we can add our own customized + `log_exc()` method + +""" +# pylint: disable=c0111, c0103, w1201 -info_logger = _SfaLogger(loggername='info', level=logging.INFO) -debug_logger = _SfaLogger(loggername='debug', level=logging.DEBUG) -warn_logger = _SfaLogger(loggername='warning', level=logging.WARNING) -error_logger = _SfaLogger(loggername='error', level=logging.ERROR) -critical_logger = _SfaLogger(loggername='critical', level=logging.CRITICAL) -logger = info_logger -######################################## -import time -def profile(logger): +import os +import os.path +import sys +import traceback +import logging +import logging.handlers +import logging.config + +# so that users of this module don't need to import logging +from logging import (CRITICAL, ERROR, WARNING, INFO, DEBUG) + + +class SfaLogger(logging.getLoggerClass()): """ - Prints the runtime of the specified callable. Use as a decorator, e.g., - - @profile(logger) - def foo(...): - ... + a rewrite of old _SfaLogger class that was way too cumbersome + keep this as much as possible though """ - def logger_profile(callable): - def wrapper(*args, **kwds): - start = time.time() - result = callable(*args, **kwds) - end = time.time() - args = map(str, args) - args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.iteritems()] - # should probably use debug, but then debug is not always enabled - logger.info("PROFILED %s (%s): %.02f s" % (callable.__name__, ", ".join(args), end - start)) - return result - return wrapper - return logger_profile - - -if __name__ == '__main__': - print 'testing sfalogging into logger.log' - logger=_SfaLogger('logger.log') - logger.critical("logger.critical") - logger.error("logger.error") - logger.warning("logger.warning") - logger.info("logger.info") - logger.debug("logger.debug") - logger.setLevel(logging.DEBUG) - logger.debug("logger.debug again") - - - @profile(my_logger) - def sleep(seconds = 1): - time.sleep(seconds) - - my_logger.info('console.info') - sleep(0.5) - my_logger.setLevel(logging.DEBUG) - sleep(0.25) + # shorthand to avoid having to import logging all over the place + def setLevelDebug(self): + self.setLevel(DEBUG) + + def debugEnabled(self): + return self.getEffectiveLevel() == logging.DEBUG + + # define a verbose option with s/t like + # parser.add_option("-v", "--verbose", action="count", + # dest="verbose", default=0) + # and pass the coresponding options.verbose to this method to adjust level + def setLevelFromOptVerbose(self, verbose): + if verbose == 0: + self.setLevel(logging.WARNING) + elif verbose == 1: + self.setLevel(logging.INFO) + elif verbose >= 2: + self.setLevel(logging.DEBUG) + + # in case some other code needs a boolean + @staticmethod + def getBoolVerboseFromOpt(verbose): + return verbose >= 1 + + @staticmethod + def getBoolDebugFromOpt(verbose): + return 
verbose >= 2 + + def log_exc(self, message, limit=100): + """ + standard logger has an exception() method but this will + dump the stack only between the frames + (1) that does `raise` and (2) the one that does `except` + + log_exc() has a limit argument that allows to see deeper than that + + use limit=None to get the same behaviour as exception() + """ + self.error("%s BEG TRACEBACK" % message + "\n" + + traceback.format_exc(limit=limit).strip("\n")) + self.error("%s END TRACEBACK" % message) + + # for investigation purposes, can be placed anywhere + def log_stack(self, message, limit=100): + to_log = "".join(traceback.format_stack(limit=limit)) + self.info("%s BEG STACK" % message + "\n" + to_log) + self.info("%s END STACK" % message) + + def enable_console(self): + formatter = logging.Formatter("%(message)s") + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(formatter) + self.addHandler(handler) + + +# install our class as the default +logging.setLoggerClass(SfaLogger) + + +# configure +# this is *NOT* passed to dictConfig as-is +# instead we filter 'handlers' and 'loggers' +# to contain just one entry +# so make sure that 'handlers' and 'loggers' +# have the same set of keys +def logging_config(context): + if context == 'server': + # use stdout and let journalctl do the heavy lifting + handlername = 'stdout' + #filename = '/var/log/sfa.log' + level = 'DEBUG' + elif context == 'import': + handlername = 'file' + filename = '/var/log/sfa-import.log' + level = 'INFO' + elif context == 'cli': + handlername = 'file' + filename = os.path.expanduser("~/.sfi.log") + level = 'DEBUG' + elif context == 'console': + handlername = 'stdout' + #filename = 'ignored' + level = 'INFO' + else: + print("Cannot configure logging - exiting") + exit(1) + + config = { + 'version': 1, + # IMPORTANT: we may be imported by something else, so: + 'disable_existing_loggers': False, + 'formatters': { + 'standard': { + 'datefmt': '%m-%d %H:%M:%S', + 'format': ('%(asctime)s %(levelname)s ' + '%(filename)s:%(lineno)d %(message)s'), + }, + }, + # fill in later with just the one needed + # otherwise a dummy 'ignored' file gets created + 'handlers': { + }, + 'loggers': { + 'sfa': { + 'handlers': [handlername], + 'level': level, + 'propagate': False, + }, + }, + } + if handlername == 'stdout': + config['handlers']['stdout'] = { + 'level': level, + 'formatter': 'standard', + 'class': 'logging.StreamHandler', + } + else: + config['handlers']['file'] = { + 'filename': filename, + 'level': level, + 'formatter': 'standard', + 'class': 'logging.handlers.TimedRotatingFileHandler', + # every monday and during 3 months + 'when': 'w0', + 'interval': 1, + 'backupCount': 12, + } + return config + + +logger = logging.getLogger('sfa') + + +def init_logger(context): + logging.config.dictConfig(logging_config(context)) + + +# if the user process does not do anything +# like for the miscell testers and other certificate +# probing/dumping utilities +init_logger('console')
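Usage sketch: with this patch applied, client code is expected to import the shared 'sfa' logger and, in entry points, pick a logging policy through init_logger(). The example below relies only on what the rewritten module exposes (logger, init_logger, setLevelFromOptVerbose, log_exc); the argparse wiring and the failing_call() helper are illustrative only and not part of sfa.git.

    # hypothetical CLI entry point exercising the new sfa logging API
    import argparse

    from sfa.util.sfalogging import logger, init_logger


    def failing_call():
        # stand-in for any application code that may raise
        return 1 // 0


    def main():
        parser = argparse.ArgumentParser()
        parser.add_argument("-v", "--verbose", action="count", default=0)
        args = parser.parse_args()

        # 'cli' routes messages to a TimedRotatingFileHandler on ~/.sfi.log;
        # 'console' or 'server' would send them to stdout instead
        init_logger('cli')
        # map no flag / -v / -vv onto WARNING / INFO / DEBUG
        logger.setLevelFromOptVerbose(args.verbose)

        logger.info("starting up")
        try:
            failing_call()
        except Exception:
            # log_exc() wraps the traceback between BEG/END markers and,
            # unlike the stock exception(), takes a limit on how deep to report
            logger.log_exc("failing_call blew up")


    if __name__ == '__main__':
        main()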