-#!/usr/bin/python\r
-\r
-#----------------------------------------------------------------------\r
-# Copyright (c) 2008 Board of Trustees, Princeton University\r
-#\r
-# Permission is hereby granted, free of charge, to any person obtaining\r
-# a copy of this software and/or hardware specification (the "Work") to\r
-# deal in the Work without restriction, including without limitation the\r
-# rights to use, copy, modify, merge, publish, distribute, sublicense,\r
-# and/or sell copies of the Work, and to permit persons to whom the Work\r
-# is furnished to do so, subject to the following conditions:\r
-#\r
-# The above copyright notice and this permission notice shall be\r
-# included in all copies or substantial portions of the Work.\r
-#\r
-# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS \r
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF \r
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND \r
-# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT \r
-# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \r
-# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, \r
-# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS \r
-# IN THE WORK.\r
-#----------------------------------------------------------------------\r
-\r
-import os, sys\r
-import traceback\r
-import logging, logging.handlers\r
-\r
-CRITICAL=logging.CRITICAL\r
-ERROR=logging.ERROR\r
-WARNING=logging.WARNING\r
-INFO=logging.INFO\r
-DEBUG=logging.DEBUG\r
-\r
-# a logger that can handle tracebacks \r
-class _SfaLogger:\r
- def __init__ (self,logfile=None,loggername=None,level=logging.INFO):\r
- # default is to locate loggername from the logfile if avail.\r
- if not logfile:\r
- #loggername='console'\r
- #handler=logging.StreamHandler()\r
- #handler.setFormatter(logging.Formatter("%(levelname)s %(message)s"))\r
- logfile = "/var/log/sfa.log"\r
-\r
- if not loggername:\r
- loggername=os.path.basename(logfile)\r
- try:\r
- handler=logging.handlers.RotatingFileHandler(logfile,maxBytes=1000000, backupCount=5) \r
- except IOError:\r
- # This is usually a permissions error becaue the file is\r
- # owned by root, but httpd is trying to access it.\r
- tmplogfile=os.getenv("TMPDIR", "/tmp") + os.path.sep + os.path.basename(logfile)\r
- # In strange uses, 2 users on same machine might use same code,\r
- # meaning they would clobber each others files\r
- # We could (a) rename the tmplogfile, or (b)\r
- # just log to the console in that case.\r
- # Here we default to the console.\r
- if os.path.exists(tmplogfile) and not os.access(tmplogfile,os.W_OK):\r
- loggername = loggername + "-console"\r
- handler = logging.StreamHandler()\r
- else:\r
- handler=logging.handlers.RotatingFileHandler(tmplogfile,maxBytes=1000000, backupCount=5) \r
- handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s - %(message)s"))\r
- self.logger=logging.getLogger(loggername)\r
- self.logger.setLevel(level)\r
- # check if logger already has the handler we're about to add\r
- handler_exists = False\r
- for l_handler in self.logger.handlers:\r
- if l_handler.baseFilename == handler.baseFilename and \\r
- l_handler.level == handler.level:\r
- handler_exists = True \r
-\r
- if not handler_exists:\r
- self.logger.addHandler(handler)\r
-\r
- self.loggername=loggername\r
-\r
- def setLevel(self,level):\r
- self.logger.setLevel(level)\r
-\r
- # shorthand to avoid having to import logging all over the place\r
- def setLevelDebug(self):\r
- self.logger.setLevel(logging.DEBUG)\r
-\r
- # define a verbose option with s/t like\r
- # parser.add_option("-v", "--verbose", action="count", dest="verbose", default=0)\r
- # and pass the coresponding options.verbose to this method to adjust level\r
- def setLevelFromOptVerbose(self,verbose):\r
- if verbose==0:\r
- self.logger.setLevel(logging.WARNING)\r
- elif verbose==1:\r
- self.logger.setLevel(logging.INFO)\r
- elif verbose>=2:\r
- self.logger.setLevel(logging.DEBUG)\r
- # in case some other code needs a boolean\r
- def getBoolVerboseFromOpt(self,verbose):\r
- return verbose>=1\r
-\r
- ####################\r
- def info(self, msg):\r
- self.logger.info(msg)\r
-\r
- def debug(self, msg):\r
- self.logger.debug(msg)\r
- \r
- def warn(self, msg):\r
- self.logger.warn(msg)\r
-\r
- # some code is using logger.warn(), some is using logger.warning()\r
- def warning(self, msg):\r
- self.logger.warning(msg)\r
- \r
- def error(self, msg):\r
- self.logger.error(msg) \r
- \r
- def critical(self, msg):\r
- self.logger.critical(msg)\r
-\r
- # logs an exception - use in an except statement\r
- def log_exc(self,message):\r
- self.error("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))\r
- self.error("%s END TRACEBACK"%message)\r
- \r
- def log_exc_critical(self,message):\r
- self.critical("%s BEG TRACEBACK"%message+"\n"+traceback.format_exc().strip("\n"))\r
- self.critical("%s END TRACEBACK"%message)\r
- \r
- # for investigation purposes, can be placed anywhere\r
- def log_stack(self,message):\r
- to_log="".join(traceback.format_stack())\r
- self.info("%s BEG STACK"%message+"\n"+to_log)\r
- self.info("%s END STACK"%message)\r
-\r
- def enable_console(self, stream=sys.stdout):\r
- formatter = logging.Formatter("%(message)s")\r
- handler = logging.StreamHandler(stream)\r
- handler.setFormatter(formatter)\r
- self.logger.addHandler(handler)\r
-\r
-\r
-info_logger = _SfaLogger(loggername='info', level=logging.INFO)\r
-debug_logger = _SfaLogger(loggername='debug', level=logging.DEBUG)\r
-warn_logger = _SfaLogger(loggername='warning', level=logging.WARNING)\r
-error_logger = _SfaLogger(loggername='error', level=logging.ERROR)\r
-critical_logger = _SfaLogger(loggername='critical', level=logging.CRITICAL)\r
-logger = info_logger\r
-sfi_logger = _SfaLogger(logfile=os.path.expanduser("~/.sfi/")+'sfi.log',loggername='sfilog', level=logging.DEBUG)\r
-########################################\r
-import time\r
-\r
-def profile(logger):\r
- """\r
- Prints the runtime of the specified callable. Use as a decorator, e.g.,\r
- \r
- @profile(logger)\r
- def foo(...):\r
- ...\r
- """\r
- def logger_profile(callable):\r
- def wrapper(*args, **kwds):\r
- start = time.time()\r
- result = callable(*args, **kwds)\r
- end = time.time()\r
- args = map(str, args)\r
- args += ["%s = %s" % (name, str(value)) for (name, value) in kwds.iteritems()]\r
- # should probably use debug, but then debug is not always enabled\r
- logger.info("PROFILED %s (%s): %.02f s" % (callable.__name__, ", ".join(args), end - start))\r
- return result\r
- return wrapper\r
- return logger_profile\r
-\r
-\r
-if __name__ == '__main__': \r
- print 'testing sfalogging into logger.log'\r
- logger1=_SfaLogger('logger.log', loggername='std(info)')\r
- logger2=_SfaLogger('logger.log', loggername='error', level=logging.ERROR)\r
- logger3=_SfaLogger('logger.log', loggername='debug', level=logging.DEBUG)\r
- \r
- for (logger,msg) in [ (logger1,"std(info)"),(logger2,"error"),(logger3,"debug")]:\r
- \r
- print "====================",msg, logger.logger.handlers\r
- \r
- logger.enable_console()\r
- logger.critical("logger.critical")\r
- logger.error("logger.error")\r
- logger.warn("logger.warning")\r
- logger.info("logger.info")\r
- logger.debug("logger.debug")\r
- logger.setLevel(logging.DEBUG)\r
- logger.debug("logger.debug again")\r
- \r
- @profile(logger)\r
- def sleep(seconds = 1):\r
- time.sleep(seconds)\r
-\r
- logger.info('console.info')\r
- sleep(0.5)\r
- logger.setLevel(logging.DEBUG)\r
- sleep(0.25)\r
-\r
+#!/usr/bin/python
+
+"""
+A reroutable logger that can handle deep tracebacks
+
+Requirements:
+
+* for legacy, we want all our code to just do:
+
+ from sfa.util.sfalogging import logger
+ ...
+ logger.info('blabla')
+
+* depending on whether the code runs (a) inside the server,
+ (b) as part of sfa-import, or (c) as part of the sfi CLI,
+ we want these messages to be directed in different places
+
+* also because troubleshooting is very painful, we need a better way
+ to report stacks when an exception occurs.
+
+Implementation:
+
+* we use a single unique logger name 'sfa' (wrt getLogger()),
+ and provide an auxiliary function `init_logger()` that
+  accepts for its `context` parameter one of:
+  `server`, `import`, `cli` or `console`
+ It will then reconfigure the 'sfa' logger to do the right thing
+
+* also we create our own Logger subclass, and install it
+ with logging.setLoggerClass(), so we can add our own customized
+ `log_exc()` method
+
+"""
+
+# pylint: disable=c0111, c0103, w1201
+
+from __future__ import print_function
+
+import os
+import os.path
+import sys
+import traceback
+import logging
+import logging.handlers
+import logging.config
+
+# so that users of this module don't need to import logging
+from logging import (CRITICAL, ERROR, WARNING, INFO, DEBUG)
+
+
+class SfaLogger(logging.getLoggerClass()):
+ """
+ a rewrite of old _SfaLogger class that was way too cumbersome
+ keep this as much as possible though
+ """
+
+ # shorthand to avoid having to import logging all over the place
+ def setLevelDebug(self):
+ self.setLevel(DEBUG)
+
+ def debugEnabled(self):
+ return self.getEffectiveLevel() == logging.DEBUG
+
+ # define a verbose option with s/t like
+ # parser.add_option("-v", "--verbose", action="count",
+ # dest="verbose", default=0)
+    # and pass the corresponding options.verbose to this method to adjust level
+ def setLevelFromOptVerbose(self, verbose):
+ if verbose == 0:
+ self.setLevel(logging.WARNING)
+ elif verbose == 1:
+ self.setLevel(logging.INFO)
+ elif verbose >= 2:
+ self.setLevel(logging.DEBUG)
+
+ # in case some other code needs a boolean
+ @staticmethod
+ def getBoolVerboseFromOpt(verbose):
+ return verbose >= 1
+
+ @staticmethod
+ def getBoolDebugFromOpt(verbose):
+ return verbose >= 2
+
+ def log_exc(self, message, limit=100):
+ """
+ standard logger has an exception() method but this will
+ dump the stack only between the frames
+ (1) that does `raise` and (2) the one that does `except`
+
+ log_exc() has a limit argument that allows to see deeper than that
+
+ use limit=None to get the same behaviour as exception()
+ """
+ self.error("%s BEG TRACEBACK" % message + "\n" +
+ traceback.format_exc(limit=limit).strip("\n"))
+ self.error("%s END TRACEBACK" % message)
+
+ # for investigation purposes, can be placed anywhere
+ def log_stack(self, message, limit=100):
+ to_log = "".join(traceback.format_stack(limit=limit))
+ self.info("%s BEG STACK" % message + "\n" + to_log)
+ self.info("%s END STACK" % message)
+
+ def enable_console(self):
+ formatter = logging.Formatter("%(message)s")
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setFormatter(formatter)
+ self.addHandler(handler)
+
+
+# install our class as the default
+logging.setLoggerClass(SfaLogger)
+
+
+# configure
+# the dict returned by logging_config() is passed to
+# dictConfig() as-is; 'handlers' is populated with just
+# the one handler that the given context needs, so that
+# no dummy 'ignored' file gets created for unused handlers
+def logging_config(context):
+ if context == 'server':
+ handlername = 'file'
+ filename = '/var/log/sfa.log'
+ level = 'INFO'
+ elif context == 'import':
+ handlername = 'file'
+ filename = '/var/log/sfa-import.log'
+ level = 'INFO'
+ elif context == 'cli':
+ handlername = 'file'
+ filename = os.path.expanduser("~/.sfi.log")
+ level = 'DEBUG'
+ elif context == 'console':
+ handlername = 'stdout'
+ filename = 'ignored'
+ level = 'INFO'
+ else:
+ print("Cannot configure logging - exiting")
+ exit(1)
+
+ config = {
+ 'version': 1,
+ # IMPORTANT: we may be imported by something else, so:
+ 'disable_existing_loggers': False,
+ 'formatters': {
+ 'standard': {
+ 'datefmt': '%m-%d %H:%M:%S',
+ 'format': ('%(asctime)s %(levelname)s '
+ '%(filename)s:%(lineno)d %(message)s'),
+ },
+ },
+ # fill in later with just the one needed
+ # otherwise a dummy 'ignored' file gets created
+ 'handlers': {
+ },
+ 'loggers': {
+ 'sfa': {
+ 'handlers': [handlername],
+ 'level': level,
+ 'propagate': False,
+ },
+ },
+ }
+ if handlername == 'stdout':
+ config['handlers']['stdout'] = {
+ 'level': level,
+ 'formatter': 'standard',
+ 'class': 'logging.StreamHandler',
+ }
+ else:
+ config['handlers']['file'] = {
+ 'filename': filename,
+ 'level': level,
+ 'formatter': 'standard',
+ 'class': 'logging.handlers.TimedRotatingFileHandler',
+            # rotate every Monday ('w0'), keeping 12 weekly backups (~3 months)
+ 'when': 'w0',
+ 'interval': 1,
+ 'backupCount': 12,
+ }
+ return config
+
+
+logger = logging.getLogger('sfa')
+
+
+def init_logger(context):
+ logging.config.dictConfig(logging_config(context))
+
+
+# if the user process does not do anything
+# like for the miscell testers and other certificate
+# probing/dumping utilities
+init_logger('console')