I would just wrap the main logging class with the custom logic I needed like this:
import logging
class CustomLogger(object):
    """Wrap a stdlib logger so a fixed ``extra`` dict is injected into every call.

    NOTE(review): the standard library's ``logging.LoggerAdapter`` provides this
    exact behavior out of the box; this class is kept for the example's sake.

    :param logger_name: name passed to ``logging.getLogger``.
    :param log_format: format string handed to ``logging.basicConfig``.
    :param extra: dict merged into each LogRecord's ``__dict__`` (or ``None``).
    """

    def __init__(self, logger_name, log_format, extra=None):
        # basicConfig only configures the root logger the first time it runs;
        # later instances reuse the existing configuration.
        logging.basicConfig(format=log_format)
        self.logger = logging.getLogger(logger_name)
        self.extra = extra

    def _log(self, method, msg, *args, **kwargs):
        # Single funnel so every level forwards the shared `extra` context.
        method(msg, *args, extra=self.extra, **kwargs)

    def debug(self, msg, *args, **kwargs):
        self._log(self.logger.debug, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        self._log(self.logger.info, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        self._log(self.logger.warning, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self._log(self.logger.error, msg, *args, **kwargs)
Then wherever you need a logger create it from that class:
# FIX: the import statement was split across two lines by the page scrape
# ("from custom_logging" / "import CustomLogger"), which is a SyntaxError.
from custom_logging import CustomLogger

logger_name = 'my_logger'
# Format references the custom keys supplied via `extras`.
log_format = '%(asctime)-15s %(app_name)-8s %(env)-8s %(message)s'
extras = {
    "app_name": "myapp",
    "env": "prod"
}
logger = CustomLogger(logger_name, log_format, extras)
logger.info('Testing it out')
If you are using python-json-logger and have a custom formatter class, you can add those fields directly to log_record in the add_fields() method.
# FIX: both `from X import Y` statements were split across two lines by the
# page scrape, which is a SyntaxError; merged back onto single lines.
import json
import logging
from datetime import datetime

from pythonjsonlogger import jsonlogger


class CustomJsonFormatter(jsonlogger.JsonFormatter):
    """JSON formatter that stamps every record with datetime, level, app_name, env."""

    def add_fields(self, log_record, record, message_dict):
        super(CustomJsonFormatter, self).add_fields(log_record, record, message_dict)
        if not log_record.get('datetime'):
            # Uses "now" rather than record.created, so the stamp can drift slightly.
            # (datetime.utcnow() is deprecated in 3.12+; kept for parity with the source.)
            log_record['datetime'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        if log_record.get('level'):
            log_record['level'] = log_record['level'].upper()
        else:
            log_record['level'] = record.levelname
        # Static context merged into every log entry.
        log_record['app_name'] = 'myapp'
        log_record['env'] = 'prod'


logger = logging.getLogger()
logHandler = logging.StreamHandler()
formatter = CustomJsonFormatter('%(datetime)s %(level)s %(message)s')
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
logger.setLevel(logging.INFO)
logger.info("Something happened.")
This prints out:
{
"datetime": "2021-12-28T23:43:25.503081Z",
"level": "INFO",
"message": "Something happened.",
"app_name": "myapp",
"env": "prod"
}
I'm using the python logging module, along with python-json-logger and I would like to add a few keys:, Then wherever you need a logger create it from that class:, To all of my logs automatically without doing the following., If you are using python-json-logger and have a custom formatter class, you can add those fields directly to log_record in the add_fields() method.
I'm using the python logging module, along with python-json-logger and I would like to add a few keys:
{
"app_name": "myapp",
"env": "prod"
}
To all of my logs automatically without doing the following.
# The per-call approach the question wants to avoid: repeating `extra` everywhere.
logger.info(
    "Something happened",
    extra={"app_name": "myapp", "env": "prod"},
)
I would just wrap the main logging class with the custom logic I needed like this:
import logging
class CustomLogger(object):
    """Wrap a stdlib logger so a fixed ``extra`` dict is injected into every call.

    NOTE(review): the standard library's ``logging.LoggerAdapter`` provides this
    exact behavior out of the box; this class is kept for the example's sake.

    :param logger_name: name passed to ``logging.getLogger``.
    :param log_format: format string handed to ``logging.basicConfig``.
    :param extra: dict merged into each LogRecord's ``__dict__`` (or ``None``).
    """

    def __init__(self, logger_name, log_format, extra=None):
        # basicConfig only configures the root logger the first time it runs;
        # later instances reuse the existing configuration.
        logging.basicConfig(format=log_format)
        self.logger = logging.getLogger(logger_name)
        self.extra = extra

    def _log(self, method, msg, *args, **kwargs):
        # Single funnel so every level forwards the shared `extra` context.
        method(msg, *args, extra=self.extra, **kwargs)

    def debug(self, msg, *args, **kwargs):
        self._log(self.logger.debug, msg, *args, **kwargs)

    def info(self, msg, *args, **kwargs):
        self._log(self.logger.info, msg, *args, **kwargs)

    def warning(self, msg, *args, **kwargs):
        self._log(self.logger.warning, msg, *args, **kwargs)

    def error(self, msg, *args, **kwargs):
        self._log(self.logger.error, msg, *args, **kwargs)
Then wherever you need a logger create it from that class:
# FIX: the import statement was split across two lines by the page scrape
# ("from custom_logging" / "import CustomLogger"), which is a SyntaxError.
from custom_logging import CustomLogger

logger_name = 'my_logger'
# Format references the custom keys supplied via `extras`.
log_format = '%(asctime)-15s %(app_name)-8s %(env)-8s %(message)s'
extras = {
    "app_name": "myapp",
    "env": "prod"
}
logger = CustomLogger(logger_name, log_format, extras)
logger.info('Testing it out')
If you are using python-json-logger and have a custom formatter class, you can add those fields directly to log_record in the add_fields() method.
# FIX: both `from X import Y` statements were split across two lines by the
# page scrape, which is a SyntaxError; merged back onto single lines.
import json
import logging
from datetime import datetime

from pythonjsonlogger import jsonlogger


class CustomJsonFormatter(jsonlogger.JsonFormatter):
    """JSON formatter that stamps every record with datetime, level, app_name, env."""

    def add_fields(self, log_record, record, message_dict):
        super(CustomJsonFormatter, self).add_fields(log_record, record, message_dict)
        if not log_record.get('datetime'):
            # Uses "now" rather than record.created, so the stamp can drift slightly.
            # (datetime.utcnow() is deprecated in 3.12+; kept for parity with the source.)
            log_record['datetime'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        if log_record.get('level'):
            log_record['level'] = log_record['level'].upper()
        else:
            log_record['level'] = record.levelname
        # Static context merged into every log entry.
        log_record['app_name'] = 'myapp'
        log_record['env'] = 'prod'


logger = logging.getLogger()
logHandler = logging.StreamHandler()
formatter = CustomJsonFormatter('%(datetime)s %(level)s %(message)s')
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
logger.setLevel(logging.INFO)
logger.info("Something happened.")
This prints out:
{
"datetime": "2021-12-28T23:43:25.503081Z",
"level": "INFO",
"message": "Something happened.",
"app_name": "myapp",
"env": "prod"
}
Published: April 11, 2019
import logging


def word_count(myfile):
    """Count the space-separated words in *myfile* and log the result.

    :param myfile: path to a text file.
    :returns: the word count, or ``None`` when the file cannot be read.
    """
    # NOTE: basicConfig only takes effect on its first call; subsequent calls
    # (and calls after any other root-logger configuration) are no-ops.
    logging.basicConfig(level=logging.DEBUG, filename='myapp.log',
                        format='%(asctime)s %(levelname)s:%(message)s')
    try:
        with open(myfile, 'r') as f:
            file_data = f.read()
        words = file_data.split(" ")
        num_words = len(words)
        logging.debug("this file has %d words", num_words)
        return num_words
    except OSError:
        # FIX: the original line ended in a stray "[...]" scrape artifact that
        # would have subscripted logging.error()'s None return and crashed.
        # logging.exception also records the traceback for the failed read.
        logging.exception("error reading the file")
        return None
2019-03-27 10:49:00,979 DEBUG:this file has 44 words
2019-03-27 10:49:00,979 ERROR:error reading the file
# Module-level logger named after the importing module — preferred over
# logging through the root logger so records carry the module name.
logger = logging.getLogger(__name__)
# Two cooperating modules, shown here re-formatted (the scrape collapsed both
# onto a single line).

# lowermodule.py
import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)s %(levelname)s:%(message)s')
logger = logging.getLogger(__name__)


def word_count(myfile):
    """Count space-separated words in *myfile*; returns None on read error."""
    try:
        with open(myfile, 'r') as f:
            file_data = f.read()
        words = file_data.split(" ")
        final_word_count = len(words)
        logger.info("this file has %d words", final_word_count)
        return final_word_count
    except OSError:
        logger.error("error reading the file")


# uppermodule.py
import logging
import lowermodule

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)s %(levelname)s:%(message)s')
logger = logging.getLogger(__name__)


def record_word_count(myfile):
    """Count words in *myfile* and append "<file>,<count>" to the archive CSV."""
    logger.info("starting the function")
    try:
        word_count = lowermodule.word_count(myfile)
        with open('wordcountarchive.csv', 'a') as file:
            row = str(myfile) + ',' + str(word_count)
            file.write(row + '\n')
    except Exception:
        # FIX: was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; Exception keeps the same best-effort behavior.
        logger.warning("could not write file %s to destination", myfile)
    finally:
        logger.debug("the function is done for the file %s", myfile)
2019-03-27 21:16:41,200 __main__ INFO:starting the function
2019-03-27 21:16:41,200 lowermodule INFO:this file has 44 words
2019-03-27 21:16:41,201 __main__ DEBUG:the function is done for the file myfile.txt
2019-03-27 21:16:41,201 __main__ INFO:starting the function
2019-03-27 21:16:41,202 lowermodule ERROR:[Errno 2] No such file or directory: 'nonexistentfile.txt'
2019-03-27 21:16:41,202 __main__ DEBUG:the function is done for the file nonexistentfile.txt
[loggers]
keys = root
[handlers]
keys = fileHandler
[formatters]
keys = simpleFormatter
[logger_root]
level = DEBUG
handlers = fileHandler
[handler_fileHandler]
class = FileHandler
level = DEBUG
formatter = simpleFormatter
args = ("/path/to/log/file.log", )
[formatter_simpleFormatter]
format = %(asctime)s %(name)s - %(levelname)s: %(message)s
The advantage of using ‘extra’ is that the values in the dict-like object are merged into the LogRecord instance’s __dict__, allowing you to use customized strings with your Formatter instances which know about the keys of the dict-like object. If you need a different method, e.g. if you want to prepend or append the contextual information to the message string, you just need to subclass LoggerAdapter and override process() to do what you need. Here is a simple example:,You can also add contextual information to log output using a user-defined Filter. Filter instances are allowed to modify the LogRecords passed to them, including adding additional attributes which can then be output using a suitable format string, or if needed a custom Formatter.,You don’t need to pass an actual dict to a LoggerAdapter - you could pass an instance of a class which implements __getitem__ and __iter__ so that it looks like a dict to logging. This would be useful if you want to generate values dynamically (whereas the values in a dict would be constant).,Using particular formatting styles throughout your application Using LogRecord factories Using custom message objects
# Main module of the logging-cookbook example, re-formatted from the scraped
# single-line form. Configures one logger with a file handler (DEBUG) and a
# console handler (ERROR), then drives auxiliary_module.
import logging
import auxiliary_module

# create logger with 'spam_application'
logger = logging.getLogger('spam_application')
logger.setLevel(logging.DEBUG)

# create file handler which logs even debug messages
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)

# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)

# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)

# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)

logger.info('creating an instance of auxiliary_module.Auxiliary')
a = auxiliary_module.Auxiliary()
logger.info('created an instance of auxiliary_module.Auxiliary')
logger.info('calling auxiliary_module.Auxiliary.do_something')
a.do_something()
logger.info('finished auxiliary_module.Auxiliary.do_something')
logger.info('calling auxiliary_module.some_function()')
auxiliary_module.some_function()
logger.info('done with auxiliary_module.some_function()')
# auxiliary_module.py, re-formatted from the scraped single-line form.
import logging

# create logger
module_logger = logging.getLogger('spam_application.auxiliary')


class Auxiliary:
    """Example class whose instances log through a child of the module logger."""

    def __init__(self):
        self.logger = logging.getLogger('spam_application.auxiliary.Auxiliary')
        self.logger.info('creating an instance of Auxiliary')

    def do_something(self):
        # Trivial "work" bracketed by log messages.
        self.logger.info('doing something')
        a = 1 + 1
        self.logger.info('done doing something')


def some_function():
    """Module-level function that logs through the module logger."""
    module_logger.info('received a call to "some_function"')
2005-03-23 23:47:11,663 - spam_application - INFO - creating an instance of auxiliary_module.Auxiliary
2005-03-23 23:47:11,665 - spam_application.auxiliary.Auxiliary - INFO - creating an instance of Auxiliary
2005-03-23 23:47:11,665 - spam_application - INFO - created an instance of auxiliary_module.Auxiliary
2005-03-23 23:47:11,668 - spam_application - INFO - calling auxiliary_module.Auxiliary.do_something
2005-03-23 23:47:11,668 - spam_application.auxiliary.Auxiliary - INFO - doing something
2005-03-23 23:47:11,669 - spam_application.auxiliary.Auxiliary - INFO - done doing something
2005-03-23 23:47:11,670 - spam_application - INFO - finished auxiliary_module.Auxiliary.do_something
2005-03-23 23:47:11,671 - spam_application - INFO - calling auxiliary_module.some_function()
2005-03-23 23:47:11,672 - spam_application.auxiliary - INFO - received a call to 'some_function'
2005-03-23 23:47:11,673 - spam_application - INFO - done with auxiliary_module.some_function()
import logging
import threading
import time
def worker(arg):
    """Log every 0.5 s until the shared dict's 'stop' flag becomes truthy."""
    while True:
        if arg['stop']:
            break
        logging.debug('Hi from myfunc')
        time.sleep(0.5)
def main():
    """Start the worker thread, then log from the main thread until Ctrl-C."""
    logging.basicConfig(level=logging.DEBUG,
                        format='%(relativeCreated)6d %(threadName)s %(message)s')
    shared_state = {'stop': False}
    background = threading.Thread(target=worker, args=(shared_state,))
    background.start()
    try:
        while True:
            logging.debug('Hello from main')
            time.sleep(0.75)
    except KeyboardInterrupt:
        # Signal the worker to finish, then fall through to join it.
        shared_state['stop'] = True
    background.join()


if __name__ == '__main__':
    main()
     0 Thread-1 Hi from myfunc
     3 MainThread Hello from main
   505 Thread-1 Hi from myfunc
   755 MainThread Hello from main
  1007 Thread-1 Hi from myfunc
  1507 MainThread Hello from main
  1508 Thread-1 Hi from myfunc
  2010 Thread-1 Hi from myfunc
  2258 MainThread Hello from main
  2512 Thread-1 Hi from myfunc
  3009 MainThread Hello from main
  3013 Thread-1 Hi from myfunc
  3515 Thread-1 Hi from myfunc
  3761 MainThread Hello from main
  4017 Thread-1 Hi from myfunc
  4513 MainThread Hello from main
  4518 Thread-1 Hi from myfunc
# Basic logging-cookbook set-up, re-formatted from the scraped single-line form:
# one named logger, a DEBUG file handler and an ERROR console handler.
import logging

logger = logging.getLogger('simple_example')
logger.setLevel(logging.DEBUG)

# create file handler which logs even debug messages
fh = logging.FileHandler('spam.log')
fh.setLevel(logging.DEBUG)

# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)

# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
fh.setFormatter(formatter)

# add the handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)

# 'application' code
logger.debug('debug message')
logger.info('info message')
logger.warning('warn message')
logger.error('error message')
logger.critical('critical message')
You can also add extra fields to your json output by specifying a dict in place of message, as well as by specifying an extra={} argument.,Contents of these dictionaries will be added at the root level of the entry and may override basic fields.,You can also use the add_fields method to add to or generally normalize the default set of fields; it is called for every log event. For example, to unify default fields with those provided by structlog you could do something like this:,The fmt parser can also be overridden if you want to have required fields that differ from the default of just message.
Pip:
pip install python-json-logger
Manual:
python setup.py install
Please note: version 0.1.0 has changed the import structure, please update to the following example for proper importing
# FIX: the `from pythonjsonlogger import jsonlogger` statement was split
# across two lines by the page scrape, which is a SyntaxError; merged back.
import logging

from pythonjsonlogger import jsonlogger

# Attach a JSON formatter to a stream handler on the root logger.
logger = logging.getLogger()
logHandler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter()
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
You can also use the add_fields method to add to or generally normalize the default set of fields; it is called for every log event. For example, to unify default fields with those provided by structlog you could do something like this:
# Re-formatted from the scraped single-line form.
class CustomJsonFormatter(jsonlogger.JsonFormatter):
    """JSON formatter that guarantees a `timestamp` and an upper-case `level`."""

    def add_fields(self, log_record, record, message_dict):
        super(CustomJsonFormatter, self).add_fields(log_record, record, message_dict)
        if not log_record.get('timestamp'):
            # this doesn't use record.created, so it is slightly off
            now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
            log_record['timestamp'] = now
        if log_record.get('level'):
            log_record['level'] = log_record['level'].upper()
        else:
            log_record['level'] = record.levelname


formatter = CustomJsonFormatter('%(timestamp)s %(level)s %(name)s %(message)s')
For custom handling of object serialization you can specify default json object translator or provide a custom encoder
def json_translate(obj):
    """`json_default` hook: serialize MyClass instances as a plain dict.

    Other types fall through (return None), i.e. they serialize as null.
    """
    if isinstance(obj, MyClass):
        return {"special": obj.special}


formatter = jsonlogger.JsonFormatter(json_default=json_translate,
                                     json_encoder=json.JSONEncoder)
logHandler.setFormatter(formatter)
# A dict in place of the message: its keys land at the root of the JSON entry.
logger.info({"special": "value", "run": 12})
# Or keep a plain message and attach the fields through `extra`.
logger.info("classic message", extra={"special": "value", "run": 12})
By Tomislav Levanic on February 24, 2020
{
"message": "message",
"time": "created",
"log_level": "levelname",
"process": "process",
"process_name": "processName",
"thread": "thread",
"thread_name": "threadName",
"traceback": "exc_text"
}
import logging

import useful.logs

# Configure structured JSON logging once, at program start-up.
useful.logs.setup()
_log = logging.getLogger(__name__)

if __name__ == '__main__':
    # Fields passed via `extra` become top-level keys in the JSON entry.
    _log.info("testing", extra={"app": "AppName", "metric": 1.2})
import logging
import sys


def except_logging(exc_type, exc_value, exc_traceback):
    """sys.excepthook replacement: route uncaught exceptions to the root logger."""
    details = (exc_type, exc_value, exc_traceback)
    logging.error("Uncaught exception", exc_info=details)


# Install the hook so unhandled exceptions are logged instead of only printed.
sys.excepthook = except_logging
import logging
import sys


def unraisable_logging(args):
    """sys.unraisablehook replacement: log exceptions Python cannot raise.

    *args* is the hook's argument object/tuple:
    (exc_type, exc_value, exc_traceback, err_msg, object).
    """
    exc_type, exc_value, exc_traceback, err_msg, _ = args
    message = err_msg if err_msg else "Unraisable exception"
    logging.error(message, exc_info=(exc_type, exc_value, exc_traceback))


sys.unraisablehook = unraisable_logging
import logging
import threading


def threading_except_logging(args):
    """threading.excepthook replacement: log exceptions escaping Thread.run().

    *args* carries (exc_type, exc_value, exc_traceback, thread).
    """
    exc_type, exc_value, exc_traceback, _ = args
    details = (exc_type, exc_value, exc_traceback)
    logging.error("Uncaught threading exception", exc_info=details)


threading.excepthook = threading_except_logging