Python Logging
Logging is essential for debugging, monitoring, and maintaining Python applications. The logging module provides a flexible framework for recording events, errors, and diagnostic information.
Logging Basics
The logging module provides different log levels and handlers.
Simple Logging
import logging
# Basic configuration
logging.basicConfig(level=logging.DEBUG, format='%(levelname)s: %(message)s')
# Log messages at different levels
logging.debug("This is a debug message")
logging.info("This is an info message")
logging.warning("This is a warning message")
logging.error("This is an error message")
logging.critical("This is a critical message")basicConfig() sets up basic logging. Messages below the set level are ignored.
Log Levels
import logging
# Log levels (in order of severity):
# DEBUG: Detailed information for debugging
# INFO: General information about program execution
# WARNING: Indicates potential problems
# ERROR: Errors that don't stop program execution
# CRITICAL: Serious errors that may stop the program
logging.basicConfig(level=logging.DEBUG)
logging.debug("Debug: Variable x = 42")
logging.info("Info: Program started successfully")
logging.warning("Warning: Low disk space")
logging.error("Error: File not found")
logging.critical("Critical: System out of memory")Choose appropriate log levels for different types of messages.
Advanced Configuration
Customize logging format, handlers, and behavior.
Custom Format
import logging
# Custom format with timestamp and module name
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logging.info("Application started")
logging.warning("This is a warning with timestamp")
Format strings control log message appearance. See the LogRecord attributes for all options.
File Logging
import logging
# Log to file
logging.basicConfig(
    filename='app.log',
    level=logging.DEBUG,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
# Only INFO and above should reach the file
logging.getLogger().handlers[0].setLevel(logging.INFO)
logging.info("This will be written to app.log")
logging.error("Error message in log file")
# Also log to console
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
# Add console handler to root logger
logging.getLogger().addHandler(console)
logging.debug("This appears in console only")
logging.info("This appears in both file and console")
Use handlers to send logs to different destinations.
Loggers, Handlers, Formatters
The logging system has three main components.
Logger Objects
import logging
# Create logger
logger = logging.getLogger('my_app')
logger.setLevel(logging.DEBUG)
# Create console handler
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
# Create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
# Add handler to logger
logger.addHandler(console_handler)
# Use logger
logger.debug("Debug message")
logger.info("Info message")
logger.warning("Warning message")
# Child loggers
db_logger = logging.getLogger('my_app.database')
api_logger = logging.getLogger('my_app.api')
db_logger.info("Database connected")
api_logger.warning("API rate limit exceeded")
Organize logging with hierarchical loggers. Child loggers inherit their parent's effective level and propagate records to its handlers.
Multiple Handlers
import logging
# Create logger
logger = logging.getLogger('multi_handler')
logger.setLevel(logging.DEBUG)
# File handler
file_handler = logging.FileHandler('debug.log')
file_handler.setLevel(logging.DEBUG)
file_formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s')
file_handler.setFormatter(file_formatter)
# Console handler
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.WARNING)
console_formatter = logging.Formatter('%(levelname)s: %(message)s')
console_handler.setFormatter(console_formatter)
# Add both handlers
logger.addHandler(file_handler)
logger.addHandler(console_handler)
# Test different levels
logger.debug("This goes to file only")
logger.info("This goes to file only")
logger.warning("This goes to both file and console")
logger.error("This goes to both file and console")Different handlers can have different levels and formats.
Configuration Files
Configure logging using configuration files or dictionaries.
Dictionary Configuration
import logging.config
# Logging configuration dictionary
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        },
        'detailed': {
            'format': '%(asctime)s - %(name)s - %(levelname)s - %(module)s:%(lineno)d - %(message)s'
        }
    },
    'handlers': {
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'INFO',
            'formatter': 'standard'
        },
        'file': {
            'class': 'logging.FileHandler',
            'level': 'DEBUG',
            'filename': 'app.log',
            'formatter': 'detailed'
        }
    },
    'loggers': {
        '': {  # Root logger
            'handlers': ['console', 'file'],
            'level': 'DEBUG'
        },
        'my_app.database': {
            'handlers': ['file'],
            'level': 'INFO',
            'propagate': False  # Don't pass records to ancestor loggers
        }
    }
}
# Apply configuration
logging.config.dictConfig(LOGGING_CONFIG)
# Test logging
logger = logging.getLogger('my_app')
db_logger = logging.getLogger('my_app.database')
logger.info("This goes to console and file")
db_logger.info("This goes to file only (no propagation)")
Dictionary configuration provides flexible, reusable logging setup.
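The same kind of setup can also be loaded from an INI-style file with logging.config.fileConfig(). Below is a minimal sketch; the file name logging.ini and the handler and formatter names are illustrative choices, not part of the dictionary example above.
import logging.config
# Read the configuration from an INI-style file (illustrative file name)
logging.config.fileConfig('logging.ini', disable_existing_loggers=False)
logging.getLogger('my_app').info("Configured from an INI file")
where logging.ini contains something like:
[loggers]
keys=root

[handlers]
keys=console

[formatters]
keys=standard

[logger_root]
level=DEBUG
handlers=console

[handler_console]
class=StreamHandler
level=INFO
formatter=standard
args=(sys.stdout,)

[formatter_standard]
format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
For new code, dictConfig() is generally preferred; fileConfig() is older and covers a more limited set of options.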
Exception Logging
Log exceptions with traceback information.
Logging Exceptions
import logging
logger = logging.getLogger('exception_logger')
try:
    # Code that might raise an exception
    result = 10 / 0
except Exception:
    logger.error("An error occurred", exc_info=True)
    # Or, equivalently, logger.exception() logs at ERROR level with the traceback:
    logger.exception("An error occurred with traceback")
exc_info=True or logger.exception() includes the full traceback.
Custom Exception Formatter
import logging
import traceback

class ExceptionFormatter(logging.Formatter):
    def formatException(self, exc_info):
        # Customize how the traceback is rendered; the base class's format()
        # appends this text automatically whenever exc_info is present
        return ''.join(traceback.format_exception(*exc_info))
# Set up logger with exception formatter
logger = logging.getLogger('exception_demo')
handler = logging.StreamHandler()
handler.setFormatter(ExceptionFormatter())
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
try:
    raise ValueError("Something went wrong")
except ValueError:
    logger.error("Caught an exception", exc_info=True)
A custom formatter can include exception details in log messages.
Rotating Log Files
Automatically manage log file size and rotation.
RotatingFileHandler
import logging
from logging.handlers import RotatingFileHandler
# Create rotating file handler
handler = RotatingFileHandler(
    'app.log',        # Log file name
    maxBytes=1024,    # Maximum file size (1 KB for demo)
    backupCount=3     # Number of backup files to keep
)
# Set formatter
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# Create logger and add handler
logger = logging.getLogger('rotating_demo')
logger.setLevel(logging.INFO)
logger.addHandler(handler)
# Generate log messages to trigger rotation
for i in range(20):
    logger.info(f"Log message {i}: " + "x" * 100)  # Large message to trigger rotation
print("Check app.log, app.log.1, app.log.2, app.log.3 files")
RotatingFileHandler rotates logs when they reach a size limit.
Time-based Rotation
import logging
from logging.handlers import TimedRotatingFileHandler
import time
# Create time-based rotating handler
handler = TimedRotatingFileHandler(
    'timed_app.log',
    when='midnight',   # Rotate at midnight
    interval=1,        # Every 1 interval unit
    backupCount=7      # Keep 7 days of logs
)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger('timed_demo')
logger.setLevel(logging.INFO)
logger.addHandler(handler)
# Log messages
for i in range(10):
    logger.info(f"Message {i}")
    time.sleep(1)
print("Logs rotate daily at midnight")
TimedRotatingFileHandler rotates logs based on time intervals.
Logging in Libraries
Configure logging appropriately for reusable code.
Library Logging Pattern
import logging
# In your library module
logger = logging.getLogger(__name__) # Use module name as logger name
class MyLibrary:
    def __init__(self):
        logger.info("MyLibrary initialized")

    def do_something(self, value):
        logger.debug(f"Processing value: {value}")
        try:
            result = value * 2
            logger.info(f"Successfully processed {value} -> {result}")
            return result
        except Exception as e:
            logger.error(f"Failed to process {value}: {e}")
            raise

# Usage example
if __name__ == "__main__":
    # The application, not the library, configures logging
    logging.basicConfig(level=logging.INFO)
    lib = MyLibrary()
    result = lib.do_something(42)
Use __name__ for logger names in libraries. Let users configure logging.
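If the library may be imported by applications that never configure logging, the standard library recommends attaching a NullHandler to the library's top-level logger so its records are silently discarded until the application installs real handlers. A minimal sketch, assuming the logger lives in the package's __init__.py (an illustrative location):
import logging
# In the library package's top-level module
logging.getLogger(__name__).addHandler(logging.NullHandler())
With this in place, library code logs normally, and nothing is emitted unless the application configures handlers (for example with logging.basicConfig()).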
Performance Considerations
Efficient Logging
import logging
logger = logging.getLogger('performance')
# Good: Check level before expensive operations
if logger.isEnabledFor(logging.DEBUG):
    expensive_data = compute_expensive_data()  # placeholder for a costly call
    logger.debug(f"Expensive data: {expensive_data}")
# Bad: always computes the expensive data, even when DEBUG is disabled
# logger.debug(f"Expensive data: {compute_expensive_data()}")
# Use lazy %-style formatting; the message is only built if the record is emitted
username = "alice"
logger.info("User %s logged in", username)
Check log levels before expensive operations and use lazy formatting.
Asynchronous Logging
import logging
import queue
from logging.handlers import QueueHandler, QueueListener
# Create queue for async logging
log_queue = queue.Queue()
# Create queue handler
queue_handler = QueueHandler(log_queue)
# Create actual handler (file handler)
file_handler = logging.FileHandler('async.log')
file_formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
file_handler.setFormatter(file_formatter)
# Create queue listener
listener = QueueListener(log_queue, file_handler)
listener.start()
# Configure logger with queue handler
logger = logging.getLogger('async_demo')
logger.setLevel(logging.INFO)
logger.addHandler(queue_handler)
# Log messages (handled asynchronously)
for i in range(10):
    logger.info(f"Async log message {i}")
# Stop the listener so queued records are flushed
listener.stop()
Queue-based logging prevents blocking on I/O operations.
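In a long-running program with no single obvious place to call listener.stop(), one option (an assumption about program structure, not a requirement of QueueListener) is to register the shutdown with atexit so queued records are flushed when the interpreter exits:
import atexit
# Flush remaining records and stop the listener thread at interpreter exit
atexit.register(listener.stop)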
Common Patterns
Structured Logging
import logging
import json
class StructuredFormatter(logging.Formatter):
    def format(self, record):
        # Convert the log record to a structured dict
        log_entry = {
            'timestamp': self.formatTime(record),
            'level': record.levelname,
            'logger': record.name,
            'message': record.getMessage(),
            'module': record.module,
            'function': record.funcName,
            'line': record.lineno
        }
        # Add extra fields if present
        if hasattr(record, 'extra_data'):
            log_entry.update(record.extra_data)
        return json.dumps(log_entry)
# Set up structured logging
logger = logging.getLogger('structured')
handler = logging.StreamHandler()
handler.setFormatter(StructuredFormatter())
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# Log with extra data
logger.info("User login", extra={'extra_data': {'user_id': 123, 'ip': '192.168.1.1'}})
logger.error("Database error", extra={'extra_data': {'error_code': 'DB001', 'table': 'users'}})Structured logging makes logs easier to parse and analyze.
Context Managers for Logging
import logging
import time
from contextlib import contextmanager
@contextmanager
def log_execution_time(logger, operation_name):
    start_time = time.time()
    logger.info(f"Starting {operation_name}")
    try:
        yield
        logger.info(f"Completed {operation_name}")
    except Exception as e:
        logger.error(f"Failed {operation_name}: {e}")
        raise
    finally:
        end_time = time.time()
        logger.info(f"{operation_name} took {end_time - start_time:.2f} seconds")
# Usage
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('context_demo')
with log_execution_time(logger, "database backup"):
    time.sleep(2)  # Simulate the backup operation
Context managers are useful for timing and error logging.
Best Practices
- Use appropriate log levels
- Configure logging early in application startup
- Use descriptive logger names
- Include relevant context in log messages
- Handle exceptions properly in logging code
- Use rotating handlers for production
- Consider structured logging for better analysis
- Don't log sensitive information (see the redaction filter sketch below)
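One way to act on the last point is a logging.Filter that scrubs known sensitive patterns before records reach a handler. A hedged sketch; the regular expression and field names are illustrative only:
import logging
import re

class RedactSecretsFilter(logging.Filter):
    # Illustrative pattern: masks values written as "password=..." or "token=..."
    PATTERN = re.compile(r'(password|token)=\S+', re.IGNORECASE)

    def filter(self, record):
        record.msg = self.PATTERN.sub(r'\1=***', str(record.msg))
        return True  # keep the record; only its text is masked

logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
# Attach the filter to the handler so every record it emits is scrubbed
for handler in logging.getLogger().handlers:
    handler.addFilter(RedactSecretsFilter())
logging.info("Login attempt password=hunter2")  # the value is logged as "password=***"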