I am currently configuring a micro-service and want to leave good logs.
Using the common Logger
class in services that use fastapi and python,
1. Leave logs in a file
2. Leave stdout
3. Log with fluentd
in FastAPI and python, they use this Logger
class:
import logging
import os
import sys
import json
import requests
from logging.handlers import RotatingFileHandler
LOGGING_ROOT_DIR = "/var/log"
LOGGING_FORMAT = "%(asctime)s | %(levelname)s | %(filename)s:%(funcName)s:%(lineno)d - %(message)s"
LOGGING_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
class Logger:
def __init__(
self, service_name: str, log_level=logging.INFO, max_bytes=104857600, backup_count=10
):
"""
:param service_name: Service name (ex: "fastapi")
:param log_level: Log level (default: logging.INFO)
"""
self.service_name = service_name
self.log_level = log_level
self.formatter = logging.Formatter(LOGGING_FORMAT, datefmt=LOGGING_DATE_FORMAT)
# Create logger
self.logger = logging.getLogger(self.service_name)
self.logger.setLevel(self.log_level)
self.logger.propagate = False # Prevent propagation to parent loggers
# Console Logging
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(self.log_level)
console_handler.setFormatter(self.formatter)
self.logger.addHandler(console_handler)
# Local File Logging
self.log_dir = os.path.join(LOGGING_ROOT_DIR, service_name)
os.makedirs(self.log_dir, exist_ok=True)
file_handler = RotatingFileHandler(
os.path.join(self.log_dir, f"{service_name}.log"),
maxBytes=max_bytes,
backupCount=backup_count,
)
file_handler.setLevel(self.log_level)
file_handler.setFormatter(self.formatter)
self.logger.addHandler(file_handler)
try:
# Fluentd Logging (HTTP method)
self.fluentd_handler = FluentdHandler(self.service_name)
self.logger.addHandler(self.fluentd_handler)
except Exception as e:
self.logger.info(f"Fluentd logging handler is skipped: {e}")
pass
def get_logger(self):
return self.logger
def add_logger(self, logger, package_name: str):
_logger = logging.getLogger(package_name)
setattr(self, f"{package_name}_logger", _logger)
getattr(self, f"{package_name}_logger").setLevel(self.log_level)
getattr(self, f"{package_name}_logger").propagate = False
logger.addHandler(getattr(self, f"{package_name}_logger"))
def remove_logger(self, package_name: str):
logger = getattr(self, f"{package_name}_logger")
logger.removeHandler(logger.handlers[0])
delattr(self, f"{package_name}_logger")
def get_log_savedir(self):
return self.log_dir
class FluentdHandler(logging.Handler):
def __init__(self, service_name: str):
super().__init__()
self.fluentd_host = os.getenv("FLUENTD_HOST", "fluentd")
self.fluentd_port = os.getenv("FLUENTD_HTTP_PORT", 9880)
self.fluentd_url = f"http://{self.fluentd_host}:{self.fluentd_port}/{service_name}.log"
self.failure_count = 0 # Number of consecutive failures
self.max_failures = 5 # Maximum number of failures allowed
self.logger = logging.getLogger("fluentd") # Logging when Fluentd transfer fails
def emit(self, record):
if self.failure_count >= self.max_failures:
return # If Fluentd keeps going down, stop the request
# Convert log data to JSON format
log_entry = {
"datetime": record.asctime,
"level": record.levelname,
"message": record.getMessage(),
"filename": record.filename,
"funcName": record.funcName,
"lineno": record.lineno,
"service": record.name,
}
try:
response = requests.post(
self.fluentd_url,
data=json.dumps(log_entry),
headers={"Content-Type": "application/json"},
)
if response.status_code != 200:
raise requests.exceptions.RequestException(
f"Fluentd returned {response.status_code}"
)
self.failure_count = 0 # If successful, reset failure count
except requests.exceptions.RequestException as e:
self.failure_count += 1
self.logger.warning(f"Failed to send log to Fluentd (count={self.failure_count}): {e}")
I don't know how to write fluent.conf
corresponding to the above Logger class.
If there are few services, you can write them one by one, but since there are many services, you will probably have to write .conf
using wild-card.
The .conf
file I wrote is as follows:
<source>
@type http
port 9880
bind 0.0.0.0
body_size_limit 32m
keepalive_timeout 10s
format json
</source>
<source>
@type forward
port 24224
</source>
<match minio.log>
@type copy
<store>
@type file
path /fluentd/log
<buffer time>
@type file
path /fluentd/buffer/minio.log
timekey 86400
timekey_wait 10m
flush_interval 1s
</buffer>
append true
time_slice_format %Y%m%d
time_format %Y-%m-%dT%H:%M:%S.%N%Z
</store>
<store>
@type stdout
</store>
</match>
<match **.log>
@type copy
<store>
@type file
path /fluentd/log/${tag}
<buffer tag,time>
@type file
path /fluentd/buffer/${tag}
timekey 86400
timekey_wait 10m
flush_interval 1s
</buffer>
append true
time_slice_format %Y%m%d
time_format %Y-%m-%dT%H:%M:%S.%N%Z
</store>
<store>
@type stdout
</store>
</match>
<match **>
@type stdout
</match>
Here are my questions:
- Is the FluentdHandler in my
Logger
class written correctly? - I also pass the log to fluentd with minio-audit, but the date remains as 19700101. Why is this?
Finally, each python/fastapi service should individually create a log file under /var/log/
and accumulate logs, and fluentd should accumulate the same content under /var/log/fluentd
.
I'm new to fluentd and I'm not sure what to do. Can you help me?