Ensure logging config is propagated to forked processes (#18704)

* Move log level initialization to the log module

* Use logger config

* Formatting

* Fix config order

* Set process names

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
Josh Hawkins 2025-06-13 09:43:38 -05:00 committed by GitHub
parent 8a8fd4ca8e
commit d7a446e0f6
GPG Key ID: B5690EEEBB952194
10 changed files with 54 additions and 32 deletions
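In broad strokes, the change threads the parsed logger config into every forked worker and re-applies the log levels inside the child's run(). A minimal sketch of that pattern, with a hypothetical Worker class and apply_log_levels helper standing in for Frigate's actual process classes:

    import logging
    import multiprocessing as mp


    def apply_log_levels(default: str, overrides: dict[str, str]) -> None:
        # set the root level, then per-logger overrides from the user config
        logging.getLogger().setLevel(default.upper())
        for name, level in overrides.items():
            logging.getLogger(name).setLevel(level.upper())


    class Worker(mp.Process):
        def __init__(self, log_default: str, log_overrides: dict[str, str]) -> None:
            super().__init__(name="worker", daemon=True)
            # config travels on the process object, so it is available after the fork
            self.log_default = log_default
            self.log_overrides = log_overrides

        def run(self) -> None:
            # runs in the child: levels are applied where the logging actually happens
            apply_log_levels(self.log_default, self.log_overrides)
            logging.getLogger(self.name).info("worker started")


    if __name__ == "__main__":
        logging.basicConfig(level=logging.INFO)
        Worker("debug", {"werkzeug": "error"}).start()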

View File

@@ -385,6 +385,7 @@ class FrigateApp:
                 name,
                 self.detection_queue,
                 list(self.config.cameras.keys()),
+                self.config,
                 detector_config,
             )

View File

@@ -1,20 +1,11 @@
-import logging
-from enum import Enum
-
 from pydantic import Field, ValidationInfo, model_validator
 from typing_extensions import Self

+from frigate.log import LogLevel, apply_log_levels
+
 from .base import FrigateBaseModel

-__all__ = ["LoggerConfig", "LogLevel"]
-
-
-class LogLevel(str, Enum):
-    debug = "debug"
-    info = "info"
-    warning = "warning"
-    error = "error"
-    critical = "critical"
+__all__ = ["LoggerConfig"]


 class LoggerConfig(FrigateBaseModel):
@@ -26,18 +17,6 @@ class LoggerConfig(FrigateBaseModel):
     @model_validator(mode="after")
     def post_validation(self, info: ValidationInfo) -> Self:
         if isinstance(info.context, dict) and info.context.get("install", False):
-            logging.getLogger().setLevel(self.default.value.upper())
-
-            log_levels = {
-                "absl": LogLevel.error,
-                "httpx": LogLevel.error,
-                "tensorflow": LogLevel.error,
-                "werkzeug": LogLevel.error,
-                "ws4py": LogLevel.error,
-                **self.logs,
-            }
-
-            for log, level in log_levels.items():
-                logging.getLogger(log).setLevel(level.value.upper())
+            apply_log_levels(self.default.value.upper(), self.logs)

         return self
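For context, the post_validation hook above only fires when the config is validated with a pydantic context flag; a small self-contained sketch of that mechanism (ExampleLoggerConfig and the printed message are illustrative, not Frigate code):

    from pydantic import BaseModel, ValidationInfo, model_validator
    from typing_extensions import Self


    class ExampleLoggerConfig(BaseModel):
        default: str = "info"
        logs: dict[str, str] = {}

        @model_validator(mode="after")
        def post_validation(self, info: ValidationInfo) -> Self:
            # only apply side effects when the caller opted in via the context
            if isinstance(info.context, dict) and info.context.get("install", False):
                print(f"would set root to {self.default.upper()} with overrides {self.logs}")
            return self


    # plain validation leaves logging untouched ...
    ExampleLoggerConfig.model_validate({"default": "debug"})
    # ... while the install context applies the levels
    ExampleLoggerConfig.model_validate({"default": "debug"}, context={"install": True})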

View File

@@ -35,7 +35,7 @@ class EmbeddingProcess(FrigateProcess):
         self.metrics = metrics

     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
         maintainer = EmbeddingMaintainer(
             self.config,
             self.metrics,

View File

@@ -100,7 +100,7 @@ class AudioProcessor(util.Process):
         self.transcription_model_runner = None

     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)

         audio_threads: list[AudioEventMaintainer] = []
         threading.current_thread().name = "process:audio_manager"

View File

@@ -5,6 +5,7 @@ import os
 import sys
 import threading
 from collections import deque
+from enum import Enum
 from logging.handlers import QueueHandler, QueueListener
 from multiprocessing.managers import SyncManager
 from queue import Queue
@@ -33,6 +34,15 @@ LOG_HANDLER.addFilter(
     not in record.getMessage()
 )

+
+class LogLevel(str, Enum):
+    debug = "debug"
+    info = "info"
+    warning = "warning"
+    error = "error"
+    critical = "critical"
+
+
 log_listener: Optional[QueueListener] = None
 log_queue: Optional[Queue] = None
@@ -61,6 +71,22 @@ def _stop_logging() -> None:
     log_listener = None


+def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
+    logging.getLogger().setLevel(default)
+
+    log_levels = {
+        "absl": LogLevel.error,
+        "httpx": LogLevel.error,
+        "tensorflow": LogLevel.error,
+        "werkzeug": LogLevel.error,
+        "ws4py": LogLevel.error,
+        **log_levels,
+    }
+
+    for log, level in log_levels.items():
+        logging.getLogger(log).setLevel(level.value.upper())
+
+
 # When a multiprocessing.Process exits, python tries to flush stdout and stderr. However, if the
 # process is created after a thread (for example a logging thread) is created and the process fork
 # happens while an internal lock is held, the stdout/err flush can cause a deadlock.
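For reference, the relocated helper can be exercised directly; a short usage sketch (the peewee override is an arbitrary example, not something this commit configures):

    from frigate.log import LogLevel, apply_log_levels

    # root logger to INFO, one per-module override; absl/httpx/tensorflow/
    # werkzeug/ws4py stay at ERROR unless explicitly overridden in the dict
    apply_log_levels("INFO", {"peewee": LogLevel.debug})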

View File

@@ -12,6 +12,7 @@ from frigate.comms.object_detector_signaler import (
     ObjectDetectorPublisher,
     ObjectDetectorSubscriber,
 )
+from frigate.config import FrigateConfig
 from frigate.detectors import create_detector
 from frigate.detectors.detector_config import (
     BaseDetectorConfig,
@@ -92,6 +93,7 @@ class DetectorRunner(util.Process):
         cameras: list[str],
         avg_speed: Value,
         start_time: Value,
+        config: FrigateConfig,
         detector_config: BaseDetectorConfig,
     ) -> None:
         super().__init__(name=name, daemon=True)
@@ -99,6 +101,7 @@ class DetectorRunner(util.Process):
         self.cameras = cameras
         self.avg_speed = avg_speed
         self.start_time = start_time
+        self.config = config
         self.detector_config = detector_config
         self.outputs: dict = {}
@@ -108,7 +111,7 @@ class DetectorRunner(util.Process):
             self.outputs[name] = {"shm": out_shm, "np": out_np}

     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)

         frame_manager = SharedMemoryFrameManager()
         object_detector = LocalObjectDetector(detector_config=self.detector_config)
@@ -161,6 +164,7 @@ class ObjectDetectProcess:
         name: str,
         detection_queue: Queue,
         cameras: list[str],
+        config: FrigateConfig,
         detector_config: BaseDetectorConfig,
     ):
         self.name = name
@@ -169,6 +173,7 @@ class ObjectDetectProcess:
         self.avg_inference_speed = Value("d", 0.01)
         self.detection_start = Value("d", 0.0)
         self.detect_process: util.Process | None = None
+        self.config = config
         self.detector_config = detector_config
         self.start_or_restart()
@@ -195,6 +200,7 @@
             self.cameras,
             self.avg_inference_speed,
             self.detection_start,
+            self.config,
             self.detector_config,
         )
         self.detect_process.start()

View File

@@ -77,7 +77,7 @@ class OutputProcess(util.Process):
         self.config = config

     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)

         frame_manager = SharedMemoryFrameManager()

View File

@@ -18,7 +18,7 @@ class RecordProcess(FrigateProcess):
         self.config = config

     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
         db = SqliteQueueDatabase(
             self.config.database.path,
             pragmas={

View File

@@ -15,7 +15,7 @@ class ReviewProcess(util.Process):
         self.config = config

     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
         maintainer = ReviewSegmentMaintainer(
             self.config,
             self.stop_event,

View File

@@ -7,7 +7,10 @@ import threading
 from logging.handlers import QueueHandler
 from typing import Callable, Optional

+from setproctitle import setproctitle
+
 import frigate.log
+from frigate.config.logger import LoggerConfig


 class BaseProcess(mp.Process):
@@ -50,7 +53,9 @@ class Process(BaseProcess):
     def before_start(self) -> None:
         self.__log_queue = frigate.log.log_listener.queue

-    def pre_run_setup(self) -> None:
+    def pre_run_setup(self, logConfig: LoggerConfig | None = None) -> None:
+        setproctitle(self.name)
+        threading.current_thread().name = f"process:{self.name}"
         faulthandler.enable()

         def receiveSignal(signalNumber, frame):
@@ -68,3 +73,8 @@
         self.logger = logging.getLogger(self.name)
         logging.basicConfig(handlers=[], force=True)
         logging.getLogger().addHandler(QueueHandler(self.__log_queue))
+
+        if logConfig:
+            frigate.log.apply_log_levels(
+                logConfig.default.value.upper(), logConfig.logs
+            )
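The rest of pre_run_setup() (unchanged here) forwards the child's log records to the parent through a queue; a standalone sketch of that stdlib pattern, with illustrative names rather than Frigate's, for readers unfamiliar with it:

    import logging
    import multiprocessing as mp
    from logging.handlers import QueueHandler, QueueListener


    def child(log_queue) -> None:
        # mirrors pre_run_setup(): drop inherited handlers, forward records via the queue
        logging.basicConfig(handlers=[], force=True)
        logging.getLogger().addHandler(QueueHandler(log_queue))
        logging.getLogger().setLevel(logging.INFO)
        logging.getLogger("process:example").info("hello from the child process")


    if __name__ == "__main__":
        queue = mp.Queue()
        # the parent owns the real handler and drains the queue on a background thread
        listener = QueueListener(queue, logging.StreamHandler())
        listener.start()

        proc = mp.Process(target=child, args=(queue,), name="process:example")
        proc.start()
        proc.join()
        listener.stop()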