Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-07-26 13:47:03 +02:00)
Ensure logging config is propagated to forked processes (#18704)
* Move log level initialization to log
* Use logger config
* Formatting
* Fix config order
* Set process names

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
Parent: 8a8fd4ca8e
Commit: d7a446e0f6
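For context, a minimal standard-library sketch (not Frigate code; names are illustrative) of the underlying problem: a child process started with the "spawn" start method does not inherit the parent's logger levels, and even with fork any configuration applied after the fork never reaches the child, so the levels have to be re-applied inside the child process.

import logging
import multiprocessing as mp


def child() -> None:
    # Prints the stdlib default "WARNING", not the parent's DEBUG, because a
    # spawned child starts with a fresh logging configuration.
    print("child root level:", logging.getLevelName(logging.getLogger().level))


if __name__ == "__main__":
    logging.getLogger().setLevel(logging.DEBUG)
    ctx = mp.get_context("spawn")
    p = ctx.Process(target=child)
    p.start()
    p.join()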
@@ -385,6 +385,7 @@ class FrigateApp:
                 name,
                 self.detection_queue,
                 list(self.config.cameras.keys()),
+                self.config,
                 detector_config,
             )
@@ -1,20 +1,11 @@
-import logging
-from enum import Enum
-
 from pydantic import Field, ValidationInfo, model_validator
 from typing_extensions import Self
 
+from frigate.log import LogLevel, apply_log_levels
+
 from .base import FrigateBaseModel
 
-__all__ = ["LoggerConfig", "LogLevel"]
-
-
-class LogLevel(str, Enum):
-    debug = "debug"
-    info = "info"
-    warning = "warning"
-    error = "error"
-    critical = "critical"
+__all__ = ["LoggerConfig"]
 
 
 class LoggerConfig(FrigateBaseModel):
@@ -26,18 +17,6 @@ class LoggerConfig(FrigateBaseModel):
     @model_validator(mode="after")
     def post_validation(self, info: ValidationInfo) -> Self:
         if isinstance(info.context, dict) and info.context.get("install", False):
-            logging.getLogger().setLevel(self.default.value.upper())
-
-            log_levels = {
-                "absl": LogLevel.error,
-                "httpx": LogLevel.error,
-                "tensorflow": LogLevel.error,
-                "werkzeug": LogLevel.error,
-                "ws4py": LogLevel.error,
-                **self.logs,
-            }
-
-            for log, level in log_levels.items():
-                logging.getLogger(log).setLevel(level.value.upper())
+            apply_log_levels(self.default.value.upper(), self.logs)
 
         return self
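The validator above only reconfigures logging when the pydantic validation context carries install=True, so merely constructing a config object elsewhere no longer touches the root logger. A self-contained sketch of that context mechanism (ExampleConfig is illustrative, not Frigate's model):

import logging

from pydantic import BaseModel, ValidationInfo, model_validator
from typing_extensions import Self


class ExampleConfig(BaseModel):
    default: str = "info"

    @model_validator(mode="after")
    def post_validation(self, info: ValidationInfo) -> Self:
        if isinstance(info.context, dict) and info.context.get("install", False):
            logging.getLogger().setLevel(self.default.upper())
        return self


# Plain construction leaves logging untouched; only validation with the
# install context applies the level.
ExampleConfig(default="debug")
ExampleConfig.model_validate({"default": "debug"}, context={"install": True})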
@@ -35,7 +35,7 @@ class EmbeddingProcess(FrigateProcess):
         self.metrics = metrics
 
     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
         maintainer = EmbeddingMaintainer(
             self.config,
             self.metrics,
@@ -100,7 +100,7 @@ class AudioProcessor(util.Process):
         self.transcription_model_runner = None
 
     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
         audio_threads: list[AudioEventMaintainer] = []
 
         threading.current_thread().name = "process:audio_manager"
@@ -5,6 +5,7 @@ import os
 import sys
 import threading
 from collections import deque
+from enum import Enum
 from logging.handlers import QueueHandler, QueueListener
 from multiprocessing.managers import SyncManager
 from queue import Queue
@@ -33,6 +34,15 @@ LOG_HANDLER.addFilter(
     not in record.getMessage()
 )
 
 
+class LogLevel(str, Enum):
+    debug = "debug"
+    info = "info"
+    warning = "warning"
+    error = "error"
+    critical = "critical"
+
+
 log_listener: Optional[QueueListener] = None
 log_queue: Optional[Queue] = None
 
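The enum stores lowercase names and the callers convert them with .value.upper() before handing them to the standard library. A quick sanity check of that mapping (assumes the frigate package is importable):

import logging

from frigate.log import LogLevel

# "warning" -> "WARNING" -> 30; the stdlib accepts the upper-cased names directly.
assert logging.getLevelName(LogLevel.warning.value.upper()) == logging.WARNING
logging.getLogger("example").setLevel(LogLevel.debug.value.upper())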
@@ -61,6 +71,22 @@ def _stop_logging() -> None:
     log_listener = None
 
 
+def apply_log_levels(default: str, log_levels: dict[str, LogLevel]) -> None:
+    logging.getLogger().setLevel(default)
+
+    log_levels = {
+        "absl": LogLevel.error,
+        "httpx": LogLevel.error,
+        "tensorflow": LogLevel.error,
+        "werkzeug": LogLevel.error,
+        "ws4py": LogLevel.error,
+        **log_levels,
+    }
+
+    for log, level in log_levels.items():
+        logging.getLogger(log).setLevel(level.value.upper())
+
+
 # When a multiprocessing.Process exits, python tries to flush stdout and stderr. However, if the
 # process is created after a thread (for example a logging thread) is created and the process fork
 # happens while an internal lock is held, the stdout/err flush can cause a deadlock.
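A short usage sketch of the new helper (assumes frigate is importable; the "frigate.record" override key is only an example): it sets the root level, quiets the listed noisy third-party loggers at error, then applies any per-logger overrides.

from frigate.log import LogLevel, apply_log_levels

# Root logger at INFO, one extra override; absl/httpx/tensorflow/werkzeug/ws4py
# are forced to error unless they appear in the overrides dict.
apply_log_levels("INFO", {"frigate.record": LogLevel.debug})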
@@ -12,6 +12,7 @@ from frigate.comms.object_detector_signaler import (
     ObjectDetectorPublisher,
     ObjectDetectorSubscriber,
 )
+from frigate.config import FrigateConfig
 from frigate.detectors import create_detector
 from frigate.detectors.detector_config import (
     BaseDetectorConfig,
@@ -92,6 +93,7 @@ class DetectorRunner(util.Process):
         cameras: list[str],
         avg_speed: Value,
         start_time: Value,
+        config: FrigateConfig,
         detector_config: BaseDetectorConfig,
     ) -> None:
         super().__init__(name=name, daemon=True)
@@ -99,6 +101,7 @@ class DetectorRunner(util.Process):
         self.cameras = cameras
         self.avg_speed = avg_speed
         self.start_time = start_time
+        self.config = config
         self.detector_config = detector_config
         self.outputs: dict = {}
 
@@ -108,7 +111,7 @@ class DetectorRunner(util.Process):
             self.outputs[name] = {"shm": out_shm, "np": out_np}
 
     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
 
         frame_manager = SharedMemoryFrameManager()
         object_detector = LocalObjectDetector(detector_config=self.detector_config)
@@ -161,6 +164,7 @@ class ObjectDetectProcess:
         name: str,
         detection_queue: Queue,
         cameras: list[str],
+        config: FrigateConfig,
         detector_config: BaseDetectorConfig,
     ):
         self.name = name
@@ -169,6 +173,7 @@ class ObjectDetectProcess:
         self.avg_inference_speed = Value("d", 0.01)
         self.detection_start = Value("d", 0.0)
         self.detect_process: util.Process | None = None
+        self.config = config
         self.detector_config = detector_config
         self.start_or_restart()
 
@@ -195,6 +200,7 @@ class ObjectDetectProcess:
             self.cameras,
             self.avg_inference_speed,
             self.detection_start,
+            self.config,
             self.detector_config,
         )
         self.detect_process.start()
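The config is threaded FrigateApp -> ObjectDetectProcess -> DetectorRunner solely so the detector process can hand config.logger to pre_run_setup once it is running. A generic standard-library sketch of the pattern (hypothetical names), where everything the child needs rides along as attributes set before start():

import multiprocessing as mp


class ChildWithConfig(mp.Process):
    def __init__(self, config: dict) -> None:
        super().__init__(name="child", daemon=True)
        # Attached before start(), so it is available in the child after fork/spawn.
        self.config = config

    def run(self) -> None:
        # First thing in the child: pull the logging section out of the carried
        # config and re-apply it (stand-in for pre_run_setup(self.config.logger)).
        print("child log default:", self.config["logger"]["default"])


if __name__ == "__main__":
    worker = ChildWithConfig({"logger": {"default": "info"}})
    worker.start()
    worker.join()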
@@ -77,7 +77,7 @@ class OutputProcess(util.Process):
         self.config = config
 
     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
 
         frame_manager = SharedMemoryFrameManager()
 
@@ -18,7 +18,7 @@ class RecordProcess(FrigateProcess):
         self.config = config
 
     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
         db = SqliteQueueDatabase(
             self.config.database.path,
             pragmas={
@@ -15,7 +15,7 @@ class ReviewProcess(util.Process):
         self.config = config
 
     def run(self) -> None:
-        self.pre_run_setup()
+        self.pre_run_setup(self.config.logger)
         maintainer = ReviewSegmentMaintainer(
             self.config,
             self.stop_event,
@@ -7,7 +7,10 @@ import threading
 from logging.handlers import QueueHandler
 from typing import Callable, Optional
 
+from setproctitle import setproctitle
+
 import frigate.log
+from frigate.config.logger import LoggerConfig
 
 
 class BaseProcess(mp.Process):
@@ -50,7 +53,9 @@ class Process(BaseProcess):
     def before_start(self) -> None:
         self.__log_queue = frigate.log.log_listener.queue
 
-    def pre_run_setup(self) -> None:
+    def pre_run_setup(self, logConfig: LoggerConfig | None = None) -> None:
+        setproctitle(self.name)
+        threading.current_thread().name = f"process:{self.name}"
         faulthandler.enable()
 
         def receiveSignal(signalNumber, frame):
@@ -68,3 +73,8 @@ class Process(BaseProcess):
         self.logger = logging.getLogger(self.name)
         logging.basicConfig(handlers=[], force=True)
         logging.getLogger().addHandler(QueueHandler(self.__log_queue))
+
+        if logConfig:
+            frigate.log.apply_log_levels(
+                logConfig.default.value.upper(), logConfig.logs
+            )
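Putting the process changes together, a condensed standard-library-only sketch of what pre_run_setup now does in a forked worker (setproctitle is left out because it is a third-party package; the function name here is illustrative):

import faulthandler
import logging
import threading
from logging.handlers import QueueHandler
from queue import Queue


def pre_run_setup_sketch(name: str, log_queue: Queue, default: str, overrides: dict[str, str]) -> None:
    # Name the main thread after the process and enable fault dumps.
    threading.current_thread().name = f"process:{name}"
    faulthandler.enable()
    # Drop any inherited handlers and forward all records to the parent's queue.
    logging.basicConfig(handlers=[], force=True)
    logging.getLogger().addHandler(QueueHandler(log_queue))
    # Equivalent of frigate.log.apply_log_levels(default, logs): root level plus
    # per-logger overrides, re-applied inside the child process.
    logging.getLogger().setLevel(default.upper())
    for logger_name, level in overrides.items():
        logging.getLogger(logger_name).setLevel(level.upper())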