Improve logging (#18867)

* Ignore numpy get limits warning

* Add function wrapper to redirect stdout and stderr to logpipe

* Save stderr too

* Add more to catch

* run logpipe

* Use other logging redirect class

* Use other logging redirect class

* add decorator for redirecting c/c++ level output to logger

* fix typing

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
This commit is contained in:
Nicolas Mowen
2025-06-25 07:24:45 -06:00
committed by Blake Blackshear
parent da0248db15
commit ec6c04e49a
7 changed files with 206 additions and 18 deletions

View File

@@ -1,15 +1,18 @@
# In log.py
import atexit
import io
import logging
import os
import sys
import threading
from collections import deque
from contextlib import contextmanager
from enum import Enum
from functools import wraps
from logging.handlers import QueueHandler, QueueListener
from multiprocessing.managers import SyncManager
from queue import Queue
from typing import Deque, Optional
from queue import Empty, Queue
from typing import Any, Callable, Deque, Generator, Optional
from frigate.util.builtin import clean_camera_user_pass
@@ -102,11 +105,11 @@ os.register_at_fork(after_in_child=reopen_std_streams)
# based on https://codereview.stackexchange.com/a/17959
class LogPipe(threading.Thread):
def __init__(self, log_name: str):
def __init__(self, log_name: str, level: int = logging.ERROR):
"""Setup the object with a logger and start the thread"""
super().__init__(daemon=False)
self.logger = logging.getLogger(log_name)
self.level = logging.ERROR
self.level = level
self.deque: Deque[str] = deque(maxlen=100)
self.fdRead, self.fdWrite = os.pipe()
self.pipeReader = os.fdopen(self.fdRead)
@@ -135,3 +138,182 @@ class LogPipe(threading.Thread):
def close(self) -> None:
"""Close the write end of the pipe."""
os.close(self.fdWrite)
class LogRedirect(io.StringIO):
"""
A custom file-like object to capture stdout and process it.
It extends io.StringIO to capture output and then processes it
line by line.
"""
def __init__(self, logger_instance: logging.Logger, level: int):
super().__init__()
self.logger = logger_instance
self.log_level = level
self._line_buffer: list[str] = []
def write(self, s: Any) -> int:
if not isinstance(s, str):
s = str(s)
self._line_buffer.append(s)
# Process output line by line if a newline is present
if "\n" in s:
full_output = "".join(self._line_buffer)
lines = full_output.splitlines(keepends=True)
self._line_buffer = []
for line in lines:
if line.endswith("\n"):
self._process_line(line.rstrip("\n"))
else:
self._line_buffer.append(line)
return len(s)
def _process_line(self, line: str) -> None:
self.logger.log(self.log_level, line)
def flush(self) -> None:
if self._line_buffer:
full_output = "".join(self._line_buffer)
self._line_buffer = []
if full_output: # Only process if there's content
self._process_line(full_output)
def __enter__(self) -> "LogRedirect":
"""Context manager entry point."""
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
"""Context manager exit point. Ensures buffered content is flushed."""
self.flush()
@contextmanager
def redirect_fd_to_queue(queue: Queue[str]) -> Generator[None, None, None]:
    """Redirect file descriptor 1 (stdout) to a pipe and capture output in a queue.

    A background thread reads the pipe line by line and puts each stripped
    line on *queue*. On exit the original stdout FD is restored and the
    reader thread is joined (with a 1s timeout). Unix-specific (os.dup2).
    """
    # Keep a duplicate of the real stdout so it can be restored on exit.
    stdout_fd = os.dup(1)
    read_fd, write_fd = os.pipe()
    # Point FD 1 at the pipe's write end; C/C++-level writes to stdout now
    # feed the pipe. The spare write_fd is closed so FD 1 holds the only
    # write end — closing it later produces EOF for the reader.
    os.dup2(write_fd, 1)
    os.close(write_fd)

    stop_event = threading.Event()

    def reader() -> None:
        """Read from pipe and put lines in queue until stop_event is set."""
        try:
            # fdopen takes ownership of read_fd; the `with` closes it.
            with os.fdopen(read_fd, "r") as pipe:
                while not stop_event.is_set():
                    line = pipe.readline()
                    if not line:  # EOF
                        break
                    queue.put(line.strip())
        except OSError as e:
            queue.put(f"Reader error: {e}")
        finally:
            if not stop_event.is_set():
                stop_event.set()

    # Non-daemon so captured output is not dropped at interpreter shutdown.
    reader_thread = threading.Thread(target=reader, daemon=False)
    reader_thread.start()

    try:
        yield
    finally:
        # Restore the original stdout. This closes the last write end of
        # the pipe, so the reader sees EOF and exits its loop.
        os.dup2(stdout_fd, 1)
        os.close(stdout_fd)
        stop_event.set()
        reader_thread.join(timeout=1.0)
        try:
            # Normally fdopen() already closed read_fd; this guards the
            # case where the reader failed before taking ownership of it.
            os.close(read_fd)
        except OSError:
            pass
def redirect_output_to_logger(logger: logging.Logger, level: int) -> Any:
    """Decorator to redirect both Python sys.stdout/stderr and C-level stdout to logger.

    Python-level writes go through a LogRedirect instance; C/C++-level
    writes to FD 1 are captured via redirect_fd_to_queue and logged after
    the wrapped call completes.

    Args:
        logger: Destination logger for all captured output.
        level: Log level at which captured lines are emitted.
    """

    def decorator(func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            queue: Queue[str] = Queue()
            log_redirect = LogRedirect(logger, level)

            old_stdout = sys.stdout
            old_stderr = sys.stderr
            sys.stdout = log_redirect
            sys.stderr = log_redirect

            try:
                # Redirect C-level stdout (FD 1) for the duration of the call.
                with redirect_fd_to_queue(queue):
                    result = func(*args, **kwargs)
            finally:
                # Restore Python stdout/stderr
                sys.stdout = old_stdout
                sys.stderr = old_stderr
                log_redirect.flush()

                # Drain the C-level output inside `finally` so that captured
                # output is still logged even when func raises (previously it
                # was silently dropped on error).
                while True:
                    try:
                        logger.log(level, queue.get_nowait())
                    except Empty:
                        break

            return result

        return wrapper

    return decorator
def suppress_os_output(func: Callable) -> Callable:
    """
    A decorator that suppresses all output (stdout and stderr)
    at the operating system file descriptor level for the decorated function.

    This is useful for silencing noisy C/C++ libraries.

    Note: This is a Unix-style solution using os.dup2. FDs 1 (stdout) and
    2 (stderr) are temporarily pointed at os.devnull, so any volume of
    output is discarded without blocking. (Redirecting into a pipe that
    nothing reads would deadlock once the kernel pipe buffer — typically
    ~64 KB — fills up.)
    """

    @wraps(func)
    def wrapper(*args: tuple, **kwargs: dict[str, Any]) -> Any:
        # Save the original file descriptors for stdout (1) and stderr (2)
        original_stdout_fd = os.dup(1)
        original_stderr_fd = os.dup(2)

        # os.devnull discards writes at the OS level with no buffer limit.
        devnull_fd = os.open(os.devnull, os.O_WRONLY)
        try:
            # Redirect stdout (FD 1) and stderr (FD 2) to the null device.
            os.dup2(devnull_fd, 1)
            os.dup2(devnull_fd, 2)

            # Execute the original function
            result = func(*args, **kwargs)
        finally:
            # Restore original stdout and stderr file descriptors (1 and 2).
            # This is crucial to ensure normal printing resumes after the
            # decorated function.
            os.dup2(original_stdout_fd, 1)
            os.dup2(original_stderr_fd, 2)

            # Close every duplicated descriptor to prevent resource leaks.
            os.close(original_stdout_fd)
            os.close(original_stderr_fd)
            os.close(devnull_fd)
        return result

    return wrapper