blakeblackshear.frigate/frigate/app.py

import logging
import multiprocessing as mp
from multiprocessing.queues import Queue
from multiprocessing.synchronize import Event as MpEvent
import os
import shutil
import signal
import sys
import traceback
from types import FrameType
from typing import Optional

import psutil
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase

from frigate.comms.dispatcher import Communicator, Dispatcher
from frigate.comms.mqtt import MqttClient
from frigate.comms.ws import WebSocketClient
from frigate.config import FrigateConfig
from frigate.const import (
CACHE_DIR,
CLIPS_DIR,
CONFIG_DIR,
DEFAULT_DB_PATH,
MODEL_CACHE_DIR,
RECORD_DIR,
)
from frigate.object_detection import ObjectDetectProcess
from frigate.events.cleanup import EventCleanup
from frigate.events.external import ExternalEventProcessor
from frigate.events.maintainer import EventProcessor
from frigate.http import create_app
from frigate.log import log_process, root_configurer
from frigate.models import Event, Recordings, Timeline
from frigate.object_processing import TrackedObjectProcessor
from frigate.output import output_frames
from frigate.plus import PlusApi
from frigate.ptz import OnvifController
from frigate.record.record import manage_recordings
from frigate.stats import StatsEmitter, stats_init
from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor
from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog
from frigate.types import CameraMetricsTypes, RecordMetricsTypes

logger = logging.getLogger(__name__)

class FrigateApp:
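    """Top-level object that wires together and supervises Frigate's processes.

    Illustrative usage (the real entry point lives outside this module, e.g. in
    frigate/__main__.py):

        FrigateApp().start()
    """
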
def __init__(self) -> None:
self.stop_event: MpEvent = mp.Event()
self.detection_queue: Queue = mp.Queue()
self.detectors: dict[str, ObjectDetectProcess] = {}
self.detection_out_events: dict[str, MpEvent] = {}
self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue()
self.plus_api = PlusApi()
self.camera_metrics: dict[str, CameraMetricsTypes] = {}
self.record_metrics: dict[str, RecordMetricsTypes] = {}
self.processes: dict[str, int] = {}

    def set_environment_vars(self) -> None:
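        """Export the environment variables defined in the config."""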
for key, value in self.config.environment_vars.items():
os.environ[key] = value

    def ensure_dirs(self) -> None:
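        """Create the directories Frigate needs if they do not already exist."""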
for d in [CONFIG_DIR, RECORD_DIR, CLIPS_DIR, CACHE_DIR, MODEL_CACHE_DIR]:
            if not os.path.exists(d) and not os.path.islink(d):
                logger.info(f"Creating directory: {d}")
                os.makedirs(d)
            else:
logger.debug(f"Skipping directory: {d}")

    def init_logger(self) -> None:
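        """Start the dedicated logging process and route the root logger through its queue."""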
        self.log_process = mp.Process(
            target=log_process, args=(self.log_queue,), name="log_process"
        )
        self.log_process.daemon = True
        self.log_process.start()
        self.processes["logger"] = self.log_process.pid or 0
root_configurer(self.log_queue)

    def init_config(self) -> None:
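        """Load the config file, resolve the runtime config, and build per-camera metrics."""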
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
user_config = FrigateConfig.parse_file(config_file)
self.config = user_config.runtime_config(self.plus_api)
for camera_name in self.config.cameras.keys():
# create camera_metrics
self.camera_metrics[camera_name] = {
"camera_fps": mp.Value("d", 0.0),
"skipped_fps": mp.Value("d", 0.0),
"process_fps": mp.Value("d", 0.0),
"detection_enabled": mp.Value(
"i", self.config.cameras[camera_name].detect.enabled
),
"motion_enabled": mp.Value("i", True),
"improve_contrast_enabled": mp.Value(
"i", self.config.cameras[camera_name].motion.improve_contrast
),
"motion_threshold": mp.Value(
"i", self.config.cameras[camera_name].motion.threshold
),
"motion_contour_area": mp.Value(
"i", self.config.cameras[camera_name].motion.contour_area
),
"detection_fps": mp.Value("d", 0.0),
"detection_frame": mp.Value("d", 0.0),
"read_start": mp.Value("d", 0.0),
"ffmpeg_pid": mp.Value("i", 0),
"frame_queue": mp.Queue(maxsize=2),
"capture_process": None,
"process": None,
}
self.record_metrics[camera_name] = {
"record_enabled": mp.Value(
"i", self.config.cameras[camera_name].record.enabled
)
}

    def set_log_levels(self) -> None:
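        """Apply the configured log levels, quieting werkzeug and ws4py unless overridden."""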
logging.getLogger().setLevel(self.config.logger.default.value.upper())
for log, level in self.config.logger.logs.items():
logging.getLogger(log).setLevel(level.value.upper())

        if "werkzeug" not in self.config.logger.logs:
            logging.getLogger("werkzeug").setLevel("ERROR")

        if "ws4py" not in self.config.logger.logs:
            logging.getLogger("ws4py").setLevel("ERROR")

def init_queues(self) -> None:
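        """Create the multiprocessing queues shared between Frigate's processes."""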
# Queues for clip processing
self.event_queue: Queue = mp.Queue()
self.event_processed_queue: Queue = mp.Queue()
self.video_output_queue: Queue = mp.Queue(
maxsize=len(self.config.cameras.keys()) * 2
)

        # Queue for cameras to push tracked objects to
        self.detected_frames_queue: Queue = mp.Queue(
            maxsize=len(self.config.cameras.keys()) * 2
        )

# Queue for recordings info
self.recordings_info_queue: Queue = mp.Queue()
# Queue for timeline events
self.timeline_queue: Queue = mp.Queue()

    def init_database(self) -> None:
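        """Move the database to its configured path if needed and run schema migrations."""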
# Migrate DB location
old_db_path = DEFAULT_DB_PATH
if not os.path.isfile(self.config.database.path) and os.path.isfile(
old_db_path
):
os.rename(old_db_path, self.config.database.path)
# Migrate DB schema
        migrate_db = SqliteExtDatabase(self.config.database.path)

        # Run migrations
        del logging.getLogger("peewee_migrate").handlers[:]
        router = Router(migrate_db)
        router.run()

migrate_db.close()

    def init_go2rtc(self) -> None:
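        """Look up the running go2rtc process and record its pid."""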
for proc in psutil.process_iter(["pid", "name"]):
if proc.info["name"] == "go2rtc":
logger.info(f"go2rtc process pid: {proc.info['pid']}")
self.processes["go2rtc"] = proc.info["pid"]

    def init_recording_manager(self) -> None:
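        """Start the recording manager in its own process."""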
recording_process = mp.Process(
target=manage_recordings,
name="recording_manager",
args=(self.config, self.recordings_info_queue, self.record_metrics),
)
recording_process.daemon = True
self.recording_process = recording_process
recording_process.start()
self.processes["recording"] = recording_process.pid or 0
logger.info(f"Recording process started: {recording_process.pid}")

    def bind_database(self) -> None:
"""Bind db to the main process."""
# NOTE: all db accessing processes need to be created before the db can be bound to the main process
self.db = SqliteQueueDatabase(self.config.database.path)
models = [Event, Recordings, Timeline]
self.db.bind(models)

    def init_stats(self) -> None:
self.stats_tracking = stats_init(
self.config, self.camera_metrics, self.detectors, self.processes
)

    def init_external_event_processor(self) -> None:
self.external_event_processor = ExternalEventProcessor(
self.config, self.event_queue
)

    def init_web_server(self) -> None:
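        """Create the Flask app that serves the HTTP API."""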
self.flask_app = create_app(
self.config,
self.db,
self.stats_tracking,
self.detected_frames_processor,
self.storage_maintainer,
self.onvif_controller,
self.external_event_processor,
            self.plus_api,
)

    def init_onvif(self) -> None:
self.onvif_controller = OnvifController(self.config)

    def init_dispatcher(self) -> None:
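        """Create the enabled communicators and the dispatcher that routes messages to them."""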
comms: list[Communicator] = []
if self.config.mqtt.enabled:
comms.append(MqttClient(self.config))
comms.append(WebSocketClient(self.config))
self.dispatcher = Dispatcher(
self.config,
self.onvif_controller,
self.camera_metrics,
self.record_metrics,
comms,
)

    def start_detectors(self) -> None:
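        """Allocate shared memory for frame passing and start a process per configured detector."""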
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
try:
                largest_frame = max(
                    [
                        det.model.height * det.model.width * 3
                        for det in self.config.detectors.values()
                    ]
                )
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
size=largest_frame,
)
except FileExistsError:
shm_in = mp.shared_memory.SharedMemory(name=name)
try:
shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4
)
except FileExistsError:
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
for name, detector_config in self.config.detectors.items():
self.detectors[name] = ObjectDetectProcess(
name,
self.detection_queue,
self.detection_out_events,
detector_config,
)

    def start_detected_frames_processor(self) -> None:
        self.detected_frames_processor = TrackedObjectProcessor(
            self.config,
            self.dispatcher,
            self.detected_frames_queue,
            self.event_queue,
            self.event_processed_queue,
            self.video_output_queue,
            self.recordings_info_queue,
            self.stop_event,
        )
self.detected_frames_processor.start()

    def start_video_output_processor(self) -> None:
output_processor = mp.Process(
target=output_frames,
name=f"output_processor",
args=(
self.config,
self.video_output_queue,
),
)
output_processor.daemon = True
self.output_processor = output_processor
output_processor.start()
logger.info(f"Output process started: {output_processor.pid}")

    def start_camera_processors(self) -> None:
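        """Start an object tracking process for each enabled camera."""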
for name, config in self.config.cameras.items():
if not self.config.cameras[name].enabled:
logger.info(f"Camera processor not started for disabled camera {name}")
continue
camera_process = mp.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
name,
config,
self.config.model,
self.config.model.merged_labelmap,
self.detection_queue,
self.detection_out_events[name],
self.detected_frames_queue,
self.camera_metrics[name],
),
)
            camera_process.daemon = True
            self.camera_metrics[name]["process"] = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")

    def start_camera_capture_processes(self) -> None:
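        """Start a frame capture process for each enabled camera."""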
for name, config in self.config.cameras.items():
if not self.config.cameras[name].enabled:
logger.info(f"Capture process not started for disabled camera {name}")
continue
capture_process = mp.Process(
target=capture_camera,
name=f"camera_capture:{name}",
args=(name, config, self.camera_metrics[name]),
)
            capture_process.daemon = True
            self.camera_metrics[name]["capture_process"] = capture_process
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")

    def start_timeline_processor(self) -> None:
self.timeline_processor = TimelineProcessor(
self.config, self.timeline_queue, self.stop_event
)
self.timeline_processor.start()

    def start_event_processor(self) -> None:
self.event_processor = EventProcessor(
self.config,
self.camera_metrics,
self.event_queue,
self.event_processed_queue,
self.timeline_queue,
            self.stop_event,
        )
self.event_processor.start()

    def start_event_cleanup(self) -> None:
self.event_cleanup = EventCleanup(self.config, self.stop_event)
self.event_cleanup.start()

    def start_storage_maintainer(self) -> None:
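        """Start the maintainer that limits recording retention to the available storage."""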
self.storage_maintainer = StorageMaintainer(self.config, self.stop_event)
self.storage_maintainer.start()

    def start_stats_emitter(self) -> None:
self.stats_emitter = StatsEmitter(
self.config,
self.stats_tracking,
self.dispatcher,
self.stop_event,
)
self.stats_emitter.start()

    def start_watchdog(self) -> None:
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start()

    def check_shm(self) -> None:
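        """Warn if /dev/shm is smaller than the estimated requirement.

        The estimate allows roughly nine YUV420 frames (1.5 bytes per pixel) per
        camera plus a fixed per-camera overhead, on top of a 30MB baseline.
        """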
available_shm = round(shutil.disk_usage("/dev/shm").total / 1000000, 1)
min_req_shm = 30
for _, camera in self.config.cameras.items():
min_req_shm += round(
(camera.detect.width * camera.detect.height * 1.5 * 9 + 270480)
/ 1048576,
1,
)
if available_shm < min_req_shm:
logger.warning(
f"The current SHM size of {available_shm}MB is too small, recommend increasing it to at least {min_req_shm}MB."
)

    def start(self) -> None:
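        """Run all initialization, start the sub-processes, and serve the Flask app until shutdown."""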
        self.init_logger()
        logger.info(f"Starting Frigate ({VERSION})")

        try:
            self.ensure_dirs()

            try:
self.init_config()
except Exception as e:
print("*************************************************************")
print("*************************************************************")
print("*** Your config file is not valid! ***")
print("*** Please check the docs at ***")
print("*** https://docs.frigate.video/configuration/index ***")
print("*************************************************************")
print("*************************************************************")
print("*** Config Validation Errors ***")
print("*************************************************************")
print(e)
print(traceback.format_exc())
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")
                self.log_process.terminate()
                sys.exit(1)

            self.set_environment_vars()
self.set_log_levels()
self.init_queues()
self.init_database()
self.init_onvif()
self.init_recording_manager()
self.init_go2rtc()
self.bind_database()
self.init_dispatcher()
        except Exception as e:
            print(e)
            self.log_process.terminate()
            sys.exit(1)

self.start_detectors()
self.start_video_output_processor()
self.start_detected_frames_processor()
self.start_camera_processors()
self.start_camera_capture_processes()
self.start_storage_maintainer()
self.init_stats()
self.init_external_event_processor()
        self.init_web_server()
        self.start_timeline_processor()
        self.start_event_processor()
        self.start_event_cleanup()
        self.start_stats_emitter()
self.start_watchdog()
self.check_shm()

        def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
            self.stop()
            sys.exit()

        signal.signal(signal.SIGTERM, receiveSignal)

try:
            self.flask_app.run(host="127.0.0.1", port=5001, debug=False)
        except KeyboardInterrupt:
            pass

self.stop()

    def stop(self) -> None:
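        """Signal sub-processes to stop, join them, drain the queues, and release shared memory."""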
        logger.info("Stopping...")
self.stop_event.set()
for detector in self.detectors.values():
detector.stop()
# Empty the detection queue and set the events for all requests
while not self.detection_queue.empty():
connection_id = self.detection_queue.get(timeout=1)
self.detection_out_events[connection_id].set()
self.detection_queue.close()
self.detection_queue.join_thread()
self.dispatcher.stop()
        self.detected_frames_processor.join()
        self.event_processor.join()
        self.event_cleanup.join()
        self.stats_emitter.join()
        self.frigate_watchdog.join()

        self.db.stop()

while len(self.detection_shms) > 0:
shm = self.detection_shms.pop()
shm.close()
shm.unlink()
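        # Drain and close the remaining queues so their feeder threads can exit.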
for queue in [
self.event_queue,
self.event_processed_queue,
self.video_output_queue,
self.detected_frames_queue,
self.recordings_info_queue,
self.log_queue,
]:
while not queue.empty():
queue.get_nowait()
queue.close()
queue.join_thread()