# blakeblackshear.frigate/frigate/app.py

import argparse
import datetime
import logging
import multiprocessing as mp
import os
import secrets
import shutil
import signal
import sys
import traceback
from multiprocessing import Queue
from multiprocessing.synchronize import Event as MpEvent
from types import FrameType
from typing import Optional

import psutil
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from pydantic import ValidationError

from frigate.api.app import create_app
from frigate.api.auth import hash_password
from frigate.comms.config_updater import ConfigPublisher
from frigate.comms.dispatcher import Communicator, Dispatcher
from frigate.comms.inter_process import InterProcessCommunicator
from frigate.comms.mqtt import MqttClient
from frigate.comms.webpush import WebPushClient
from frigate.comms.ws import WebSocketClient
from frigate.comms.zmq_proxy import ZmqProxy
from frigate.config import FrigateConfig
from frigate.const import (
    CACHE_DIR,
    CLIPS_DIR,
    CONFIG_DIR,
    DEFAULT_DB_PATH,
    EXPORT_DIR,
    MODEL_CACHE_DIR,
    RECORD_DIR,
)
from frigate.embeddings import EmbeddingsContext, manage_embeddings
from frigate.events.audio import listen_to_audio
from frigate.events.cleanup import EventCleanup
from frigate.events.external import ExternalEventProcessor
from frigate.events.maintainer import EventProcessor
from frigate.log import log_process, root_configurer
from frigate.models import (
    Event,
    Export,
    Previews,
    Recordings,
    RecordingsToDelete,
    Regions,
    ReviewSegment,
    Timeline,
    User,
)
from frigate.object_detection import ObjectDetectProcess
from frigate.object_processing import TrackedObjectProcessor
from frigate.output.output import output_frames
from frigate.plus import PlusApi
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.ptz.onvif import OnvifController
from frigate.record.cleanup import RecordingCleanup
from frigate.record.export import migrate_exports
from frigate.record.record import manage_recordings
from frigate.review.review import manage_review_segments
from frigate.stats.emitter import StatsEmitter
from frigate.stats.util import stats_init
from frigate.storage import StorageMaintainer
from frigate.timeline import TimelineProcessor
from frigate.types import CameraMetricsTypes, PTZMetricsTypes
from frigate.util.builtin import empty_and_close_queue, save_default_config
from frigate.util.config import migrate_frigate_config
from frigate.util.object import get_camera_regions_grid
from frigate.version import VERSION
from frigate.video import capture_camera, track_camera
from frigate.watchdog import FrigateWatchdog

logger = logging.getLogger(__name__)


class FrigateApp:
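    """Top-level Frigate application.

    Owns the runtime config, worker processes, queues, and communicators,
    and coordinates startup in start() and shutdown in stop().
    """
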
def __init__(self) -> None:
self.stop_event: MpEvent = mp.Event()
self.detection_queue: Queue = mp.Queue()
self.detectors: dict[str, ObjectDetectProcess] = {}
self.detection_out_events: dict[str, MpEvent] = {}
self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue()
self.plus_api = PlusApi()
self.camera_metrics: dict[str, CameraMetricsTypes] = {}
self.ptz_metrics: dict[str, PTZMetricsTypes] = {}
self.processes: dict[str, int] = {}
self.region_grids: dict[str, list[list[dict[str, int]]]] = {}
def set_environment_vars(self) -> None:
for key, value in self.config.environment_vars.items():
os.environ[key] = value
def ensure_dirs(self) -> None:
for d in [
CONFIG_DIR,
RECORD_DIR,
f"{CLIPS_DIR}/cache",
CACHE_DIR,
MODEL_CACHE_DIR,
EXPORT_DIR,
]:
            if not os.path.exists(d) and not os.path.islink(d):
                logger.info(f"Creating directory: {d}")
                os.makedirs(d)
            else:
                logger.debug(f"Skipping directory: {d}")

def init_logger(self) -> None:
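        # every process logs to a single multiprocessing queue that is
        # drained by a dedicated log process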
self.log_process = mp.Process(
target=log_process, args=(self.log_queue,), name="log_process"
)
        self.log_process.daemon = True
        self.log_process.start()
        self.processes["logger"] = self.log_process.pid or 0
        root_configurer(self.log_queue)

    def init_config(self) -> None:
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
if not os.path.isfile(config_file):
print("No config file found, saving default config")
config_file = config_file_yaml
save_default_config(config_file)
# check if the config file needs to be migrated
migrate_frigate_config(config_file)
        user_config = FrigateConfig.parse_file(config_file)
        self.config = user_config.runtime_config(self.plus_api)

        for camera_name in self.config.cameras.keys():
# create camera_metrics
self.camera_metrics[camera_name] = {
"camera_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"skipped_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"process_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"detection_fps": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"detection_frame": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"read_start": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ffmpeg_pid": mp.Value("i", 0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"frame_queue": mp.Queue(maxsize=2),
"capture_process": None,
"process": None,
"audio_rms": mp.Value("d", 0.0), # type: ignore[typeddict-item]
"audio_dBFS": mp.Value("d", 0.0), # type: ignore[typeddict-item]
}
self.ptz_metrics[camera_name] = {
"ptz_autotracker_enabled": mp.Value( # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"i",
self.config.cameras[camera_name].onvif.autotracking.enabled,
),
"ptz_tracking_active": mp.Event(),
"ptz_motor_stopped": mp.Event(),
"ptz_reset": mp.Event(),
"ptz_start_time": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_stop_time": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_frame_time": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_zoom_level": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_max_zoom": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
"ptz_min_zoom": mp.Value("d", 0.0), # type: ignore[typeddict-item]
# issue https://github.com/python/typeshed/issues/8799
# from mypy 0.981 onwards
}
self.ptz_metrics[camera_name]["ptz_motor_stopped"].set()

    def set_log_levels(self) -> None:
        logging.getLogger().setLevel(self.config.logger.default.value.upper())

        for log, level in self.config.logger.logs.items():
            logging.getLogger(log).setLevel(level.value.upper())

        if "werkzeug" not in self.config.logger.logs:
            logging.getLogger("werkzeug").setLevel("ERROR")

        if "ws4py" not in self.config.logger.logs:
            logging.getLogger("ws4py").setLevel("ERROR")

    def init_queues(self) -> None:
        # Queue for cameras to push tracked objects to
        self.detected_frames_queue: Queue = mp.Queue(
            maxsize=sum(camera.enabled for camera in self.config.cameras.values()) * 2
        )

# Queue for timeline events
self.timeline_queue: Queue = mp.Queue()
def init_database(self) -> None:
def vacuum_db(db: SqliteExtDatabase) -> None:
logger.info("Running database vacuum")
db.execute_sql("VACUUM;")
try:
with open(f"{CONFIG_DIR}/.vacuum", "w") as f:
f.write(str(datetime.datetime.now().timestamp()))
except PermissionError:
logger.error("Unable to write to /config to save DB state")
def cleanup_timeline_db(db: SqliteExtDatabase) -> None:
db.execute_sql(
"DELETE FROM timeline WHERE source_id NOT IN (SELECT id FROM event);"
)
try:
with open(f"{CONFIG_DIR}/.timeline", "w") as f:
f.write(str(datetime.datetime.now().timestamp()))
except PermissionError:
logger.error("Unable to write to /config to save DB state")
# Migrate DB location
old_db_path = DEFAULT_DB_PATH
if not os.path.isfile(self.config.database.path) and os.path.isfile(
old_db_path
):
os.rename(old_db_path, self.config.database.path)
# Migrate DB schema
        migrate_db = SqliteExtDatabase(self.config.database.path)

        # Run migrations
        del logging.getLogger("peewee_migrate").handlers[:]
        router = Router(migrate_db)
        if len(router.diff) > 0:
            logger.info("Making backup of DB before migrations...")
            shutil.copyfile(
                self.config.database.path,
                self.config.database.path.replace("frigate.db", "backup.db"),
            )

router.run()
# this is a temporary check to clean up user DB from beta
# will be removed before final release
if not os.path.exists(f"{CONFIG_DIR}/.timeline"):
cleanup_timeline_db(migrate_db)
# check if vacuum needs to be run
if os.path.exists(f"{CONFIG_DIR}/.vacuum"):
with open(f"{CONFIG_DIR}/.vacuum") as f:
try:
timestamp = round(float(f.readline()))
except Exception:
timestamp = 0
if (
timestamp
< (
datetime.datetime.now() - datetime.timedelta(weeks=2)
).timestamp()
):
vacuum_db(migrate_db)
else:
vacuum_db(migrate_db)
migrate_db.close()
def init_go2rtc(self) -> None:
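        # go2rtc is launched outside this process; just locate the running
        # instance and record its PID so it shows up in stats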
for proc in psutil.process_iter(["pid", "name"]):
if proc.info["name"] == "go2rtc":
logger.info(f"go2rtc process pid: {proc.info['pid']}")
self.processes["go2rtc"] = proc.info["pid"]
def init_recording_manager(self) -> None:
recording_process = mp.Process(
target=manage_recordings,
name="recording_manager",
args=(self.config,),
)
recording_process.daemon = True
self.recording_process = recording_process
recording_process.start()
self.processes["recording"] = recording_process.pid or 0
logger.info(f"Recording process started: {recording_process.pid}")
def init_review_segment_manager(self) -> None:
review_segment_process = mp.Process(
target=manage_review_segments,
name="review_segment_manager",
args=(self.config,),
)
review_segment_process.daemon = True
self.review_segment_process = review_segment_process
review_segment_process.start()
self.processes["review_segment"] = review_segment_process.pid or 0
logger.info(f"Review process started: {review_segment_process.pid}")
def init_embeddings_manager(self) -> None:
if not self.config.semantic_search.enabled:
self.embeddings = None
return
# Create a client for other processes to use
self.embeddings = EmbeddingsContext()
embedding_process = mp.Process(
target=manage_embeddings,
name="embeddings_manager",
args=(self.config,),
)
embedding_process.daemon = True
self.embedding_process = embedding_process
embedding_process.start()
self.processes["embeddings"] = embedding_process.pid or 0
logger.info(f"Embedding process started: {embedding_process.pid}")
def bind_database(self) -> None:
"""Bind db to the main process."""
# NOTE: all db accessing processes need to be created before the db can be bound to the main process
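        # the write timeout scales with the number of enabled cameras,
        # with a 60 second floor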
self.db = SqliteQueueDatabase(
self.config.database.path,
pragmas={
"auto_vacuum": "FULL", # Does not defragment database
"cache_size": -512 * 1000, # 512MB of cache,
"synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
},
timeout=max(
60, 10 * len([c for c in self.config.cameras.values() if c.enabled])
),
)
models = [
Event,
Export,
Previews,
Recordings,
RecordingsToDelete,
Regions,
ReviewSegment,
Timeline,
User,
]
self.db.bind(models)
def check_db_data_migrations(self) -> None:
        # check if the exports migration needs to be run
if not os.path.exists(f"{CONFIG_DIR}/.exports"):
try:
with open(f"{CONFIG_DIR}/.exports", "w") as f:
f.write(str(datetime.datetime.now().timestamp()))
except PermissionError:
logger.error("Unable to write to /config to save export state")
migrate_exports(self.config.ffmpeg, self.config.cameras.keys())
def init_external_event_processor(self) -> None:
self.external_event_processor = ExternalEventProcessor(self.config)
def init_inter_process_communicator(self) -> None:
self.inter_process_communicator = InterProcessCommunicator()
self.inter_config_updater = ConfigPublisher()
self.inter_zmq_proxy = ZmqProxy()
def init_web_server(self) -> None:
        self.flask_app = create_app(
            self.config,
            self.db,
            self.embeddings,
            self.detected_frames_processor,
            self.storage_maintainer,
            self.onvif_controller,
            self.external_event_processor,
            self.plus_api,
            self.stats_emitter,
        )

    def init_onvif(self) -> None:
        self.onvif_controller = OnvifController(self.config, self.ptz_metrics)

def init_dispatcher(self) -> None:
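        # the dispatcher fans commands and state out to every configured
        # communicator (MQTT, WebPush, WebSocket, inter-process)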
comms: list[Communicator] = []
if self.config.mqtt.enabled:
comms.append(MqttClient(self.config))
if self.config.notifications.enabled_in_config:
comms.append(WebPushClient(self.config))
comms.append(WebSocketClient(self.config))
comms.append(self.inter_process_communicator)
self.dispatcher = Dispatcher(
self.config,
self.inter_config_updater,
self.onvif_controller,
self.ptz_metrics,
comms,
)

    def start_detectors(self) -> None:
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
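            # per-camera input frame shared with the detector, sized for the
            # largest configured model input (3 bytes per pixel)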
try:
largest_frame = max(
[
det.model.height * det.model.width * 3
for (name, det) in self.config.detectors.items()
]
)
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
size=largest_frame,
)
except FileExistsError:
shm_in = mp.shared_memory.SharedMemory(name=name)
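            # per-camera output buffer: room for 20 detections of 6 float32
            # values each (hence 20 * 6 * 4 bytes)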
try:
shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4
)
except FileExistsError:
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}")
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
for name, detector_config in self.config.detectors.items():
self.detectors[name] = ObjectDetectProcess(
name,
self.detection_queue,
self.detection_out_events,
detector_config,
)
def start_ptz_autotracker(self) -> None:
self.ptz_autotracker_thread = PtzAutoTrackerThread(
self.config,
self.onvif_controller,
self.ptz_metrics,
self.dispatcher,
self.stop_event,
)
self.ptz_autotracker_thread.start()
def start_detected_frames_processor(self) -> None:
        self.detected_frames_processor = TrackedObjectProcessor(
            self.config,
            self.dispatcher,
            self.detected_frames_queue,
            self.ptz_autotracker_thread,
            self.stop_event,
        )
self.detected_frames_processor.start()
def start_video_output_processor(self) -> None:
output_processor = mp.Process(
target=output_frames,
name="output_processor",
args=(self.config,),
)
output_processor.daemon = True
self.output_processor = output_processor
output_processor.start()
logger.info(f"Output process started: {output_processor.pid}")
def init_historical_regions(self) -> None:
# delete region grids for removed or renamed cameras
cameras = list(self.config.cameras.keys())
Regions.delete().where(~(Regions.camera << cameras)).execute()
# create or update region grids for each camera
for camera in self.config.cameras.values():
self.region_grids[camera.name] = get_camera_regions_grid(
camera.name,
camera.detect,
max(self.config.model.width, self.config.model.height),
)
def start_camera_processors(self) -> None:
        for name, config in self.config.cameras.items():
            if not self.config.cameras[name].enabled:
                logger.info(f"Camera processor not started for disabled camera {name}")
                continue

camera_process = mp.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
name,
config,
self.config.model,
self.config.model.merged_labelmap,
                    self.detection_queue,
                    self.detection_out_events[name],
                    self.detected_frames_queue,
                    self.camera_metrics[name],
                    self.ptz_metrics[name],
                    self.region_grids[name],
                ),
            )
            camera_process.daemon = True
            self.camera_metrics[name]["process"] = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
def start_camera_capture_processes(self) -> None:
        for name, config in self.config.cameras.items():
            if not self.config.cameras[name].enabled:
                logger.info(f"Capture process not started for disabled camera {name}")
                continue

            capture_process = mp.Process(
                target=capture_camera,
                name=f"camera_capture:{name}",
                args=(name, config, self.shm_frame_count, self.camera_metrics[name]),
            )
            capture_process.daemon = True
            self.camera_metrics[name]["capture_process"] = capture_process
            capture_process.start()
            logger.info(f"Capture process started for {name}: {capture_process.pid}")

def start_audio_processors(self) -> None:
self.audio_process = None
if len([c for c in self.config.cameras.values() if c.audio.enabled]) > 0:
self.audio_process = mp.Process(
target=listen_to_audio,
name="audio_capture",
args=(
self.config,
self.camera_metrics,
),
)
self.audio_process.daemon = True
self.audio_process.start()
self.processes["audio_detector"] = self.audio_process.pid or 0
logger.info(f"Audio process started: {self.audio_process.pid}")
def start_timeline_processor(self) -> None:
self.timeline_processor = TimelineProcessor(
self.config, self.timeline_queue, self.stop_event
)
self.timeline_processor.start()
def start_event_processor(self) -> None:
        self.event_processor = EventProcessor(
            self.config,
            self.timeline_queue,
            self.stop_event,
        )
        self.event_processor.start()

    def start_event_cleanup(self) -> None:
        self.event_cleanup = EventCleanup(self.config, self.stop_event)
        self.event_cleanup.start()

    def start_record_cleanup(self) -> None:
        self.record_cleanup = RecordingCleanup(self.config, self.stop_event)
        self.record_cleanup.start()

def start_storage_maintainer(self) -> None:
self.storage_maintainer = StorageMaintainer(self.config, self.stop_event)
self.storage_maintainer.start()
def start_stats_emitter(self) -> None:
        self.stats_emitter = StatsEmitter(
            self.config,
            stats_init(
                self.config, self.camera_metrics, self.detectors, self.processes
            ),
            self.stop_event,
        )
        self.stats_emitter.start()

    def start_watchdog(self) -> None:
self.frigate_watchdog = FrigateWatchdog(self.detectors, self.stop_event)
self.frigate_watchdog.start()
def check_shm(self) -> None:
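        # determine how many raw frames per camera can be buffered in /dev/shm;
        # the result (self.shm_frame_count) is passed to each capture process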
total_shm = round(shutil.disk_usage("/dev/shm").total / pow(2, 20), 1)
# required for log files + nginx cache
min_req_shm = 40 + 10
if self.config.birdseye.restream:
min_req_shm += 8
available_shm = total_shm - min_req_shm
cam_total_frame_size = 0
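        # one decoded frame per camera: detect resolution at 1.5 bytes per
        # pixel (YUV420) plus a fixed per-frame overhead, converted to MiB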
for camera in self.config.cameras.values():
if camera.enabled:
cam_total_frame_size += round(
(camera.detect.width * camera.detect.height * 1.5 + 270480)
/ 1048576,
1,
)
self.shm_frame_count = min(50, int(available_shm / (cam_total_frame_size)))
        logger.debug(
            f"Calculated SHM frame budget: {available_shm}MB available / {cam_total_frame_size}MB for one frame from every camera :: {self.shm_frame_count} frames for each camera in SHM"
        )
if self.shm_frame_count < 10:
logger.warning(
f"The current SHM size of {total_shm}MB is too small, recommend increasing it to at least {round(min_req_shm + cam_total_frame_size)}MB."
)
def init_auth(self) -> None:
if self.config.auth.enabled:
if User.select().count() == 0:
password = secrets.token_hex(16)
password_hash = hash_password(
password, iterations=self.config.auth.hash_iterations
)
User.insert(
{
User.username: "admin",
User.password_hash: password_hash,
}
).execute()
logger.info("********************************************************")
logger.info("********************************************************")
logger.info("*** Auth is enabled, but no users exist. ***")
logger.info("*** Created a default user: ***")
logger.info("*** User: admin ***")
logger.info(f"*** Password: {password} ***")
logger.info("********************************************************")
logger.info("********************************************************")
elif self.config.auth.reset_admin_password:
password = secrets.token_hex(16)
password_hash = hash_password(
password, iterations=self.config.auth.hash_iterations
)
User.replace(username="admin", password_hash=password_hash).execute()
logger.info("********************************************************")
logger.info("********************************************************")
logger.info("*** Reset admin password set in the config. ***")
logger.info(f"*** Password: {password} ***")
logger.info("********************************************************")
logger.info("********************************************************")
def start(self) -> None:
parser = argparse.ArgumentParser(
prog="Frigate",
description="An NVR with realtime local object detection for IP cameras.",
)
parser.add_argument("--validate-config", action="store_true")
args = parser.parse_args()
        self.init_logger()
        logger.info(f"Starting Frigate ({VERSION})")

        try:
            self.ensure_dirs()
try:
self.init_config()
except Exception as e:
print("*************************************************************")
print("*************************************************************")
print("*** Your config file is not valid! ***")
print("*** Please check the docs at ***")
print("*** https://docs.frigate.video/configuration/index ***")
print("*************************************************************")
print("*************************************************************")
print("*** Config Validation Errors ***")
print("*************************************************************")
if isinstance(e, ValidationError):
for error in e.errors():
location = ".".join(str(item) for item in error["loc"])
print(f"{location}: {error['msg']}")
else:
print(e)
print(traceback.format_exc())
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")
self.log_process.terminate()
sys.exit(1)
if args.validate_config:
print("*************************************************************")
print("*** Your config file is valid. ***")
print("*************************************************************")
self.log_process.terminate()
sys.exit(0)
self.set_environment_vars()
self.set_log_levels()
self.init_queues()
self.init_database()
self.init_onvif()
self.init_recording_manager()
self.init_review_segment_manager()
self.init_embeddings_manager()
self.init_go2rtc()
self.bind_database()
self.check_db_data_migrations()
self.init_inter_process_communicator()
self.init_dispatcher()
        except Exception as e:
            print(e)
            self.log_process.terminate()
            sys.exit(1)

self.start_detectors()
self.start_video_output_processor()
self.start_ptz_autotracker()
self.init_historical_regions()
        self.start_detected_frames_processor()
        self.start_camera_processors()
        self.check_shm()
        self.start_camera_capture_processes()
        self.start_audio_processors()
        self.start_storage_maintainer()
        self.init_external_event_processor()
        self.start_stats_emitter()
        self.init_web_server()
        self.start_timeline_processor()
        self.start_event_processor()
        self.start_event_cleanup()
        self.start_record_cleanup()
        self.start_watchdog()
        self.init_auth()

# Flask only listens for SIGINT, so we need to catch SIGTERM and send SIGINT
def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
os.kill(os.getpid(), signal.SIGINT)

        signal.signal(signal.SIGTERM, receiveSignal)

try:
self.flask_app.run(host="127.0.0.1", port=5001, debug=False, threaded=True)
        except KeyboardInterrupt:
            pass

        logger.info("Flask has exited...")

        self.stop()

def stop(self) -> None:
logger.info("Stopping...")
self.stop_event.set()
# set an end_time on entries without an end_time before exiting
Event.update(
end_time=datetime.datetime.now().timestamp(), has_snapshot=False
).where(Event.end_time == None).execute()
ReviewSegment.update(end_time=datetime.datetime.now().timestamp()).where(
ReviewSegment.end_time == None
).execute()
# stop the audio process
if self.audio_process is not None:
self.audio_process.terminate()
self.audio_process.join()
# ensure the capture processes are done
for camera in self.camera_metrics.keys():
capture_process = self.camera_metrics[camera]["capture_process"]
if capture_process is not None:
logger.info(f"Waiting for capture process for {camera} to stop")
capture_process.terminate()
capture_process.join()
# ensure the camera processors are done
for camera in self.camera_metrics.keys():
camera_process = self.camera_metrics[camera]["process"]
if camera_process is not None:
logger.info(f"Waiting for process for {camera} to stop")
camera_process.terminate()
camera_process.join()
logger.info(f"Closing frame queue for {camera}")
frame_queue = self.camera_metrics[camera]["frame_queue"]
empty_and_close_queue(frame_queue)
# ensure the detectors are done
for detector in self.detectors.values():
detector.stop()
empty_and_close_queue(self.detection_queue)
logger.info("Detection queue closed")
self.detected_frames_processor.join()
empty_and_close_queue(self.detected_frames_queue)
logger.info("Detected frames queue closed")
self.timeline_processor.join()
self.event_processor.join()
empty_and_close_queue(self.timeline_queue)
logger.info("Timeline queue closed")
self.output_processor.terminate()
self.output_processor.join()
self.recording_process.terminate()
self.recording_process.join()
self.review_segment_process.terminate()
self.review_segment_process.join()
self.external_event_processor.stop()
self.dispatcher.stop()
self.ptz_autotracker_thread.join()
2020-11-24 14:27:51 +01:00
self.event_cleanup.join()
self.record_cleanup.join()
self.stats_emitter.join()
2020-11-04 13:28:07 +01:00
self.frigate_watchdog.join()
2021-02-07 15:38:35 +01:00
self.db.stop()
2020-11-04 13:28:07 +01:00
# Save embeddings stats to disk
if self.embeddings:
self.embeddings.save_stats()
# Stop Communicators
self.inter_process_communicator.stop()
self.inter_config_updater.stop()
self.inter_zmq_proxy.stop()
while len(self.detection_shms) > 0:
shm = self.detection_shms.pop()
shm.close()
shm.unlink()
self.log_process.terminate()
self.log_process.join()
os._exit(os.EX_OK)