# frigate/config.py


from __future__ import annotations
import asyncio
import json
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
from pydantic import BaseModel, Extra, Field, parse_obj_as, validator
from pydantic.fields import PrivateAttr
from frigate.const import (
ALL_ATTRIBUTE_LABELS,
AUDIO_MIN_CONFIDENCE,
CACHE_DIR,
CACHE_SEGMENT_FORMAT,
DEFAULT_DB_PATH,
MAX_PRE_CAPTURE,
REGEX_CAMERA_NAME,
YAML_EXT,
)
from frigate.detectors import DetectorConfig, ModelConfig
from frigate.detectors.detector_config import BaseDetectorConfig
from frigate.ffmpeg_presets import (
parse_preset_hardware_acceleration_decode,
parse_preset_hardware_acceleration_scale,
parse_preset_input,
parse_preset_output_record,
)
from frigate.plus import PlusApi
from frigate.util.builtin import (
deep_merge,
escape_special_characters,
get_ffmpeg_arg_list,
load_config_with_no_duplicates,
)
from frigate.util.image import create_mask
from frigate.util.services import auto_detect_hwaccel, get_video_properties
logger = logging.getLogger(__name__)
# TODO: Identify what the default format to display timestamps is
DEFAULT_TIME_FORMAT = "%m/%d/%Y %H:%M:%S"
# German Style:
# DEFAULT_TIME_FORMAT = "%d.%m.%Y %H:%M:%S"
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
# read docker secret files as env vars too
if os.path.isdir("/run/secrets"):
for secret_file in os.listdir("/run/secrets"):
if secret_file.startswith("FRIGATE_"):
FRIGATE_ENV_VARS[secret_file] = Path(
os.path.join("/run/secrets", secret_file)
).read_text()
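# Values collected here are used for runtime substitution: "{FRIGATE_*}" style
# placeholders in MQTT/ONVIF credentials and camera input paths are expanded
# from these environment variables (or docker secrets) in FrigateConfig.runtime_config.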
DEFAULT_TRACKED_OBJECTS = ["person"]
DEFAULT_LISTEN_AUDIO = ["bark", "fire_alarm", "scream", "speech", "yell"]
DEFAULT_DETECTORS = {"cpu": {"type": "cpu"}}
DEFAULT_DETECT_DIMENSIONS = {"width": 1280, "height": 720}
DEFAULT_TIME_LAPSE_FFMPEG_ARGS = "-vf setpts=0.04*PTS -r 30"
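# DEFAULT_DETECT_DIMENSIONS is the fallback applied when the resolution of the
# detect stream cannot be determined automatically (see FrigateConfig.runtime_config).
# DEFAULT_TIME_LAPSE_FFMPEG_ARGS scales presentation timestamps to 1/25 of real
# time and outputs at 30 fps for exported timelapses.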
class FrigateBaseModel(BaseModel):
class Config:
extra = Extra.forbid
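# Models that inherit from FrigateBaseModel reject unknown keys, so typos in the
# user config surface as validation errors instead of being silently ignored.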
class LiveModeEnum(str, Enum):
jsmpeg = "jsmpeg"
mse = "mse"
webrtc = "webrtc"
class TimeFormatEnum(str, Enum):
browser = "browser"
hours12 = "12hour"
hours24 = "24hour"
class DateTimeStyleEnum(str, Enum):
full = "full"
long = "long"
medium = "medium"
short = "short"
class UIConfig(FrigateBaseModel):
live_mode: LiveModeEnum = Field(
default=LiveModeEnum.mse, title="Default Live Mode."
)
timezone: Optional[str] = Field(title="Override UI timezone.")
use_experimental: bool = Field(default=False, title="Experimental UI")
time_format: TimeFormatEnum = Field(
default=TimeFormatEnum.browser, title="Override UI time format."
)
date_style: DateTimeStyleEnum = Field(
default=DateTimeStyleEnum.short, title="Override UI dateStyle."
)
time_style: DateTimeStyleEnum = Field(
default=DateTimeStyleEnum.medium, title="Override UI timeStyle."
)
strftime_fmt: Optional[str] = Field(
default=None, title="Override date and time format using strftime syntax."
)
class StatsConfig(FrigateBaseModel):
amd_gpu_stats: bool = Field(default=True, title="Enable AMD GPU stats.")
intel_gpu_stats: bool = Field(default=True, title="Enable Intel GPU stats.")
network_bandwidth: bool = Field(
default=False, title="Enable network bandwidth for ffmpeg processes."
)
class TelemetryConfig(FrigateBaseModel):
network_interfaces: List[str] = Field(
default=[],
title="Enabled network interfaces for bandwidth calculation.",
)
stats: StatsConfig = Field(
default_factory=StatsConfig, title="System Stats Configuration"
)
version_check: bool = Field(default=True, title="Enable latest version check.")
class MqttConfig(FrigateBaseModel):
enabled: bool = Field(title="Enable MQTT Communication.", default=True)
host: str = Field(default="", title="MQTT Host")
port: int = Field(default=1883, title="MQTT Port")
topic_prefix: str = Field(default="frigate", title="MQTT Topic Prefix")
client_id: str = Field(default="frigate", title="MQTT Client ID")
stats_interval: int = Field(default=60, title="MQTT Camera Stats Interval")
user: Optional[str] = Field(title="MQTT Username")
password: Optional[str] = Field(title="MQTT Password")
tls_ca_certs: Optional[str] = Field(title="MQTT TLS CA Certificates")
tls_client_cert: Optional[str] = Field(title="MQTT TLS Client Certificate")
tls_client_key: Optional[str] = Field(title="MQTT TLS Client Key")
tls_insecure: Optional[bool] = Field(title="MQTT TLS Insecure")
@validator("password", pre=True, always=True)
def validate_password(cls, v, values):
if (v is None) != (values["user"] is None):
raise ValueError("Password must be provided with username.")
return v
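# Illustrative config snippet (field names match MqttConfig above; the values
# and environment variable names are examples only):
#
#   mqtt:
#     host: mqtt.local
#     port: 1883
#     topic_prefix: frigate
#     user: "{FRIGATE_MQTT_USER}"
#     password: "{FRIGATE_MQTT_PASSWORD}"
#
# The validator requires user and password to be provided together, and the
# "{FRIGATE_*}" placeholders are expanded from FRIGATE_ENV_VARS at runtime.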
class ZoomingModeEnum(str, Enum):
disabled = "disabled"
absolute = "absolute"
relative = "relative"
class PtzAutotrackConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable PTZ object autotracking.")
calibrate_on_startup: bool = Field(
default=False, title="Perform a camera calibration when Frigate starts."
)
zooming: ZoomingModeEnum = Field(
default=ZoomingModeEnum.disabled, title="Autotracker zooming mode."
)
zoom_factor: float = Field(
default=0.3,
title="Zooming factor (0.1-0.75).",
ge=0.1,
le=0.75,
)
track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to begin autotracking.",
)
return_preset: str = Field(
default="home",
title="Name of camera preset to return to when object tracking is over.",
)
timeout: int = Field(
default=10, title="Seconds to delay before returning to preset."
)
movement_weights: Optional[Union[str, List[str]]] = Field(
default=[],
title="Internal value used for PTZ movements based on the speed of your camera's motor.",
)
enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of autotracking."
)
@validator("movement_weights", pre=True)
def validate_weights(cls, v):
if v is None:
return None
if isinstance(v, str):
weights = list(map(float, v.split(",")))
elif isinstance(v, list):
weights = [float(val) for val in v]
else:
raise ValueError("Invalid type for movement_weights")
if len(weights) != 5:
raise ValueError("movement_weights must have exactly 5 floats")
return weights
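# movement_weights accepts either a comma-separated string or a list and is
# normalized to exactly five floats (e.g. "1.0, 1.0, 1.0, 1.0, 1.0" -- values
# are illustrative only); anything else fails validation.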
class OnvifConfig(FrigateBaseModel):
host: str = Field(default="", title="Onvif Host")
port: int = Field(default=8000, title="Onvif Port")
user: Optional[str] = Field(title="Onvif Username")
password: Optional[str] = Field(title="Onvif Password")
autotracking: PtzAutotrackConfig = Field(
default_factory=PtzAutotrackConfig,
title="PTZ auto tracking config.",
)
class RetainModeEnum(str, Enum):
all = "all"
motion = "motion"
active_objects = "active_objects"
class RetainConfig(FrigateBaseModel):
default: float = Field(default=10, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.motion, title="Retain mode.")
objects: Dict[str, float] = Field(
default_factory=dict, title="Object retention period."
)
class EventsConfig(FrigateBaseModel):
pre_capture: int = Field(
default=5, title="Seconds to retain before event starts.", le=MAX_PRE_CAPTURE
)
post_capture: int = Field(default=5, title="Seconds to retain after event ends.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to save the event.",
)
objects: Optional[List[str]] = Field(
title="List of objects to be detected in order to save the event.",
)
retain: RetainConfig = Field(
default_factory=RetainConfig, title="Event retention settings."
)
class RecordRetainConfig(FrigateBaseModel):
days: float = Field(default=0, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")
class RecordExportConfig(FrigateBaseModel):
timelapse_args: str = Field(
default=DEFAULT_TIME_LAPSE_FFMPEG_ARGS, title="Timelapse Args"
)
class RecordQualityEnum(str, Enum):
very_low = "very_low"
low = "low"
medium = "medium"
high = "high"
very_high = "very_high"
class RecordPreviewConfig(FrigateBaseModel):
quality: RecordQualityEnum = Field(
default=RecordQualityEnum.medium, title="Quality of recording preview."
)
class RecordConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable record on all cameras.")
sync_recordings: bool = Field(
default=False, title="Sync recordings with disk on startup and once a day."
)
expire_interval: int = Field(
default=60,
title="Number of minutes to wait between cleanup runs.",
)
retain: RecordRetainConfig = Field(
default_factory=RecordRetainConfig, title="Record retention settings."
)
events: EventsConfig = Field(
default_factory=EventsConfig, title="Event specific settings."
)
export: RecordExportConfig = Field(
default_factory=RecordExportConfig, title="Recording Export Config"
)
preview: RecordPreviewConfig = Field(
default_factory=RecordPreviewConfig, title="Recording Preview Config"
)
enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of recording."
)
class MotionConfig(FrigateBaseModel):
threshold: int = Field(
default=30,
title="Motion detection threshold (1-255).",
ge=1,
le=255,
)
lightning_threshold: float = Field(
default=0.8, title="Lightning detection threshold (0.3-1.0).", ge=0.3, le=1.0
)
improve_contrast: bool = Field(default=True, title="Improve Contrast")
contour_area: Optional[int] = Field(default=10, title="Contour Area")
delta_alpha: float = Field(default=0.2, title="Delta Alpha")
frame_alpha: float = Field(default=0.01, title="Frame Alpha")
frame_height: Optional[int] = Field(default=100, title="Frame Height")
mask: Union[str, List[str]] = Field(
default="", title="Coordinates polygon for the motion mask."
)
mqtt_off_delay: int = Field(
default=30,
title="Delay for updating MQTT with no motion detected.",
)
class RuntimeMotionConfig(MotionConfig):
raw_mask: Union[str, List[str]] = ""
mask: np.ndarray = None
def __init__(self, **config):
frame_shape = config.get("frame_shape", (1, 1))
mask = config.get("mask", "")
config["raw_mask"] = mask
if mask:
config["mask"] = create_mask(frame_shape, mask)
else:
empty_mask = np.zeros(frame_shape, np.uint8)
empty_mask[:] = 255
config["mask"] = empty_mask
super().__init__(**config)
def dict(self, **kwargs):
ret = super().dict(**kwargs)
if "mask" in ret:
ret["mask"] = ret["raw_mask"]
ret.pop("raw_mask")
return ret
class Config:
arbitrary_types_allowed = True
extra = Extra.ignore
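# At runtime the motion mask polygon(s) are compiled into a numpy array by
# create_mask(); dict() swaps the original raw_mask value back in so the
# serialized config never exposes the numpy data.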
class StationaryMaxFramesConfig(FrigateBaseModel):
default: Optional[int] = Field(title="Default max frames.", ge=1)
objects: Dict[str, int] = Field(
default_factory=dict, title="Object specific max frames."
)
class StationaryConfig(FrigateBaseModel):
interval: Optional[int] = Field(
title="Frame interval for checking stationary objects.",
gt=0,
)
threshold: Optional[int] = Field(
title="Number of frames without a position change for an object to be considered stationary",
ge=1,
)
max_frames: StationaryMaxFramesConfig = Field(
default_factory=StationaryMaxFramesConfig,
title="Max frames for stationary objects.",
)
class DetectConfig(FrigateBaseModel):
height: Optional[int] = Field(title="Height of the stream for the detect role.")
width: Optional[int] = Field(title="Width of the stream for the detect role.")
fps: int = Field(
default=5, title="Number of frames per second to process through detection."
)
enabled: bool = Field(default=True, title="Detection Enabled.")
min_initialized: Optional[int] = Field(
title="Minimum number of consecutive hits for an object to be initialized by the tracker."
)
max_disappeared: Optional[int] = Field(
        title="Maximum number of frames the object can disappear before detection ends."
)
stationary: StationaryConfig = Field(
default_factory=StationaryConfig,
title="Stationary objects config.",
)
annotation_offset: int = Field(
default=0, title="Milliseconds to offset detect annotations by."
)
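# Detect fields left unset are filled in at runtime relative to the detect fps:
# min_initialized defaults to fps / 2, max_disappeared to fps * 5, and the
# stationary threshold/interval to fps * 10. Width/height are probed from the
# detect stream or fall back to DEFAULT_DETECT_DIMENSIONS (see
# FrigateConfig.runtime_config).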
class FilterConfig(FrigateBaseModel):
min_area: int = Field(
default=0, title="Minimum area of bounding box for object to be counted."
)
max_area: int = Field(
default=24000000, title="Maximum area of bounding box for object to be counted."
)
min_ratio: float = Field(
default=0,
title="Minimum ratio of bounding box's width/height for object to be counted.",
)
max_ratio: float = Field(
default=24000000,
title="Maximum ratio of bounding box's width/height for object to be counted.",
)
threshold: float = Field(
default=0.7,
title="Average detection confidence threshold for object to be counted.",
)
min_score: float = Field(
default=0.5, title="Minimum detection confidence for object to be counted."
)
mask: Optional[Union[str, List[str]]] = Field(
title="Detection area polygon mask for this filter configuration.",
)
class AudioFilterConfig(FrigateBaseModel):
threshold: float = Field(
default=0.8,
ge=AUDIO_MIN_CONFIDENCE,
lt=1.0,
title="Minimum detection confidence threshold for audio to be counted.",
)
class RuntimeFilterConfig(FilterConfig):
mask: Optional[np.ndarray]
raw_mask: Optional[Union[str, List[str]]]
def __init__(self, **config):
mask = config.get("mask")
config["raw_mask"] = mask
if mask is not None:
config["mask"] = create_mask(config.get("frame_shape", (1, 1)), mask)
super().__init__(**config)
def dict(self, **kwargs):
ret = super().dict(**kwargs)
if "mask" in ret:
ret["mask"] = ret["raw_mask"]
ret.pop("raw_mask")
return ret
class Config:
arbitrary_types_allowed = True
extra = Extra.ignore
# this uses the base model because the color is an extra attribute
class ZoneConfig(BaseModel):
filters: Dict[str, FilterConfig] = Field(
default_factory=dict, title="Zone filters."
)
coordinates: Union[str, List[str]] = Field(
title="Coordinates polygon for the defined zone."
)
inertia: int = Field(
default=3,
title="Number of consecutive frames required for object to be considered present in the zone.",
gt=0,
)
objects: List[str] = Field(
default_factory=list,
title="List of objects that can trigger the zone.",
)
_color: Optional[Tuple[int, int, int]] = PrivateAttr()
_contour: np.ndarray = PrivateAttr()
@property
def color(self) -> Tuple[int, int, int]:
return self._color
@property
def contour(self) -> np.ndarray:
return self._contour
def __init__(self, **config):
super().__init__(**config)
self._color = config.get("color", (0, 0, 0))
coordinates = config["coordinates"]
if isinstance(coordinates, list):
self._contour = np.array(
[[int(p.split(",")[0]), int(p.split(",")[1])] for p in coordinates]
)
elif isinstance(coordinates, str):
points = coordinates.split(",")
self._contour = np.array(
[[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
)
else:
self._contour = np.array([])
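# Zone coordinates are accepted in two forms and converted to a numpy contour:
# a list of "x,y" point strings, or a single flat "x1,y1,x2,y2,..." string.
# Illustrative example (values only): coordinates: "0,461,3,0,1919,0,1919,843"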
class ObjectConfig(FrigateBaseModel):
track: List[str] = Field(default=DEFAULT_TRACKED_OBJECTS, title="Objects to track.")
filters: Dict[str, FilterConfig] = Field(default={}, title="Object filters.")
mask: Union[str, List[str]] = Field(default="", title="Object mask.")
class AudioConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable audio events.")
max_not_heard: int = Field(
default=30, title="Seconds of not hearing the type of audio to end the event."
)
min_volume: int = Field(
default=500, title="Min volume required to run audio detection."
)
listen: List[str] = Field(
default=DEFAULT_LISTEN_AUDIO, title="Audio to listen for."
)
filters: Optional[Dict[str, AudioFilterConfig]] = Field(title="Audio filters.")
enabled_in_config: Optional[bool] = Field(
title="Keep track of original state of audio detection."
)
num_threads: int = Field(default=2, title="Number of detection threads", ge=1)
class BirdseyeModeEnum(str, Enum):
objects = "objects"
motion = "motion"
continuous = "continuous"
@classmethod
def get_index(cls, type):
return list(cls).index(type)
@classmethod
def get(cls, index):
return list(cls)[index]
class BirdseyeLayoutConfig(FrigateBaseModel):
scaling_factor: float = Field(
default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0
)
max_cameras: Optional[int] = Field(default=None, title="Max cameras")
class BirdseyeConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Enable birdseye view.")
restream: bool = Field(default=False, title="Restream birdseye via RTSP.")
width: int = Field(default=1280, title="Birdseye width.")
height: int = Field(default=720, title="Birdseye height.")
quality: int = Field(
default=8,
title="Encoding quality.",
ge=1,
le=31,
)
inactivity_threshold: int = Field(
default=30, title="Birdseye Inactivity Threshold", gt=0
)
mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects, title="Tracking mode."
)
layout: BirdseyeLayoutConfig = Field(
default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config"
)
# uses BaseModel because some global attributes are not available at the camera level
class BirdseyeCameraConfig(BaseModel):
enabled: bool = Field(default=True, title="Enable birdseye view for camera.")
order: int = Field(default=0, title="Position of the camera in the birdseye view.")
mode: BirdseyeModeEnum = Field(
default=BirdseyeModeEnum.objects, title="Tracking mode for camera."
)
# Note: Setting threads to less than 2 caused several issues with recording segments
# https://github.com/blakeblackshear/frigate/issues/5659
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
"-threads",
"2",
"-f",
"rawvideo",
"-pix_fmt",
"yuv420p",
]
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic"
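# Argument values that start with "preset-" are expanded into full ffmpeg
# argument lists by the helpers in frigate.ffmpeg_presets when the per-camera
# command is assembled in CameraConfig._get_ffmpeg_cmd.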
class FfmpegOutputArgsConfig(FrigateBaseModel):
detect: Union[str, List[str]] = Field(
default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Detect role FFmpeg output arguments.",
)
record: Union[str, List[str]] = Field(
default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT,
title="Record role FFmpeg output arguments.",
)
class FfmpegConfig(FrigateBaseModel):
global_args: Union[str, List[str]] = Field(
default=FFMPEG_GLOBAL_ARGS_DEFAULT, title="Global FFmpeg arguments."
)
hwaccel_args: Union[str, List[str]] = Field(
default="auto", title="FFmpeg hardware acceleration arguments."
)
input_args: Union[str, List[str]] = Field(
default=FFMPEG_INPUT_ARGS_DEFAULT, title="FFmpeg input arguments."
)
output_args: FfmpegOutputArgsConfig = Field(
default_factory=FfmpegOutputArgsConfig,
title="FFmpeg output arguments per role.",
)
retry_interval: float = Field(
default=10.0,
title="Time in seconds to wait before FFmpeg retries connecting to the camera.",
)
class CameraRoleEnum(str, Enum):
audio = "audio"
record = "record"
detect = "detect"
class CameraInput(FrigateBaseModel):
path: str = Field(title="Camera input path.")
roles: List[CameraRoleEnum] = Field(title="Roles assigned to this input.")
global_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg global arguments."
)
hwaccel_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg hardware acceleration arguments."
)
input_args: Union[str, List[str]] = Field(
default_factory=list, title="FFmpeg input arguments."
)
class CameraFfmpegConfig(FfmpegConfig):
inputs: List[CameraInput] = Field(title="Camera inputs.")
@validator("inputs")
def validate_roles(cls, v):
roles = [role for i in v for role in i.roles]
roles_set = set(roles)
if len(roles) > len(roles_set):
raise ValueError("Each input role may only be used once.")
if "detect" not in roles:
raise ValueError("The detect role is required.")
return v
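# Each role (audio, record, detect) may be assigned to at most one input per
# camera, and every camera must have exactly one input carrying the detect role.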
class SnapshotsConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Snapshots enabled.")
clean_copy: bool = Field(
default=True, title="Create a clean copy of the snapshot image."
)
timestamp: bool = Field(
default=False, title="Add a timestamp overlay on the snapshot."
)
bounding_box: bool = Field(
default=True, title="Add a bounding box overlay on the snapshot."
)
crop: bool = Field(default=False, title="Crop the snapshot to the detected object.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to save a snapshot.",
)
height: Optional[int] = Field(title="Snapshot image height.")
retain: RetainConfig = Field(
default_factory=RetainConfig, title="Snapshot retention."
)
quality: int = Field(
default=70,
title="Quality of the encoded jpeg (0-100).",
ge=0,
le=100,
)
class ColorConfig(FrigateBaseModel):
red: int = Field(default=255, ge=0, le=255, title="Red")
green: int = Field(default=255, ge=0, le=255, title="Green")
blue: int = Field(default=255, ge=0, le=255, title="Blue")
class TimestampPositionEnum(str, Enum):
tl = "tl"
tr = "tr"
bl = "bl"
br = "br"
class TimestampEffectEnum(str, Enum):
solid = "solid"
shadow = "shadow"
class TimestampStyleConfig(FrigateBaseModel):
position: TimestampPositionEnum = Field(
default=TimestampPositionEnum.tl, title="Timestamp position."
)
format: str = Field(default=DEFAULT_TIME_FORMAT, title="Timestamp format.")
color: ColorConfig = Field(default_factory=ColorConfig, title="Timestamp color.")
thickness: int = Field(default=2, title="Timestamp thickness.")
effect: Optional[TimestampEffectEnum] = Field(title="Timestamp effect.")
class CameraMqttConfig(FrigateBaseModel):
enabled: bool = Field(default=True, title="Send image over MQTT.")
timestamp: bool = Field(default=True, title="Add timestamp to MQTT image.")
bounding_box: bool = Field(default=True, title="Add bounding box to MQTT image.")
crop: bool = Field(default=True, title="Crop MQTT image to detected object.")
height: int = Field(default=270, title="MQTT image height.")
required_zones: List[str] = Field(
default_factory=list,
title="List of required zones to be entered in order to send the image.",
)
quality: int = Field(
default=70,
title="Quality of the encoded jpeg (0-100).",
ge=0,
le=100,
)
class CameraLiveConfig(FrigateBaseModel):
stream_name: str = Field(default="", title="Name of restream to use as live view.")
height: int = Field(default=720, title="Live camera view height")
quality: int = Field(default=8, ge=1, le=31, title="Live camera view quality")
class RestreamConfig(BaseModel):
class Config:
extra = Extra.allow
class CameraUiConfig(FrigateBaseModel):
order: int = Field(default=0, title="Order of camera in UI.")
dashboard: bool = Field(
default=True, title="Show this camera in Frigate dashboard UI."
)
class CameraConfig(FrigateBaseModel):
name: Optional[str] = Field(title="Camera name.", regex=REGEX_CAMERA_NAME)
enabled: bool = Field(default=True, title="Enable camera.")
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
best_image_timeout: int = Field(
default=60,
title="How long to wait for the image with the highest confidence score.",
)
webui_url: Optional[str] = Field(
title="URL to visit the camera directly from system page",
)
zones: Dict[str, ZoneConfig] = Field(
default_factory=dict, title="Zone configuration."
)
record: RecordConfig = Field(
default_factory=RecordConfig, title="Record configuration."
)
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
)
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, title="Snapshot configuration."
)
mqtt: CameraMqttConfig = Field(
default_factory=CameraMqttConfig, title="MQTT configuration."
)
objects: ObjectConfig = Field(
default_factory=ObjectConfig, title="Object configuration."
)
audio: AudioConfig = Field(
default_factory=AudioConfig, title="Audio events configuration."
)
motion: Optional[MotionConfig] = Field(title="Motion detection configuration.")
detect: DetectConfig = Field(
default_factory=DetectConfig, title="Object detection configuration."
)
onvif: OnvifConfig = Field(
default_factory=OnvifConfig, title="Camera Onvif Configuration."
)
ui: CameraUiConfig = Field(
default_factory=CameraUiConfig, title="Camera UI Modifications."
)
birdseye: BirdseyeCameraConfig = Field(
default_factory=BirdseyeCameraConfig, title="Birdseye camera configuration."
)
timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig, title="Timestamp style configuration."
)
_ffmpeg_cmds: List[Dict[str, List[str]]] = PrivateAttr()
def __init__(self, **config):
# Set zone colors
if "zones" in config:
colors = plt.cm.get_cmap("tab10", len(config["zones"]))
config["zones"] = {
name: {**z, "color": tuple(round(255 * c) for c in colors(idx)[:3])}
for idx, (name, z) in enumerate(config["zones"].items())
}
# add roles to the input if there is only one
if len(config["ffmpeg"]["inputs"]) == 1:
has_audio = "audio" in config["ffmpeg"]["inputs"][0].get("roles", [])
config["ffmpeg"]["inputs"][0]["roles"] = [
"record",
"detect",
]
if has_audio:
config["ffmpeg"]["inputs"][0]["roles"].append("audio")
super().__init__(**config)
@property
def frame_shape(self) -> Tuple[int, int]:
return self.detect.height, self.detect.width
@property
def frame_shape_yuv(self) -> Tuple[int, int]:
return self.detect.height * 3 // 2, self.detect.width
@property
def ffmpeg_cmds(self) -> List[Dict[str, List[str]]]:
return self._ffmpeg_cmds
def create_ffmpeg_cmds(self):
if "_ffmpeg_cmds" in self:
return
ffmpeg_cmds = []
for ffmpeg_input in self.ffmpeg.inputs:
ffmpeg_cmd = self._get_ffmpeg_cmd(ffmpeg_input)
if ffmpeg_cmd is None:
continue
ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd})
self._ffmpeg_cmds = ffmpeg_cmds
def _get_ffmpeg_cmd(self, ffmpeg_input: CameraInput):
ffmpeg_output_args = []
if "detect" in ffmpeg_input.roles:
detect_args = get_ffmpeg_arg_list(self.ffmpeg.output_args.detect)
scale_detect_args = parse_preset_hardware_acceleration_scale(
ffmpeg_input.hwaccel_args or self.ffmpeg.hwaccel_args,
detect_args,
self.detect.fps,
self.detect.width,
self.detect.height,
)
ffmpeg_output_args = scale_detect_args + ffmpeg_output_args + ["pipe:"]
if "record" in ffmpeg_input.roles and self.record.enabled:
record_args = get_ffmpeg_arg_list(
parse_preset_output_record(self.ffmpeg.output_args.record)
or self.ffmpeg.output_args.record
)
ffmpeg_output_args = (
record_args
+ [f"{os.path.join(CACHE_DIR, self.name)}@{CACHE_SEGMENT_FORMAT}.mp4"]
+ ffmpeg_output_args
)
        # if there aren't any outputs enabled for this input
if len(ffmpeg_output_args) == 0:
return None
global_args = get_ffmpeg_arg_list(
ffmpeg_input.global_args or self.ffmpeg.global_args
)
camera_arg = (
self.ffmpeg.hwaccel_args if self.ffmpeg.hwaccel_args != "auto" else None
)
hwaccel_args = get_ffmpeg_arg_list(
parse_preset_hardware_acceleration_decode(
ffmpeg_input.hwaccel_args,
self.detect.fps,
self.detect.width,
self.detect.height,
)
or ffmpeg_input.hwaccel_args
or parse_preset_hardware_acceleration_decode(
camera_arg,
self.detect.fps,
self.detect.width,
self.detect.height,
)
or camera_arg
or []
)
input_args = get_ffmpeg_arg_list(
parse_preset_input(ffmpeg_input.input_args, self.detect.fps)
or ffmpeg_input.input_args
or parse_preset_input(self.ffmpeg.input_args, self.detect.fps)
or self.ffmpeg.input_args
)
cmd = (
["ffmpeg"]
+ global_args
+ hwaccel_args
+ input_args
+ ["-i", escape_special_characters(ffmpeg_input.path)]
+ ffmpeg_output_args
)
return [part for part in cmd if part != ""]
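# The assembled command has the shape:
#   ffmpeg <global_args> <hwaccel_args> <input_args> -i <input path> <output args>
# where the detect role appends a rawvideo yuv420p stream on stdout ("pipe:")
# and the record role writes timestamped mp4 segments into CACHE_DIR.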
class DatabaseConfig(FrigateBaseModel):
path: str = Field(default=DEFAULT_DB_PATH, title="Database path.")
class LogLevelEnum(str, Enum):
debug = "debug"
info = "info"
warning = "warning"
error = "error"
critical = "critical"
class LoggerConfig(FrigateBaseModel):
default: LogLevelEnum = Field(
default=LogLevelEnum.info, title="Default logging level."
)
logs: Dict[str, LogLevelEnum] = Field(
default_factory=dict, title="Log level for specified processes."
)
def verify_config_roles(camera_config: CameraConfig) -> None:
"""Verify that roles are setup in the config correctly."""
assigned_roles = list(
set([r for i in camera_config.ffmpeg.inputs for r in i.roles])
)
if camera_config.record.enabled and "record" not in assigned_roles:
raise ValueError(
f"Camera {camera_config.name} has record enabled, but record is not assigned to an input."
)
if camera_config.audio.enabled and "audio" not in assigned_roles:
raise ValueError(
f"Camera {camera_config.name} has audio events enabled, but audio is not assigned to an input."
)
def verify_valid_live_stream_name(
frigate_config: FrigateConfig, camera_config: CameraConfig
) -> ValueError | None:
"""Verify that a restream exists to use for live view."""
if (
camera_config.live.stream_name
not in frigate_config.go2rtc.dict().get("streams", {}).keys()
):
return ValueError(
f"No restream with name {camera_config.live.stream_name} exists for camera {camera_config.name}."
)
def verify_recording_retention(camera_config: CameraConfig) -> None:
"""Verify that recording retention modes are ranked correctly."""
rank_map = {
RetainModeEnum.all: 0,
RetainModeEnum.motion: 1,
RetainModeEnum.active_objects: 2,
}
if (
camera_config.record.retain.days != 0
and rank_map[camera_config.record.retain.mode]
> rank_map[camera_config.record.events.retain.mode]
):
logger.warning(
f"{camera_config.name}: Recording retention is configured for {camera_config.record.retain.mode} and event retention is configured for {camera_config.record.events.retain.mode}. The more restrictive retention policy will be applied."
)
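# Retention modes are ranked from least to most restrictive
# (all < motion < active_objects); the warning above fires when continuous
# recording retention is stricter than event retention, because the more
# restrictive policy is the one that ends up applied.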
def verify_recording_segments_setup_with_reasonable_time(
camera_config: CameraConfig,
) -> None:
"""Verify that recording segments are setup and segment time is not greater than 60."""
record_args: list[str] = get_ffmpeg_arg_list(
camera_config.ffmpeg.output_args.record
)
if record_args[0].startswith("preset"):
return
seg_arg_index = record_args.index("-segment_time")
if seg_arg_index < 0:
raise ValueError(
f"Camera {camera_config.name} has no segment_time in recording output args, segment args are required for record."
)
if int(record_args[seg_arg_index + 1]) > 60:
raise ValueError(
f"Camera {camera_config.name} has invalid segment_time output arg, segment_time must be 60 or less."
)
def verify_zone_objects_are_tracked(camera_config: CameraConfig) -> None:
"""Verify that user has not entered zone objects that are not in the tracking config."""
for zone_name, zone in camera_config.zones.items():
for obj in zone.objects:
if obj not in camera_config.objects.track:
raise ValueError(
f"Zone {zone_name} is configured to track {obj} but that object type is not added to objects -> track."
)
def verify_autotrack_zones(camera_config: CameraConfig) -> ValueError | None:
"""Verify that required_zones are specified when autotracking is enabled."""
if (
camera_config.onvif.autotracking.enabled
and not camera_config.onvif.autotracking.required_zones
):
raise ValueError(
f"Camera {camera_config.name} has autotracking enabled, required_zones must be set to at least one of the camera's zones."
)
class FrigateConfig(FrigateBaseModel):
mqtt: MqttConfig = Field(title="MQTT Configuration.")
database: DatabaseConfig = Field(
default_factory=DatabaseConfig, title="Database configuration."
)
environment_vars: Dict[str, str] = Field(
default_factory=dict, title="Frigate environment variables."
)
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
telemetry: TelemetryConfig = Field(
default_factory=TelemetryConfig, title="Telemetry configuration."
)
model: ModelConfig = Field(
default_factory=ModelConfig, title="Detection model configuration."
)
detectors: Dict[str, BaseDetectorConfig] = Field(
default=DEFAULT_DETECTORS,
title="Detector hardware configuration.",
)
logger: LoggerConfig = Field(
default_factory=LoggerConfig, title="Logging configuration."
)
record: RecordConfig = Field(
default_factory=RecordConfig, title="Global record configuration."
)
snapshots: SnapshotsConfig = Field(
default_factory=SnapshotsConfig, title="Global snapshots configuration."
)
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
)
go2rtc: RestreamConfig = Field(
default_factory=RestreamConfig, title="Global restream configuration."
)
birdseye: BirdseyeConfig = Field(
default_factory=BirdseyeConfig, title="Birdseye configuration."
)
ffmpeg: FfmpegConfig = Field(
default_factory=FfmpegConfig, title="Global FFmpeg configuration."
)
objects: ObjectConfig = Field(
default_factory=ObjectConfig, title="Global object configuration."
)
audio: AudioConfig = Field(
default_factory=AudioConfig, title="Global Audio events configuration."
)
motion: Optional[MotionConfig] = Field(
title="Global motion detection configuration."
)
detect: DetectConfig = Field(
default_factory=DetectConfig, title="Global object tracking configuration."
)
cameras: Dict[str, CameraConfig] = Field(title="Camera configuration.")
timestamp_style: TimestampStyleConfig = Field(
default_factory=TimestampStyleConfig,
title="Global timestamp style configuration.",
)
def runtime_config(self, plus_api: PlusApi = None) -> FrigateConfig:
"""Merge camera config with globals."""
config = self.copy(deep=True)
# MQTT user/password substitutions
if config.mqtt.user or config.mqtt.password:
config.mqtt.user = config.mqtt.user.format(**FRIGATE_ENV_VARS)
config.mqtt.password = config.mqtt.password.format(**FRIGATE_ENV_VARS)
# set default min_score for object attributes
for attribute in ALL_ATTRIBUTE_LABELS:
if not config.objects.filters.get(attribute):
config.objects.filters[attribute] = FilterConfig(min_score=0.7)
elif config.objects.filters[attribute].min_score == 0.5:
config.objects.filters[attribute].min_score = 0.7
# auto detect hwaccel args
if config.ffmpeg.hwaccel_args == "auto":
config.ffmpeg.hwaccel_args = auto_detect_hwaccel()
# Global config to propagate down to camera level
global_config = config.dict(
include={
"audio": ...,
"birdseye": ...,
"record": ...,
"snapshots": ...,
"live": ...,
"objects": ...,
"motion": ...,
"detect": ...,
"ffmpeg": ...,
"timestamp_style": ...,
},
exclude_unset=True,
)
for name, camera in config.cameras.items():
merged_config = deep_merge(camera.dict(exclude_unset=True), global_config)
camera_config: CameraConfig = CameraConfig.parse_obj(
{"name": name, **merged_config}
)
if camera_config.ffmpeg.hwaccel_args == "auto":
camera_config.ffmpeg.hwaccel_args = config.ffmpeg.hwaccel_args
if (
camera_config.detect.height is None
or camera_config.detect.width is None
):
for input in camera_config.ffmpeg.inputs:
if "detect" in input.roles:
stream_info = {"width": 0, "height": 0}
try:
stream_info = asyncio.run(get_video_properties(input.path))
except Exception:
logger.warning(
    f"Error detecting stream resolution automatically for {input.path}. Applying default values."
)
stream_info = {"width": 0, "height": 0}
camera_config.detect.width = (
stream_info["width"]
if stream_info.get("width")
else DEFAULT_DETECT_DIMENSIONS["width"]
)
camera_config.detect.height = (
stream_info["height"]
if stream_info.get("height")
else DEFAULT_DETECT_DIMENSIONS["height"]
)
# Default min_initialized configuration
min_initialized = camera_config.detect.fps / 2
if camera_config.detect.min_initialized is None:
camera_config.detect.min_initialized = min_initialized
# Default max_disappeared configuration
max_disappeared = camera_config.detect.fps * 5
if camera_config.detect.max_disappeared is None:
camera_config.detect.max_disappeared = max_disappeared
# Default stationary_threshold configuration
stationary_threshold = camera_config.detect.fps * 10
if camera_config.detect.stationary.threshold is None:
camera_config.detect.stationary.threshold = stationary_threshold
# default to the stationary_threshold if not defined
if camera_config.detect.stationary.interval is None:
camera_config.detect.stationary.interval = stationary_threshold
# FFMPEG input substitution
for input in camera_config.ffmpeg.inputs:
input.path = input.path.format(**FRIGATE_ENV_VARS)
# ONVIF substitution
if camera_config.onvif.user or camera_config.onvif.password:
camera_config.onvif.user = camera_config.onvif.user.format(
**FRIGATE_ENV_VARS
)
camera_config.onvif.password = camera_config.onvif.password.format(
**FRIGATE_ENV_VARS
)
# set config pre-value
camera_config.record.enabled_in_config = camera_config.record.enabled
camera_config.audio.enabled_in_config = camera_config.audio.enabled
camera_config.onvif.autotracking.enabled_in_config = (
camera_config.onvif.autotracking.enabled
)
# Add default filters
object_keys = camera_config.objects.track
if camera_config.objects.filters is None:
camera_config.objects.filters = {}
object_keys = object_keys - camera_config.objects.filters.keys()
for key in object_keys:
camera_config.objects.filters[key] = FilterConfig()
# Apply global object masks and convert masks to numpy array
for object, filter in camera_config.objects.filters.items():
if camera_config.objects.mask:
filter_mask = []
if filter.mask is not None:
filter_mask = (
filter.mask
if isinstance(filter.mask, list)
else [filter.mask]
)
object_mask = (
camera_config.objects.mask
if isinstance(camera_config.objects.mask, list)
else [camera_config.objects.mask]
)
filter.mask = filter_mask + object_mask
# Set runtime filter to create masks
camera_config.objects.filters[object] = RuntimeFilterConfig(
frame_shape=camera_config.frame_shape,
**filter.dict(exclude_unset=True),
)
# Convert motion configuration
if camera_config.motion is None:
camera_config.motion = RuntimeMotionConfig(
frame_shape=camera_config.frame_shape
)
else:
camera_config.motion = RuntimeMotionConfig(
frame_shape=camera_config.frame_shape,
raw_mask=camera_config.motion.mask,
**camera_config.motion.dict(exclude_unset=True),
)
# Set live view stream if none is set
if not camera_config.live.stream_name:
camera_config.live.stream_name = name
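# Validate the resulting per-camera configuration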
verify_config_roles(camera_config)
verify_valid_live_stream_name(config, camera_config)
verify_recording_retention(camera_config)
verify_recording_segments_setup_with_reasonable_time(camera_config)
verify_zone_objects_are_tracked(camera_config)
verify_autotrack_zones(camera_config)
# generate the ffmpeg commands
camera_config.create_ffmpeg_cmds()
config.cameras[name] = camera_config
# get list of unique enabled labels for tracking
enabled_labels = set(config.objects.track)
for _, camera in config.cameras.items():
enabled_labels.update(camera.objects.track)
config.model.create_colormap(sorted(enabled_labels))
config.model.check_and_load_plus_model(plus_api)
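# Build runtime detector configs, merging any per-detector model settings with the global model config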
for key, detector in config.detectors.items():
detector_config: DetectorConfig = parse_obj_as(DetectorConfig, detector)
if detector_config.model is None:
detector_config.model = config.model
else:
model = detector_config.model
schema = ModelConfig.schema()["properties"]
if (
model.width != schema["width"]["default"]
or model.height != schema["height"]["default"]
or model.labelmap_path is not None
or model.labelmap != {}
or model.input_tensor != schema["input_tensor"]["default"]
or model.input_pixel_format
!= schema["input_pixel_format"]["default"]
):
logger.warning(
"Customizing more than a detector model path is unsupported."
)
merged_model = deep_merge(
detector_config.model.dict(exclude_unset=True),
config.model.dict(exclude_unset=True),
)
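# Fall back to a bundled default model path for cpu/edgetpu detectors when none is configured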
if "path" not in merged_model:
if detector_config.type == "cpu":
merged_model["path"] = "/cpu_model.tflite"
elif detector_config.type == "edgetpu":
merged_model["path"] = "/edgetpu_model.tflite"
detector_config.model = ModelConfig.parse_obj(merged_model)
detector_config.model.check_and_load_plus_model(
plus_api, detector_config.type
)
detector_config.model.compute_model_hash()
config.detectors[key] = detector_config
return config
@validator("cameras")
def ensure_zones_and_cameras_have_different_names(cls, v: Dict[str, CameraConfig]):
zones = [zone for camera in v.values() for zone in camera.zones.keys()]
for zone in zones:
if zone in v.keys():
raise ValueError("Zones cannot share names with cameras")
return v
@classmethod
def parse_file(cls, config_file):
with open(config_file) as f:
raw_config = f.read()
if config_file.endswith(YAML_EXT):
config = load_config_with_no_duplicates(raw_config)
elif config_file.endswith(".json"):
config = json.loads(raw_config)
return cls.parse_obj(config)
@classmethod
def parse_raw(cls, raw_config):
config = load_config_with_no_duplicates(raw_config)
return cls.parse_obj(config)