From bf23a2148695501078c561db14d2347f3b8e6b66 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 30 Jan 2024 11:11:43 -0700 Subject: [PATCH 01/16] Quick fix docs (#9506) fixes https://github.com/blakeblackshear/frigate/issues/9505 --- docs/docs/configuration/record.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index 1cf1df559..6169cc945 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -36,7 +36,7 @@ record: enabled: True retain: days: 3 - mode: all + mode: motion events: retain: default: 30 From 34fb1c2ef5b51d8434f6d306c23fc8d3586558ee Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Wed, 31 Jan 2024 11:53:59 +0000 Subject: [PATCH 02/16] Increase hash map size (#9515) * bump version * increase map hash size --- Makefile | 2 +- docker/main/rootfs/usr/local/nginx/conf/nginx.conf | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 2cd831670..868b1886f 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.13.0 +VERSION = 0.13.1 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) CURRENT_UID := $(shell id -u) diff --git a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf index 46706a92f..4c8c73389 100644 --- a/docker/main/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -10,6 +10,8 @@ events { } http { + map_hash_bucket_size 256; + include mime.types; default_type application/octet-stream; From 97a619eaf0b0ac156c7257d1f99dd73a677a0551 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 5 Feb 2024 16:50:35 -0700 Subject: [PATCH 03/16] Update Makefile for 0.13.2 (#9687) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 868b1886f..df40d7dfe 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.13.1 +VERSION = 0.13.2 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD) CURRENT_UID := $(shell id -u) From 50563eef8dfda4e0032c94f60cf7edf49e058d8c Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 5 Feb 2024 16:52:06 -0700 Subject: [PATCH 04/16] Nms optimize for stationary cars (#9684) * Use different nms values for different object types * Add tests * Format tests --- frigate/const.py | 4 ++++ frigate/test/test_video.py | 9 +++++++++ frigate/util/object.py | 12 ++++++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/frigate/const.py b/frigate/const.py index ebb680333..0fb547e00 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -26,6 +26,10 @@ LABEL_CONSOLIDATION_MAP = { "face": 0.5, } LABEL_CONSOLIDATION_DEFAULT = 0.9 +LABEL_NMS_MAP = { + "car": 0.6, +} +LABEL_NMS_DEFAULT = 0.4 # Audio Consts diff --git a/frigate/test/test_video.py b/frigate/test/test_video.py index cba63c950..2cc5b482f 100644 --- a/frigate/test/test_video.py +++ b/frigate/test/test_video.py @@ -287,6 +287,15 @@ class TestObjectBoundingBoxes(unittest.TestCase): consolidated_detections = reduce_detections(frame_shape, detections) assert len(consolidated_detections) == len(detections) + def test_vert_stacked_cars_not_reduced(self): + detections = [ + ("car", 0.8, 
(954, 312, 1247, 475), 498512, 1.48, (800, 200, 1400, 600)), + ("car", 0.85, (970, 380, 1273, 610), 698752, 1.56, (800, 200, 1400, 700)), + ] + frame_shape = (720, 1280) + consolidated_detections = reduce_detections(frame_shape, detections) + assert len(consolidated_detections) == len(detections) + class TestRegionGrid(unittest.TestCase): def setUp(self) -> None: diff --git a/frigate/util/object.py b/frigate/util/object.py index 0bf7ea179..43ec017bb 100644 --- a/frigate/util/object.py +++ b/frigate/util/object.py @@ -10,7 +10,12 @@ import numpy as np from peewee import DoesNotExist from frigate.config import DetectConfig, ModelConfig -from frigate.const import LABEL_CONSOLIDATION_DEFAULT, LABEL_CONSOLIDATION_MAP +from frigate.const import ( + LABEL_CONSOLIDATION_DEFAULT, + LABEL_CONSOLIDATION_MAP, + LABEL_NMS_DEFAULT, + LABEL_NMS_MAP, +) from frigate.detectors.detector_config import PixelFormatEnum from frigate.models import Event, Regions, Timeline from frigate.util.image import ( @@ -466,6 +471,7 @@ def reduce_detections( selected_objects = [] for group in detected_object_groups.values(): + label = group[0][0] # o[2] is the box of the object: xmin, ymin, xmax, ymax # apply max/min to ensure values do not exceed the known frame size boxes = [ @@ -483,7 +489,9 @@ def reduce_detections( # due to min score requirement of NMSBoxes confidences = [0.6 if clipped(o, frame_shape) else o[1] for o in group] - idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4) + idxs = cv2.dnn.NMSBoxes( + boxes, confidences, 0.5, LABEL_NMS_MAP.get(label, LABEL_NMS_DEFAULT) + ) # add objects for index in idxs: From a33f2f117efb3650ecf71bef34776e8560584b31 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 5 Feb 2024 17:52:47 -0600 Subject: [PATCH 05/16] more robust onvif checks (#9635) --- frigate/ptz/onvif.py | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index e56afe70c..8aae216f1 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -6,6 +6,7 @@ from enum import Enum import numpy from onvif import ONVIFCamera, ONVIFError +from zeep.exceptions import Fault, TransportError from frigate.config import FrigateConfig, ZoomingModeEnum from frigate.types import PTZMetricsTypes @@ -68,16 +69,19 @@ class OnvifController: media = onvif.create_media_service() try: + # this will fire an exception if camera is not a ptz + capabilities = onvif.get_definition("ptz") + logger.debug(f"Onvif capabilities for {camera_name}: {capabilities}") profile = media.GetProfiles()[0] - except ONVIFError as e: + except (ONVIFError, Fault, TransportError) as e: logger.error(f"Unable to connect to camera: {camera_name}: {e}") return False ptz = onvif.create_ptz_service() - request = ptz.create_type("GetConfigurations") - configs = ptz.GetConfigurations(request)[0] - logger.debug(f"Onvif configs for {camera_name}: {configs}") + # get the PTZ config for the first onvif profile + configs = profile.PTZConfiguration + logger.debug(f"Onvif ptz config for media profile in {camera_name}: {configs}") request = ptz.create_type("GetConfigurationOptions") request.ConfigurationToken = profile.PTZConfiguration.token @@ -187,19 +191,18 @@ class OnvifController: ] = preset["token"] # get list of supported features - ptz_config = ptz.GetConfigurationOptions(request) supported_features = [] - if ptz_config.Spaces and ptz_config.Spaces.ContinuousPanTiltVelocitySpace: + if 
configs.DefaultContinuousPanTiltVelocitySpace: supported_features.append("pt") - if ptz_config.Spaces and ptz_config.Spaces.ContinuousZoomVelocitySpace: + if configs.DefaultContinuousZoomVelocitySpace: supported_features.append("zoom") - if ptz_config.Spaces and ptz_config.Spaces.RelativePanTiltTranslationSpace: + if configs.DefaultRelativePanTiltTranslationSpace: supported_features.append("pt-r") - if ptz_config.Spaces and ptz_config.Spaces.RelativeZoomTranslationSpace: + if configs.DefaultRelativeZoomTranslationSpace: supported_features.append("zoom-r") try: # get camera's zoom limits from onvif config @@ -218,7 +221,7 @@ class OnvifController: f"Disabling autotracking zooming for {camera_name}: Relative zoom not supported" ) - if ptz_config.Spaces and ptz_config.Spaces.AbsoluteZoomPositionSpace: + if configs.DefaultAbsoluteZoomPositionSpace: supported_features.append("zoom-a") try: # get camera's zoom limits from onvif config @@ -236,7 +239,10 @@ class OnvifController: ) # set relative pan/tilt space for autotracker - if fov_space_id is not None: + if ( + fov_space_id is not None + and configs.DefaultRelativePanTiltTranslationSpace is not None + ): supported_features.append("pt-r-fov") self.cams[camera_name][ "relative_fov_range" From 00804a0f819faf67ea84fe88a045497f43445e4c Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 5 Feb 2024 16:53:16 -0700 Subject: [PATCH 06/16] Don't fail if message is received before websocket start (#9634) --- frigate/comms/ws.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/frigate/comms/ws.py b/frigate/comms/ws.py index 98f24cf28..fccd8db5c 100644 --- a/frigate/comms/ws.py +++ b/frigate/comms/ws.py @@ -38,6 +38,7 @@ class WebSocketClient(Communicator): # type: ignore[misc] def __init__(self, config: FrigateConfig) -> None: self.config = config + self.websocket_server = None def subscribe(self, receiver: Callable) -> None: self._dispatcher = receiver @@ -98,6 +99,10 @@ class WebSocketClient(Communicator): # type: ignore[misc] logger.debug(f"payload for {topic} wasn't text. Skipping...") return + if self.websocket_server is None: + logger.debug("Skipping message, websocket not connected yet") + return + try: self.websocket_server.manager.broadcast(ws_message) except ConnectionResetError: From 91cdf64602f33951360becbbc5abb3e8e1b5c842 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Sat, 10 Feb 2024 10:55:13 -0700 Subject: [PATCH 07/16] Birdseye enhancements (#9778) * Center single camera view * Implement scaling factor * Add config for maximum number of cameras * Add config for inactivity threshold * update docs --- docs/docs/configuration/birdseye.md | 51 ++++++++++++++++- docs/docs/configuration/reference.md | 8 +++ frigate/config.py | 13 +++++ frigate/output.py | 84 +++++++++++++++++++++++----- 4 files changed, 141 insertions(+), 15 deletions(-) diff --git a/docs/docs/configuration/birdseye.md b/docs/docs/configuration/birdseye.md index 6471bf4e3..8edf50583 100644 --- a/docs/docs/configuration/birdseye.md +++ b/docs/docs/configuration/birdseye.md @@ -1,6 +1,8 @@ # Birdseye -Birdseye allows a heads-up view of your cameras to see what is going on around your property / space without having to watch all cameras that may have nothing happening. Birdseye allows specific modes that intelligently show and disappear based on what you care about. +Birdseye allows a heads-up view of your cameras to see what is going on around your property / space without having to watch all cameras that may have nothing happening. 
Birdseye allows specific modes that intelligently show and disappear based on what you care about. + +## Birdseye Behavior ### Birdseye Modes @@ -34,6 +36,29 @@ cameras: enabled: False ``` +### Birdseye Inactivity + +By default birdseye shows all cameras that have had the configured activity in the last 30 seconds, this can be configured: + +```yaml +birdseye: + enabled: True + inactivity_threshold: 15 +``` + +## Birdseye Layout + +### Birdseye Dimensions + +The resolution and aspect ratio of birdseye can be configured. Resolution will increase the quality but does not affect the layout. Changing the aspect ratio of birdseye does affect how cameras are laid out. + +```yaml +birdseye: + enabled: True + width: 1280 + height: 720 +``` + ### Sorting cameras in the Birdseye view It is possible to override the order of cameras that are being shown in the Birdseye view. @@ -55,3 +80,27 @@ cameras: ``` *Note*: Cameras are sorted by default using their name to ensure a constant view inside Birdseye. + +### Birdseye Cameras + +It is possible to limit the number of cameras shown on birdseye at one time. When this is enabled, birdseye will show the cameras with most recent activity. There is a cooldown to ensure that cameras do not switch too frequently. + +For example, this can be configured to only show the most recently active camera. + +```yaml +birdseye: + enabled: True + layout: + max_cameras: 1 +``` + +### Birdseye Scaling + +By default birdseye tries to fit 2 cameras in each row and then double in size until a suitable layout is found. The scaling can be configured with a value between 1.0 and 5.0 depending on use case. + +```yaml +birdseye: + enabled: True + layout: + scaling_factor: 3.0 +``` diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index d500060a7..816bfd456 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -145,6 +145,14 @@ birdseye: # motion - cameras are included if motion was detected in the last 30 seconds # continuous - all cameras are included always mode: objects + # Optional: Threshold for camera activity to stop showing camera (default: shown below) + inactivity_threshold: 30 + # Optional: Configure the birdseye layout + layout: + # Optional: Scaling factor for the layout calculator (default: shown below) + scaling_factor: 2.0 + # Optional: Maximum number of cameras to show at one time, showing the most recent (default: show all cameras) + max_cameras: 1 # Optional: ffmpeg configuration # More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets diff --git a/frigate/config.py b/frigate/config.py index 6760ea5e6..2e8b25700 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -528,6 +528,13 @@ class BirdseyeModeEnum(str, Enum): return list(cls)[index] +class BirdseyeLayoutConfig(FrigateBaseModel): + scaling_factor: float = Field( + default=2.0, title="Birdseye Scaling Factor", ge=1.0, le=5.0 + ) + max_cameras: Optional[int] = Field(default=None, title="Max cameras") + + class BirdseyeConfig(FrigateBaseModel): enabled: bool = Field(default=True, title="Enable birdseye view.") restream: bool = Field(default=False, title="Restream birdseye via RTSP.") @@ -539,9 +546,15 @@ class BirdseyeConfig(FrigateBaseModel): ge=1, le=31, ) + inactivity_threshold: int = Field( + default=30, title="Birdseye Inactivity Threshold", gt=0 + ) mode: BirdseyeModeEnum = Field( default=BirdseyeModeEnum.objects, title="Tracking mode." 
) + layout: BirdseyeLayoutConfig = Field( + default_factory=BirdseyeLayoutConfig, title="Birdseye Layout Config" + ) # uses BaseModel because some global attributes are not available at the camera level diff --git a/frigate/output.py b/frigate/output.py index a70e5a804..465c07786 100644 --- a/frigate/output.py +++ b/frigate/output.py @@ -41,11 +41,13 @@ def get_standard_aspect_ratio(width: int, height: int) -> tuple[int, int]: (16, 9), (9, 16), (20, 10), + (16, 3), # max wide camera (16, 6), # reolink duo 2 (32, 9), # panoramic cameras (12, 9), (9, 12), (22, 15), # Amcrest, NTSC DVT + (1, 1), # fisheye ] # aspects are scaled to have common relative size known_aspects_ratios = list( map(lambda aspect: aspect[0] / aspect[1], known_aspects) @@ -74,7 +76,13 @@ def get_canvas_shape(width: int, height: int) -> tuple[int, int]: class Canvas: - def __init__(self, canvas_width: int, canvas_height: int) -> None: + def __init__( + self, + canvas_width: int, + canvas_height: int, + scaling_factor: int, + ) -> None: + self.scaling_factor = scaling_factor gcd = math.gcd(canvas_width, canvas_height) self.aspect = get_standard_aspect_ratio( (canvas_width / gcd), (canvas_height / gcd) @@ -88,7 +96,7 @@ class Canvas: return (self.aspect[0] * coefficient, self.aspect[1] * coefficient) def get_coefficient(self, camera_count: int) -> int: - return self.coefficient_cache.get(camera_count, 2) + return self.coefficient_cache.get(camera_count, self.scaling_factor) def set_coefficient(self, camera_count: int, coefficient: int) -> None: self.coefficient_cache[camera_count] = coefficient @@ -276,9 +284,13 @@ class BirdsEyeFrameManager: self.frame_shape = (height, width) self.yuv_shape = (height * 3 // 2, width) self.frame = np.ndarray(self.yuv_shape, dtype=np.uint8) - self.canvas = Canvas(width, height) + self.canvas = Canvas(width, height, config.birdseye.layout.scaling_factor) self.stop_event = stop_event self.camera_metrics = camera_metrics + self.inactivity_threshold = config.birdseye.inactivity_threshold + + if config.birdseye.layout.max_cameras: + self.last_refresh_time = 0 # initialize the frame as black and with the Frigate logo self.blank_frame = np.zeros(self.yuv_shape, np.uint8) @@ -384,16 +396,39 @@ class BirdsEyeFrameManager: def update_frame(self): """Update to a new frame for birdseye.""" - # determine how many cameras are tracking objects within the last 30 seconds - active_cameras = set( + # determine how many cameras are tracking objects within the last inactivity_threshold seconds + active_cameras: set[str] = set( [ cam for cam, cam_data in self.cameras.items() if cam_data["last_active_frame"] > 0 - and cam_data["current_frame"] - cam_data["last_active_frame"] < 30 + and cam_data["current_frame"] - cam_data["last_active_frame"] + < self.inactivity_threshold ] ) + max_cameras = self.config.birdseye.layout.max_cameras + max_camera_refresh = False + if max_cameras: + now = datetime.datetime.now().timestamp() + + if len(active_cameras) == max_cameras and now - self.last_refresh_time < 10: + # don't refresh cameras too often + active_cameras = self.active_cameras + else: + limited_active_cameras = sorted( + active_cameras, + key=lambda active_camera: ( + self.cameras[active_camera]["current_frame"] + - self.cameras[active_camera]["last_active_frame"] + ), + ) + active_cameras = limited_active_cameras[ + : self.config.birdseye.layout.max_cameras + ] + max_camera_refresh = True + self.last_refresh_time = now + # if there are no active cameras if len(active_cameras) == 0: # if the layout is already cleared 
@@ -407,7 +442,18 @@ class BirdsEyeFrameManager: return True # check if we need to reset the layout because there is a different number of cameras - reset_layout = len(self.active_cameras) - len(active_cameras) != 0 + if len(self.active_cameras) - len(active_cameras) == 0: + if ( + len(self.active_cameras) == 1 + and self.active_cameras[0] == active_cameras[0] + ): + reset_layout = True + elif max_camera_refresh: + reset_layout = True + else: + reset_layout = False + else: + reset_layout = True # reset the layout if it needs to be different if reset_layout: @@ -431,17 +477,23 @@ class BirdsEyeFrameManager: camera = active_cameras_to_add[0] camera_dims = self.cameras[camera]["dimensions"].copy() scaled_width = int(self.canvas.height * camera_dims[0] / camera_dims[1]) - coefficient = ( - 1 - if scaled_width <= self.canvas.width - else self.canvas.width / scaled_width - ) + + # center camera view in canvas and ensure that it fits + if scaled_width < self.canvas.width: + coefficient = 1 + x_offset = int((self.canvas.width - scaled_width) / 2) + else: + coefficient = self.canvas.width / scaled_width + x_offset = int( + (self.canvas.width - (scaled_width * coefficient)) / 2 + ) + self.camera_layout = [ [ ( camera, ( - 0, + x_offset, 0, int(scaled_width * coefficient), int(self.canvas.height * coefficient), @@ -485,7 +537,11 @@ class BirdsEyeFrameManager: return True - def calculate_layout(self, cameras_to_add: list[str], coefficient) -> tuple[any]: + def calculate_layout( + self, + cameras_to_add: list[str], + coefficient: float, + ) -> tuple[any]: """Calculate the optimal layout for 2+ cameras.""" def map_layout(camera_layout: list[list[any]], row_height: int): From 86341c31722202f79c656b0b62951be5f89011f4 Mon Sep 17 00:00:00 2001 From: Maximo Guk <62088388+Maximo-Guk@users.noreply.github.com> Date: Sat, 10 Feb 2024 15:35:17 -0400 Subject: [PATCH 08/16] Add error handling for unsupported label uploading to frigate+ (#9775) --- frigate/http.py | 14 ++++++++ frigate/plus.py | 22 ++++++++++++ web/src/routes/Events.jsx | 72 +++++++++++++++++++++++++++------------ 3 files changed, 87 insertions(+), 21 deletions(-) diff --git a/frigate/http.py b/frigate/http.py index d9bd5c29f..d634e9b8e 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -275,6 +275,13 @@ def send_to_plus(id): box, event.label, ) + except ValueError: + message = "Error uploading annotation, unsupported label provided." + logger.error(message) + return make_response( + jsonify({"success": False, "message": message}), + 400, + ) except Exception as ex: logger.exception(ex) return make_response( @@ -346,6 +353,13 @@ def false_positive(id): event.model_type, event.detector_type, ) + except ValueError: + message = "Error uploading false positive, unsupported label provided." 
+ logger.error(message) + return make_response( + jsonify({"success": False, "message": message}), + 400, + ) except Exception as ex: logger.exception(ex) return make_response( diff --git a/frigate/plus.py b/frigate/plus.py index 88e025596..2e6144ce3 100644 --- a/frigate/plus.py +++ b/frigate/plus.py @@ -171,6 +171,17 @@ class PlusApi: ) if not r.ok: + try: + error_response = r.json() + errors = error_response.get("errors", []) + for error in errors: + if ( + error.get("param") == "label" + and error.get("type") == "invalid_enum_value" + ): + raise ValueError(f"Unsupported label value provided: {label}") + except ValueError as e: + raise e raise Exception(r.text) def add_annotation( @@ -193,6 +204,17 @@ class PlusApi: ) if not r.ok: + try: + error_response = r.json() + errors = error_response.get("errors", []) + for error in errors: + if ( + error.get("param") == "label" + and error.get("type") == "invalid_enum_value" + ): + raise ValueError(f"Unsupported label value provided: {label}") + except ValueError as e: + raise e raise Exception(r.text) def get_model_download_url( diff --git a/web/src/routes/Events.jsx b/web/src/routes/Events.jsx index 2b2b546ef..0777829a8 100644 --- a/web/src/routes/Events.jsx +++ b/web/src/routes/Events.jsx @@ -7,7 +7,7 @@ import Link from '../components/Link'; import { useApiHost } from '../api'; import useSWR from 'swr'; import useSWRInfinite from 'swr/infinite'; -import axios from 'axios'; +import axios, { AxiosError } from 'axios'; import { useState, useRef, useCallback, useMemo } from 'preact/hooks'; import VideoPlayer from '../components/VideoPlayer'; import { StarRecording } from '../icons/StarRecording'; @@ -79,6 +79,7 @@ export default function Events({ path, ...props }) { validBox: null, }); const [uploading, setUploading] = useState([]); + const [uploadErrors, setUploadErrors] = useState([]); const [viewEvent, setViewEvent] = useState(props.event); const [eventOverlay, setEventOverlay] = useState(); const [eventDetailType, setEventDetailType] = useState('clip'); @@ -328,27 +329,40 @@ export default function Events({ path, ...props }) { setUploading((prev) => [...prev, id]); - const response = false_positive - ? await axios.put(`events/${id}/false_positive`) - : await axios.post(`events/${id}/plus`, validBox ? { include_annotation: 1 } : {}); + try { + const response = false_positive + ? await axios.put(`events/${id}/false_positive`) + : await axios.post(`events/${id}/plus`, validBox ? { include_annotation: 1 } : {}); - if (response.status === 200) { - mutate( - (pages) => - pages.map((page) => - page.map((event) => { - if (event.id === id) { - return { ...event, plus_id: response.data.plus_id }; - } - return event; - }) - ), - false - ); + if (response.status === 200) { + mutate( + (pages) => + pages.map((page) => + page.map((event) => { + if (event.id === id) { + return { ...event, plus_id: response.data.plus_id }; + } + return event; + }) + ), + false + ); + } + } catch (e) { + if ( + e instanceof AxiosError && + (e.response.data.message === 'Error uploading annotation, unsupported label provided.' 
|| + e.response.data.message === 'Error uploading false positive, unsupported label provided.') + ) { + setUploadErrors((prev) => [...prev, { id, isUnsupported: true }]); + return; + } + setUploadErrors((prev) => [...prev, { id }]); + throw e; + } finally { + setUploading((prev) => prev.filter((i) => i !== id)); } - setUploading((prev) => prev.filter((i) => i !== id)); - if (state.showDownloadMenu && downloadEvent.id === id) { setState({ ...state, showDownloadMenu: false }); } @@ -681,6 +695,7 @@ export default function Events({ path, ...props }) { viewEvent={viewEvent} setViewEvent={setViewEvent} uploading={uploading} + uploadErrors={uploadErrors} handleEventDetailTabChange={handleEventDetailTabChange} onEventFrameSelected={onEventFrameSelected} onDelete={onDelete} @@ -721,6 +736,7 @@ export default function Events({ path, ...props }) { lastEvent={lastEvent} lastEventRef={lastEventRef} uploading={uploading} + uploadErrors={uploadErrors} handleEventDetailTabChange={handleEventDetailTabChange} onEventFrameSelected={onEventFrameSelected} onDelete={onDelete} @@ -760,6 +776,7 @@ function Event({ lastEvent, lastEventRef, uploading, + uploadErrors, handleEventDetailTabChange, onEventFrameSelected, onDelete, @@ -769,6 +786,19 @@ function Event({ onSave, showSubmitToPlus, }) { + const getUploadButtonState = (eventId) => { + const isUploading = uploading.includes(eventId); + const hasUploadError = uploadErrors.find((event) => event.id === eventId); + if (hasUploadError) { + if (hasUploadError.isUnsupported) { + return { isDisabled: true, label: 'Unsupported label' }; + } + return { isDisabled: isUploading, label: 'Upload error' }; + } + + const label = isUploading ? 'Uploading...' : 'Send to Frigate+'; + return { isDisabled: isUploading, label }; + }; const apiHost = useApiHost(); return ( @@ -849,10 +879,10 @@ function Event({ ) : ( )} From dc2e8f7f70248199e350e952f48ed2413177af43 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Sat, 10 Feb 2024 13:41:24 -0600 Subject: [PATCH 09/16] Onvif: skip non-video profiles in setup (#9708) * use first h264 onvif profile * error if profile remains unset * move create_ptz_service call * add profile logger debug --- frigate/ptz/onvif.py | 46 ++++++++++++++++++++++++++++++++++++++------ 1 file changed, 40 insertions(+), 6 deletions(-) diff --git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index 8aae216f1..a7a2cd68b 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -67,22 +67,56 @@ class OnvifController: # create init services media = onvif.create_media_service() + logger.debug(f"Onvif media xaddr for {camera_name}: {media.xaddr}") try: # this will fire an exception if camera is not a ptz capabilities = onvif.get_definition("ptz") logger.debug(f"Onvif capabilities for {camera_name}: {capabilities}") - profile = media.GetProfiles()[0] except (ONVIFError, Fault, TransportError) as e: - logger.error(f"Unable to connect to camera: {camera_name}: {e}") + logger.error( + f"Unable to get Onvif capabilities for camera: {camera_name}: {e}" + ) + return False + + try: + profiles = media.GetProfiles() + except (ONVIFError, Fault, TransportError) as e: + logger.error( + f"Unable to get Onvif media profiles for camera: {camera_name}: {e}" + ) + return False + + profile = None + for key, onvif_profile in enumerate(profiles): + if ( + onvif_profile.VideoEncoderConfiguration + and onvif_profile.VideoEncoderConfiguration.Encoding == "H264" + ): + profile = onvif_profile + logger.debug(f"Selected Onvif profile for 
{camera_name}: {profile}") + break + + if profile is None: + logger.error( + f"No appropriate Onvif profiles found for camera: {camera_name}." + ) + return False + + # get the PTZ config for the profile + try: + configs = profile.PTZConfiguration + logger.debug( + f"Onvif ptz config for media profile in {camera_name}: {configs}" + ) + except Exception as e: + logger.error( + f"Invalid Onvif PTZ configuration for camera: {camera_name}: {e}" + ) return False ptz = onvif.create_ptz_service() - # get the PTZ config for the first onvif profile - configs = profile.PTZConfiguration - logger.debug(f"Onvif ptz config for media profile in {camera_name}: {configs}") - request = ptz.create_type("GetConfigurationOptions") request.ConfigurationToken = profile.PTZConfiguration.token ptz_config = ptz.GetConfigurationOptions(request) From 54bbad12f81fda142ad24dd262ed405b4cafde53 Mon Sep 17 00:00:00 2001 From: Sven-Hendrik Haase Date: Sat, 10 Feb 2024 20:42:32 +0100 Subject: [PATCH 10/16] Mention that AMD CPUs work just fine with OpenVINO (#9740) * Mention that AMD CPUs work just fine with OpenVINO * Doc consistency fixes --- docs/docs/configuration/object_detectors.md | 20 +++++++++++++------- docs/docs/frigate/hardware.md | 5 +++-- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 89734efb9..ecff747b5 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -11,6 +11,12 @@ Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvi The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`. +:::tip + +If you do not have GPU or Edge TPU hardware, using the [OpenVINO Detector](#openvino-detector) is often more efficient than using the CPU detector. + +::: + The number of threads used by the interpreter can be specified using the `"num_threads"` attribute, and defaults to `3.` A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. @@ -29,17 +35,17 @@ detectors: When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance. -## Edge-TPU Detector +## Edge TPU Detector -The EdgeTPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an EdgeTPU detector, set the `"type"` attribute to `"edgetpu"`. +The Edge TPU detector type runs a TensorFlow Lite model utilizing the Google Coral delegate for hardware acceleration. To configure an Edge TPU detector, set the `"type"` attribute to `"edgetpu"`. -The EdgeTPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). If not set, the delegate will use the first device it finds. +The Edge TPU device can be specified using the `"device"` attribute according to the [Documentation for the TensorFlow Lite Python API](https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api). 
If not set, the delegate will use the first device it finds. A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. :::tip -See [common Edge-TPU troubleshooting steps](/troubleshooting/edgetpu) if the EdgeTPu is not detected. +See [common Edge TPU troubleshooting steps](/troubleshooting/edgetpu) if the Edge TPU is not detected. ::: @@ -101,11 +107,11 @@ detectors: ## OpenVINO Detector -The OpenVINO detector type runs an OpenVINO IR model on Intel CPU, GPU and VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. +The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. The OpenVINO device to be used is specified using the `"device"` attribute according to the naming conventions in the [Device Documentation](https://docs.openvino.ai/latest/openvino_docs_OV_UG_Working_with_devices.html). Other supported devices could be `AUTO`, `CPU`, `GPU`, `MYRIAD`, etc. If not specified, the default OpenVINO device will be selected by the `AUTO` plugin. -OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html) +OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will also run on AMD CPUs despite having no official support for it. A supported Intel platform is required to use the `GPU` device with OpenVINO. The `MYRIAD` device may be run on any platform, including Arm devices. For detailed system requirements, see [OpenVINO System Requirements](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/system-requirements.html) An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model. @@ -176,7 +182,7 @@ volumes: ## NVidia TensorRT Detector -NVidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection. +Nvidia GPUs may be used for object detection using the TensorRT libraries. Due to the size of the additional libraries, this detector is only provided in images with the `-tensorrt` tag suffix, e.g. `ghcr.io/blakeblackshear/frigate:stable-tensorrt`. This detector is designed to work with Yolo models for object detection. 
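A minimal detector entry for this image might look like the sketch below; the model path, dimensions, and pixel format here are assumptions based on a generated `yolov7-320` engine, so adjust them to the model you actually build:

```yaml
detectors:
  tensorrt:
    type: tensorrt
    device: 0 # index of the GPU to use

model:
  # assumed location and shape of a generated TensorRT engine
  path: /config/model_cache/tensorrt/yolov7-320.trt
  input_tensor: nchw
  input_pixel_format: rgb
  width: 320
  height: 320
```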
### Minimum Hardware Support diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index e0285f408..e1bb2a63e 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -40,14 +40,15 @@ The USB version is compatible with the widest variety of hardware and does not r The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai -A single Coral can handle many cameras and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed. +A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed. -### OpenVino +### OpenVINO The OpenVINO detector type is able to run on: - 6th Gen Intel Platforms and newer that have an iGPU - x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2) +- Most modern AMD CPUs (though this is officially not supported by Intel) More information is available [in the detector docs](/configuration/object_detectors#openvino-detector) From b5aaa6759ab142a294c76334335b1b142fc44afe Mon Sep 17 00:00:00 2001 From: printplaatreparatie <60609704+printplaatreparatie@users.noreply.github.com> Date: Sat, 10 Feb 2024 20:43:24 +0100 Subject: [PATCH 11/16] Revise VSCode hostname info in docs (#9709) * Revise VSCode hostname info in docs * Fix misplaced backtick Co-authored-by: Nicolas Mowen --------- Co-authored-by: Nicolas Mowen --- docs/docs/configuration/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 53993af67..dda399444 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -25,7 +25,7 @@ cameras: ## VSCode Configuration Schema -VSCode (and VSCode addon) supports the JSON schemas which will automatically validate the config. This can be added by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file. `frigate_host` being the IP address of Frigate or `ccab4aaf-frigate` if running in the addon. +VSCode supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VSCode and Frigate as an add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose port `5000` for the Web Interface when accessing the config from VSCode on another machine. 
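For example, the first lines of a config file using this feature might look like the following sketch (host and camera names are placeholders):

```yaml
# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json
mqtt:
  host: mqtt.server.com

cameras:
  back:
    ...
```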
## Environment Variable Substitution From 89f4db617ab19e60e971fee71c7518498268d17c Mon Sep 17 00:00:00 2001 From: Matt Brown Date: Sat, 10 Feb 2024 11:44:52 -0800 Subject: [PATCH 12/16] Docs: Fix and clarify which /dev/video devices to use with Raspberry Pi (#9509) * Docs: Fix and clarify which /dev/video devices to use with Raspberry Pi * Update docs/docs/configuration/hardware_acceleration.md Co-authored-by: Nicolas Mowen * Update docs/docs/configuration/hardware_acceleration.md Co-authored-by: Nicolas Mowen --------- Co-authored-by: Nicolas Mowen --- .../configuration/hardware_acceleration.md | 39 +++++++++++++++---- docs/docs/frigate/installation.md | 7 ++-- 2 files changed, 35 insertions(+), 11 deletions(-) diff --git a/docs/docs/configuration/hardware_acceleration.md b/docs/docs/configuration/hardware_acceleration.md index ad9d27211..8bd303fdf 100644 --- a/docs/docs/configuration/hardware_acceleration.md +++ b/docs/docs/configuration/hardware_acceleration.md @@ -11,8 +11,8 @@ It is recommended to update your configuration to enable hardware accelerated de ## Raspberry Pi 3/4 -Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Performance Options > GPU Memory). -**NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration. +Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory). +If you are using the HA addon, you may need to use the full access variant and turn off `Protection mode` for hardware acceleration. ```yaml # if you want to decode a h264 stream @@ -26,16 +26,39 @@ ffmpeg: :::note -If running Frigate in docker, you either need to run in priviliged mode or be sure to map the /dev/video1x devices to Frigate +If running Frigate in Docker, you either need to run in privileged mode or +map the `/dev/video*` devices to Frigate. With Docker compose add: ```yaml -docker run -d \ ---name frigate \ -... ---device /dev/video10 \ -ghcr.io/blakeblackshear/frigate:stable +services: + frigate: + ... + devices: + - /dev/video11:/dev/video11 ``` +Or with `docker run`: + +```bash +docker run -d \ + --name frigate \ + ... + --device /dev/video11 \ + ghcr.io/blakeblackshear/frigate:stable +``` + +`/dev/video11` is the correct device (on Raspberry Pi 4B). You can check +by running the following and looking for `H264`: + +```bash +for d in /dev/video*; do + echo -e "---\n$d" + v4l2-ctl --list-formats-ext -d $d +done +``` + +Or map in all the `/dev/video*` devices. 
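For example, with Docker Compose that could look like the following sketch (the exact device list is an assumption for a Raspberry Pi 4B; verify what is present with `v4l2-ctl --list-devices`):

```yaml
services:
  frigate:
    ...
    devices:
      - /dev/video10:/dev/video10
      - /dev/video11:/dev/video11
      - /dev/video12:/dev/video12
```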
+ ::: ## Intel-based CPUs diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index fcdaa68ba..99565acf8 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -98,9 +98,10 @@ services: image: ghcr.io/blakeblackshear/frigate:stable shm_size: "64mb" # update for your cameras based on calculation above devices: - - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions - - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux - - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware + - /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions + - /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux + - /dev/video11:/dev/video11 # For Raspberry Pi 4B + - /dev/dri/renderD128:/dev/dri/renderD128 # For intel hwaccel, needs to be updated for your hardware volumes: - /etc/localtime:/etc/localtime:ro - /path/to/your/config:/config From f4a44fd93c6ff97372a13a77366a95a1d55722f0 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Sun, 11 Feb 2024 16:40:35 +0000 Subject: [PATCH 13/16] remove reference to the term credit (#9799) --- docs/docs/plus/faq.md | 6 +----- docs/docs/plus/first_model.md | 2 +- docs/docs/plus/index.md | 2 +- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/docs/docs/plus/faq.md b/docs/docs/plus/faq.md index 1723e0583..fb0cd2512 100644 --- a/docs/docs/plus/faq.md +++ b/docs/docs/plus/faq.md @@ -7,10 +7,6 @@ title: FAQ Frigate+ models are built by fine tuning a base model with the images you have annotated and verified. The base model is trained from scratch from a sampling of images across all Frigate+ user submissions and takes weeks of expensive GPU resources to train. If the models were built using your image uploads alone, you would need to provide tens of thousands of examples and it would take more than a week (and considerable cost) to train. Diversity helps the model generalize. -### What is a training credit and how do I use them? - -Essentially, `1 training credit = 1 trained model`. When you have uploaded, annotated, and verified additional images and you are ready to train your model, you will submit a model request which will use one credit. The model that is trained will utilize all of the verified images in your account. When new base models are available, it will require the use of a training credit to generate a new user model on the new base model. - ### Are my video feeds sent to the cloud for analysis when using Frigate+ models? No. Frigate+ models are a drop in replacement for the default model. All processing is performed locally as always. The only images sent to Frigate+ are the ones you specifically submit via the `Send to Frigate+` button or upload directly. @@ -25,4 +21,4 @@ Yes. Models and metadata are stored in the `model_cache` directory within the co ### Can I keep using my Frigate+ models even if I do not renew my subscription? -Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained using the training credits that you purchased are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models. +Yes. 
Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained with your subscription are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models.
diff --git a/docs/docs/plus/first_model.md b/docs/docs/plus/first_model.md
index 8dcc52015..2f76dd08a 100644
--- a/docs/docs/plus/first_model.md
+++ b/docs/docs/plus/first_model.md
@@ -13,7 +13,7 @@ For more detailed recommendations, you can refer to the docs on [improving your

## Step 2: Submit a model request

Once you have an initial set of verified images, you can request a model on the Models page. Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.

![Plus Models Page](/img/plus/plus-models.jpg)

## Step 3: Set your model id in the config

diff --git a/docs/docs/plus/index.md b/docs/docs/plus/index.md
index c04e594e8..de84c6a45 100644
--- a/docs/docs/plus/index.md
+++ b/docs/docs/plus/index.md
@@ -11,7 +11,7 @@ The baseline model isn't directly available after subscribing. This may change i

:::

With a subscription, 12 model trainings per year are included. If you cancel your subscription, you will retain access to any trained models. An active subscription is required to submit model requests or purchase additional trainings.

Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).

From 77e77775b2727db7bb8dc6c615e11dc6ad701097 Mon Sep 17 00:00:00 2001
From: Michael Scheiffler
Date: Mon, 12 Feb 2024 13:23:48 +0100
Subject: [PATCH 14/16] Added documentation for time-lapse export (master)
 (#9804)

* Added documentation for time-lapse export (master)

* Changed according to review comments

* Update docs/docs/configuration/record.md

Co-authored-by: Nicolas Mowen

* Update docs/docs/configuration/record.md

Co-authored-by: Nicolas Mowen

---------

Co-authored-by: Nicolas Mowen
---
 docs/docs/configuration/record.md | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md
index 6169cc945..32b8613f3 100644
--- a/docs/docs/configuration/record.md
+++ b/docs/docs/configuration/record.md
@@ -161,6 +161,25 @@ Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only recor

The export page in the Frigate WebUI allows exporting real-time clips, as well as time-lapses, for a designated start and stop time. These exports can take a while, so it is important to leave the file alone until it is no longer in progress.
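Exports can also be triggered programmatically; the sketch below assumes the shape of the HTTP export endpoint and uses example Unix timestamps, so verify both against your running version:

```bash
# Hypothetical example: export a real-time clip from the `front` camera
curl -X POST "http://frigate_host:5000/api/export/front/start/1707500000/end/1707503600" \
  -H "Content-Type: application/json" \
  -d '{"playback": "realtime"}'
```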
### Time-lapse export

When exporting a time-lapse, the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS.
To configure the speed-up factor, the frame rate, and further custom settings, the configuration parameter `timelapse_args` can be used. The configuration example below would change the time-lapse speed to 60x (for fitting 1 hour of recording into 1 minute of time-lapse) with 25 FPS:

```yaml
record:
  enabled: True
  export:
    timelapse_args: "-vf setpts=PTS/60 -r 25"
```

:::tip

When using `hwaccel_args` globally, hardware encoding is used for time-lapse generation. The encoder determines its own behavior, so the resulting file size may be undesirably large.
To reduce the output file size, the ffmpeg parameter `-qp n` can be utilized (where `n` stands for the value of the quantization parameter). The value can be adjusted to get an acceptable tradeoff between quality and file size for the given scenario.

:::

## Syncing Recordings With Disk

In some cases the recording files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled, which will tell Frigate to check the file system and delete any database entries for files which don't exist.

From e93e5ed931a271997440345fcb949356478120a2 Mon Sep 17 00:00:00 2001
From: lawm
Date: Mon, 12 Feb 2024 15:38:55 -0800
Subject: [PATCH 15/16] docs: fix links to full configuration reference (#9808)

---
 docs/docs/configuration/object_detectors.md | 2 +-
 docs/docs/configuration/objects.mdx | 2 +-
 docs/docs/guides/getting_started.md | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index ecff747b5..f5e068522 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -131,7 +131,7 @@ model:
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt
 ```

This detector also supports some YOLO variants: YOLOX, YOLOv5, and YOLOv8 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo.
You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate: ```yaml detectors: diff --git a/docs/docs/configuration/objects.mdx b/docs/docs/configuration/objects.mdx index 81e74e2fe..1a93f9704 100644 --- a/docs/docs/configuration/objects.mdx +++ b/docs/docs/configuration/objects.mdx @@ -10,7 +10,7 @@ Frigate includes the object models listed below from the Google Coral test data. Please note: - `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused. -- `person` is the only tracked object by default. See the [full configuration reference](index.md#full-configuration-reference) for an example of expanding the list of tracked objects. +- `person` is the only tracked object by default. See the [full configuration reference](reference.md) for an example of expanding the list of tracked objects.
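For instance, expanding the list of tracked objects in the config would look something like this sketch (the extra labels are chosen purely for illustration):

```yaml
objects:
  track:
    - person
    - car
    - dog
```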
    {labels.split("\n").map((label) => ( diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md index 5975da354..7cf9e2790 100644 --- a/docs/docs/guides/getting_started.md +++ b/docs/docs/guides/getting_started.md @@ -237,7 +237,7 @@ cameras: More details on available detectors can be found [here](../configuration/object_detectors.md). -Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/index.md#full-configuration-reference). +Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/reference.md). ### Step 5: Setup motion masks @@ -305,7 +305,7 @@ cameras: If you don't have separate streams for detect and record, you would just add the record role to the list on the first input. -By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](../configuration/index.md#full-configuration-reference). +By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](../configuration/reference.md). #### Snapshots @@ -325,7 +325,7 @@ cameras: motion: ... ``` -By default, Frigate will retain snapshots of all events for 10 days. The full set of options for snapshots can be found [here](../configuration/index.md#full-configuration-reference). +By default, Frigate will retain snapshots of all events for 10 days. The full set of options for snapshots can be found [here](../configuration/reference.md). ### Step 7: Complete config From bad80a64ef9b76dd7441085d3631c2c2a13e1408 Mon Sep 17 00:00:00 2001 From: Blake Blackshear Date: Thu, 15 Feb 2024 00:16:53 +0000 Subject: [PATCH 16/16] Fix builds (#9852) * show images for troubleshooting * more debug and try max space action * fixes * oops * maximize first * mount build volume for docker use * moved to shared setup and cleanup * remove temp branch * remove rocm for master --- .github/actions/setup/action.yml | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml index c96102edb..88ceab935 100644 --- a/.github/actions/setup/action.yml +++ b/.github/actions/setup/action.yml @@ -11,11 +11,22 @@ outputs: runs: using: "composite" steps: - - name: Remove unnecessary files - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /opt/ghc + # Stop docker so we can mount more space at /var/lib/docker + - name: Stop docker + run: sudo systemctl stop docker + shell: bash + # This creates a virtual volume at /var/lib/docker to maximize the size + # As of 2/14/2024, this results in 97G for docker images + - name: Maximize build space + uses: easimon/maximize-build-space@master + with: + remove-dotnet: 'true' + remove-android: 'true' + remove-haskell: 'true' + remove-codeql: 'true' + build-mount-path: '/var/lib/docker' + - name: Start docker + run: sudo systemctl start docker shell: bash - id: lowercaseRepo uses: ASzc/change-string-case-action@v5
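For reference, a workflow job would consume this shared composite action roughly as follows (a minimal sketch; inputs and outputs are omitted):

```yaml
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      # run the shared setup: free disk space, remount /var/lib/docker, start docker
      - uses: ./.github/actions/setup
        id: setup
```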