From c16e536b46ef22715494687091065765621b4d68 Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Thu, 22 May 2025 10:38:14 -0500
Subject: [PATCH] Fixes (#18338)

* improve spacing of face selection in mobile drawer

* fix spacing

* sort face names alphabetically

* Improve face selection dialog

* Use a state to track when face image loads

The naturalWidth and naturalHeight will always be 0 until the image loads.
So we use onLoad and a state to track loading, and then calculate the area
after it has loaded.

* Verify that a camera only tracks objects that are possible to track

* Fix test

* genai docs tweak

* Disable openvino model cache

* Cleanup

* Sanitize floats for estimated speed and angle

Users can configure speed zones in such a way that velocity estimates from
Norfair cause a value of inf to be stored as an estimated speed. FastAPI
doesn't serialize inf as a float, so trying to return this value would
result in an API error. Sanitizing the value before storing should correct
this.

---------

Co-authored-by: Nicolas Mowen
---
 docs/docs/configuration/genai.md      |  2 +-
 frigate/config/config.py              | 78 ++++++++++++-------
 frigate/detectors/plugins/openvino.py |  6 --
 frigate/test/test_config.py           |  2 +-
 frigate/track/tracked_object.py       |  6 ++
 frigate/util/builtin.py               |  8 ++
 .../overlay/FaceSelectionDialog.tsx   | 19 +++--
 web/src/pages/FaceLibrary.tsx         | 10 +--
 8 files changed, 80 insertions(+), 51 deletions(-)

diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md
index ec733684f..8cfc893b6 100644
--- a/docs/docs/configuration/genai.md
+++ b/docs/docs/configuration/genai.md
@@ -167,7 +167,7 @@ Analyze the sequence of images containing the {label}. Focus on the likely inten
 
 :::tip
 
-Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.
+Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.
 
 :::

diff --git a/frigate/config/config.py b/frigate/config/config.py
index 2470818b0..6ec048acd 100644
--- a/frigate/config/config.py
+++ b/frigate/config/config.py
@@ -299,6 +299,22 @@ def verify_motion_and_detect(camera_config: CameraConfig) -> ValueError | None:
         )
 
 
+def verify_objects_track(
+    camera_config: CameraConfig, enabled_objects: list[str]
+) -> None:
+    """Verify that a user has not specified an object to track that is not in the labelmap."""
+    valid_objects = [
+        obj for obj in camera_config.objects.track if obj in enabled_objects
+    ]
+
+    if len(valid_objects) != len(camera_config.objects.track):
+        invalid_objects = set(camera_config.objects.track) - set(valid_objects)
+        logger.warning(
+            f"{camera_config.name} is configured to track {list(invalid_objects)} objects, which are not supported by the current model."
+        )
+        camera_config.objects.track = valid_objects
+
+
 def verify_lpr_and_face(
     frigate_config: FrigateConfig, camera_config: CameraConfig
 ) -> ValueError | None:
@@ -471,6 +487,37 @@ class FrigateConfig(FrigateBaseModel):
                 exclude_unset=True,
             )
 
+        for key, detector in self.detectors.items():
+            adapter = TypeAdapter(DetectorConfig)
+            model_dict = (
+                detector
+                if isinstance(detector, dict)
+                else detector.model_dump(warnings="none")
+            )
+            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
+
+            # users should not set model themselves
+            if detector_config.model:
+                detector_config.model = None
+
+            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
+
+            if detector_config.model_path:
+                model_config["path"] = detector_config.model_path
+
+            if "path" not in model_config:
+                if detector_config.type == "cpu":
+                    model_config["path"] = "/cpu_model.tflite"
+                elif detector_config.type == "edgetpu":
+                    model_config["path"] = "/edgetpu_model.tflite"
+
+            model = ModelConfig.model_validate(model_config)
+            model.check_and_load_plus_model(self.plus_api, detector_config.type)
+            model.compute_model_hash()
+            labelmap_objects = model.merged_labelmap.values()
+            detector_config.model = model
+            self.detectors[key] = detector_config
+
         for name, camera in self.cameras.items():
             modified_global_config = global_config.copy()
 
@@ -644,6 +691,7 @@ class FrigateConfig(FrigateBaseModel):
             verify_required_zones_exist(camera_config)
             verify_autotrack_zones(camera_config)
             verify_motion_and_detect(camera_config)
+            verify_objects_track(camera_config, labelmap_objects)
             verify_lpr_and_face(self, camera_config)
 
         self.objects.parse_all_objects(self.cameras)
@@ -655,36 +703,6 @@ class FrigateConfig(FrigateBaseModel):
                 "Frigate+ is configured but clean snapshots are not enabled, submissions to Frigate+ will not be possible./"
             )
 
-        for key, detector in self.detectors.items():
-            adapter = TypeAdapter(DetectorConfig)
-            model_dict = (
-                detector
-                if isinstance(detector, dict)
-                else detector.model_dump(warnings="none")
-            )
-            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
-
-            # users should not set model themselves
-            if detector_config.model:
-                detector_config.model = None
-
-            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
-
-            if detector_config.model_path:
-                model_config["path"] = detector_config.model_path
-
-            if "path" not in model_config:
-                if detector_config.type == "cpu":
-                    model_config["path"] = "/cpu_model.tflite"
-                elif detector_config.type == "edgetpu":
-                    model_config["path"] = "/edgetpu_model.tflite"
-
-            model = ModelConfig.model_validate(model_config)
-            model.check_and_load_plus_model(self.plus_api, detector_config.type)
-            model.compute_model_hash()
-            detector_config.model = model
-            self.detectors[key] = detector_config
-
         return self
 
     @field_validator("cameras")
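Note: the detector/model setup loop added above now runs before the per-camera loop so that `labelmap_objects` is available when each camera's tracked objects are validated. Below is a minimal, standalone sketch of the filtering that `verify_objects_track()` applies; `CameraStub` and `filter_tracked_objects` are hypothetical names used only for illustration and are not part of Frigate.

```python
from dataclasses import dataclass, field


@dataclass
class CameraStub:
    """Hypothetical stand-in for Frigate's CameraConfig, for illustration only."""

    name: str
    track: list[str] = field(default_factory=list)


def filter_tracked_objects(camera: CameraStub, enabled_objects: list[str]) -> None:
    """Drop configured objects that the loaded model's labelmap cannot detect."""
    valid = [obj for obj in camera.track if obj in enabled_objects]

    if len(valid) != len(camera.track):
        invalid = set(camera.track) - set(valid)
        # Frigate logs a warning here; print() keeps the sketch self-contained.
        print(
            f"{camera.name} is configured to track {list(invalid)} objects, "
            "which are not supported by the current model."
        )
        camera.track = valid


cam = CameraStub(name="front_door", track=["person", "car", "giraffe"])
filter_tracked_objects(cam, enabled_objects=["person", "car", "dog", "cat"])
print(cam.track)  # ['person', 'car'] -- the unsupported label was dropped
```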
diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py
index 08d068d5e..70c2d4725 100644
--- a/frigate/detectors/plugins/openvino.py
+++ b/frigate/detectors/plugins/openvino.py
@@ -3,11 +3,9 @@ import os
 
 import numpy as np
 import openvino as ov
-import openvino.properties as props
 from pydantic import Field
 from typing_extensions import Literal
 
-from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
 from frigate.util.model import (
@@ -49,10 +47,6 @@ class OvDetector(DetectionApi):
             logger.error(f"OpenVino model file {detector_config.model.path} not found.")
             raise FileNotFoundError
 
-        os.makedirs(os.path.join(MODEL_CACHE_DIR, "openvino"), exist_ok=True)
-        self.ov_core.set_property(
-            {props.cache_dir: os.path.join(MODEL_CACHE_DIR, "openvino")}
-        )
         self.interpreter = self.ov_core.compile_model(
             model=detector_config.model.path, device_name=detector_config.device
         )
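For reference, OpenVINO model caching can be enabled on a `Core` instance with the same property this patch removes. The sketch below is a hypothetical, standalone example (the cache directory and model path are made up) of what the removed behavior did; it is not part of Frigate after this change.

```python
import os

import openvino as ov
import openvino.properties as props

# Hypothetical paths, for illustration only; Frigate previously cached under
# MODEL_CACHE_DIR/openvino before this patch removed the cache.
cache_dir = "/tmp/openvino_cache"
os.makedirs(cache_dir, exist_ok=True)

core = ov.Core()
# With cache_dir set, compile_model() persists compiled blobs to disk and
# reuses them on later startups; dropping it forces a fresh compile each run.
core.set_property({props.cache_dir: cache_dir})

compiled_model = core.compile_model(model="/path/to/model.xml", device_name="CPU")
```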
diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
index 02f5d5e74..4bafe7369 100644
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -39,7 +39,7 @@ class TestConfig(unittest.TestCase):
             "description": "Fine tuned model",
             "trainDate": "2023-04-28T23:22:01.262Z",
             "type": "ssd",
-            "supportedDetectors": ["edgetpu"],
+            "supportedDetectors": ["cpu", "edgetpu"],
             "width": 320,
             "height": 320,
             "inputShape": "nhwc",

diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py
index 6b604b0e9..2852d8917 100644
--- a/frigate/track/tracked_object.py
+++ b/frigate/track/tracked_object.py
@@ -18,6 +18,7 @@ from frigate.config import (
 )
 from frigate.const import CLIPS_DIR, THUMB_DIR
 from frigate.review.types import SeverityEnum
+from frigate.util.builtin import sanitize_float
 from frigate.util.image import (
     area,
     calculate_region,
@@ -202,6 +203,11 @@ class TrackedObject:
                 )
             )
 
+            # users can configure speed zones incorrectly, so sanitize speed_magnitude
+            # and velocity_angle in case the values come back as inf or NaN
+            speed_magnitude = sanitize_float(speed_magnitude)
+            self.velocity_angle = sanitize_float(self.velocity_angle)
+
             if self.ui_config.unit_system == "metric":
                 self.current_estimated_speed = (
                     speed_magnitude * 3.6

diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py
index b35c7b942..52280ecd8 100644
--- a/frigate/util/builtin.py
+++ b/frigate/util/builtin.py
@@ -4,6 +4,7 @@ import ast
 import copy
 import datetime
 import logging
+import math
 import multiprocessing as mp
 import queue
 import re
@@ -399,3 +400,10 @@ def serialize(
 def deserialize(bytes_data: bytes) -> list[float]:
     """Deserializes a compact "raw bytes" format into a list of floats"""
     return list(struct.unpack("%sf" % (len(bytes_data) // 4), bytes_data))
+
+
+def sanitize_float(value):
+    """Replace NaN or inf with 0.0."""
+    if isinstance(value, (int, float)) and not math.isfinite(value):
+        return 0.0
+    return value
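The new `sanitize_float()` helper addresses the serialization issue described in the commit message: strict JSON encoding rejects non-finite floats (stdlib `json.dumps` with `allow_nan=False`, the same restriction FastAPI's default JSON responses apply), so an `inf` estimated speed surfaces as an API error. A small self-contained sketch of the failure and the guard, using only the standard library:

```python
import json
import math


def sanitize_float(value):
    """Replace NaN or inf with 0.0 (mirrors frigate.util.builtin.sanitize_float)."""
    if isinstance(value, (int, float)) and not math.isfinite(value):
        return 0.0
    return value


# A degenerate velocity estimate, e.g. from a misconfigured speed zone.
speed_magnitude = float("inf")

# Strict JSON encoding refuses non-finite floats.
try:
    json.dumps({"estimated_speed": speed_magnitude}, allow_nan=False)
except ValueError as err:
    print(f"serialization failed: {err}")

# Sanitizing before the value is stored keeps the API response valid.
print(json.dumps({"estimated_speed": sanitize_float(speed_magnitude)}, allow_nan=False))
# -> {"estimated_speed": 0.0}
```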
diff --git a/web/src/components/overlay/FaceSelectionDialog.tsx b/web/src/components/overlay/FaceSelectionDialog.tsx
index 85595a597..3644ff1cf 100644
--- a/web/src/components/overlay/FaceSelectionDialog.tsx
+++ b/web/src/components/overlay/FaceSelectionDialog.tsx
@@ -53,7 +53,13 @@ export default function FaceSelectionDialog({
   const Selector = isDesktop ? DropdownMenu : Drawer;
   const SelectorTrigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger;
   const SelectorContent = isDesktop ? DropdownMenuContent : DrawerContent;
-  const SelectorItem = isDesktop ? DropdownMenuItem : DrawerClose;
+  const SelectorItem = isDesktop
+    ? DropdownMenuItem
+    : (props: React.HTMLAttributes) => (
+
+
+      );
 
   return (
@@ -72,10 +78,7 @@ export default function FaceSelectionDialog({
       {children}
         {isMobile && (
@@ -86,8 +89,8 @@ export default function FaceSelectionDialog({
           {t("trainFaceAs")}
             {t("createFaceLibrary.new")}
-          {faceNames.map((faceName) => (
+          {faceNames.sort().map((faceName) => (

diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx
--- a/web/src/pages/FaceLibrary.tsx
+++ b/web/src/pages/FaceLibrary.tsx
     if (data.score >= recognitionConfig.recognition_threshold) {
@@ -896,14 +897,12 @@ function FaceAttempt({
   });
 
   const imageArea = useMemo(() => {
-    if (!imgRef.current) {
+    if (imgRef.current == null || !imageLoaded) {
       return undefined;
     }
 
     return imgRef.current.naturalWidth * imgRef.current.naturalHeight;
-    // only refresh when severity changes
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [imgRef.current]);
+  }, [imageLoaded]);
 
   // api calls
@@ -966,9 +965,10 @@ function FaceAttempt({
             : "outline-transparent duration-500",
         )}
       >
-
+          onLoad={() => setImageLoaded(true)}
           className={cn("size-44", isMobile && "w-full")}
           src={`${baseUrl}clips/faces/train/${data.filename}`}
           onClick={(e) => {