From 8094dd4075bbc62e891c0ec1a407d5ecd655bebf Mon Sep 17 00:00:00 2001
From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
Date: Fri, 9 May 2025 08:36:44 -0500
Subject: [PATCH] Fixes (#18117)

* face library i18n fixes

* face library i18n fixes

* add ability to use ctrl/cmd S to save in the config editor

* Use datetime as ID

* Update metrics inference speed to start with 0 ms

* fix android formatted thumbnail

* ensure role is comma separated and stripped correctly

* improve face library deletion

- add a confirmation dialog
- add ability to select all / delete faces in collections

* Implement lazy loading for video previews

* Force GPU for large embedding model

* GPU is required

* settings i18n fixes

* Don't delete train tab

* webpush debugging logs

* Fix incorrectly copying zones

* copy path data

* Ensure that cache dir exists for Frigate+

* face docs update

* Add description to upload image step to clarify the image

* Clean up

---------

Co-authored-by: Nicolas Mowen
---
 docs/docs/configuration/face_recognition.md   |  14 +-
 frigate/api/auth.py                           |   4 +-
 frigate/api/classification.py                 |   6 +-
 frigate/api/media.py                          |   2 +-
 frigate/comms/webpush.py                      |   5 +
 .../common/license_plate/mixin.py             |  26 +--
 frigate/data_processing/real_time/face.py     |  17 +-
 frigate/data_processing/types.py              |  12 +-
 frigate/detectors/detector_config.py          |   3 +
 frigate/embeddings/__init__.py                |   2 +-
 frigate/embeddings/embeddings.py              |  23 +-
 frigate/embeddings/onnx/face_embedding.py     |  14 +-
 frigate/review/maintainer.py                  |  33 +--
 frigate/track/tracked_object.py               |   4 +-
 frigate/util/builtin.py                       |  28 ++-
 web/public/locales/en/views/faceLibrary.json  |  18 +-
 web/public/locales/en/views/settings.json     |   8 +-
 .../overlay/detail/FaceCreateWizardDialog.tsx |  17 +-
 web/src/components/player/PreviewPlayer.tsx   |  91 ++++----
 web/src/pages/ConfigEditor.tsx                |   8 +-
 web/src/pages/FaceLibrary.tsx                 | 197 ++++++++++++++----
 web/src/views/recording/RecordingView.tsx     |  51 +++++
 web/src/views/settings/CameraSettingsView.tsx |   4 +-
 .../settings/ClassificationSettingsView.tsx   |   2 +-
 .../settings/FrigatePlusSettingsView.tsx      |   2 +-
 web/src/views/settings/MotionTunerView.tsx    |   2 +-
 .../settings/NotificationsSettingsView.tsx    |   4 +-
 27 files changed, 402 insertions(+), 195 deletions(-)

diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md
index b78995cd2..bd97b394c 100644
--- a/docs/docs/configuration/face_recognition.md
+++ b/docs/docs/configuration/face_recognition.md
@@ -34,7 +34,7 @@ All of these features run locally on your system.
 
 The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently.
 
-The `large` model is optimized for accuracy, an integrated or discrete GPU is highly recommended. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
+The `large` model is optimized for accuracy; an integrated or discrete GPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
 
 ## Configuration
 
@@ -107,17 +107,17 @@ When choosing images to include in the face training set it is recommended to al
 
 ### Step 1 - Building a Strong Foundation
 
-When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-5 "portrait" photos for each person. It is important that the person's face in the photo is straight-on and not turned which will ensure a good starting point.
+When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-5 photos containing just this person's face. It is important that the person's face in the photo is front-facing and not turned; this will ensure a good starting point.
 
-Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle.
+Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are front-facing. Ignore images from cameras that recognize faces from an angle.
 
 Aim to strike a balance between the quality of images while also having a range of conditions (day / night, different weather conditions, different times of day, etc.) in order to have diversity in the images used for each person and not have over-fitting.
 
-Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step.
+Once a person starts to be consistently recognized correctly on images that are front-facing, it is time to move on to the next step.
 
 ### Step 2 - Expanding The Dataset
 
-Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
+Once front-facing images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
 
 ## FAQ
 
@@ -156,3 +156,7 @@ Face recognition does not run on the recording stream, this would be suboptimal
 ### I get an unknown error when taking a photo directly with my iPhone
 
 By default iOS devices will use HEIC (High Efficiency Image Container) for images, but this format is not supported for uploads. Choosing `large` as the format instead of `original` will use JPG which will work correctly.
+
+### How can I delete the face database and start over?
+
+Frigate does not store anything in its database related to face recognition. You can simply delete all of your faces through the Frigate UI or remove the contents of the `/media/frigate/clips/faces` directory.
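A note on the new FAQ entry above: since no face-recognition state lives in Frigate's database, resetting the library is purely a filesystem operation. Below is a minimal sketch of the manual route (Python, assuming the default `/media/frigate/clips/faces` path named in the docs; stop Frigate before running it):

```python
# Clears every trained face while keeping the faces directory itself.
# FACE_DIR is the default path from the FAQ above; adjust it if your
# media volume is mounted elsewhere.
import os
import shutil

FACE_DIR = "/media/frigate/clips/faces"

for entry in os.listdir(FACE_DIR):
    path = os.path.join(FACE_DIR, entry)
    if os.path.isdir(path):
        shutil.rmtree(path)  # a collection folder, e.g. faces/jane/
    else:
        os.unlink(path)  # a stray image at the top level
```

Deleting collections through the Face Library UI achieves the same result without scripting, as the FAQ notes.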
diff --git a/frigate/api/auth.py b/frigate/api/auth.py index 710661be3..1a267b521 100644 --- a/frigate/api/auth.py +++ b/frigate/api/auth.py @@ -268,7 +268,9 @@ def auth(request: Request): # if comma-separated with "admin", use "admin", else use default role success_response.headers["remote-role"] = ( - "admin" if role and "admin" in role else proxy_config.default_role + "admin" + if role and "admin" in [r.strip() for r in role.split(",")] + else proxy_config.default_role ) return success_response diff --git a/frigate/api/classification.py b/frigate/api/classification.py index 8f0fb6462..d0fcf775c 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -1,10 +1,9 @@ """Object classification APIs.""" +import datetime import logging import os -import random import shutil -import string import cv2 from fastapi import APIRouter, Depends, Request, UploadFile @@ -120,8 +119,7 @@ def train_face(request: Request, name: str, body: dict = None): ) sanitized_name = sanitize_filename(name) - rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) - new_name = f"{sanitized_name}-{rand_id}.webp" + new_name = f"{sanitized_name}-{datetime.datetime.now().timestamp()}.webp" new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}") if not os.path.exists(new_file_folder): diff --git a/frigate/api/media.py b/frigate/api/media.py index 476c8349f..9aac3d7e6 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -909,7 +909,7 @@ def event_thumbnail( elif extension == "webp": quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60] - _, img = cv2.imencode(f".{img}", thumbnail, quality_params) + _, img = cv2.imencode(f".{extension}", thumbnail, quality_params) thumbnail_bytes = img.tobytes() return Response( diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index b845c3afd..cbc274aef 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -303,6 +303,9 @@ class WebPushClient(Communicator): # type: ignore[misc] and len(payload["before"]["data"]["zones"]) == len(payload["after"]["data"]["zones"]) ): + logger.debug( + f"Skipping notification for {camera} - message is an update and important fields don't have an update" + ) return self.last_camera_notification_time[camera] = current_time @@ -325,6 +328,8 @@ class WebPushClient(Communicator): # type: ignore[misc] direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}" ttl = 3600 if state == "end" else 0 + logger.debug(f"Sending push notification for {camera}, review ID {reviewId}") + for user in self.web_pushers: self.send_push_notification( user=user, diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index f4ff08644..b62239491 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -25,7 +25,7 @@ from frigate.comms.event_metadata_updater import ( from frigate.const import CLIPS_DIR from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE from frigate.types import TrackedObjectUpdateTypesEnum -from frigate.util.builtin import EventsPerSecond +from frigate.util.builtin import EventsPerSecond, InferenceSpeed from frigate.util.image import area logger = logging.getLogger(__name__) @@ -36,8 +36,10 @@ WRITE_DEBUG_IMAGES = False class LicensePlateProcessingMixin: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) + self.plate_rec_speed = InferenceSpeed(self.metrics.alpr_speed) 
         self.plates_rec_second = EventsPerSecond()
         self.plates_rec_second.start()
+        self.plate_det_speed = InferenceSpeed(self.metrics.yolov9_lpr_speed)
         self.plates_det_second = EventsPerSecond()
         self.plates_det_second.start()
         self.event_metadata_publisher = EventMetadataPublisher()
@@ -1157,22 +1159,6 @@ class LicensePlateProcessingMixin:
         # 5. Return True if previous plate scores higher
         return prev_score > curr_score
 
-    def __update_yolov9_metrics(self, duration: float) -> None:
-        """
-        Update inference metrics.
-        """
-        self.metrics.yolov9_lpr_speed.value = (
-            self.metrics.yolov9_lpr_speed.value * 9 + duration
-        ) / 10
-
-    def __update_lpr_metrics(self, duration: float) -> None:
-        """
-        Update inference metrics.
-        """
-        self.metrics.alpr_speed.value = (
-            self.metrics.alpr_speed.value * 9 + duration
-        ) / 10
-
     def _generate_plate_event(self, camera: str, plate: str, plate_score: float) -> str:
         """Generate a unique ID for a plate event based on camera and text."""
         now = datetime.datetime.now().timestamp()
@@ -1228,7 +1214,7 @@
                 f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
             )
             self.plates_det_second.update()
-            self.__update_yolov9_metrics(
+            self.plate_det_speed.update(
                 datetime.datetime.now().timestamp() - yolov9_start
             )
 
@@ -1319,7 +1305,7 @@
                 f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
             )
             self.plates_det_second.update()
-            self.__update_yolov9_metrics(
+            self.plate_det_speed.update(
                 datetime.datetime.now().timestamp() - yolov9_start
             )
 
@@ -1433,7 +1419,7 @@
             camera, id, license_plate_frame
         )
         self.plates_rec_second.update()
-        self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start)
+        self.plate_rec_speed.update(datetime.datetime.now().timestamp() - start)
 
         if license_plates:
             for plate, confidence, text_area in zip(license_plates, confidences, areas):
diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py
index d91ab9b80..a7e1a63ba 100644
--- a/frigate/data_processing/real_time/face.py
+++ b/frigate/data_processing/real_time/face.py
@@ -5,9 +5,7 @@ import datetime
 import json
 import logging
 import os
-import random
 import shutil
-import string
 from typing import Optional
 
 import cv2
@@ -27,7 +25,7 @@ from frigate.data_processing.common.face.model import (
     FaceRecognizer,
 )
 from frigate.types import TrackedObjectUpdateTypesEnum
-from frigate.util.builtin import EventsPerSecond
+from frigate.util.builtin import EventsPerSecond, InferenceSpeed
 from frigate.util.image import area
 
 from ..types import DataProcessorMetrics
@@ -58,6 +56,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         self.person_face_history: dict[str, list[tuple[str, float, int]]] = {}
         self.recognizer: FaceRecognizer | None = None
         self.faces_per_second = EventsPerSecond()
+        self.inference_speed = InferenceSpeed(self.metrics.face_rec_speed)
 
         download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
         self.model_files = {
@@ -155,9 +154,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
 
     def __update_metrics(self, duration: float) -> None:
         self.faces_per_second.update()
-        self.metrics.face_rec_speed.value = (
-            self.metrics.face_rec_speed.value * 9 + duration
-        ) / 10
+        self.inference_speed.update(duration)
 
     def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
         """Look for faces in image."""
@@ -343,11 +340,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             return {"success": True, "score": score, "face_name": sub_label}
         elif topic == EmbeddingsRequestEnum.register_face.value:
-            rand_id = "".join(
-                random.choices(string.ascii_lowercase + string.digits, k=6)
-            )
             label = request_data["face_name"]
-            id = f"{label}-{rand_id}"
 
             if request_data.get("cropped"):
                 thumbnail = request_data["image"]
@@ -376,7 +369,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
 
         # write face to library
         folder = os.path.join(FACE_DIR, label)
-        file = os.path.join(folder, f"{id}.webp")
+        file = os.path.join(
+            folder, f"{label}_{datetime.datetime.now().timestamp()}.webp"
+        )
         os.makedirs(folder, exist_ok=True)
 
         # save face image
diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py
index 8ec7b9617..a19a856bf 100644
--- a/frigate/data_processing/types.py
+++ b/frigate/data_processing/types.py
@@ -7,7 +7,9 @@ from multiprocessing.sharedctypes import Synchronized
 
 class DataProcessorMetrics:
     image_embeddings_speed: Synchronized
+    image_embeddings_eps: Synchronized
     text_embeddings_speed: Synchronized
+    text_embeddings_eps: Synchronized
     face_rec_speed: Synchronized
     face_rec_fps: Synchronized
     alpr_speed: Synchronized
@@ -16,15 +18,15 @@ class DataProcessorMetrics:
     yolov9_lpr_pps: Synchronized
 
     def __init__(self):
-        self.image_embeddings_speed = mp.Value("d", 0.01)
+        self.image_embeddings_speed = mp.Value("d", 0.0)
         self.image_embeddings_eps = mp.Value("d", 0.0)
-        self.text_embeddings_speed = mp.Value("d", 0.01)
+        self.text_embeddings_speed = mp.Value("d", 0.0)
         self.text_embeddings_eps = mp.Value("d", 0.0)
-        self.face_rec_speed = mp.Value("d", 0.01)
+        self.face_rec_speed = mp.Value("d", 0.0)
         self.face_rec_fps = mp.Value("d", 0.0)
-        self.alpr_speed = mp.Value("d", 0.01)
+        self.alpr_speed = mp.Value("d", 0.0)
         self.alpr_pps = mp.Value("d", 0.0)
-        self.yolov9_lpr_speed = mp.Value("d", 0.01)
+        self.yolov9_lpr_speed = mp.Value("d", 0.0)
         self.yolov9_lpr_pps = mp.Value("d", 0.0)
diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py
index e719e1062..f14da57a8 100644
--- a/frigate/detectors/detector_config.py
+++ b/frigate/detectors/detector_config.py
@@ -126,6 +126,9 @@ class ModelConfig(BaseModel):
         if not self.path or not self.path.startswith("plus://"):
             return
 
+        # ensure that model cache dir exists
+        os.makedirs(MODEL_CACHE_DIR, exist_ok=True)
+
         model_id = self.path[7:]
         self.path = os.path.join(MODEL_CACHE_DIR, model_id)
         model_info_path = f"{self.path}.json"
diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py
index 3650303b5..3687021b0 100644
--- a/frigate/embeddings/__init__.py
+++ b/frigate/embeddings/__init__.py
@@ -235,7 +235,7 @@ class EmbeddingsContext:
             if os.path.isfile(file_path):
                 os.unlink(file_path)
 
-        if len(os.listdir(folder)) == 0:
+        if face != "train" and len(os.listdir(folder)) == 0:
             os.rmdir(folder)
 
         self.requestor.send_data(
diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py
index 2fda584d3..096077916 100644
--- a/frigate/embeddings/embeddings.py
+++ b/frigate/embeddings/embeddings.py
@@ -21,7 +21,7 @@ from frigate.data_processing.types import DataProcessorMetrics
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event
 from frigate.types import ModelStatusTypesEnum
-from frigate.util.builtin import EventsPerSecond, serialize
+from frigate.util.builtin import EventsPerSecond, InferenceSpeed, serialize
 from frigate.util.path import get_event_thumbnail_bytes
 
 from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
@@ -75,8 +75,10 @@ class Embeddings:
         self.metrics = metrics
         self.requestor = InterProcessRequestor()
 
+        self.image_inference_speed = InferenceSpeed(self.metrics.image_embeddings_speed)
         self.image_eps = EventsPerSecond()
         self.image_eps.start()
+        self.text_inference_speed = InferenceSpeed(self.metrics.text_embeddings_speed)
         self.text_eps = EventsPerSecond()
         self.text_eps.start()
 
@@ -183,10 +185,7 @@ class Embeddings:
             (event_id, serialize(embedding)),
         )
 
-        duration = datetime.datetime.now().timestamp() - start
-        self.metrics.image_embeddings_speed.value = (
-            self.metrics.image_embeddings_speed.value * 9 + duration
-        ) / 10
+        self.image_inference_speed.update(datetime.datetime.now().timestamp() - start)
         self.image_eps.update()
 
         return embedding
@@ -220,9 +219,7 @@ class Embeddings:
         )
 
         duration = datetime.datetime.now().timestamp() - start
-        self.metrics.text_embeddings_speed.value = (
-            self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids))
-        ) / 10
+        self.text_inference_speed.update(duration / len(ids))
 
         return embeddings
 
@@ -241,10 +238,7 @@ class Embeddings:
             (event_id, serialize(embedding)),
         )
 
-        duration = datetime.datetime.now().timestamp() - start
-        self.metrics.text_embeddings_speed.value = (
-            self.metrics.text_embeddings_speed.value * 9 + duration
-        ) / 10
+        self.text_inference_speed.update(datetime.datetime.now().timestamp() - start)
         self.text_eps.update()
 
         return embedding
@@ -276,10 +270,7 @@ class Embeddings:
             items,
         )
 
-        duration = datetime.datetime.now().timestamp() - start
-        self.metrics.text_embeddings_speed.value = (
-            self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids))
-        ) / 10
+        self.text_inference_speed.update(datetime.datetime.now().timestamp() - start)
 
         return embeddings
 
diff --git a/frigate/embeddings/onnx/face_embedding.py b/frigate/embeddings/onnx/face_embedding.py
index 860caab57..c0f35a581 100644
--- a/frigate/embeddings/onnx/face_embedding.py
+++ b/frigate/embeddings/onnx/face_embedding.py
@@ -23,10 +23,7 @@ FACENET_INPUT_SIZE = 160
 
 class FaceNetEmbedding(BaseEmbedding):
-    def __init__(
-        self,
-        device: str = "AUTO",
-    ):
+    def __init__(self):
         super().__init__(
             model_name="facedet",
             model_file="facenet.tflite",
@@ -34,7 +31,6 @@ class FaceNetEmbedding(BaseEmbedding):
                 "facenet.tflite": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facenet.tflite",
             },
         )
-        self.device = device
         self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
         self.tokenizer = None
         self.feature_extractor = None
@@ -113,10 +109,7 @@ class FaceNetEmbedding(BaseEmbedding):
 
 class ArcfaceEmbedding(BaseEmbedding):
-    def __init__(
-        self,
-        device: str = "AUTO",
-    ):
+    def __init__(self):
         super().__init__(
             model_name="facedet",
             model_file="arcface.onnx",
@@ -124,7 +117,6 @@ class ArcfaceEmbedding(BaseEmbedding):
                 "arcface.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/arcface.onnx",
             },
         )
-        self.device = device
         self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
         self.tokenizer = None
         self.feature_extractor = None
@@ -154,7 +146,7 @@ class ArcfaceEmbedding(BaseEmbedding):
 
         self.runner = ONNXModelRunner(
             os.path.join(self.download_path, self.model_file),
-            self.device,
+            "GPU",
         )
 
     def _preprocess_inputs(self, raw_inputs):
diff --git a/frigate/review/maintainer.py b/frigate/review/maintainer.py
index 004beb8b3..6b5c32956 100644
--- a/frigate/review/maintainer.py
+++ b/frigate/review/maintainer.py
@@ -1,5 +1,6 @@
 """Maintain review segments in db."""
 
+import copy
 import json
 import logging
 import os
@@ -119,21 +120,23 @@ class PendingReviewSegment:
         )
 
     def get_data(self, ended: bool) -> dict:
-        return {
-            ReviewSegment.id.name: self.id,
-            ReviewSegment.camera.name: self.camera,
-            ReviewSegment.start_time.name: self.start_time,
-            ReviewSegment.end_time.name: self.last_update if ended else None,
-            ReviewSegment.severity.name: self.severity.value,
-            ReviewSegment.thumb_path.name: self.frame_path,
-            ReviewSegment.data.name: {
-                "detections": list(set(self.detections.keys())),
-                "objects": list(set(self.detections.values())),
-                "sub_labels": list(self.sub_labels.values()),
-                "zones": self.zones,
-                "audio": list(self.audio),
-            },
-        }.copy()
+        return copy.deepcopy(
+            {
+                ReviewSegment.id.name: self.id,
+                ReviewSegment.camera.name: self.camera,
+                ReviewSegment.start_time.name: self.start_time,
+                ReviewSegment.end_time.name: self.last_update if ended else None,
+                ReviewSegment.severity.name: self.severity.value,
+                ReviewSegment.thumb_path.name: self.frame_path,
+                ReviewSegment.data.name: {
+                    "detections": list(set(self.detections.keys())),
+                    "objects": list(set(self.detections.values())),
+                    "sub_labels": list(self.sub_labels.values()),
+                    "zones": self.zones,
+                    "audio": list(self.audio),
+                },
+            }
+        )
 
 
 class ReviewSegmentMaintainer(threading.Thread):
diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py
index 9690581f1..77ebc6646 100644
--- a/frigate/track/tracked_object.py
+++ b/frigate/track/tracked_object.py
@@ -154,7 +154,7 @@ class TrackedObject:
                     "attributes": obj_data["attributes"],
                     "current_estimated_speed": self.current_estimated_speed,
                     "velocity_angle": self.velocity_angle,
-                    "path_data": self.path_data,
+                    "path_data": self.path_data.copy(),
                     "recognized_license_plate": obj_data.get(
                         "recognized_license_plate"
                     ),
@@ -378,7 +378,7 @@ class TrackedObject:
             "current_estimated_speed": self.current_estimated_speed,
             "average_estimated_speed": self.average_estimated_speed,
             "velocity_angle": self.velocity_angle,
-            "path_data": self.path_data,
+            "path_data": self.path_data.copy(),
             "recognized_license_plate": self.obj_data.get("recognized_license_plate"),
         }
 
diff --git a/frigate/util/builtin.py b/frigate/util/builtin.py
index 5f573ef78..0f245107a 100644
--- a/frigate/util/builtin.py
+++ b/frigate/util/builtin.py
@@ -11,6 +11,7 @@ import shlex
 import struct
 import urllib.parse
 from collections.abc import Mapping
+from multiprocessing.sharedctypes import Synchronized
 from pathlib import Path
 from typing import Any, Optional, Tuple, Union
 from zoneinfo import ZoneInfoNotFoundError
@@ -26,16 +27,16 @@ logger = logging.getLogger(__name__)
 
 class EventsPerSecond:
-    def __init__(self, max_events=1000, last_n_seconds=10):
+    def __init__(self, max_events=1000, last_n_seconds=10) -> None:
         self._start = None
         self._max_events = max_events
         self._last_n_seconds = last_n_seconds
         self._timestamps = []
 
-    def start(self):
+    def start(self) -> None:
         self._start = datetime.datetime.now().timestamp()
 
-    def update(self):
+    def update(self) -> None:
         now = datetime.datetime.now().timestamp()
         if self._start is None:
             self._start = now
@@ -45,7 +46,7 @@ class EventsPerSecond:
             self._timestamps = self._timestamps[(1 - self._max_events) :]
         self.expire_timestamps(now)
 
-    def eps(self):
+    def eps(self) -> float:
         now = datetime.datetime.now().timestamp()
         if self._start is None:
             self._start = now
@@ -58,12 +59,29 @@ class EventsPerSecond:
         return len(self._timestamps) / seconds
 
     # remove aged out timestamps
-    def expire_timestamps(self, now):
+    def expire_timestamps(self, now: float) -> None:
         threshold = now - self._last_n_seconds
         while self._timestamps and self._timestamps[0] < threshold:
             del self._timestamps[0]
 
 
+class InferenceSpeed:
+    def __init__(self, metric: Synchronized) -> None:
+        self.__metric = metric
+        self.__initialized = False
+
+    def update(self, inference_time: float) -> None:
+        if not self.__initialized:
+            self.__metric.value = inference_time
+            self.__initialized = True
+            return
+
+        self.__metric.value = (self.__metric.value * 9 + inference_time) / 10
+
+    def current(self) -> float:
+        return self.__metric.value
+
+
 def deep_merge(dct1: dict, dct2: dict, override=False, merge_lists=False) -> dict:
     """
     :param dct1: First dict to merge
diff --git a/web/public/locales/en/views/faceLibrary.json b/web/public/locales/en/views/faceLibrary.json
index 5ba744f15..50f2f587d 100644
--- a/web/public/locales/en/views/faceLibrary.json
+++ b/web/public/locales/en/views/faceLibrary.json
@@ -8,14 +8,16 @@
     "subLabelScore": "Sub Label Score",
     "scoreInfo": "The sub label score is the weighted score for all of the recognized face confidences, so this may differ from the score shown on the snapshot.",
     "face": "Face Details",
-    "faceDesc": "Details for the face and associated object",
-    "timestamp": "Timestamp"
+    "faceDesc": "Details of the tracked object that generated this face",
+    "timestamp": "Timestamp",
+    "unknown": "Unknown"
   },
   "documentTitle": "Face Library - Frigate",
   "uploadFaceImage": {
     "title": "Upload Face Image",
     "desc": "Upload an image to scan for faces and include for {{pageToggle}}"
   },
+  "collections": "Collections",
   "createFaceLibrary": {
     "title": "Create Collection",
     "desc": "Create a new collection",
@@ -25,7 +27,10 @@
   "steps": {
     "faceName": "Enter Face Name",
     "uploadFace": "Upload Face Image",
-    "nextSteps": "Next Steps"
+    "nextSteps": "Next Steps",
+    "description": {
+      "uploadFace": "Upload an image of {{name}} that shows their face from a front-facing angle. The image does not need to be cropped to just their face."
+    }
   },
   "train": {
     "title": "Train",
@@ -38,12 +43,17 @@
     "title": "Delete Name",
     "desc": "Are you sure you want to delete the collection {{name}}? This will permanently delete all associated faces."
   },
+  "deleteFaceAttempts": {
+    "title": "Delete Faces",
+    "desc_one": "Are you sure you want to delete {{count}} face? This action cannot be undone.",
+    "desc_other": "Are you sure you want to delete {{count}} faces? This action cannot be undone."
+  },
   "renameFace": {
     "title": "Rename Face",
     "desc": "Enter a new name for {{name}}"
   },
   "button": {
-    "deleteFaceAttempts": "Delete Face Attempts",
+    "deleteFaceAttempts": "Delete Faces",
     "addFace": "Add Face",
     "renameFace": "Rename Face",
     "deleteFace": "Delete Face",
diff --git a/web/public/locales/en/views/settings.json b/web/public/locales/en/views/settings.json
index b1a9f2a24..62ad98f35 100644
--- a/web/public/locales/en/views/settings.json
+++ b/web/public/locales/en/views/settings.json
@@ -84,6 +84,7 @@
   },
   "classification": {
     "title": "Classification Settings",
+    "unsavedChanges": "Unsaved Classification settings changes",
     "birdClassification": {
       "title": "Bird Classification",
       "desc": "Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a sub_label. This information is included in the UI, filters, as well as in notifications."
@@ -168,11 +169,12 @@ "notSelectDetections": "All {{detectionsLabels}} objects detected in {{zone}} on {{cameraName}} not categorized as Alerts will be shown as Detections regardless of which zone they are in.", "regardlessOfZoneObjectDetectionsTips": "All {{detectionsLabels}} objects not categorized on {{cameraName}} will be shown as Detections regardless of which zone they are in." }, + "unsavedChanges": "Unsaved Review Classification settings for {{camera}}", "selectAlertsZones": "Select zones for Alerts", "selectDetectionsZones": "Select zones for Detections", "limitDetections": "Limit detections to specific zones", "toast": { - "success": "Review classification configuration has been saved. Restart Frigate to apply changes." + "success": "Review Classification configuration has been saved. Restart Frigate to apply changes." } } }, @@ -338,6 +340,7 @@ }, "motionDetectionTuner": { "title": "Motion Detection Tuner", + "unsavedChanges": "Unsaved Motion Tuner changes ({{camera}})", "desc": { "title": "Frigate uses motion detection as a first line check to see if there is anything happening in the frame worth checking with object detection.", "documentation": "Read the Motion Tuning Guide" @@ -527,6 +530,8 @@ "registerDevice": "Register This Device", "unregisterDevice": "Unregister This Device", "sendTestNotification": "Send a test notification", + "unsavedRegistrations": "Unsaved Notification registrations", + "unsavedChanges": "Unsaved Notification changes", "active": "Notifications Active", "suspended": "Notifications suspended {{time}}", "suspendTime": { @@ -587,6 +592,7 @@ "loadingAvailableModels": "Loading available models…", "modelSelect": "Your available models on Frigate+ can be selected here. Note that only models compatible with your current detector configuration can be selected." }, + "unsavedChanges": "Unsaved Frigate+ settings changes", "restart_required": "Restart required (Frigate+ model changed)", "toast": { "success": "Frigate+ settings have been saved. Restart Frigate to apply changes.", diff --git a/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx b/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx index b2ac8d591..9fb3c9fad 100644 --- a/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx +++ b/web/src/components/overlay/detail/FaceCreateWizardDialog.tsx @@ -128,13 +128,18 @@ export default function CreateFaceWizardDialog({ )} {step == 1 && ( - -
- + <> +
+ {t("steps.description.uploadFace", { name })}
- + +
+ +
+
+ )} {step == 2 && (
diff --git a/web/src/components/player/PreviewPlayer.tsx b/web/src/components/player/PreviewPlayer.tsx index d103ecb36..4e807d771 100644 --- a/web/src/components/player/PreviewPlayer.tsx +++ b/web/src/components/player/PreviewPlayer.tsx @@ -23,6 +23,7 @@ import { import { useTranslation } from "react-i18next"; type PreviewPlayerProps = { + previewRef?: (ref: HTMLDivElement | null) => void; className?: string; camera: string; timeRange: TimeRange; @@ -30,16 +31,19 @@ type PreviewPlayerProps = { startTime?: number; isScrubbing: boolean; forceAspect?: number; + isVisible?: boolean; onControllerReady: (controller: PreviewController) => void; onClick?: () => void; }; export default function PreviewPlayer({ + previewRef, className, camera, timeRange, cameraPreviews, startTime, isScrubbing, + isVisible = true, onControllerReady, onClick, }: PreviewPlayerProps) { @@ -54,6 +58,7 @@ export default function PreviewPlayer({ if (currentPreview) { return ( void; className?: string; camera: string; timeRange: TimeRange; @@ -117,12 +124,14 @@ type PreviewVideoPlayerProps = { initialPreview?: Preview; startTime?: number; isScrubbing: boolean; + isVisible: boolean; currentHourFrame?: string; onControllerReady: (controller: PreviewVideoController) => void; onClick?: () => void; setCurrentHourFrame: (src: string | undefined) => void; }; function PreviewVideoPlayer({ + visibilityRef, className, camera, timeRange, @@ -130,6 +139,7 @@ function PreviewVideoPlayer({ initialPreview, startTime, isScrubbing, + isVisible, currentHourFrame, onControllerReady, onClick, @@ -267,11 +277,13 @@ function PreviewVideoPlayer({ return (
- + )} {cameraPreviews && !currentPreview && (
{t("noPreviewFoundFor", { camera: camera.replaceAll("_", " ") })} diff --git a/web/src/pages/ConfigEditor.tsx b/web/src/pages/ConfigEditor.tsx index f0efee90a..dc3e642ec 100644 --- a/web/src/pages/ConfigEditor.tsx +++ b/web/src/pages/ConfigEditor.tsx @@ -143,6 +143,12 @@ function ConfigEditor() { scrollBeyondLastLine: false, theme: (systemTheme || theme) == "dark" ? "vs-dark" : "vs-light", }); + editorRef.current?.addCommand( + monaco.KeyMod.CtrlCmd | monaco.KeyCode.KeyS, + () => { + onHandleSaveConfig("saveonly"); + }, + ); } else if (editorRef.current) { editorRef.current.setModel(modelRef.current); } @@ -158,7 +164,7 @@ function ConfigEditor() { } schemaConfiguredRef.current = false; }; - }, [config, apiHost, systemTheme, theme]); + }, [config, apiHost, systemTheme, theme, onHandleSaveConfig]); // monitoring state diff --git a/web/src/pages/FaceLibrary.tsx b/web/src/pages/FaceLibrary.tsx index b3c28e7c0..d5ba4c26e 100644 --- a/web/src/pages/FaceLibrary.tsx +++ b/web/src/pages/FaceLibrary.tsx @@ -6,7 +6,17 @@ import CreateFaceWizardDialog from "@/components/overlay/detail/FaceCreateWizard import TextEntryDialog from "@/components/overlay/dialog/TextEntryDialog"; import UploadImageDialog from "@/components/overlay/dialog/UploadImageDialog"; import FaceSelectionDialog from "@/components/overlay/FaceSelectionDialog"; -import { Button } from "@/components/ui/button"; +import { Button, buttonVariants } from "@/components/ui/button"; +import { + AlertDialog, + AlertDialogAction, + AlertDialogCancel, + AlertDialogContent, + AlertDialogDescription, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogTitle, +} from "@/components/ui/alert-dialog"; import { Dialog, DialogContent, @@ -44,7 +54,7 @@ import { TooltipPortal } from "@radix-ui/react-tooltip"; import axios from "axios"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { isDesktop, isMobile } from "react-device-detect"; -import { useTranslation } from "react-i18next"; +import { Trans, useTranslation } from "react-i18next"; import { LuFolderCheck, LuImagePlus, @@ -165,6 +175,11 @@ export default function FaceLibrary() { [selectedFaces, setSelectedFaces], ); + const [deleteDialogOpen, setDeleteDialogOpen] = useState<{ + name: string; + ids: string[]; + } | null>(null); + const onDelete = useCallback( (name: string, ids: string[], isName: boolean = false) => { axios @@ -191,7 +206,7 @@ export default function FaceLibrary() { if (faceImages.length == 1) { // face has been deleted - setPageToggle(""); + setPageToggle("train"); } refreshFaces(); @@ -244,29 +259,32 @@ export default function FaceLibrary() { // keyboard - useKeyboardListener( - page === "train" ? ["a", "Escape"] : [], - (key, modifiers) => { - if (modifiers.repeat || !modifiers.down) { - return; - } + useKeyboardListener(["a", "Escape"], (key, modifiers) => { + if (modifiers.repeat || !modifiers.down) { + return; + } - switch (key) { - case "a": - if (modifiers.ctrl) { - if (selectedFaces.length) { - setSelectedFaces([]); - } else { - setSelectedFaces([...trainImages]); - } + switch (key) { + case "a": + if (modifiers.ctrl) { + if (selectedFaces.length) { + setSelectedFaces([]); + } else { + setSelectedFaces([ + ...(pageToggle === "train" ? 
trainImages : faceImages), + ]); } - break; - case "Escape": - setSelectedFaces([]); - break; - } - }, - ); + } + break; + case "Escape": + setSelectedFaces([]); + break; + } + }); + + useEffect(() => { + setSelectedFaces([]); + }, [pageToggle]); if (!config) { return ; @@ -276,6 +294,41 @@ export default function FaceLibrary() {
+ setDeleteDialogOpen(null)} + > + + + {t("deleteFaceAttempts.title")} + + + + deleteFaceAttempts.desc + + + + + {t("button.cancel", { ns: "common" })} + + { + if (deleteDialogOpen) { + onDelete(deleteDialogOpen.name, deleteDialogOpen.ids); + setDeleteDialogOpen(null); + } + }} + > + {t("button.delete", { ns: "common" })} + + + + +
)}
- {pageToggle && + {pageToggle && faceImages.length === 0 && pageToggle !== "train" ? ( +
+ + No faces available +
+ ) : ( + pageToggle && (pageToggle == "train" ? ( - ))} + )) + )}
); } @@ -443,7 +507,7 @@ function LibrarySelector({
{event && ( @@ -968,7 +1032,9 @@ function FaceAttempt({
-
{data.name}
+
+ {data.name == "unknown" ? t("details.unknown") : data.name} +
void; onDelete: (name: string, ids: string[]) => void; }; -function FaceGrid({ faceImages, pageToggle, onDelete }: FaceGridProps) { - const sortedFaces = useMemo(() => faceImages.sort().reverse(), [faceImages]); +function FaceGrid({ + faceImages, + pageToggle, + selectedFaces, + onClickFaces, + onDelete, +}: FaceGridProps) { + const sortedFaces = useMemo( + () => (faceImages || []).sort().reverse(), + [faceImages], + ); + + if (sortedFaces.length === 0) { + return ( +
+ + No faces available +
+ ); + } return (
{sortedFaces.map((image: string) => ( @@ -1024,6 +1110,8 @@ function FaceGrid({ faceImages, pageToggle, onDelete }: FaceGridProps) { key={image} name={pageToggle} image={image} + selected={selectedFaces.includes(image)} + onClickFaces={onClickFaces} onDelete={onDelete} /> ))} @@ -1034,22 +1122,44 @@ function FaceGrid({ faceImages, pageToggle, onDelete }: FaceGridProps) { type FaceImageProps = { name: string; image: string; + selected: boolean; + onClickFaces: (images: string[], ctrl: boolean) => void; onDelete: (name: string, ids: string[]) => void; }; -function FaceImage({ name, image, onDelete }: FaceImageProps) { +function FaceImage({ + name, + image, + selected, + onClickFaces, + onDelete, +}: FaceImageProps) { const { t } = useTranslation(["views/faceLibrary"]); return ( -
+
{ + e.stopPropagation(); + onClickFaces([image], e.ctrlKey || e.metaKey); + }} + >
- +
-
+
{name}
@@ -1059,7 +1169,10 @@ function FaceImage({ name, image, onDelete }: FaceImageProps) { onDelete(name, [image])} + onClick={(e) => { + e.stopPropagation(); + onDelete(name, [image]); + }} /> {t("button.deleteFaceAttempts")} diff --git a/web/src/views/recording/RecordingView.tsx b/web/src/views/recording/RecordingView.tsx index ecb11ee20..344fd8caa 100644 --- a/web/src/views/recording/RecordingView.tsx +++ b/web/src/views/recording/RecordingView.tsx @@ -385,6 +385,55 @@ export function RecordingView({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [previewRowRef.current?.scrollWidth, previewRowRef.current?.scrollHeight]); + // visibility listener for lazy loading + + const [visiblePreviews, setVisiblePreviews] = useState([]); + const visiblePreviewObserver = useRef(null); + useEffect(() => { + const visibleCameras = new Set(); + visiblePreviewObserver.current = new IntersectionObserver( + (entries) => { + entries.forEach((entry) => { + const camera = (entry.target as HTMLElement).dataset.camera; + + if (!camera) { + return; + } + + if (entry.isIntersecting) { + visibleCameras.add(camera); + } else { + visibleCameras.delete(camera); + } + + setVisiblePreviews([...visibleCameras]); + }); + }, + { threshold: 0.1 }, + ); + + return () => { + visiblePreviewObserver.current?.disconnect(); + }; + }, []); + + const previewRef = useCallback( + (node: HTMLElement | null) => { + if (!visiblePreviewObserver.current) { + return; + } + + try { + if (node) visiblePreviewObserver.current.observe(node); + } catch (e) { + // no op + } + }, + // we need to listen on the value of the ref + // eslint-disable-next-line react-hooks/exhaustive-deps + [visiblePreviewObserver.current], + ); + return (
@@ -631,12 +680,14 @@ export function RecordingView({ }} > { previewRefs.current[cam] = controller; controller.scrubToTimestamp(startTime); diff --git a/web/src/views/settings/CameraSettingsView.tsx b/web/src/views/settings/CameraSettingsView.tsx index 16506b008..9e8605660 100644 --- a/web/src/views/settings/CameraSettingsView.tsx +++ b/web/src/views/settings/CameraSettingsView.tsx @@ -230,7 +230,9 @@ export default function CameraSettingsView({ if (changedValue) { addMessage( "camera_settings", - `Unsaved review classification settings for ${capitalizeFirstLetter(selectedCamera)}`, + t("camera.reviewClassification.unsavedChanges", { + camera: selectedCamera, + }), undefined, `review_classification_settings_${selectedCamera}`, ); diff --git a/web/src/views/settings/ClassificationSettingsView.tsx b/web/src/views/settings/ClassificationSettingsView.tsx index 360fe6fd0..23e656f98 100644 --- a/web/src/views/settings/ClassificationSettingsView.tsx +++ b/web/src/views/settings/ClassificationSettingsView.tsx @@ -220,7 +220,7 @@ export default function ClassificationSettingsView({ if (changedValue) { addMessage( "search_settings", - `Unsaved Classification settings changes`, + t("classification.unsavedChanges"), undefined, "search_settings", ); diff --git a/web/src/views/settings/FrigatePlusSettingsView.tsx b/web/src/views/settings/FrigatePlusSettingsView.tsx index 6aa467a43..144abda33 100644 --- a/web/src/views/settings/FrigatePlusSettingsView.tsx +++ b/web/src/views/settings/FrigatePlusSettingsView.tsx @@ -176,7 +176,7 @@ export default function FrigatePlusSettingsView({ if (changedValue) { addMessage( "plus_settings", - `Unsaved Frigate+ settings changes`, + t("frigatePlus.unsavedChanges"), undefined, "plus_settings", ); diff --git a/web/src/views/settings/MotionTunerView.tsx b/web/src/views/settings/MotionTunerView.tsx index 98169b4f8..c7e73fb33 100644 --- a/web/src/views/settings/MotionTunerView.tsx +++ b/web/src/views/settings/MotionTunerView.tsx @@ -167,7 +167,7 @@ export default function MotionTunerView({ if (changedValue) { addMessage( "motion_tuner", - `Unsaved motion tuner changes (${selectedCamera})`, + t("motionDetectionTuner.unsavedChanges", { camera: selectedCamera }), undefined, `motion_tuner_${selectedCamera}`, ); diff --git a/web/src/views/settings/NotificationsSettingsView.tsx b/web/src/views/settings/NotificationsSettingsView.tsx index a2d43ba91..f3476decf 100644 --- a/web/src/views/settings/NotificationsSettingsView.tsx +++ b/web/src/views/settings/NotificationsSettingsView.tsx @@ -105,7 +105,7 @@ export default function NotificationView({ if (changedValue) { addMessage( "notification_settings", - `Unsaved notification settings`, + t("notification.unsavedChanges"), undefined, `notification_settings`, ); @@ -128,7 +128,7 @@ export default function NotificationView({ if (registration) { addMessage( "notification_settings", - "Unsaved Notification Registrations", + t("notification.unsavedRegistrations"), undefined, "registration", );
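One pattern worth calling out from the backend changes above: each processor previously maintained its own copy of the `(value * 9 + duration) / 10` moving-average math, and those copies are now consolidated into the `InferenceSpeed` helper added to `frigate/util/builtin.py`. Because the helper seeds the metric from the first real sample, inference speeds also start at a real 0 ms instead of the old 0.01 placeholder. A minimal standalone sketch of the behavior (the helper body is copied from the patch; the `face_rec_speed` metric here is just an illustration of the `mp.Value("d", 0.0)` fields in `DataProcessorMetrics`):

```python
import multiprocessing as mp
from multiprocessing.sharedctypes import Synchronized


class InferenceSpeed:
    """Copy of the helper added in frigate/util/builtin.py above."""

    def __init__(self, metric: Synchronized) -> None:
        self.__metric = metric
        self.__initialized = False

    def update(self, inference_time: float) -> None:
        if not self.__initialized:
            # Seed with the first real sample instead of averaging against 0.
            self.__metric.value = inference_time
            self.__initialized = True
            return

        self.__metric.value = (self.__metric.value * 9 + inference_time) / 10

    def current(self) -> float:
        return self.__metric.value


# Illustrative shared metric, mirroring the mp.Value("d", 0.0) fields that
# DataProcessorMetrics now initializes to 0.0 in this patch.
face_rec_speed = mp.Value("d", 0.0)
speed = InferenceSpeed(face_rec_speed)

speed.update(0.020)  # first sample seeds the metric: 0.020
speed.update(0.040)  # EMA afterwards: (0.020 * 9 + 0.040) / 10 = 0.022
print(f"{speed.current() * 1000:.1f} ms")  # -> 22.0 ms
```

The first `update()` call writes its sample straight into the shared metric, so the metrics UI shows a real number immediately; later calls converge on the steady-state speed through a 10-sample exponential moving average.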