From 2020cdffd5f3fcd006ec27e3ce9ec215f15a260a Mon Sep 17 00:00:00 2001 From: Mitch Ross Date: Mon, 17 Feb 2025 08:17:15 -0500 Subject: [PATCH 01/51] Fix prometheus client exporter (#16620) * wip * wip * put it back * formatter * Delete hailort.log * Delete hailort.log * lint --------- Co-authored-by: Nicolas Mowen --- docker-compose.yml | 2 +- docker/main/requirements-wheels.txt | 1 + frigate/api/app.py | 12 +- frigate/stats/prometheus.py | 664 ++++++++++++++++++++-------- 4 files changed, 486 insertions(+), 193 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index f36880593..2d905d385 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -38,4 +38,4 @@ services: container_name: mqtt image: eclipse-mosquitto:1.6 ports: - - "1883:1883" + - "1883:1883" \ No newline at end of file diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index bb4ac622b..e43e74155 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -68,3 +68,4 @@ netaddr==0.8.* netifaces==0.10.* verboselogs==1.7.* virtualenv==20.17.* +prometheus-client == 0.21.* \ No newline at end of file diff --git a/frigate/api/app.py b/frigate/api/app.py index 52e686af1..c55e36a4b 100644 --- a/frigate/api/app.py +++ b/frigate/api/app.py @@ -20,7 +20,6 @@ from fastapi.params import Depends from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse from markupsafe import escape from peewee import operator -from prometheus_client import CONTENT_TYPE_LATEST, generate_latest from pydantic import ValidationError from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters @@ -28,6 +27,7 @@ from frigate.api.defs.request.app_body import AppConfigSetBody from frigate.api.defs.tags import Tags from frigate.config import FrigateConfig from frigate.models import Event, Timeline +from frigate.stats.prometheus import get_metrics, update_metrics from frigate.util.builtin import ( clean_camera_user_pass, get_tz_modifiers, @@ -113,9 +113,13 @@ def stats_history(request: Request, keys: str = None): @router.get("/metrics") -def metrics(): - """Expose Prometheus metrics endpoint""" - return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST) +def metrics(request: Request): + """Expose Prometheus metrics endpoint and update metrics with latest stats""" + # Retrieve the latest statistics and update the Prometheus metrics + stats = request.app.stats_emitter.get_latest_stats() + update_metrics(stats) + content, content_type = get_metrics() + return Response(content=content, media_type=content_type) @router.get("/config") diff --git a/frigate/stats/prometheus.py b/frigate/stats/prometheus.py index a43c091e2..015e551af 100644 --- a/frigate/stats/prometheus.py +++ b/frigate/stats/prometheus.py @@ -1,207 +1,495 @@ -from typing import Dict +import logging +import re -from prometheus_client import ( - CONTENT_TYPE_LATEST, - Counter, - Gauge, - Info, - generate_latest, -) - -# System metrics -SYSTEM_INFO = Info("frigate_system", "System information") -CPU_USAGE = Gauge( - "frigate_cpu_usage_percent", - "Process CPU usage %", - ["pid", "name", "process", "type", "cmdline"], -) -MEMORY_USAGE = Gauge( - "frigate_mem_usage_percent", - "Process memory usage %", - ["pid", "name", "process", "type", "cmdline"], -) - -# Camera metrics -CAMERA_FPS = Gauge( - "frigate_camera_fps", - "Frames per second being consumed from your camera", - ["camera_name"], -) -DETECTION_FPS = Gauge( - "frigate_detection_fps", - "Number of 
times detection is run per second", - ["camera_name"], -) -PROCESS_FPS = Gauge( - "frigate_process_fps", - "Frames per second being processed by frigate", - ["camera_name"], -) -SKIPPED_FPS = Gauge( - "frigate_skipped_fps", "Frames per second skipped for processing", ["camera_name"] -) -DETECTION_ENABLED = Gauge( - "frigate_detection_enabled", "Detection enabled for camera", ["camera_name"] -) -AUDIO_DBFS = Gauge("frigate_audio_dBFS", "Audio dBFS for camera", ["camera_name"]) -AUDIO_RMS = Gauge("frigate_audio_rms", "Audio RMS for camera", ["camera_name"]) - -# Detector metrics -DETECTOR_INFERENCE = Gauge( - "frigate_detector_inference_speed_seconds", - "Time spent running object detection in seconds", - ["name"], -) -DETECTOR_START = Gauge( - "frigate_detection_start", "Detector start time (unix timestamp)", ["name"] -) - -# GPU metrics -GPU_USAGE = Gauge("frigate_gpu_usage_percent", "GPU utilisation %", ["gpu_name"]) -GPU_MEMORY = Gauge("frigate_gpu_mem_usage_percent", "GPU memory usage %", ["gpu_name"]) - -# Storage metrics -STORAGE_FREE = Gauge("frigate_storage_free_bytes", "Storage free bytes", ["storage"]) -STORAGE_TOTAL = Gauge("frigate_storage_total_bytes", "Storage total bytes", ["storage"]) -STORAGE_USED = Gauge("frigate_storage_used_bytes", "Storage used bytes", ["storage"]) -STORAGE_MOUNT = Info( - "frigate_storage_mount_type", "Storage mount type", ["mount_type", "storage"] -) - -# Service metrics -UPTIME = Gauge("frigate_service_uptime_seconds", "Uptime seconds") -LAST_UPDATE = Gauge( - "frigate_service_last_updated_timestamp", "Stats recorded time (unix timestamp)" -) -TEMPERATURE = Gauge("frigate_device_temperature", "Device Temperature", ["device"]) - -# Event metrics -CAMERA_EVENTS = Counter( - "frigate_camera_events", - "Count of camera events since exporter started", - ["camera", "label"], +from prometheus_client import CONTENT_TYPE_LATEST, generate_latest +from prometheus_client.core import ( + REGISTRY, + CounterMetricFamily, + GaugeMetricFamily, + InfoMetricFamily, ) -def update_metrics(stats: Dict) -> None: - """Update Prometheus metrics based on Frigate stats""" - try: - # Update process metrics - if "cpu_usages" in stats: - for pid, proc_stats in stats["cpu_usages"].items(): - cmdline = proc_stats.get("cmdline", "") - process_type = "Other" - process_name = cmdline +class CustomCollector(object): + def __init__(self, _url): + self.process_stats = {} + self.previous_event_id = None + self.previous_event_start_time = None + self.all_events = {} - CPU_USAGE.labels( - pid=pid, - name=process_name, - process=process_name, - type=process_type, - cmdline=cmdline, - ).set(float(proc_stats["cpu"])) + def add_metric(self, metric, label, stats, key, multiplier=1.0): # Now a method + try: + string = str(stats[key]) + value = float(re.findall(r"-?\d*\.?\d*", string)[0]) + metric.add_metric(label, value * multiplier) + except (KeyError, TypeError, IndexError, ValueError): + pass - MEMORY_USAGE.labels( - pid=pid, - name=process_name, - process=process_name, - type=process_type, - cmdline=cmdline, - ).set(float(proc_stats["mem"])) + def add_metric_process( + self, + metric, + camera_stats, + camera_name, + pid_name, + process_name, + cpu_or_memory, + process_type, + ): + try: + pid = str(camera_stats[pid_name]) + label_values = [pid, camera_name, process_name, process_type] + try: + # new frigate:0.13.0-beta3 stat 'cmdline' + label_values.append(self.process_stats[pid]["cmdline"]) + except KeyError: + pass + metric.add_metric(label_values, self.process_stats[pid][cpu_or_memory]) + 
del self.process_stats[pid][cpu_or_memory] + except (KeyError, TypeError, IndexError): + pass - # Update camera metrics - if "cameras" in stats: - for camera_name, camera_stats in stats["cameras"].items(): - if "camera_fps" in camera_stats: - CAMERA_FPS.labels(camera_name=camera_name).set( - camera_stats["camera_fps"] - ) - if "detection_fps" in camera_stats: - DETECTION_FPS.labels(camera_name=camera_name).set( - camera_stats["detection_fps"] - ) - if "process_fps" in camera_stats: - PROCESS_FPS.labels(camera_name=camera_name).set( - camera_stats["process_fps"] - ) - if "skipped_fps" in camera_stats: - SKIPPED_FPS.labels(camera_name=camera_name).set( - camera_stats["skipped_fps"] - ) - if "detection_enabled" in camera_stats: - DETECTION_ENABLED.labels(camera_name=camera_name).set( - camera_stats["detection_enabled"] - ) - if "audio_dBFS" in camera_stats: - AUDIO_DBFS.labels(camera_name=camera_name).set( - camera_stats["audio_dBFS"] - ) - if "audio_rms" in camera_stats: - AUDIO_RMS.labels(camera_name=camera_name).set( - camera_stats["audio_rms"] - ) + def collect(self): + stats = self.process_stats # Assign self.process_stats to local variable stats - # Update detector metrics - if "detectors" in stats: - for name, detector in stats["detectors"].items(): - if "inference_speed" in detector: - DETECTOR_INFERENCE.labels(name=name).set( - detector["inference_speed"] * 0.001 - ) # ms to seconds - if "detection_start" in detector: - DETECTOR_START.labels(name=name).set(detector["detection_start"]) + try: + self.process_stats = stats["cpu_usages"] + except KeyError: + pass - # Update GPU metrics - if "gpu_usages" in stats: - for gpu_name, gpu_stats in stats["gpu_usages"].items(): - if "gpu" in gpu_stats: - GPU_USAGE.labels(gpu_name=gpu_name).set(float(gpu_stats["gpu"])) - if "mem" in gpu_stats: - GPU_MEMORY.labels(gpu_name=gpu_name).set(float(gpu_stats["mem"])) + # process stats for cameras, detectors and other + cpu_usages = GaugeMetricFamily( + "frigate_cpu_usage_percent", + "Process CPU usage %", + labels=["pid", "name", "process", "type", "cmdline"], + ) + mem_usages = GaugeMetricFamily( + "frigate_mem_usage_percent", + "Process memory usage %", + labels=["pid", "name", "process", "type", "cmdline"], + ) - # Update service metrics - if "service" in stats: - service = stats["service"] + # camera stats + audio_dBFS = GaugeMetricFamily( + "frigate_audio_dBFS", "Audio dBFS for camera", labels=["camera_name"] + ) + audio_rms = GaugeMetricFamily( + "frigate_audio_rms", "Audio RMS for camera", labels=["camera_name"] + ) + camera_fps = GaugeMetricFamily( + "frigate_camera_fps", + "Frames per second being consumed from your camera.", + labels=["camera_name"], + ) + detection_enabled = GaugeMetricFamily( + "frigate_detection_enabled", + "Detection enabled for camera", + labels=["camera_name"], + ) + detection_fps = GaugeMetricFamily( + "frigate_detection_fps", + "Number of times detection is run per second.", + labels=["camera_name"], + ) + process_fps = GaugeMetricFamily( + "frigate_process_fps", + "Frames per second being processed by frigate.", + labels=["camera_name"], + ) + skipped_fps = GaugeMetricFamily( + "frigate_skipped_fps", + "Frames per second skip for processing by frigate.", + labels=["camera_name"], + ) - if "uptime" in service: - UPTIME.set(service["uptime"]) - if "last_updated" in service: - LAST_UPDATE.set(service["last_updated"]) + # read camera stats assuming version < frigate:0.13.0-beta3 + cameras = stats + try: + # try to read camera stats in case >= frigate:0.13.0-beta3 + 
cameras = stats["cameras"] + except KeyError: + pass - # Storage metrics - if "storage" in service: - for path, storage in service["storage"].items(): - if "free" in storage: - STORAGE_FREE.labels(storage=path).set( - storage["free"] * 1e6 - ) # MB to bytes - if "total" in storage: - STORAGE_TOTAL.labels(storage=path).set(storage["total"] * 1e6) - if "used" in storage: - STORAGE_USED.labels(storage=path).set(storage["used"] * 1e6) - if "mount_type" in storage: - STORAGE_MOUNT.labels(storage=path).info( - {"mount_type": storage["mount_type"], "storage": path} - ) + for camera_name, camera_stats in cameras.items(): + self.add_metric(audio_dBFS, [camera_name], camera_stats, "audio_dBFS") + self.add_metric(audio_rms, [camera_name], camera_stats, "audio_rms") + self.add_metric(camera_fps, [camera_name], camera_stats, "camera_fps") + self.add_metric( + detection_enabled, [camera_name], camera_stats, "detection_enabled" + ) + self.add_metric(detection_fps, [camera_name], camera_stats, "detection_fps") + self.add_metric(process_fps, [camera_name], camera_stats, "process_fps") + self.add_metric(skipped_fps, [camera_name], camera_stats, "skipped_fps") - # Temperature metrics - if "temperatures" in service: - for device, temp in service["temperatures"].items(): - TEMPERATURE.labels(device=device).set(temp) + self.add_metric_process( + cpu_usages, + camera_stats, + camera_name, + "ffmpeg_pid", + "ffmpeg", + "cpu", + "Camera", + ) + self.add_metric_process( + cpu_usages, + camera_stats, + camera_name, + "capture_pid", + "capture", + "cpu", + "Camera", + ) + self.add_metric_process( + cpu_usages, camera_stats, camera_name, "pid", "detect", "cpu", "Camera" + ) - # Version info - if "version" in service and "latest_version" in service: - SYSTEM_INFO.info( - { - "version": service["version"], - "latest_version": service["latest_version"], - } + self.add_metric_process( + mem_usages, + camera_stats, + camera_name, + "ffmpeg_pid", + "ffmpeg", + "mem", + "Camera", + ) + self.add_metric_process( + mem_usages, + camera_stats, + camera_name, + "capture_pid", + "capture", + "mem", + "Camera", + ) + self.add_metric_process( + mem_usages, camera_stats, camera_name, "pid", "detect", "mem", "Camera" + ) + + yield audio_dBFS + yield audio_rms + yield camera_fps + yield detection_enabled + yield detection_fps + yield process_fps + yield skipped_fps + + # bandwidth stats + bandwidth_usages = GaugeMetricFamily( + "frigate_bandwidth_usages_kBps", + "bandwidth usages kilobytes per second", + labels=["pid", "name", "process", "cmdline"], + ) + + try: + for b_pid, b_stats in stats["bandwidth_usages"].items(): + label = [b_pid] # pid label + try: + n = stats["cpu_usages"][b_pid]["cmdline"] + for p_name, p_stats in stats["processes"].items(): + if str(p_stats["pid"]) == b_pid: + n = p_name + break + + # new frigate:0.13.0-beta3 stat 'cmdline' + label.append(n) # name label + label.append(stats["cpu_usages"][b_pid]["cmdline"]) # process label + label.append(stats["cpu_usages"][b_pid]["cmdline"]) # cmdline label + self.add_metric(bandwidth_usages, label, b_stats, "bandwidth") + except KeyError: + pass + except KeyError: + pass + + yield bandwidth_usages + + # detector stats + try: + yield GaugeMetricFamily( + "frigate_detection_total_fps", + "Sum of detection_fps across all cameras and detectors.", + value=stats["detection_fps"], + ) + except KeyError: + pass + + detector_inference_speed = GaugeMetricFamily( + "frigate_detector_inference_speed_seconds", + "Time spent running object detection in seconds.", + labels=["name"], + ) 
+ + detector_detection_start = GaugeMetricFamily( + "frigate_detection_start", + "Detector start time (unix timestamp)", + labels=["name"], + ) + + try: + for detector_name, detector_stats in stats["detectors"].items(): + self.add_metric( + detector_inference_speed, + [detector_name], + detector_stats, + "inference_speed", + 0.001, + ) # ms to seconds + self.add_metric( + detector_detection_start, + [detector_name], + detector_stats, + "detection_start", ) + self.add_metric_process( + cpu_usages, + stats["detectors"], + detector_name, + "pid", + "detect", + "cpu", + "Detector", + ) + self.add_metric_process( + mem_usages, + stats["detectors"], + detector_name, + "pid", + "detect", + "mem", + "Detector", + ) + except KeyError: + pass + yield detector_inference_speed + yield detector_detection_start + + # detector process stats + try: + for detector_name, detector_stats in stats["detectors"].items(): + p_pid = str(detector_stats["pid"]) + label = [p_pid] # pid label + try: + # new frigate:0.13.0-beta3 stat 'cmdline' + label.append(detector_name) # name label + label.append(detector_name) # process label + label.append("detectors") # type label + label.append(self.process_stats[p_pid]["cmdline"]) # cmdline label + self.add_metric(cpu_usages, label, self.process_stats[p_pid], "cpu") + self.add_metric(mem_usages, label, self.process_stats[p_pid], "mem") + del self.process_stats[p_pid] + except KeyError: + pass + + except KeyError: + pass + + # other named process stats + try: + for process_name, process_stats in stats["processes"].items(): + p_pid = str(process_stats["pid"]) + label = [p_pid] # pid label + try: + # new frigate:0.13.0-beta3 stat 'cmdline' + label.append(process_name) # name label + label.append(process_name) # process label + label.append(process_name) # type label + label.append(self.process_stats[p_pid]["cmdline"]) # cmdline label + self.add_metric(cpu_usages, label, self.process_stats[p_pid], "cpu") + self.add_metric(mem_usages, label, self.process_stats[p_pid], "mem") + del self.process_stats[p_pid] + except KeyError: + pass + + except KeyError: + pass + + # remaining process stats + try: + for process_id, pid_stats in self.process_stats.items(): + label = [process_id] # pid label + try: + # new frigate:0.13.0-beta3 stat 'cmdline' + label.append(pid_stats["cmdline"]) # name label + label.append(pid_stats["cmdline"]) # process label + label.append("Other") # type label + label.append(pid_stats["cmdline"]) # cmdline label + except KeyError: + pass + self.add_metric(cpu_usages, label, pid_stats, "cpu") + self.add_metric(mem_usages, label, pid_stats, "mem") + except KeyError: + pass + + yield cpu_usages + yield mem_usages + + # gpu stats + gpu_usages = GaugeMetricFamily( + "frigate_gpu_usage_percent", "GPU utilisation %", labels=["gpu_name"] + ) + gpu_mem_usages = GaugeMetricFamily( + "frigate_gpu_mem_usage_percent", "GPU memory usage %", labels=["gpu_name"] + ) + + try: + for gpu_name, gpu_stats in stats["gpu_usages"].items(): + self.add_metric(gpu_usages, [gpu_name], gpu_stats, "gpu") + self.add_metric(gpu_mem_usages, [gpu_name], gpu_stats, "mem") + except KeyError: + pass + + yield gpu_usages + yield gpu_mem_usages + + # service stats + uptime_seconds = GaugeMetricFamily( + "frigate_service_uptime_seconds", "Uptime seconds" + ) + last_updated_timestamp = GaugeMetricFamily( + "frigate_service_last_updated_timestamp", + "Stats recorded time (unix timestamp)", + ) + + try: + service_stats = stats["service"] + self.add_metric(uptime_seconds, [""], service_stats, "uptime") + 
self.add_metric(last_updated_timestamp, [""], service_stats, "last_updated") + + info = { + "latest_version": stats["service"]["latest_version"], + "version": stats["service"]["version"], + } + yield InfoMetricFamily( + "frigate_service", "Frigate version info", value=info + ) + + except KeyError: + pass + + yield uptime_seconds + yield last_updated_timestamp + + temperatures = GaugeMetricFamily( + "frigate_device_temperature", "Device Temperature", labels=["device"] + ) + try: + for device_name in stats["service"]["temperatures"]: + self.add_metric( + temperatures, + [device_name], + stats["service"]["temperatures"], + device_name, + ) + except KeyError: + pass + + yield temperatures + + storage_free = GaugeMetricFamily( + "frigate_storage_free_bytes", "Storage free bytes", labels=["storage"] + ) + storage_mount_type = InfoMetricFamily( + "frigate_storage_mount_type", + "Storage mount type", + labels=["mount_type", "storage"], + ) + storage_total = GaugeMetricFamily( + "frigate_storage_total_bytes", "Storage total bytes", labels=["storage"] + ) + storage_used = GaugeMetricFamily( + "frigate_storage_used_bytes", "Storage used bytes", labels=["storage"] + ) + + try: + for storage_path, storage_stats in stats["service"]["storage"].items(): + self.add_metric( + storage_free, [storage_path], storage_stats, "free", 1e6 + ) # MB to bytes + self.add_metric( + storage_total, [storage_path], storage_stats, "total", 1e6 + ) # MB to bytes + self.add_metric( + storage_used, [storage_path], storage_stats, "used", 1e6 + ) # MB to bytes + storage_mount_type.add_metric( + storage_path, + { + "mount_type": storage_stats["mount_type"], + "storage": storage_path, + }, + ) + except KeyError: + pass + + yield storage_free + yield storage_mount_type + yield storage_total + yield storage_used + + # count events + events = [] + + if len(events) > 0: + # events[0] is newest event, last element is oldest, don't need to sort + + if not self.previous_event_id: + # ignore all previous events on startup, prometheus might have already counted them + self.previous_event_id = events[0]["id"] + self.previous_event_start_time = int(events[0]["start_time"]) + + for event in events: + # break if event already counted + if event["id"] == self.previous_event_id: + break + + # break if event starts before previous event + if event["start_time"] < self.previous_event_start_time: + break + + # store counted events in a dict + try: + cam = self.all_events[event["camera"]] + try: + cam[event["label"]] += 1 + except KeyError: + # create label dict if not exists + cam.update({event["label"]: 1}) + except KeyError: + # create camera and label dict if not exists + self.all_events.update({event["camera"]: {event["label"]: 1}}) + + # don't recount events next time + self.previous_event_id = events[0]["id"] + self.previous_event_start_time = int(events[0]["start_time"]) + + camera_events = CounterMetricFamily( + "frigate_camera_events", + "Count of camera events since exporter started", + labels=["camera", "label"], + ) + + for camera, cam_dict in self.all_events.items(): + for label, label_value in cam_dict.items(): + camera_events.add_metric([camera, label], label_value) + + yield camera_events + + +collector = CustomCollector(None) +REGISTRY.register(collector) + + +def update_metrics(stats): + """Updates the Prometheus metrics with the given stats data.""" + try: + collector.process_stats = stats # Directly assign the stats data + # Important: Since we are not fetching from URL, we need to manually call collect + for _ in 
collector.collect(): + pass except Exception as e: - print(f"Error updating Prometheus metrics: {str(e)}") + logging.error(f"Error updating metrics: {e}") -def get_metrics() -> tuple[str, str]: - """Get Prometheus metrics in text format""" - return generate_latest(), CONTENT_TYPE_LATEST +def get_metrics(): + """Returns the Prometheus metrics in text format.""" + content = generate_latest(REGISTRY) # Use generate_latest + return content, CONTENT_TYPE_LATEST From 7b7387a68c7fdbe4bab268f67351b4f10bbc63cf Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 17 Feb 2025 06:38:29 -0700 Subject: [PATCH 02/51] Update info for face recognition (#16629) --- docs/docs/configuration/face_recognition.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index 22968762a..aaab92e6d 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -5,11 +5,7 @@ title: Face Recognition Face recognition allows people to be assigned names and when their face is recognized Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications. -Frigate has support for FaceNet to create face embeddings, which runs locally. Embeddings are then saved to Frigate's database. - -## Minimum System Requirements - -Face recognition works by running a large AI model locally on your system. Systems without a GPU will not run Face Recognition reliably or at all. +Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally. A lightweight face landmark detection model is also used to align faces before running them through the face recognizer. 
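A minimal, self-contained sketch of the LBPH train/predict flow with OpenCV's contrib module may help readers unfamiliar with this recognizer. It is illustrative only: the file names and acceptance threshold are hypothetical, and it is not Frigate's actual implementation.

```python
# Illustrative LBPH train/predict flow (requires opencv-contrib-python).
# Not Frigate's implementation; file names and threshold are hypothetical.
import cv2
import numpy as np

recognizer = cv2.face.LBPHFaceRecognizer_create()

# Train on aligned grayscale face crops, one integer label per person.
faces = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in ["jane_1.jpg", "jane_2.jpg"]]
labels = np.array([0, 0])
recognizer.train(faces, labels)

# predict() returns (label, confidence); lower confidence means a closer match.
label, confidence = recognizer.predict(cv2.imread("unknown.jpg", cv2.IMREAD_GRAYSCALE))
if confidence < 80:  # hypothetical acceptance threshold
    print(f"Recognized person {label} (distance {confidence:.1f})")
```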
## Configuration From 1e709f5b3ffcba4d017e95c8754140975087eb6c Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Mon, 17 Feb 2025 06:57:05 -0700 Subject: [PATCH 03/51] Update coral deps and remove unused pycoral (#16630) --- docker/main/install_deps.sh | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index f8f68398f..ee84f6a14 100755 --- a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -24,20 +24,18 @@ apt-get -qq install --no-install-recommends -y \ mkdir -p -m 600 /root/.gnupg # install coral runtime -wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.0-1/libedgetpu1-max_16.0tf2.17.0-1.bookworm_${TARGETARCH}.deb" +wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.1-1/libedgetpu1-max_16.0tf2.17.1-1.bookworm_${TARGETARCH}.deb" unset DEBIAN_FRONTEND yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive rm /tmp/libedgetpu1-max.deb # install python3 & tflite runtime if [[ "${TARGETARCH}" == "amd64" ]]; then - pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_x86_64.whl - pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_x86_64.whl + pip3 install --break-system-packages https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl fi if [[ "${TARGETARCH}" == "arm64" ]]; then - pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_aarch64.whl - pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_aarch64.whl + pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl fi # btbn-ffmpeg -> amd64 From 3f07d2d37ce8883d97a431c9fe5b91207a641049 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 17 Feb 2025 08:19:03 -0600 Subject: [PATCH 04/51] Improve notifications (#16632) * add notification cooldown * cooldown docs * show alert box when notifications are used in an insecure context * add ability to suspend notifications from dashboard context menu --- .cspell/frigate-dictionary.txt | 1 + docs/docs/configuration/notifications.md | 29 +++- docs/docs/configuration/reference.md | 2 + frigate/comms/webpush.py | 31 +++- frigate/config/camera/notification.py | 3 + web/components.json | 7 +- web/src/components/menu/LiveContextMenu.tsx | 161 +++++++++++++++++- web/src/components/ui/alert.tsx | 59 +++++++ web/src/pages/Settings.tsx | 15 +- web/src/views/live/DraggableGridLayout.tsx | 4 + web/src/views/live/LiveDashboardView.tsx | 1 + .../settings/NotificationsSettingsView.tsx | 60 ++++++- 12 files changed, 351 insertions(+), 22 deletions(-) create mode 100644 web/src/components/ui/alert.tsx diff --git a/.cspell/frigate-dictionary.txt b/.cspell/frigate-dictionary.txt index cc6adcc02..dbab9600e 100644 --- a/.cspell/frigate-dictionary.txt +++ b/.cspell/frigate-dictionary.txt @@ -44,6 +44,7 @@ codeproject colormap colorspace comms +cooldown coro ctypeslib CUDA diff --git a/docs/docs/configuration/notifications.md 
b/docs/docs/configuration/notifications.md index 9225ea6e8..8ae2f6d47 100644 --- a/docs/docs/configuration/notifications.md +++ b/docs/docs/configuration/notifications.md @@ -11,14 +11,37 @@ Frigate offers native notifications using the [WebPush Protocol](https://web.dev In order to use notifications the following requirements must be met: -- Frigate must be accessed via a secure https connection +- Frigate must be accessed via a secure `https` connection ([see the authorization docs](/configuration/authentication)). - A supported browser must be used. Currently Chrome, Firefox, and Safari are known to be supported. -- In order for notifications to be usable externally, Frigate must be accessible externally +- In order for notifications to be usable externally, Frigate must be accessible externally. ### Configuration To configure notifications, go to the Frigate WebUI -> Settings -> Notifications and enable, then fill out the fields and save. +Optionally, you can change the default cooldown period for notifications through the `cooldown` parameter in your config file. This parameter can also be overridden at the camera level. + +Notifications will be prevented if either: + +- The global cooldown period hasn't elapsed since any camera's last notification +- The camera-specific cooldown period hasn't elapsed for the specific camera + +```yaml +notifications: + enabled: True + email: "johndoe@gmail.com" + cooldown: 10 # wait 10 seconds before sending another notification from any camera +``` + +```yaml +cameras: + doorbell: + ... + notifications: + enabled: True + cooldown: 30 # wait 30 seconds before sending another notification from the doorbell camera +``` + ### Registration Once notifications are enabled, press the `Register for Notifications` button on all devices that you would like to receive notifications on. This will register the background worker. After this Frigate must be restarted and then notifications will begin to be sent. @@ -39,4 +62,4 @@ Different platforms handle notifications differently, some settings changes may ### Android -Most Android phones have battery optimization settings. To get reliable Notification delivery the browser (Chrome, Firefox) should have battery optimizations disabled. If Frigate is running as a PWA then the Frigate app should have battery optimizations disabled as well. \ No newline at end of file +Most Android phones have battery optimization settings. To get reliable Notification delivery the browser (Chrome, Firefox) should have battery optimizations disabled. If Frigate is running as a PWA then the Frigate app should have battery optimizations disabled as well. 
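The dual cooldown described above behaves roughly like the sketch below. The real check lives in `frigate/comms/webpush.py` (shown later in this patch); the names and structure here are illustrative only.

```python
# Rough sketch of the dual-cooldown gate; the real logic is in
# frigate/comms/webpush.py below. Names and structure are illustrative.
import time

last_notification_time = 0.0        # last notification from any camera
last_camera_notification_time = {}  # per-camera last notification times

def should_notify(camera: str, global_cooldown: int, camera_cooldown: int) -> bool:
    now = time.time()
    if now - last_notification_time < global_cooldown:
        return False  # still inside the global cooldown window
    if now - last_camera_notification_time.get(camera, 0.0) < camera_cooldown:
        return False  # still inside this camera's cooldown window
    return True
```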
diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index 21b60f449..b791e708a 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -420,6 +420,8 @@ notifications: # Optional: Email for push service to reach out to # NOTE: This is required to use notifications email: "admin@example.com" + # Optional: Cooldown time for notifications in seconds (default: shown below) + cooldown: 0 # Optional: Record configuration # NOTE: Can be overridden at the camera level diff --git a/frigate/comms/webpush.py b/frigate/comms/webpush.py index b55b7e82c..b845c3afd 100644 --- a/frigate/comms/webpush.py +++ b/frigate/comms/webpush.py @@ -47,6 +47,10 @@ class WebPushClient(Communicator): # type: ignore[misc] self.suspended_cameras: dict[str, int] = { c.name: 0 for c in self.config.cameras.values() } + self.last_camera_notification_time: dict[str, float] = { + c.name: 0 for c in self.config.cameras.values() + } + self.last_notification_time: float = 0 self.notification_queue: queue.Queue[PushNotification] = queue.Queue() self.notification_thread = threading.Thread( target=self._process_notifications, daemon=True @@ -264,6 +268,29 @@ class WebPushClient(Communicator): # type: ignore[misc] ): return + camera: str = payload["after"]["camera"] + current_time = datetime.datetime.now().timestamp() + + # Check global cooldown period + if ( + current_time - self.last_notification_time + < self.config.notifications.cooldown + ): + logger.debug( + f"Skipping notification for {camera} - in global cooldown period" + ) + return + + # Check camera-specific cooldown period + if ( + current_time - self.last_camera_notification_time[camera] + < self.config.cameras[camera].notifications.cooldown + ): + logger.debug( + f"Skipping notification for {camera} - in camera-specific cooldown period" + ) + return + self.check_registrations() state = payload["type"] @@ -278,6 +305,9 @@ class WebPushClient(Communicator): # type: ignore[misc] ): return + self.last_camera_notification_time[camera] = current_time + self.last_notification_time = current_time + reviewId = payload["after"]["id"] sorted_objects: set[str] = set() @@ -287,7 +317,6 @@ class WebPushClient(Communicator): # type: ignore[misc] sorted_objects.update(payload["after"]["data"]["sub_labels"]) - camera: str = payload["after"]["camera"] title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}" message = f"Detected on {camera.replace('_', ' ').title()}" image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}" diff --git a/frigate/config/camera/notification.py b/frigate/config/camera/notification.py index 79355b8ae..b0d7cebf9 100644 --- a/frigate/config/camera/notification.py +++ b/frigate/config/camera/notification.py @@ -10,6 +10,9 @@ __all__ = ["NotificationConfig"] class NotificationConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable notifications") email: Optional[str] = Field(default=None, title="Email required for push.") + cooldown: Optional[int] = Field( + default=0, ge=0, title="Cooldown period for notifications (time in seconds)." + ) enabled_in_config: Optional[bool] = Field( default=None, title="Keep track of original state of notifications." 
) diff --git a/web/components.json b/web/components.json index 053bbcf62..3f112537b 100644 --- a/web/components.json +++ b/web/components.json @@ -11,6 +11,9 @@ }, "aliases": { "components": "@/components", - "utils": "@/lib/utils" + "utils": "@/lib/utils", + "ui": "@/components/ui", + "lib": "@/lib", + "hooks": "@/hooks" } -} \ No newline at end of file +} diff --git a/web/src/components/menu/LiveContextMenu.tsx b/web/src/components/menu/LiveContextMenu.tsx index 07909a311..969e647a0 100644 --- a/web/src/components/menu/LiveContextMenu.tsx +++ b/web/src/components/menu/LiveContextMenu.tsx @@ -11,6 +11,9 @@ import { ContextMenuContent, ContextMenuItem, ContextMenuSeparator, + ContextMenuSub, + ContextMenuSubContent, + ContextMenuSubTrigger, ContextMenuTrigger, } from "@/components/ui/context-menu"; import { @@ -24,12 +27,19 @@ import { VolumeSlider } from "@/components/ui/slider"; import { CameraStreamingDialog } from "../settings/CameraStreamingDialog"; import { AllGroupsStreamingSettings, + FrigateConfig, GroupStreamingSettings, } from "@/types/frigateConfig"; import { useStreamingSettings } from "@/context/streaming-settings-provider"; -import { IoIosWarning } from "react-icons/io"; +import { + IoIosNotifications, + IoIosNotificationsOff, + IoIosWarning, +} from "react-icons/io"; import { cn } from "@/lib/utils"; import { useNavigate } from "react-router-dom"; +import { formatUnixTimestampToDateTime } from "@/utils/dateUtil"; +import { useNotifications, useNotificationSuspend } from "@/api/ws"; type LiveContextMenuProps = { className?: string; @@ -48,6 +58,7 @@ type LiveContextMenuProps = { statsState: boolean; toggleStats: () => void; resetPreferredLiveMode: () => void; + config?: FrigateConfig; children?: ReactNode; }; export default function LiveContextMenu({ @@ -67,6 +78,7 @@ export default function LiveContextMenu({ statsState, toggleStats, resetPreferredLiveMode, + config, children, }: LiveContextMenuProps) { const [showSettings, setShowSettings] = useState(false); @@ -185,6 +197,44 @@ export default function LiveContextMenu({ const navigate = useNavigate(); + // notifications + + const notificationsEnabledInConfig = + config?.cameras[camera].notifications.enabled_in_config; + + const { payload: notificationState, send: sendNotification } = + useNotifications(camera); + const { payload: notificationSuspendUntil, send: sendNotificationSuspend } = + useNotificationSuspend(camera); + const [isSuspended, setIsSuspended] = useState(false); + + useEffect(() => { + if (notificationSuspendUntil) { + setIsSuspended( + notificationSuspendUntil !== "0" || notificationState === "OFF", + ); + } + }, [notificationSuspendUntil, notificationState]); + + const handleSuspend = (duration: string) => { + if (duration === "off") { + sendNotification("OFF"); + } else { + sendNotificationSuspend(Number.parseInt(duration)); + } + }; + + const formatSuspendedUntil = (timestamp: string) => { + if (timestamp === "0") return "Frigate restarts."; + + return formatUnixTimestampToDateTime(Number.parseInt(timestamp), { + time_style: "medium", + date_style: "medium", + timezone: config?.ui.timezone, + strftime_fmt: `%b %d, ${config?.ui.time_format == "24hour" ? "%H:%M" : "%I:%M %p"}`, + }); + }; + return (

@@ -288,6 +338,115 @@ export default function LiveContextMenu({
           </div>
         )}
       </div>
+      {notificationsEnabledInConfig && (
+        <>
+          <ContextMenuSeparator />
+          <ContextMenuSub>
+            <ContextMenuSubTrigger>
+              <span>Notifications</span>
+            </ContextMenuSubTrigger>
+            <ContextMenuSubContent>
+              <div className="flex flex-col gap-1 px-2 py-1.5">
+                <div className="flex items-center gap-2">
+                  {notificationState === "ON" ? (
+                    <>
+                      {isSuspended ? (
+                        <>
+                          <IoIosNotificationsOff className="size-5" />
+                          <span>Suspended</span>
+                        </>
+                      ) : (
+                        <>
+                          <IoIosNotifications className="size-5" />
+                          <span>Enabled</span>
+                        </>
+                      )}
+                    </>
+                  ) : (
+                    <>
+                      <IoIosNotificationsOff className="size-5" />
+                      <span>Disabled</span>
+                    </>
+                  )}
+                </div>
+                {isSuspended && (
+                  <span className="text-xs text-muted-foreground">
+                    Until {formatSuspendedUntil(notificationSuspendUntil)}
+                  </span>
+                )}
+              </div>
+              <ContextMenuSeparator />
+              {isSuspended ? (
+                <>
+                  <ContextMenuItem
+                    onClick={() => {
+                      sendNotification("ON");
+                      sendNotificationSuspend(0);
+                    }}
+                  >
+                    <div className="flex items-center gap-2">
+                      {notificationState === "ON" ? (
+                        <span>Unsuspend</span>
+                      ) : (
+                        <span>Enable</span>
+                      )}
+                    </div>
+                  </ContextMenuItem>
+                </>
+              ) : (
+                notificationState === "ON" && (
+                  <>
+                    <ContextMenuSeparator />
+                    <div className="px-2 py-1.5 text-sm text-muted-foreground">
+                      Suspend for:
+                    </div>
+                    <ContextMenuItem onClick={() => handleSuspend("5")}>
+                      5 minutes
+                    </ContextMenuItem>
+                    <ContextMenuItem
+                      onClick={() => handleSuspend("10")}
+                    >
+                      10 minutes
+                    </ContextMenuItem>
+                    <ContextMenuItem
+                      onClick={() => handleSuspend("30")}
+                    >
+                      30 minutes
+                    </ContextMenuItem>
+                    <ContextMenuItem
+                      onClick={() => handleSuspend("60")}
+                    >
+                      1 hour
+                    </ContextMenuItem>
+                    <ContextMenuItem
+                      onClick={() => handleSuspend("840")}
+                    >
+                      12 hours
+                    </ContextMenuItem>
+                    <ContextMenuItem
+                      onClick={() => handleSuspend("1440")}
+                    >
+                      24 hours
+                    </ContextMenuItem>
+                    <ContextMenuItem
+                      onClick={() => handleSuspend("off")}
+                    >
+                      Until restart
+                    </ContextMenuItem>
+                  </>
+                )
+              )}
+            </ContextMenuSubContent>
+          </ContextMenuSub>
+        </>
+      )}

diff --git a/web/src/components/ui/alert.tsx b/web/src/components/ui/alert.tsx
new file mode 100644
index 000000000..41fa7e056
--- /dev/null
+++ b/web/src/components/ui/alert.tsx
@@ -0,0 +1,59 @@
+import * as React from "react"
+import { cva, type VariantProps } from "class-variance-authority"
+
+import { cn } from "@/lib/utils"
+
+const alertVariants = cva(
+  "relative w-full rounded-lg border p-4 [&>svg~*]:pl-7 [&>svg+div]:translate-y-[-3px] [&>svg]:absolute [&>svg]:left-4 [&>svg]:top-4 [&>svg]:text-foreground",
+  {
+    variants: {
+      variant: {
+        default: "bg-background text-foreground",
+        destructive:
+          "border-destructive/50 text-destructive dark:border-destructive [&>svg]:text-destructive",
+      },
+    },
+    defaultVariants: {
+      variant: "default",
+    },
+  }
+)
+
+const Alert = React.forwardRef<
+  HTMLDivElement,
+  React.HTMLAttributes<HTMLDivElement> & VariantProps<typeof alertVariants>
+>(({ className, variant, ...props }, ref) => (
+  <div
+    ref={ref}
+    role="alert"
+    className={cn(alertVariants({ variant }), className)}
+    {...props}
+  />
+))
+Alert.displayName = "Alert"
+
+const AlertTitle = React.forwardRef<
+  HTMLParagraphElement,
+  React.HTMLAttributes<HTMLHeadingElement>
+>(({ className, ...props }, ref) => (
+  <h5
+    ref={ref}
+    className={cn("mb-1 font-medium leading-none tracking-tight", className)}
+    {...props}
+  />
+))
+AlertTitle.displayName = "AlertTitle"
+
+const AlertDescription = React.forwardRef<
+  HTMLParagraphElement,
+  React.HTMLAttributes<HTMLParagraphElement>
+>(({ className, ...props }, ref) => (
+  <div
+    ref={ref}
+    className={cn("text-sm [&_p]:leading-relaxed", className)}
+    {...props}
+  />
+)) +AlertDescription.displayName = "AlertDescription" + +export { Alert, AlertTitle, AlertDescription } diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index fbf31a00a..6eeb5bcc3 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -61,19 +61,6 @@ export default function Settings() { const [searchParams] = useSearchParams(); - // available settings views - - const settingsViews = useMemo(() => { - const views = [...allSettingsViews]; - - if (!("Notification" in window) || !window.isSecureContext) { - const index = views.indexOf("notifications"); - views.splice(index, 1); - } - - return views; - }, []); - // TODO: confirm leave page const [unsavedChanges, setUnsavedChanges] = useState(false); const [confirmationDialogOpen, setConfirmationDialogOpen] = useState(false); @@ -160,7 +147,7 @@ export default function Settings() { } }} > - {Object.values(settingsViews).map((item) => ( + {Object.values(allSettingsViews).map((item) => ( resetPreferredLiveMode(camera.name) } + config={config} > void; unmuteAll: () => void; resetPreferredLiveMode: () => void; + config?: FrigateConfig; }; const GridLiveContextMenu = React.forwardRef< @@ -819,6 +821,7 @@ const GridLiveContextMenu = React.forwardRef< muteAll, unmuteAll, resetPreferredLiveMode, + config, ...props }, ref, @@ -849,6 +852,7 @@ const GridLiveContextMenu = React.forwardRef< muteAll={muteAll} unmuteAll={unmuteAll} resetPreferredLiveMode={resetPreferredLiveMode} + config={config} > {children} diff --git a/web/src/views/live/LiveDashboardView.tsx b/web/src/views/live/LiveDashboardView.tsx index 89a2aeef2..45d0d5302 100644 --- a/web/src/views/live/LiveDashboardView.tsx +++ b/web/src/views/live/LiveDashboardView.tsx @@ -507,6 +507,7 @@ export default function LiveDashboardView({ resetPreferredLiveMode={() => resetPreferredLiveMode(camera.name) } + config={config} > (); useEffect(() => { + if (!("Notification" in window) || !window.isSecureContext) { + return; + } navigator.serviceWorker .getRegistration(NOTIFICATION_SERVICE_WORKER) .then((worker) => { @@ -279,6 +283,60 @@ export default function NotificationView({ saveToConfig(values as NotificationSettingsValueType); } + if (!("Notification" in window) || !window.isSecureContext) { + return ( +
+      <div className="flex size-full flex-col md:flex-row">
+        <div className="scrollbar-container order-last mb-10 mt-2 flex h-full w-full flex-col overflow-y-auto rounded-lg border-[1px] border-secondary-foreground bg-background_alt p-2 md:order-none md:mb-0 md:mr-2 md:mt-0">
+          <Heading as="h3" className="my-2">
+            Notification Settings
+          </Heading>
+          <div className="mb-5 mt-2 flex max-w-5xl flex-col gap-2 text-sm text-primary-variant">
+            <p>
+              Frigate can natively send push notifications to your device
+              when it is running in the browser or installed as a PWA.
+            </p>
+            <div className="flex items-center text-primary">
+              <Link
+                to="https://docs.frigate.video/configuration/notifications"
+                target="_blank"
+                rel="noopener noreferrer"
+                className="inline"
+              >
+                Read the Documentation{" "}
+                <LuExternalLink className="ml-2 inline-flex size-3" />
+              </Link>
+            </div>
+          </div>
+          <Alert variant="destructive">
+            <IoIosWarning className="size-5" />
+            <AlertTitle>Notifications Unavailable</AlertTitle>
+            <AlertDescription>
+              Web push notifications require a secure context (
+              <code>https://...</code>). This is a browser limitation. Access
+              Frigate securely to use notifications.
+              <div className="mt-3 flex items-center">
+                <Link
+                  to="https://docs.frigate.video/configuration/authentication"
+                  target="_blank"
+                  rel="noopener noreferrer"
+                  className="inline"
+                >
+                  Read the Documentation{" "}
+                  <LuExternalLink className="ml-2 inline-flex size-3" />
+                </Link>
+              </div>
+            </AlertDescription>
+          </Alert>
+        </div>
+      </div>
+    );
+  }
+
   return (
     <>
From 124d92daa9f55a99cc33ceefce487968df082cea Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 17 Feb 2025 10:37:17 -0600 Subject: [PATCH 05/51] Improve Object Lifecycle pane (#16635) * add path point tracking to backend * types * draw paths on lifecycle pane * make points clickable * don't display a path if we don't have any saved path points * only object lifecycle points should have a click handler * change to debug log * better debug log message --- frigate/api/event.py | 2 + frigate/events/maintainer.py | 2 + frigate/track/tracked_object.py | 27 +++ .../overlay/detail/ObjectLifecycle.tsx | 207 +++++++++++++++++- web/src/types/event.ts | 1 + web/src/types/timeline.ts | 22 +- 6 files changed, 245 insertions(+), 16 deletions(-) diff --git a/frigate/api/event.py b/frigate/api/event.py index 247366920..2df32471e 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -336,6 +336,7 @@ def events_explore(limit: int = 10): "sub_label_score", "average_estimated_speed", "velocity_angle", + "path_data", ] }, "event_count": label_counts[event.label], @@ -622,6 +623,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends()) "sub_label_score", "average_estimated_speed", "velocity_angle", + "path_data", ] } diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py index d49da5a97..fc02dd37a 100644 --- a/frigate/events/maintainer.py +++ b/frigate/events/maintainer.py @@ -28,6 +28,7 @@ def should_update_db(prev_event: Event, current_event: Event) -> bool: or prev_event["average_estimated_speed"] != current_event["average_estimated_speed"] or prev_event["velocity_angle"] != current_event["velocity_angle"] + or prev_event["path_data"] != current_event["path_data"] ): return True return False @@ -217,6 +218,7 @@ class EventProcessor(threading.Thread): "velocity_angle": event_data["velocity_angle"], "type": "object", "max_severity": event_data.get("max_severity"), + "path_data": event_data.get("path_data"), }, } diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index ac57083df..0e7464bc2 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -2,6 +2,7 @@ import base64 import logging +import math from collections import defaultdict from statistics import median from typing import Optional @@ -66,6 +67,7 @@ class TrackedObject: self.current_estimated_speed = 0 self.average_estimated_speed = 0 self.velocity_angle = 0 + self.path_data = [] self.previous = self.to_dict() @property @@ -148,6 +150,7 @@ class TrackedObject: "attributes": obj_data["attributes"], "current_estimated_speed": self.current_estimated_speed, "velocity_angle": self.velocity_angle, + "path_data": self.path_data, } thumb_update = True @@ -300,6 +303,29 @@ class TrackedObject: if self.obj_data["frame_time"] - self.previous["frame_time"] >= (1 / 3): autotracker_update = True + # update path + width = self.camera_config.detect.width + height = self.camera_config.detect.height + bottom_center = ( + round(obj_data["centroid"][0] / width, 4), + round(obj_data["box"][3] / height, 4), + ) + + # calculate a reasonable movement threshold (e.g., 5% of the frame diagonal) + threshold = 0.05 * math.sqrt(width**2 + height**2) / max(width, height) + + if not self.path_data: + self.path_data.append((bottom_center, obj_data["frame_time"])) + elif ( + math.dist(self.path_data[-1][0], bottom_center) >= threshold + or len(self.path_data) == 1 + ): + # check Euclidean distance before appending + 
self.path_data.append((bottom_center, obj_data["frame_time"])) + logger.debug( + f"Point tracking: {obj_data['id']}, {bottom_center}, {obj_data['frame_time']}" + ) + self.obj_data.update(obj_data) self.current_zones = current_zones return (thumb_update, significant_change, autotracker_update) @@ -336,6 +362,7 @@ class TrackedObject: "current_estimated_speed": self.current_estimated_speed, "average_estimated_speed": self.average_estimated_speed, "velocity_angle": self.velocity_angle, + "path_data": self.path_data, } if include_thumbnail: diff --git a/web/src/components/overlay/detail/ObjectLifecycle.tsx b/web/src/components/overlay/detail/ObjectLifecycle.tsx index 7481607eb..656ae275c 100644 --- a/web/src/components/overlay/detail/ObjectLifecycle.tsx +++ b/web/src/components/overlay/detail/ObjectLifecycle.tsx @@ -11,7 +11,7 @@ import { CarouselPrevious, } from "@/components/ui/carousel"; import { Button } from "@/components/ui/button"; -import { ObjectLifecycleSequence } from "@/types/timeline"; +import { ClassType, ObjectLifecycleSequence } from "@/types/timeline"; import Heading from "@/components/ui/heading"; import { ReviewDetailPaneType } from "@/types/review"; import { FrigateConfig } from "@/types/frigateConfig"; @@ -53,6 +53,13 @@ import { } from "@/components/ui/context-menu"; import { useNavigate } from "react-router-dom"; +type Position = { + x: number; + y: number; + timestamp: number; + lifecycle_item?: ObjectLifecycleSequence; +}; + type ObjectLifecycleProps = { className?: string; event: Event; @@ -108,6 +115,17 @@ export default function ObjectLifecycle({ [config, event], ); + const getObjectColor = useCallback( + (label: string) => { + const objectColor = config?.model?.colormap[label]; + if (objectColor) { + const reversed = [...objectColor].reverse(); + return reversed; + } + }, + [config], + ); + const getZonePolygon = useCallback( (zoneName: string) => { if (!imgRef.current || !config) { @@ -120,7 +138,7 @@ export default function ObjectLifecycle({ return zonePoints .split(",") - .map(parseFloat) + .map(Number.parseFloat) .reduce((acc, value, index) => { const isXCoordinate = index % 2 === 0; const coordinate = isXCoordinate @@ -158,6 +176,43 @@ export default function ObjectLifecycle({ ); }, [config, event.camera]); + const savedPathPoints = useMemo(() => { + return ( + event.data.path_data?.map(([coords, timestamp]: [number[], number]) => ({ + x: coords[0], + y: coords[1], + timestamp, + lifecycle_item: undefined, + })) || [] + ); + }, [event.data.path_data]); + + const eventSequencePoints = useMemo(() => { + return ( + eventSequence + ?.filter((event) => event.data.box !== undefined) + .map((event) => { + const [left, top, width, height] = event.data.box!; + + return { + x: left + width / 2, // Center x-coordinate + y: top + height, // Bottom y-coordinate + timestamp: event.timestamp, + lifecycle_item: event, + }; + }) || [] + ); + }, [eventSequence]); + + // final object path with timeline points included + const pathPoints = useMemo(() => { + // don't display a path if we don't have any saved path points + if (savedPathPoints.length === 0) return []; + return [...savedPathPoints, ...eventSequencePoints].sort( + (a, b) => a.timestamp - b.timestamp, + ); + }, [savedPathPoints, eventSequencePoints]); + const [timeIndex, setTimeIndex] = useState(0); const handleSetBox = useCallback( @@ -171,12 +226,13 @@ export default function ObjectLifecycle({ top: `${box[1] * imgRect.height}px`, width: `${box[2] * imgRect.width}px`, height: `${box[3] * imgRect.height}px`, + 
borderColor: `rgb(${getObjectColor(event.label)?.join(",")})`, }; setBoxStyle(style); } }, - [imgRef], + [imgRef, event, getObjectColor], ); // image @@ -254,6 +310,21 @@ export default function ObjectLifecycle({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [mainApi, thumbnailApi]); + const handlePathPointClick = useCallback( + (index: number) => { + if (!mainApi || !thumbnailApi || !eventSequence) return; + const sequenceIndex = eventSequence.findIndex( + (item) => item.timestamp === pathPoints[index].timestamp, + ); + if (sequenceIndex !== -1) { + mainApi.scrollTo(sequenceIndex); + thumbnailApi.scrollTo(sequenceIndex); + setCurrent(sequenceIndex); + } + }, + [mainApi, thumbnailApi, eventSequence, pathPoints], + ); + if (!event.id || !eventSequence || !config || !timeIndex) { return ; } @@ -355,13 +426,33 @@ export default function ObjectLifecycle({ ))} {boxStyle && ( -
+
)} + {pathPoints && pathPoints.length > 0 && ( +
+ + + +
+ )} @@ -699,3 +790,105 @@ function getLifecycleItemDescription(lifecycleItem: ObjectLifecycleSequence) { return `${label} detected`; } } + +type ObjectPathProps = { + positions?: Position[]; + color?: number[]; + width?: number; + pointRadius?: number; + imgRef: React.RefObject; + onPointClick?: (index: number) => void; +}; + +const typeColorMap: Partial> = { + [ClassType.VISIBLE]: [0, 255, 0], // Green + [ClassType.GONE]: [255, 0, 0], // Red + [ClassType.ENTERED_ZONE]: [255, 165, 0], // Orange + [ClassType.ATTRIBUTE]: [128, 0, 128], // Purple + [ClassType.ACTIVE]: [255, 255, 0], // Yellow + [ClassType.STATIONARY]: [128, 128, 128], // Gray + [ClassType.HEARD]: [0, 255, 255], // Cyan + [ClassType.EXTERNAL]: [165, 42, 42], // Brown +}; + +function ObjectPath({ + positions, + color = [0, 0, 255], + width = 2, + pointRadius = 4, + imgRef, + onPointClick, +}: ObjectPathProps) { + const getAbsolutePositions = useCallback(() => { + if (!imgRef.current || !positions) return []; + const imgRect = imgRef.current.getBoundingClientRect(); + return positions.map((pos) => ({ + x: pos.x * imgRect.width, + y: pos.y * imgRect.height, + timestamp: pos.timestamp, + lifecycle_item: pos.lifecycle_item, + })); + }, [positions, imgRef]); + + const generateStraightPath = useCallback((points: Position[]) => { + if (!points || points.length < 2) return ""; + let path = `M ${points[0].x} ${points[0].y}`; + for (let i = 1; i < points.length; i++) { + path += ` L ${points[i].x} ${points[i].y}`; + } + return path; + }, []); + + const getPointColor = (baseColor: number[], type?: ClassType) => { + if (type) { + const typeColor = typeColorMap[type]; + if (typeColor) { + return `rgb(${typeColor.join(",")})`; + } + } + // normal path point + return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`; + }; + + if (!imgRef.current) return null; + const absolutePositions = getAbsolutePositions(); + const lineColor = `rgb(${color.join(",")})`; + + return ( + + + {absolutePositions.map((pos, index) => ( + + + + pos.lifecycle_item && onPointClick && onPointClick(index) + } + style={{ cursor: pos.lifecycle_item ? "pointer" : "default" }} + /> + + + + {pos.lifecycle_item + ? 
getLifecycleItemDescription(pos.lifecycle_item) + : "Tracked point"} + + + + ))} + + ); +} diff --git a/web/src/types/event.ts b/web/src/types/event.ts index 0e7aa9916..d7c8ca665 100644 --- a/web/src/types/event.ts +++ b/web/src/types/event.ts @@ -22,5 +22,6 @@ export interface Event { area: number; ratio: number; type: "object" | "audio" | "manual"; + path_data: [number[], number][]; }; } diff --git a/web/src/types/timeline.ts b/web/src/types/timeline.ts index 94ef75eba..66366c2f0 100644 --- a/web/src/types/timeline.ts +++ b/web/src/types/timeline.ts @@ -1,3 +1,15 @@ +export enum ClassType { + VISIBLE = "visible", + GONE = "gone", + ENTERED_ZONE = "entered_zone", + ATTRIBUTE = "attribute", + ACTIVE = "active", + STATIONARY = "stationary", + HEARD = "heard", + EXTERNAL = "external", + PATH_POINT = "path_point", +} + export type ObjectLifecycleSequence = { camera: string; timestamp: number; @@ -10,15 +22,7 @@ export type ObjectLifecycleSequence = { attribute: string; zones: string[]; }; - class_type: - | "visible" - | "gone" - | "entered_zone" - | "attribute" - | "active" - | "stationary" - | "heard" - | "external"; + class_type: ClassType; source_id: string; source: string; }; From b961235187454c0288a750a0debc9b319ec780ea Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 17 Feb 2025 15:49:24 -0600 Subject: [PATCH 06/51] Tracking fixes (#16645) * use config enabled check for ptz cam tracker * ensure we have an object match before accessing score history * add comment for clarity --- frigate/track/norfair_tracker.py | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/frigate/track/norfair_tracker.py b/frigate/track/norfair_tracker.py index d168bfe94..db17f9313 100644 --- a/frigate/track/norfair_tracker.py +++ b/frigate/track/norfair_tracker.py @@ -263,12 +263,13 @@ class NorfairTracker(ObjectTracker): # Get the correct tracker for this object's label tracker = self.get_tracker(obj["label"]) - obj["score_history"] = [ - p.data["score"] - for p in next( - (o for o in tracker.tracked_objects if o.global_id == track_id) - ).past_detections - ] + obj_match = next( + (o for o in tracker.tracked_objects if o.global_id == track_id), None + ) + # if we don't have a match, we have a new object + obj["score_history"] = ( + [p.data["score"] for p in obj_match.past_detections] if obj_match else [] + ) self.tracked_objects[id] = obj self.disappeared[id] = 0 self.positions[id] = { @@ -519,7 +520,11 @@ class NorfairTracker(ObjectTracker): default_detections.extend(dets) # Update default tracker with untracked detections - mode = "ptz" if self.ptz_metrics.autotracker_enabled.value else "static" + mode = ( + "ptz" + if self.camera_config.onvif.autotracking.enabled_in_config + else "static" + ) tracked_objects = self.default_tracker[mode].update( detections=default_detections, coord_transformations=coord_transformations ) From 11b1dbf0ff585d84720b7e856149fef5bb2b1e1f Mon Sep 17 00:00:00 2001 From: Ashton Johnson Date: Mon, 17 Feb 2025 16:04:35 -0600 Subject: [PATCH 07/51] Update contributing.md (#16641) Added note to include buildx plugin with Docker --- docs/docs/development/contributing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/development/contributing.md b/docs/docs/development/contributing.md index 32fc13e1f..eb33765fe 100644 --- a/docs/docs/development/contributing.md +++ b/docs/docs/development/contributing.md @@ -34,7 +34,7 @@ Fork 
[blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshe ### Prerequisites - GNU make -- Docker +- Docker (including buildx plugin) - An extra detector (Coral, OpenVINO, etc.) is optional but recommended to simulate real world performance. :::note From 4f88a5f2ad0968d51390fce0f59a14a8939b5613 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 17 Feb 2025 17:03:51 -0600 Subject: [PATCH 08/51] Object Lifecycle tweaks (#16648) * Disable object path and add warning for autotracking cameras * clean up --- .../components/overlay/detail/ObjectLifecycle.tsx | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/web/src/components/overlay/detail/ObjectLifecycle.tsx b/web/src/components/overlay/detail/ObjectLifecycle.tsx index 656ae275c..de343861e 100644 --- a/web/src/components/overlay/detail/ObjectLifecycle.tsx +++ b/web/src/components/overlay/detail/ObjectLifecycle.tsx @@ -207,11 +207,15 @@ export default function ObjectLifecycle({ // final object path with timeline points included const pathPoints = useMemo(() => { // don't display a path if we don't have any saved path points - if (savedPathPoints.length === 0) return []; + if ( + savedPathPoints.length === 0 || + config?.cameras[event.camera]?.onvif.autotracking.enabled_in_config + ) + return []; return [...savedPathPoints, ...eventSequencePoints].sort( (a, b) => a.timestamp - b.timestamp, ); - }, [savedPathPoints, eventSequencePoints]); + }, [savedPathPoints, eventSequencePoints, config, event]); const [timeIndex, setTimeIndex] = useState(0); @@ -503,6 +507,11 @@ export default function ObjectLifecycle({ {current + 1} of {eventSequence.length}
+      {config?.cameras[event.camera]?.onvif.autotracking.enabled_in_config && (
+        <div className="mb-2 text-sm text-danger">
+          Bounding box positions will be inaccurate for autotracking cameras.
+        </div>
+ )} {showControls && ( Date: Tue, 18 Feb 2025 08:17:51 -0600 Subject: [PATCH 09/51] Reorganize Lifecycle components (#16663) * reorganize lifecycle components * clean up --- .../overlay/detail/ObjectLifecycle.tsx | 206 +++--------------- .../components/overlay/detail/ObjectPath.tsx | 113 ++++++++++ web/src/types/timeline.ts | 11 +- web/src/utils/lifecycleUtil.ts | 47 ++++ 4 files changed, 199 insertions(+), 178 deletions(-) create mode 100644 web/src/components/overlay/detail/ObjectPath.tsx create mode 100644 web/src/utils/lifecycleUtil.ts diff --git a/web/src/components/overlay/detail/ObjectLifecycle.tsx b/web/src/components/overlay/detail/ObjectLifecycle.tsx index de343861e..40ab543c3 100644 --- a/web/src/components/overlay/detail/ObjectLifecycle.tsx +++ b/web/src/components/overlay/detail/ObjectLifecycle.tsx @@ -11,7 +11,7 @@ import { CarouselPrevious, } from "@/components/ui/carousel"; import { Button } from "@/components/ui/button"; -import { ClassType, ObjectLifecycleSequence } from "@/types/timeline"; +import { ObjectLifecycleSequence } from "@/types/timeline"; import Heading from "@/components/ui/heading"; import { ReviewDetailPaneType } from "@/types/review"; import { FrigateConfig } from "@/types/frigateConfig"; @@ -52,13 +52,8 @@ import { ContextMenuTrigger, } from "@/components/ui/context-menu"; import { useNavigate } from "react-router-dom"; - -type Position = { - x: number; - y: number; - timestamp: number; - lifecycle_item?: ObjectLifecycleSequence; -}; +import { ObjectPath } from "./ObjectPath"; +import { getLifecycleItemDescription } from "@/utils/lifecycleUtil"; type ObjectLifecycleProps = { className?: string; @@ -400,6 +395,8 @@ export default function ObjectLifecycle({ /> {showZones && + imgRef.current?.width && + imgRef.current?.height && lifecycleZones?.map((zone) => (
)} - {pathPoints && pathPoints.length > 0 && ( -
- 0 && ( +
- - -
- )} + + + +
+ )} @@ -755,149 +755,3 @@ export function LifecycleIcon({ return null; } } - -function getLifecycleItemDescription(lifecycleItem: ObjectLifecycleSequence) { - const label = ( - (Array.isArray(lifecycleItem.data.sub_label) - ? lifecycleItem.data.sub_label[0] - : lifecycleItem.data.sub_label) || lifecycleItem.data.label - ).replaceAll("_", " "); - - switch (lifecycleItem.class_type) { - case "visible": - return `${label} detected`; - case "entered_zone": - return `${label} entered ${lifecycleItem.data.zones - .join(" and ") - .replaceAll("_", " ")}`; - case "active": - return `${label} became active`; - case "stationary": - return `${label} became stationary`; - case "attribute": { - let title = ""; - if ( - lifecycleItem.data.attribute == "face" || - lifecycleItem.data.attribute == "license_plate" - ) { - title = `${lifecycleItem.data.attribute.replaceAll( - "_", - " ", - )} detected for ${label}`; - } else { - title = `${ - lifecycleItem.data.label - } recognized as ${lifecycleItem.data.attribute.replaceAll("_", " ")}`; - } - return title; - } - case "gone": - return `${label} left`; - case "heard": - return `${label} heard`; - case "external": - return `${label} detected`; - } -} - -type ObjectPathProps = { - positions?: Position[]; - color?: number[]; - width?: number; - pointRadius?: number; - imgRef: React.RefObject; - onPointClick?: (index: number) => void; -}; - -const typeColorMap: Partial> = { - [ClassType.VISIBLE]: [0, 255, 0], // Green - [ClassType.GONE]: [255, 0, 0], // Red - [ClassType.ENTERED_ZONE]: [255, 165, 0], // Orange - [ClassType.ATTRIBUTE]: [128, 0, 128], // Purple - [ClassType.ACTIVE]: [255, 255, 0], // Yellow - [ClassType.STATIONARY]: [128, 128, 128], // Gray - [ClassType.HEARD]: [0, 255, 255], // Cyan - [ClassType.EXTERNAL]: [165, 42, 42], // Brown -}; - -function ObjectPath({ - positions, - color = [0, 0, 255], - width = 2, - pointRadius = 4, - imgRef, - onPointClick, -}: ObjectPathProps) { - const getAbsolutePositions = useCallback(() => { - if (!imgRef.current || !positions) return []; - const imgRect = imgRef.current.getBoundingClientRect(); - return positions.map((pos) => ({ - x: pos.x * imgRect.width, - y: pos.y * imgRect.height, - timestamp: pos.timestamp, - lifecycle_item: pos.lifecycle_item, - })); - }, [positions, imgRef]); - - const generateStraightPath = useCallback((points: Position[]) => { - if (!points || points.length < 2) return ""; - let path = `M ${points[0].x} ${points[0].y}`; - for (let i = 1; i < points.length; i++) { - path += ` L ${points[i].x} ${points[i].y}`; - } - return path; - }, []); - - const getPointColor = (baseColor: number[], type?: ClassType) => { - if (type) { - const typeColor = typeColorMap[type]; - if (typeColor) { - return `rgb(${typeColor.join(",")})`; - } - } - // normal path point - return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`; - }; - - if (!imgRef.current) return null; - const absolutePositions = getAbsolutePositions(); - const lineColor = `rgb(${color.join(",")})`; - - return ( - - - {absolutePositions.map((pos, index) => ( - - - - pos.lifecycle_item && onPointClick && onPointClick(index) - } - style={{ cursor: pos.lifecycle_item ? "pointer" : "default" }} - /> - - - - {pos.lifecycle_item - ? 
getLifecycleItemDescription(pos.lifecycle_item) - : "Tracked point"} - - - - ))} - - ); -} diff --git a/web/src/components/overlay/detail/ObjectPath.tsx b/web/src/components/overlay/detail/ObjectPath.tsx new file mode 100644 index 000000000..d85750ee7 --- /dev/null +++ b/web/src/components/overlay/detail/ObjectPath.tsx @@ -0,0 +1,113 @@ +import { useCallback } from "react"; +import { LifecycleClassType, Position } from "@/types/timeline"; +import { + Tooltip, + TooltipContent, + TooltipTrigger, +} from "@/components/ui/tooltip"; +import { TooltipPortal } from "@radix-ui/react-tooltip"; +import { getLifecycleItemDescription } from "@/utils/lifecycleUtil"; + +type ObjectPathProps = { + positions?: Position[]; + color?: number[]; + width?: number; + pointRadius?: number; + imgRef: React.RefObject; + onPointClick?: (index: number) => void; +}; + +const typeColorMap: Partial< + Record +> = { + [LifecycleClassType.VISIBLE]: [0, 255, 0], // Green + [LifecycleClassType.GONE]: [255, 0, 0], // Red + [LifecycleClassType.ENTERED_ZONE]: [255, 165, 0], // Orange + [LifecycleClassType.ATTRIBUTE]: [128, 0, 128], // Purple + [LifecycleClassType.ACTIVE]: [255, 255, 0], // Yellow + [LifecycleClassType.STATIONARY]: [128, 128, 128], // Gray + [LifecycleClassType.HEARD]: [0, 255, 255], // Cyan + [LifecycleClassType.EXTERNAL]: [165, 42, 42], // Brown +}; + +export function ObjectPath({ + positions, + color = [0, 0, 255], + width = 2, + pointRadius = 4, + imgRef, + onPointClick, +}: ObjectPathProps) { + const getAbsolutePositions = useCallback(() => { + if (!imgRef.current || !positions) return []; + const imgRect = imgRef.current.getBoundingClientRect(); + return positions.map((pos) => ({ + x: pos.x * imgRect.width, + y: pos.y * imgRect.height, + timestamp: pos.timestamp, + lifecycle_item: pos.lifecycle_item, + })); + }, [positions, imgRef]); + + const generateStraightPath = useCallback((points: Position[]) => { + if (!points || points.length < 2) return ""; + let path = `M ${points[0].x} ${points[0].y}`; + for (let i = 1; i < points.length; i++) { + path += ` L ${points[i].x} ${points[i].y}`; + } + return path; + }, []); + + const getPointColor = (baseColor: number[], type?: LifecycleClassType) => { + if (type) { + const typeColor = typeColorMap[type]; + if (typeColor) { + return `rgb(${typeColor.join(",")})`; + } + } + // normal path point + return `rgb(${baseColor.map((c) => Math.max(0, c - 10)).join(",")})`; + }; + + if (!imgRef.current) return null; + const absolutePositions = getAbsolutePositions(); + const lineColor = `rgb(${color.join(",")})`; + + return ( + + + {absolutePositions.map((pos, index) => ( + + + + pos.lifecycle_item && onPointClick && onPointClick(index) + } + style={{ cursor: pos.lifecycle_item ? "pointer" : "default" }} + /> + + + + {pos.lifecycle_item + ? 
getLifecycleItemDescription(pos.lifecycle_item) + : "Tracked point"} + + + + ))} + + ); +} diff --git a/web/src/types/timeline.ts b/web/src/types/timeline.ts index 66366c2f0..45a0821ed 100644 --- a/web/src/types/timeline.ts +++ b/web/src/types/timeline.ts @@ -1,4 +1,4 @@ -export enum ClassType { +export enum LifecycleClassType { VISIBLE = "visible", GONE = "gone", ENTERED_ZONE = "entered_zone", @@ -22,7 +22,7 @@ export type ObjectLifecycleSequence = { attribute: string; zones: string[]; }; - class_type: ClassType; + class_type: LifecycleClassType; source_id: string; source: string; }; @@ -32,3 +32,10 @@ export type TimeRange = { before: number; after: number }; export type TimelineType = "timeline" | "events"; export type TimelineScrubMode = "auto" | "drag" | "hover" | "compat"; + +export type Position = { + x: number; + y: number; + timestamp: number; + lifecycle_item?: ObjectLifecycleSequence; +}; diff --git a/web/src/utils/lifecycleUtil.ts b/web/src/utils/lifecycleUtil.ts new file mode 100644 index 000000000..f59f3eac9 --- /dev/null +++ b/web/src/utils/lifecycleUtil.ts @@ -0,0 +1,47 @@ +import { ObjectLifecycleSequence } from "@/types/timeline"; + +export function getLifecycleItemDescription( + lifecycleItem: ObjectLifecycleSequence, +) { + const label = ( + (Array.isArray(lifecycleItem.data.sub_label) + ? lifecycleItem.data.sub_label[0] + : lifecycleItem.data.sub_label) || lifecycleItem.data.label + ).replaceAll("_", " "); + + switch (lifecycleItem.class_type) { + case "visible": + return `${label} detected`; + case "entered_zone": + return `${label} entered ${lifecycleItem.data.zones + .join(" and ") + .replaceAll("_", " ")}`; + case "active": + return `${label} became active`; + case "stationary": + return `${label} became stationary`; + case "attribute": { + let title = ""; + if ( + lifecycleItem.data.attribute == "face" || + lifecycleItem.data.attribute == "license_plate" + ) { + title = `${lifecycleItem.data.attribute.replaceAll( + "_", + " ", + )} detected for ${label}`; + } else { + title = `${ + lifecycleItem.data.label + } recognized as ${lifecycleItem.data.attribute.replaceAll("_", " ")}`; + } + return title; + } + case "gone": + return `${label} left`; + case "heard": + return `${label} heard`; + case "external": + return `${label} detected`; + } +} From 5bd412071ad37c06d5e63275b4cf4f28d43cd214 Mon Sep 17 00:00:00 2001 From: Lander Noterman Date: Tue, 18 Feb 2025 15:38:07 +0100 Subject: [PATCH 10/51] Add support for JetPack 6 (#16571) * it builds! 
* some fixes * use python 3.11 (rc1) * add deadsnakes ppa for more recent python 3.11 in jetson images * fix pip stuff * revert to tensor 8.6 * revert changes to docker/main * add hook to install deadsnakes ppa for tensorrt/jetson * remove unnecessary pip break-system-packages * move tflite_runtime to requirements-wheels.txt --- .github/workflows/ci.yml | 29 ++++++++++++++++ docker/main/Dockerfile | 33 ++++++++++++++----- docker/main/install_deps.sh | 13 ++------ docker/main/requirements-wheels.txt | 5 ++- docker/rockchip/Dockerfile | 3 +- docker/tensorrt/Dockerfile.amd64 | 4 +-- docker/tensorrt/Dockerfile.arm64 | 29 ++++++++++++---- docker/tensorrt/build_jetson_ffmpeg.sh | 17 ++++++++-- .../detector/build_python_tensorrt.sh | 14 ++++---- docker/tensorrt/requirements-arm64.txt | 2 +- docker/tensorrt/trt.hcl | 13 +++++++- docker/tensorrt/trt.mk | 12 +++++++ 12 files changed, 132 insertions(+), 42 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2046ed100..c9b34c62f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -136,6 +136,35 @@ jobs: tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5 *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5 *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max + jetson_jp6_build: + runs-on: ubuntu-22.04 + name: Jetson Jetpack 6 + steps: + - name: Check out code + uses: actions/checkout@v4 + with: + persist-credentials: false + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push TensorRT (Jetson, Jetpack 6) + env: + ARCH: arm64 + BASE_IMAGE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu + SLIM_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu + TRT_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu + uses: docker/bake-action@v6 + with: + source: . 
+ push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp6 + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp6 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp6,mode=max amd64_extra_builds: runs-on: ubuntu-22.04 name: AMD64 Extra Build diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile index 4c3416789..0bafeab80 100644 --- a/docker/main/Dockerfile +++ b/docker/main/Dockerfile @@ -3,14 +3,27 @@ # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive +# Globally set pip break-system-packages option to avoid having to specify it every time +ARG PIP_BREAK_SYSTEM_PACKAGES=1 + ARG BASE_IMAGE=debian:12 ARG SLIM_BASE=debian:12-slim +# A hook that allows us to inject commands right after the base images +ARG BASE_HOOK= + FROM ${BASE_IMAGE} AS base +ARG PIP_BREAK_SYSTEM_PACKAGES + +RUN ${BASE_HOOK} FROM --platform=${BUILDPLATFORM} debian:12 AS base_host +ARG PIP_BREAK_SYSTEM_PACKAGES FROM ${SLIM_BASE} AS slim-base +ARG PIP_BREAK_SYSTEM_PACKAGES + +RUN ${BASE_HOOK} FROM slim-base AS wget ARG DEBIAN_FRONTEND @@ -66,8 +79,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt RUN apt-get -qq update \ && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \ && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ - && python3 get-pip.py "pip" --break-system-packages \ - && pip install --break-system-packages -r /requirements-ov.txt + && python3 get-pip.py "pip" \ + && pip install -r /requirements-ov.txt # Get OpenVino Model RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \ @@ -142,8 +155,8 @@ RUN apt-get -qq update \ apt-transport-https wget \ && apt-get -qq update \ && apt-get -qq install -y \ - python3 \ - python3-dev \ + python3.11 \ + python3.11-dev \ # opencv dependencies build-essential cmake git pkg-config libgtk-3-dev \ libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \ @@ -157,11 +170,13 @@ RUN apt-get -qq update \ gcc gfortran libopenblas-dev liblapack-dev && \ rm -rf /var/lib/apt/lists/* +RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 + RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ - && python3 get-pip.py "pip" --break-system-packages + && python3 get-pip.py "pip" COPY docker/main/requirements.txt /requirements.txt -RUN pip3 install -r /requirements.txt --break-system-packages +RUN pip3 install -r /requirements.txt # Build pysqlite3 from source COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh @@ -215,8 +230,8 @@ RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_de /deps/install_deps.sh RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \ - python3 -m pip install --upgrade pip --break-system-packages && \ - pip3 install -U /deps/wheels/*.whl --break-system-packages + python3 -m pip install --upgrade pip && \ + pip3 install -U /deps/wheels/*.whl COPY --from=deps-rootfs / / @@ -263,7 +278,7 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \ - pip3 install -r requirements-dev.txt --break-system-packages + pip3 install -r requirements-dev.txt HEALTHCHECK NONE diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh index ee84f6a14..a7b7789c0 100755 --- 
a/docker/main/install_deps.sh +++ b/docker/main/install_deps.sh @@ -11,7 +11,7 @@ apt-get -qq install --no-install-recommends -y \ lbzip2 \ procps vainfo \ unzip locales tzdata libxml2 xz-utils \ - python3 \ + python3.11 \ python3-pip \ curl \ lsof \ @@ -21,6 +21,8 @@ apt-get -qq install --no-install-recommends -y \ libglib2.0-0 \ libusb-1.0.0 +update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 + mkdir -p -m 600 /root/.gnupg # install coral runtime @@ -29,15 +31,6 @@ unset DEBIAN_FRONTEND yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive rm /tmp/libedgetpu1-max.deb -# install python3 & tflite runtime -if [[ "${TARGETARCH}" == "amd64" ]]; then - pip3 install --break-system-packages https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl -fi - -if [[ "${TARGETARCH}" == "arm64" ]]; then - pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl -fi - # btbn-ffmpeg -> amd64 if [[ "${TARGETARCH}" == "amd64" ]]; then mkdir -p /usr/lib/ffmpeg/5.0 diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index e43e74155..f06f82d88 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -68,4 +68,7 @@ netaddr==0.8.* netifaces==0.10.* verboselogs==1.7.* virtualenv==20.17.* -prometheus-client == 0.21.* \ No newline at end of file +prometheus-client == 0.21.* +# TFLite +tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64' +tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64' diff --git a/docker/rockchip/Dockerfile b/docker/rockchip/Dockerfile index e9c9602a8..09380dfb3 100644 --- a/docker/rockchip/Dockerfile +++ b/docker/rockchip/Dockerfile @@ -8,7 +8,6 @@ COPY docker/main/requirements-wheels.txt /requirements-wheels.txt COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt RUN sed -i "/https:\/\//d" /requirements-wheels.txt RUN sed -i "/onnxruntime/d" /requirements-wheels.txt -RUN python3 -m pip config set global.break-system-packages true RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt RUN rm -rf /rk-wheels/opencv_python-* @@ -16,7 +15,7 @@ FROM deps AS rk-frigate ARG TARGETARCH RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \ - pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages + pip3 install --no-deps -U /deps/rk-wheels/*.whl WORKDIR /opt/frigate/ COPY --from=rootfs / / diff --git a/docker/tensorrt/Dockerfile.amd64 b/docker/tensorrt/Dockerfile.amd64 index 276094ed2..6be11c210 100644 --- a/docker/tensorrt/Dockerfile.amd64 +++ b/docker/tensorrt/Dockerfile.amd64 @@ -17,7 +17,7 @@ FROM tensorrt-base AS frigate-tensorrt ENV TRT_VER=8.6.1 RUN python3 -m pip config set global.break-system-packages true RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ - pip3 install -U /deps/trt-wheels/*.whl --break-system-packages && \ + pip3 install -U /deps/trt-wheels/*.whl && \ ldconfig WORKDIR /opt/frigate/ @@ -32,4 +32,4 @@ COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda COPY docker/tensorrt/detector/rootfs/ / COPY --from=trt-deps 
/usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ - pip3 install -U /deps/trt-wheels/*.whl --break-system-packages + pip3 install -U /deps/trt-wheels/*.whl diff --git a/docker/tensorrt/Dockerfile.arm64 b/docker/tensorrt/Dockerfile.arm64 index ba2638fcb..33fd8182a 100644 --- a/docker/tensorrt/Dockerfile.arm64 +++ b/docker/tensorrt/Dockerfile.arm64 @@ -7,20 +7,25 @@ ARG BASE_IMAGE FROM ${BASE_IMAGE} AS build-wheels ARG DEBIAN_FRONTEND +# Add deadsnakes PPA for python3.11 +RUN apt-get -qq update && \ + apt-get -qq install -y --no-install-recommends \ + software-properties-common \ + && add-apt-repository ppa:deadsnakes/ppa + # Use a separate container to build wheels to prevent build dependencies in final image RUN apt-get -qq update \ && apt-get -qq install -y --no-install-recommends \ - python3.9 python3.9-dev \ + python3.11 python3.11-dev \ wget build-essential cmake git \ && rm -rf /var/lib/apt/lists/* -# Ensure python3 defaults to python3.9 -RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 +# Ensure python3 defaults to python3.11 +RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ && python3 get-pip.py "pip" - FROM build-wheels AS trt-wheels ARG DEBIAN_FRONTEND ARG TARGETARCH @@ -41,11 +46,12 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt -ADD https://nvidia.box.com/shared/static/psl23iw3bh7hlgku0mjo1xekxpego3e3.whl /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl +# See https://elinux.org/Jetson_Zoo#ONNX_Runtime +ADD https://nvidia.box.com/shared/static/9yvw05k6u343qfnkhdv2x6xhygze0aq1.whl /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl RUN pip3 uninstall -y onnxruntime-openvino \ && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \ - && pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl + && pip3 install --no-deps /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl FROM build-wheels AS trt-model-wheels ARG DEBIAN_FRONTEND @@ -67,12 +73,18 @@ RUN --mount=type=bind,source=docker/tensorrt/build_jetson_ffmpeg.sh,target=/deps # Frigate w/ TensorRT for NVIDIA Jetson platforms FROM tensorrt-base AS frigate-tensorrt RUN apt-get update \ - && apt-get install -y python-is-python3 libprotobuf17 \ + && apt-get install -y python-is-python3 libprotobuf23 \ && rm -rf /var/lib/apt/lists/* RUN rm -rf /usr/lib/btbn-ffmpeg/ COPY --from=jetson-ffmpeg /rootfs / +# ffmpeg runtime dependencies +RUN apt-get -qq update \ + && apt-get -qq install -y --no-install-recommends \ + libx264-163 libx265-199 libegl1 \ + && rm -rf /var/lib/apt/lists/* + COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \ @@ -81,3 +93,6 @@ RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels WORKDIR /opt/frigate/ COPY --from=rootfs / / + +# Fixes "Error importing detector runtime: /usr/lib/aarch64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block" +ENV LD_PRELOAD /usr/lib/aarch64-linux-gnu/libstdc++.so.6 diff --git 
a/docker/tensorrt/build_jetson_ffmpeg.sh b/docker/tensorrt/build_jetson_ffmpeg.sh index f4e55c2bb..692612137 100755 --- a/docker/tensorrt/build_jetson_ffmpeg.sh +++ b/docker/tensorrt/build_jetson_ffmpeg.sh @@ -14,14 +14,27 @@ apt-get -qq install -y --no-install-recommends libx264-dev libx265-dev pushd /tmp # Install libnvmpi to enable nvmpi decoders (h264_nvmpi, hevc_nvmpi) -if [ -e /usr/local/cuda-10.2 ]; then +if [ -e /usr/local/cuda-12 ]; then + # assume Jetpack 6.2 + apt-key adv --fetch-key https://repo.download.nvidia.com/jetson/jetson-ota-public.asc + echo "deb https://repo.download.nvidia.com/jetson/common r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list + echo "deb https://repo.download.nvidia.com/jetson/t234 r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list + echo "deb https://repo.download.nvidia.com/jetson/ffmpeg r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list + + mkdir -p /opt/nvidia/l4t-packages/ + touch /opt/nvidia/l4t-packages/.nv-l4t-disable-boot-fw-update-in-preinstall + + apt-get update + apt-get -qq install -y --no-install-recommends -o Dpkg::Options::="--force-confold" nvidia-l4t-jetson-multimedia-api +elif [ -e /usr/local/cuda-10.2 ]; then # assume Jetpack 4.X wget -q https://developer.nvidia.com/embedded/L4T/r32_Release_v5.0/T186/Jetson_Multimedia_API_R32.5.0_aarch64.tbz2 -O jetson_multimedia_api.tbz2 + tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2 else # assume Jetpack 5.X wget -q https://developer.nvidia.com/downloads/embedded/l4t/r35_release_v3.1/release/jetson_multimedia_api_r35.3.1_aarch64.tbz2 -O jetson_multimedia_api.tbz2 + tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2 fi -tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2 wget -q https://github.com/AndBobsYourUncle/jetson-ffmpeg/archive/9c17b09.zip -O jetson-ffmpeg.zip unzip jetson-ffmpeg.zip && rm jetson-ffmpeg.zip && mv jetson-ffmpeg-* jetson-ffmpeg && cd jetson-ffmpeg diff --git a/docker/tensorrt/detector/build_python_tensorrt.sh b/docker/tensorrt/detector/build_python_tensorrt.sh index 21b6ae268..325103485 100755 --- a/docker/tensorrt/detector/build_python_tensorrt.sh +++ b/docker/tensorrt/detector/build_python_tensorrt.sh @@ -6,23 +6,23 @@ mkdir -p /trt-wheels if [[ "${TARGETARCH}" == "arm64" ]]; then - # NVIDIA supplies python-tensorrt for python3.8, but frigate uses python3.9, + # NVIDIA supplies python-tensorrt for python3.10, but frigate uses python3.11, # so we must build python-tensorrt ourselves. 
# Get python-tensorrt source - mkdir /workspace + mkdir -p /workspace cd /workspace - git clone -b ${TENSORRT_VER} https://github.com/NVIDIA/TensorRT.git --depth=1 + git clone -b release/8.6 https://github.com/NVIDIA/TensorRT.git --depth=1 # Collect dependencies EXT_PATH=/workspace/external && mkdir -p $EXT_PATH - pip3 install pybind11 && ln -s /usr/local/lib/python3.9/dist-packages/pybind11 $EXT_PATH/pybind11 - ln -s /usr/include/python3.9 $EXT_PATH/python3.9 + pip3 install pybind11 && ln -s /usr/local/lib/python3.11/dist-packages/pybind11 $EXT_PATH/pybind11 + ln -s /usr/include/python3.11 $EXT_PATH/python3.11 ln -s /usr/include/aarch64-linux-gnu/NvOnnxParser.h /workspace/TensorRT/parsers/onnx/ # Build wheel cd /workspace/TensorRT/python - EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=9 TARGET_ARCHITECTURE=aarch64 /bin/bash ./build.sh - mv build/dist/*.whl /trt-wheels/ + EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=11 TARGET_ARCHITECTURE=aarch64 TENSORRT_MODULE=tensorrt /bin/bash ./build.sh + mv build/bindings_wheel/dist/*.whl /trt-wheels/ fi diff --git a/docker/tensorrt/requirements-arm64.txt b/docker/tensorrt/requirements-arm64.txt index 67489f80b..c9b618180 100644 --- a/docker/tensorrt/requirements-arm64.txt +++ b/docker/tensorrt/requirements-arm64.txt @@ -1 +1 @@ -cuda-python == 11.7; platform_machine == 'aarch64' \ No newline at end of file +cuda-python == 12.6.*; platform_machine == 'aarch64' diff --git a/docker/tensorrt/trt.hcl b/docker/tensorrt/trt.hcl index 3195fb5bf..730f54053 100644 --- a/docker/tensorrt/trt.hcl +++ b/docker/tensorrt/trt.hcl @@ -13,13 +13,24 @@ variable "TRT_BASE" { variable "COMPUTE_LEVEL" { default = "" } +variable "BASE_HOOK" { + # Ensure an up-to-date python 3.11 is available in tensorrt/jetson image + default = < Date: Tue, 18 Feb 2025 07:46:29 -0700 Subject: [PATCH 11/51] Remove thumb from database field (#16647) * Remove thumbnail from dict * Create thumbnail diectory * Cleanup handling of tracked object images * Make thumbnail optional * Handle cases where thumbnail is used * Expand options for thumbnail api * Fix up the does not exist condition * Remove absolute usages of thumbnails * Write thumbnails for external events * Reduce webp quality * Use webp everywhere in frontend * Formatting * Always consider all events when re-indexing * Add thumbnail deletion and cleanup path management * Cleanup imports * Rename def * Don't save thumbnail for every object * Correct event count * Use correct function * Include thumbnail in query * Remove unused * Fix requiring exception --- frigate/api/media.py | 35 ++++-- frigate/app.py | 2 + frigate/const.py | 1 + frigate/embeddings/embeddings.py | 26 ++--- frigate/embeddings/maintainer.py | 5 +- frigate/events/cleanup.py | 38 +++---- frigate/events/external.py | 17 ++- frigate/events/maintainer.py | 5 +- frigate/object_processing.py | 52 +++------ frigate/track/tracked_object.py | 100 ++++++++++++++---- frigate/util/path.py | 51 +++++++++ migrations/028_optional_event_thumbnail.py | 36 +++++++ web/src/components/card/SearchThumbnail.tsx | 2 +- .../overlay/detail/ReviewDetailDialog.tsx | 4 +- .../overlay/detail/SearchDetailDialog.tsx | 2 +- web/src/views/explore/ExploreView.tsx | 2 +- 16 files changed, 241 insertions(+), 137 deletions(-) create mode 100644 frigate/util/path.py create mode 100644 migrations/028_optional_event_thumbnail.py diff --git a/frigate/api/media.py b/frigate/api/media.py index a9455919b..74e9e7aaa 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ 
-1,6 +1,5 @@ """Image and video apis.""" -import base64 import glob import logging import os @@ -40,6 +39,7 @@ from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment from frigate.object_processing import TrackedObjectProcessor from frigate.util.builtin import get_tz_modifiers from frigate.util.image import get_image_from_recording +from frigate.util.path import get_event_thumbnail_bytes logger = logging.getLogger(__name__) @@ -804,10 +804,11 @@ def event_snapshot( ) -@router.get("/events/{event_id}/thumbnail.jpg") +@router.get("/events/{event_id}/thumbnail.{extension}") def event_thumbnail( request: Request, event_id: str, + extension: str, max_cache_age: int = Query( 2592000, description="Max cache age in seconds. Default 30 days in seconds." ), @@ -816,11 +817,15 @@ def event_thumbnail( thumbnail_bytes = None event_complete = False try: - event = Event.get(Event.id == event_id) + event: Event = Event.get(Event.id == event_id) if event.end_time is not None: event_complete = True - thumbnail_bytes = base64.b64decode(event.thumbnail) + + thumbnail_bytes = get_event_thumbnail_bytes(event) except DoesNotExist: + thumbnail_bytes = None + + if thumbnail_bytes is None: # see if the object is currently being tracked try: camera_states = request.app.detected_frames_processor.camera_states.values() @@ -828,7 +833,7 @@ def event_thumbnail( if event_id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(event_id) if tracked_obj is not None: - thumbnail_bytes = tracked_obj.get_thumbnail() + thumbnail_bytes = tracked_obj.get_thumbnail(extension) except Exception: return JSONResponse( content={"success": False, "message": "Event not found"}, @@ -843,8 +848,8 @@ def event_thumbnail( # android notifications prefer a 2:1 ratio if format == "android": - jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8) - img = cv2.imdecode(jpg_as_np, flags=1) + img_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8) + img = cv2.imdecode(img_as_np, flags=1) thumbnail = cv2.copyMakeBorder( img, 0, @@ -854,17 +859,25 @@ def event_thumbnail( cv2.BORDER_CONSTANT, (0, 0, 0), ) - ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) - thumbnail_bytes = jpg.tobytes() + + quality_params = None + + if extension == "jpg" or extension == "jpeg": + quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), 70] + elif extension == "webp": + quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60] + + _, img = cv2.imencode(f".{img}", thumbnail, quality_params) + thumbnail_bytes = img.tobytes() return Response( thumbnail_bytes, - media_type="image/jpeg", + media_type=f"image/{extension}", headers={ "Cache-Control": f"private, max-age={max_cache_age}" if event_complete else "no-store", - "Content-Type": "image/jpeg", + "Content-Type": f"image/{extension}", }, ) diff --git a/frigate/app.py b/frigate/app.py index 6ff4a1a41..400d4bca0 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -39,6 +39,7 @@ from frigate.const import ( MODEL_CACHE_DIR, RECORD_DIR, SHM_FRAMES_VAR, + THUMB_DIR, ) from frigate.data_processing.types import DataProcessorMetrics from frigate.db.sqlitevecq import SqliteVecQueueDatabase @@ -105,6 +106,7 @@ class FrigateApp: dirs = [ CONFIG_DIR, RECORD_DIR, + THUMB_DIR, f"{CLIPS_DIR}/cache", CACHE_DIR, MODEL_CACHE_DIR, diff --git a/frigate/const.py b/frigate/const.py index 16df8b887..eb48e9bf9 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -7,6 +7,7 @@ BASE_DIR = "/media/frigate" CLIPS_DIR = f"{BASE_DIR}/clips" EXPORT_DIR = 
f"{BASE_DIR}/exports" FACE_DIR = f"{CLIPS_DIR}/faces" +THUMB_DIR = f"{CLIPS_DIR}/thumbs" RECORD_DIR = f"{BASE_DIR}/recordings" BIRDSEYE_PIPE = "/tmp/cache/birdseye" CACHE_DIR = "/tmp/cache" diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index d8a4a2f4d..5ce7ba86d 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -1,6 +1,5 @@ """SQLite-vec embeddings database.""" -import base64 import datetime import logging import os @@ -21,6 +20,7 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event from frigate.types import ModelStatusTypesEnum from frigate.util.builtin import serialize +from frigate.util.path import get_event_thumbnail_bytes from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum @@ -264,14 +264,7 @@ class Embeddings: st = time.time() # Get total count of events to process - total_events = ( - Event.select() - .where( - (Event.has_clip == True | Event.has_snapshot == True) - & Event.thumbnail.is_null(False) - ) - .count() - ) + total_events = Event.select().count() batch_size = 32 current_page = 1 @@ -289,10 +282,6 @@ class Embeddings: events = ( Event.select() - .where( - (Event.has_clip == True | Event.has_snapshot == True) - & Event.thumbnail.is_null(False) - ) .order_by(Event.start_time.desc()) .paginate(current_page, batch_size) ) @@ -302,7 +291,12 @@ class Embeddings: batch_thumbs = {} batch_descs = {} for event in events: - batch_thumbs[event.id] = base64.b64decode(event.thumbnail) + thumbnail = get_event_thumbnail_bytes(event) + + if thumbnail is None: + continue + + batch_thumbs[event.id] = thumbnail totals["thumbnails"] += 1 if description := event.data.get("description", "").strip(): @@ -341,10 +335,6 @@ class Embeddings: current_page += 1 events = ( Event.select() - .where( - (Event.has_clip == True | Event.has_snapshot == True) - & Event.thumbnail.is_null(False) - ) .order_by(Event.start_time.desc()) .paginate(current_page, batch_size) ) diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py index b7623722d..7925345b2 100644 --- a/frigate/embeddings/maintainer.py +++ b/frigate/embeddings/maintainer.py @@ -38,6 +38,7 @@ from frigate.models import Event from frigate.types import TrackedObjectUpdateTypesEnum from frigate.util.builtin import serialize from frigate.util.image import SharedMemoryFrameManager, calculate_region +from frigate.util.path import get_event_thumbnail_bytes from .embeddings import Embeddings @@ -215,7 +216,7 @@ class EmbeddingMaintainer(threading.Thread): continue # Extract valid thumbnail - thumbnail = base64.b64decode(event.thumbnail) + thumbnail = get_event_thumbnail_bytes(event) # Embed the thumbnail self._embed_thumbnail(event_id, thumbnail) @@ -390,7 +391,7 @@ class EmbeddingMaintainer(threading.Thread): logger.error(f"GenAI not enabled for camera {event.camera}") return - thumbnail = base64.b64decode(event.thumbnail) + thumbnail = get_event_thumbnail_bytes(event) logger.debug( f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}" diff --git a/frigate/events/cleanup.py b/frigate/events/cleanup.py index d4efb26e8..ae39e3fd2 100644 --- a/frigate/events/cleanup.py +++ b/frigate/events/cleanup.py @@ -11,6 +11,7 @@ from frigate.config import FrigateConfig from frigate.const import CLIPS_DIR from frigate.db.sqlitevecq import SqliteVecQueueDatabase from frigate.models import Event, Timeline +from frigate.util.path import delete_event_images logger = logging.getLogger(__name__) @@ -64,7 
+65,6 @@ class EventCleanup(threading.Thread): def expire_snapshots(self) -> list[str]: ## Expire events from unlisted cameras based on the global config retain_config = self.config.snapshots.retain - file_extension = "jpg" update_params = {"has_snapshot": False} distinct_labels = self.get_removed_camera_labels() @@ -83,6 +83,7 @@ class EventCleanup(threading.Thread): Event.select( Event.id, Event.camera, + Event.thumbnail, ) .where( Event.camera.not_in(self.camera_keys), @@ -94,22 +95,15 @@ class EventCleanup(threading.Thread): .iterator() ) logger.debug(f"{len(list(expired_events))} events can be expired") + # delete the media from disk for expired in expired_events: - media_name = f"{expired.camera}-{expired.id}" - media_path = Path( - f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}" - ) + deleted = delete_event_images(expired) - try: - media_path.unlink(missing_ok=True) - if file_extension == "jpg": - media_path = Path( - f"{os.path.join(CLIPS_DIR, media_name)}-clean.png" - ) - media_path.unlink(missing_ok=True) - except OSError as e: - logger.warning(f"Unable to delete event images: {e}") + if not deleted: + logger.warning( + f"Unable to delete event images for {expired.camera}: {expired.id}" + ) # update the clips attribute for the db entry query = Event.select(Event.id).where( @@ -165,6 +159,7 @@ class EventCleanup(threading.Thread): Event.select( Event.id, Event.camera, + Event.thumbnail, ) .where( Event.camera == name, @@ -181,19 +176,12 @@ class EventCleanup(threading.Thread): # so no need to delete mp4 files for event in expired_events: events_to_update.append(event.id) + deleted = delete_event_images(event) - try: - media_name = f"{event.camera}-{event.id}" - media_path = Path( - f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}" + if not deleted: + logger.warning( + f"Unable to delete event images for {event.camera}: {event.id}" ) - media_path.unlink(missing_ok=True) - media_path = Path( - f"{os.path.join(CLIPS_DIR, media_name)}-clean.png" - ) - media_path.unlink(missing_ok=True) - except OSError as e: - logger.warning(f"Unable to delete event images: {e}") # update the clips attribute for the db entry for i in range(0, len(events_to_update), CHUNK_SIZE): diff --git a/frigate/events/external.py b/frigate/events/external.py index 0d3408975..5423d08be 100644 --- a/frigate/events/external.py +++ b/frigate/events/external.py @@ -1,6 +1,5 @@ """Handle external events created by the user.""" -import base64 import datetime import logging import os @@ -15,7 +14,7 @@ from numpy import ndarray from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum from frigate.comms.events_updater import EventUpdatePublisher from frigate.config import CameraConfig, FrigateConfig -from frigate.const import CLIPS_DIR +from frigate.const import CLIPS_DIR, THUMB_DIR from frigate.events.types import EventStateEnum, EventTypeEnum from frigate.util.image import draw_box_with_label @@ -55,9 +54,7 @@ class ExternalEventProcessor: rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6)) event_id = f"{now}-{rand_id}" - thumbnail = self._write_images( - camera_config, label, event_id, draw, snapshot_frame - ) + self._write_images(camera_config, label, event_id, draw, snapshot_frame) end = now + duration if duration is not None else None self.event_sender.publish( @@ -74,7 +71,6 @@ class ExternalEventProcessor: "camera": camera, "start_time": now - camera_config.record.event_pre_capture, "end_time": end, - "thumbnail": thumbnail, "has_clip": 
camera_config.record.enabled and include_recording, "has_snapshot": True, "type": source_type, @@ -134,9 +130,9 @@ class ExternalEventProcessor: event_id: str, draw: dict[str, any], img_frame: Optional[ndarray], - ) -> Optional[str]: + ) -> None: if img_frame is None: - return None + return # write clean snapshot if enabled if camera_config.snapshots.clean_copy: @@ -182,8 +178,9 @@ class ExternalEventProcessor: # create thumbnail with max height of 175 and save width = int(175 * img_frame.shape[1] / img_frame.shape[0]) thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA) - ret, jpg = cv2.imencode(".jpg", thumb) - return base64.b64encode(jpg.tobytes()).decode("utf-8") + cv2.imwrite( + os.path.join(THUMB_DIR, camera_config.name, f"{event_id}.webp"), thumb + ) def stop(self): self.event_sender.stop() diff --git a/frigate/events/maintainer.py b/frigate/events/maintainer.py index fc02dd37a..5cfa7c716 100644 --- a/frigate/events/maintainer.py +++ b/frigate/events/maintainer.py @@ -23,7 +23,6 @@ def should_update_db(prev_event: Event, current_event: Event) -> bool: if ( prev_event["top_score"] != current_event["top_score"] or prev_event["entered_zones"] != current_event["entered_zones"] - or prev_event["thumbnail"] != current_event["thumbnail"] or prev_event["end_time"] != current_event["end_time"] or prev_event["average_estimated_speed"] != current_event["average_estimated_speed"] @@ -202,7 +201,7 @@ class EventProcessor(threading.Thread): Event.start_time: start_time, Event.end_time: end_time, Event.zones: list(event_data["entered_zones"]), - Event.thumbnail: event_data["thumbnail"], + Event.thumbnail: event_data.get("thumbnail"), Event.has_clip: event_data["has_clip"], Event.has_snapshot: event_data["has_snapshot"], Event.model_hash: first_detector.model.model_hash, @@ -258,7 +257,7 @@ class EventProcessor(threading.Thread): Event.camera: event_data["camera"], Event.start_time: event_data["start_time"], Event.end_time: event_data["end_time"], - Event.thumbnail: event_data["thumbnail"], + Event.thumbnail: event_data.get("thumbnail"), Event.has_clip: event_data["has_clip"], Event.has_snapshot: event_data["has_snapshot"], Event.zones: [], diff --git a/frigate/object_processing.py b/frigate/object_processing.py index 484f4a082..aa966bab8 100644 --- a/frigate/object_processing.py +++ b/frigate/object_processing.py @@ -1,7 +1,6 @@ import datetime import json import logging -import os import queue import threading from collections import defaultdict @@ -16,13 +15,13 @@ from frigate.comms.dispatcher import Dispatcher from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher from frigate.comms.inter_process import InterProcessRequestor from frigate.config import ( + CameraMqttConfig, FrigateConfig, - MqttConfig, RecordConfig, SnapshotsConfig, ZoomingModeEnum, ) -from frigate.const import CLIPS_DIR, UPDATE_CAMERA_ACTIVITY +from frigate.const import UPDATE_CAMERA_ACTIVITY from frigate.events.types import EventStateEnum, EventTypeEnum from frigate.ptz.autotrack import PtzAutoTrackerThread from frigate.track.tracked_object import TrackedObject @@ -479,7 +478,7 @@ class TrackedObjectProcessor(threading.Thread): EventStateEnum.update, camera, frame_name, - obj.to_dict(include_thumbnail=True), + obj.to_dict(), ) ) @@ -491,41 +490,13 @@ class TrackedObjectProcessor(threading.Thread): obj.has_snapshot = self.should_save_snapshot(camera, obj) obj.has_clip = self.should_retain_recording(camera, obj) + # write thumbnail to disk if it will be saved as an event + if 
obj.has_snapshot or obj.has_clip: + obj.write_thumbnail_to_disk() + # write the snapshot to disk if obj.has_snapshot: - snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots - jpg_bytes = obj.get_jpg_bytes( - timestamp=snapshot_config.timestamp, - bounding_box=snapshot_config.bounding_box, - crop=snapshot_config.crop, - height=snapshot_config.height, - quality=snapshot_config.quality, - ) - if jpg_bytes is None: - logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.") - else: - with open( - os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), - "wb", - ) as j: - j.write(jpg_bytes) - - # write clean snapshot if enabled - if snapshot_config.clean_copy: - png_bytes = obj.get_clean_png() - if png_bytes is None: - logger.warning( - f"Unable to save clean snapshot for {obj.obj_data['id']}." - ) - else: - with open( - os.path.join( - CLIPS_DIR, - f"{camera}-{obj.obj_data['id']}-clean.png", - ), - "wb", - ) as p: - p.write(png_bytes) + obj.write_snapshot_to_disk() if not obj.false_positive: message = { @@ -542,14 +513,15 @@ class TrackedObjectProcessor(threading.Thread): EventStateEnum.end, camera, frame_name, - obj.to_dict(include_thumbnail=True), + obj.to_dict(), ) ) def snapshot(camera, obj: TrackedObject, frame_name: str): - mqtt_config: MqttConfig = self.config.cameras[camera].mqtt + mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj): - jpg_bytes = obj.get_jpg_bytes( + jpg_bytes = obj.get_img_bytes( + ext="jpg", timestamp=mqtt_config.timestamp, bounding_box=mqtt_config.bounding_box, crop=mqtt_config.crop, diff --git a/frigate/track/tracked_object.py b/frigate/track/tracked_object.py index 0e7464bc2..f1eb29328 100644 --- a/frigate/track/tracked_object.py +++ b/frigate/track/tracked_object.py @@ -1,8 +1,8 @@ """Object attribute.""" -import base64 import logging import math +import os from collections import defaultdict from statistics import median from typing import Optional @@ -13,8 +13,10 @@ import numpy as np from frigate.config import ( CameraConfig, ModelConfig, + SnapshotsConfig, UIConfig, ) +from frigate.const import CLIPS_DIR, THUMB_DIR from frigate.review.types import SeverityEnum from frigate.util.image import ( area, @@ -330,7 +332,7 @@ class TrackedObject: self.current_zones = current_zones return (thumb_update, significant_change, autotracker_update) - def to_dict(self, include_thumbnail: bool = False): + def to_dict(self): event = { "id": self.obj_data["id"], "camera": self.camera_config.name, @@ -365,9 +367,6 @@ class TrackedObject: "path_data": self.path_data, } - if include_thumbnail: - event["thumbnail"] = base64.b64encode(self.get_thumbnail()).decode("utf-8") - return event def is_active(self): @@ -379,22 +378,16 @@ class TrackedObject: > self.camera_config.detect.stationary.threshold ) - def get_thumbnail(self): - if ( - self.thumbnail_data is None - or self.thumbnail_data["frame_time"] not in self.frame_cache - ): - ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8)) - - jpg_bytes = self.get_jpg_bytes( - timestamp=False, bounding_box=False, crop=True, height=175 + def get_thumbnail(self, ext: str): + img_bytes = self.get_img_bytes( + ext, timestamp=False, bounding_box=False, crop=True, height=175 ) - if jpg_bytes: - return jpg_bytes + if img_bytes: + return img_bytes else: - ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8)) - return jpg.tobytes() + _, img = cv2.imencode(f".{ext}", np.zeros((175, 175, 3), np.uint8)) + 
return img.tobytes() def get_clean_png(self): if self.thumbnail_data is None: @@ -417,8 +410,14 @@ class TrackedObject: else: return None - def get_jpg_bytes( - self, timestamp=False, bounding_box=False, crop=False, height=None, quality=70 + def get_img_bytes( + self, + ext: str, + timestamp=False, + bounding_box=False, + crop=False, + height: int | None = None, + quality: int | None = None, ): if self.thumbnail_data is None: return None @@ -503,14 +502,69 @@ class TrackedObject: position=self.camera_config.timestamp_style.position, ) - ret, jpg = cv2.imencode( - ".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), quality] - ) + quality_params = None + + if ext == "jpg": + quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality or 70] + elif ext == "webp": + quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality or 60] + + ret, jpg = cv2.imencode(f".{ext}", best_frame, quality_params) + if ret: return jpg.tobytes() else: return None + def write_snapshot_to_disk(self) -> None: + snapshot_config: SnapshotsConfig = self.camera_config.snapshots + jpg_bytes = self.get_img_bytes( + ext="jpg", + timestamp=snapshot_config.timestamp, + bounding_box=snapshot_config.bounding_box, + crop=snapshot_config.crop, + height=snapshot_config.height, + quality=snapshot_config.quality, + ) + if jpg_bytes is None: + logger.warning(f"Unable to save snapshot for {self.obj_data['id']}.") + else: + with open( + os.path.join( + CLIPS_DIR, f"{self.camera_config.name}-{self.obj_data['id']}.jpg" + ), + "wb", + ) as j: + j.write(jpg_bytes) + + # write clean snapshot if enabled + if snapshot_config.clean_copy: + png_bytes = self.get_clean_png() + if png_bytes is None: + logger.warning( + f"Unable to save clean snapshot for {self.obj_data['id']}." + ) + else: + with open( + os.path.join( + CLIPS_DIR, + f"{self.camera_config.name}-{self.obj_data['id']}-clean.png", + ), + "wb", + ) as p: + p.write(png_bytes) + + def write_thumbnail_to_disk(self) -> None: + directory = os.path.join(THUMB_DIR, self.camera_config.name) + + if not os.path.exists(directory): + os.makedirs(directory) + + thumb_bytes = self.get_thumbnail("webp") + + with open(os.path.join(directory, f"{self.obj_data['id']}.webp"), "wb") as f: + f.write(thumb_bytes) + def zone_filtered(obj: TrackedObject, object_config): object_name = obj.obj_data["label"] diff --git a/frigate/util/path.py b/frigate/util/path.py new file mode 100644 index 000000000..dbe51abe5 --- /dev/null +++ b/frigate/util/path.py @@ -0,0 +1,51 @@ +"""Path utilities.""" + +import base64 +import os +from pathlib import Path + +from frigate.const import CLIPS_DIR, THUMB_DIR +from frigate.models import Event + + +def get_event_thumbnail_bytes(event: Event) -> bytes | None: + if event.thumbnail: + return base64.b64decode(event.thumbnail) + else: + try: + with open( + os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp"), "rb" + ) as f: + return f.read() + except Exception: + return None + + +### Deletion + + +def delete_event_images(event: Event) -> bool: + return delete_event_snapshot(event) and delete_event_thumbnail(event) + + +def delete_event_snapshot(event: Event) -> bool: + media_name = f"{event.camera}-{event.id}" + media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.jpg") + + try: + media_path.unlink(missing_ok=True) + media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}-clean.png") + media_path.unlink(missing_ok=True) + return True + except OSError: + return False + + +def delete_event_thumbnail(event: Event) -> bool: + if event.thumbnail: + return True + else: + 
Path(os.path.join(THUMB_DIR, event.camera, f"{event.id}.webp")).unlink( + missing_ok=True + ) + return True diff --git a/migrations/028_optional_event_thumbnail.py b/migrations/028_optional_event_thumbnail.py new file mode 100644 index 000000000..3e36a28cc --- /dev/null +++ b/migrations/028_optional_event_thumbnail.py @@ -0,0 +1,36 @@ +"""Peewee migrations -- 028_optional_event_thumbnail.py. + +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" + +import peewee as pw + +from frigate.models import Event + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + migrator.drop_not_null(Event, "thumbnail") + + +def rollback(migrator, database, fake=False, **kwargs): + migrator.add_not_null(Event, "thumbnail") diff --git a/web/src/components/card/SearchThumbnail.tsx b/web/src/components/card/SearchThumbnail.tsx index b7dd64e79..ed98e86b4 100644 --- a/web/src/components/card/SearchThumbnail.tsx +++ b/web/src/components/card/SearchThumbnail.tsx @@ -80,7 +80,7 @@ export default function SearchThumbnail({ : undefined } draggable={false} - src={`${apiHost}api/events/${searchResult.id}/thumbnail.jpg`} + src={`${apiHost}api/events/${searchResult.id}/thumbnail.webp`} loading={isSafari ? "eager" : "lazy"} onLoad={() => { onImgLoad(); diff --git a/web/src/components/overlay/detail/ReviewDetailDialog.tsx b/web/src/components/overlay/detail/ReviewDetailDialog.tsx index 8d2f13d89..76234193c 100644 --- a/web/src/components/overlay/detail/ReviewDetailDialog.tsx +++ b/web/src/components/overlay/detail/ReviewDetailDialog.tsx @@ -385,7 +385,7 @@ function EventItem({ src={ event.has_snapshot ? `${apiHost}api/events/${event.id}/snapshot.jpg` - : `${apiHost}api/events/${event.id}/thumbnail.jpg` + : `${apiHost}api/events/${event.id}/thumbnail.webp` } /> {hovered && ( @@ -400,7 +400,7 @@ function EventItem({ href={ event.has_snapshot ? `${apiHost}api/events/${event.id}/snapshot.jpg` - : `${apiHost}api/events/${event.id}/thumbnail.jpg` + : `${apiHost}api/events/${event.id}/thumbnail.webp` } > diff --git a/web/src/components/overlay/detail/SearchDetailDialog.tsx b/web/src/components/overlay/detail/SearchDetailDialog.tsx index dd088ad83..03054d811 100644 --- a/web/src/components/overlay/detail/SearchDetailDialog.tsx +++ b/web/src/components/overlay/detail/SearchDetailDialog.tsx @@ -511,7 +511,7 @@ function ObjectDetailsTab({ : undefined } draggable={false} - src={`${apiHost}api/events/${search.id}/thumbnail.jpg`} + src={`${apiHost}api/events/${search.id}/thumbnail.webp`} /> {config?.semantic_search.enabled && search.data.type == "object" && ( )} +
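Not part of the patch, but useful context for the change above: events now read thumbnails through `get_event_thumbnail_bytes()`, which prefers the legacy base64 `thumbnail` column and falls back to the new on-disk `clips/thumbs/<camera>/<event_id>.webp` file, and the API route became extension-aware (`/events/{event_id}/thumbnail.{extension}`). Below is a minimal client-side sketch to exercise that route; the base URL, port, and event id are placeholder assumptions, and the `requests` package is assumed to be installed.

```python
# Sketch: fetch one event thumbnail in both supported formats via the new
# extension-aware endpoint. FRIGATE_URL and EVENT_ID are placeholders.
import requests

FRIGATE_URL = "http://frigate.local:5000"  # assumption: unauthenticated local API
EVENT_ID = "1739800000.123456-abc123"      # placeholder event id

for ext in ("webp", "jpg"):
    resp = requests.get(
        f"{FRIGATE_URL}/api/events/{EVENT_ID}/thumbnail.{ext}", timeout=10
    )
    # The endpoint sets Content-Type to image/<extension>; Cache-Control is
    # long-lived only once the event is complete, otherwise "no-store".
    print(ext, resp.status_code, resp.headers.get("Content-Type"))
```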
diff --git a/web/src/components/overlay/dialog/TextEntryDialog.tsx b/web/src/components/overlay/dialog/TextEntryDialog.tsx index 1b0655078..d7b90aabb 100644 --- a/web/src/components/overlay/dialog/TextEntryDialog.tsx +++ b/web/src/components/overlay/dialog/TextEntryDialog.tsx @@ -10,7 +10,7 @@ import { import { Form, FormControl, FormField, FormItem } from "@/components/ui/form"; import { Input } from "@/components/ui/input"; import { zodResolver } from "@hookform/resolvers/zod"; -import { useCallback } from "react"; +import { useCallback, useEffect } from "react"; import { useForm } from "react-hook-form"; import { z } from "zod"; @@ -20,13 +20,18 @@ type TextEntryDialogProps = { description?: string; setOpen: (open: boolean) => void; onSave: (text: string) => void; + defaultValue?: string; + allowEmpty?: boolean; }; + export default function TextEntryDialog({ open, title, description, setOpen, onSave, + defaultValue = "", + allowEmpty = false, }: TextEntryDialogProps) { const formSchema = z.object({ text: z.string(), @@ -34,6 +39,7 @@ export default function TextEntryDialog({ const form = useForm>({ resolver: zodResolver(formSchema), + defaultValues: { text: defaultValue }, }); const fileRef = form.register("text"); @@ -41,15 +47,20 @@ export default function TextEntryDialog({ const onSubmit = useCallback( (data: z.infer) => { - if (!data["text"]) { + if (!allowEmpty && !data["text"]) { return; } - onSave(data["text"]); }, - [onSave], + [onSave, allowEmpty], ); + useEffect(() => { + if (open) { + form.reset({ text: defaultValue }); + } + }, [open, defaultValue, form]); + return ( From 1d8f1bd7ae7b7c379ccf4a2566cc22ad7ba081db Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Mon, 24 Feb 2025 08:02:36 -0600 Subject: [PATCH 38/51] Ensure sub label is null when submitting an empty string (#16779) * null sub_label when submitting an empty string * prevent cancel from submitting form * fix test --- frigate/api/event.py | 24 ++++++++++--------- frigate/test/test_http.py | 2 +- .../overlay/dialog/TextEntryDialog.tsx | 4 +++- 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/frigate/api/event.py b/frigate/api/event.py index 2df32471e..bb1bf7395 100644 --- a/frigate/api/event.py +++ b/frigate/api/event.py @@ -991,6 +991,10 @@ def set_sub_label( new_sub_label = body.subLabel new_score = body.subLabelScore + if new_sub_label == "": + new_sub_label = None + new_score = None + if tracked_obj: tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score) @@ -1001,21 +1005,19 @@ def set_sub_label( if event: event.sub_label = new_sub_label - - if new_score: - data = event.data + data = event.data + if new_sub_label is None: + data["sub_label_score"] = None + elif new_score is not None: data["sub_label_score"] = new_score - event.data = data - + event.data = data event.save() return JSONResponse( - content=( - { - "success": True, - "message": "Event " + event_id + " sub label set to " + new_sub_label, - } - ), + content={ + "success": True, + "message": f"Event {event_id} sub label set to {new_sub_label if new_sub_label is not None else 'None'}", + }, status_code=200, ) diff --git a/frigate/test/test_http.py b/frigate/test/test_http.py index 8c89e0433..46de1307f 100644 --- a/frigate/test/test_http.py +++ b/frigate/test/test_http.py @@ -275,7 +275,7 @@ class TestHttp(unittest.TestCase): event = client.get(f"/events/{id}").json() assert event assert event["id"] == id - assert event["sub_label"] == "" + assert event["sub_label"] == None def 
test_sub_label_list(self): app = create_fastapi_app( diff --git a/web/src/components/overlay/dialog/TextEntryDialog.tsx b/web/src/components/overlay/dialog/TextEntryDialog.tsx index d7b90aabb..c11a84ae7 100644 --- a/web/src/components/overlay/dialog/TextEntryDialog.tsx +++ b/web/src/components/overlay/dialog/TextEntryDialog.tsx @@ -86,7 +86,9 @@ export default function TextEntryDialog({ )} /> - + From 0de928703facc46fd2ed947dbb2be486b41ae4dc Mon Sep 17 00:00:00 2001 From: Jason Hunter Date: Mon, 24 Feb 2025 10:56:01 -0500 Subject: [PATCH 39/51] Initial implementation of D-FINE model via ONNX (#16772) * initial implementation of D-FINE model * revert docker-compose * add docs for D-FINE * remove weird auto-format issue --- .devcontainer/devcontainer.json | 47 +++++++++++++--- .../onnxruntime-gpu/devcontainer-feature.json | 22 ++++++++ .../features/onnxruntime-gpu/install.sh | 15 +++++ docs/docs/configuration/object_detectors.md | 55 ++++++++++++++++++- frigate/detectors/detector_config.py | 1 + frigate/detectors/plugins/onnx.py | 17 +++++- frigate/util/model.py | 27 +++++++++ 7 files changed, 172 insertions(+), 12 deletions(-) create mode 100644 .devcontainer/features/onnxruntime-gpu/devcontainer-feature.json create mode 100644 .devcontainer/features/onnxruntime-gpu/install.sh diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 63adae73d..c782fb32f 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -8,9 +8,25 @@ "overrideCommand": false, "remoteUser": "vscode", "features": { - "ghcr.io/devcontainers/features/common-utils:1": {} + "ghcr.io/devcontainers/features/common-utils:2": {} + // Uncomment the following lines to use ONNX Runtime with CUDA support + // "ghcr.io/devcontainers/features/nvidia-cuda:1": { + // "installCudnn": true, + // "installNvtx": true, + // "installToolkit": true, + // "cudaVersion": "12.5", + // "cudnnVersion": "9.4.0.58" + // }, + // "./features/onnxruntime-gpu": {} }, - "forwardPorts": [8971, 5000, 5001, 5173, 8554, 8555], + "forwardPorts": [ + 8971, + 5000, + 5001, + 5173, + 8554, + 8555 + ], "portsAttributes": { "8971": { "label": "External NGINX", @@ -64,10 +80,18 @@ "editor.formatOnType": true, "python.testing.pytestEnabled": false, "python.testing.unittestEnabled": true, - "python.testing.unittestArgs": ["-v", "-s", "./frigate/test"], + "python.testing.unittestArgs": [ + "-v", + "-s", + "./frigate/test" + ], "files.trimTrailingWhitespace": true, - "eslint.workingDirectories": ["./web"], - "isort.args": ["--settings-path=./pyproject.toml"], + "eslint.workingDirectories": [ + "./web" + ], + "isort.args": [ + "--settings-path=./pyproject.toml" + ], "[python]": { "editor.defaultFormatter": "charliermarsh.ruff", "editor.formatOnSave": true, @@ -86,9 +110,16 @@ ], "editor.tabSize": 2 }, - "cSpell.ignoreWords": ["rtmp"], - "cSpell.words": ["preact", "astype", "hwaccel", "mqtt"] + "cSpell.ignoreWords": [ + "rtmp" + ], + "cSpell.words": [ + "preact", + "astype", + "hwaccel", + "mqtt" + ] } } } -} +} \ No newline at end of file diff --git a/.devcontainer/features/onnxruntime-gpu/devcontainer-feature.json b/.devcontainer/features/onnxruntime-gpu/devcontainer-feature.json new file mode 100644 index 000000000..30514442b --- /dev/null +++ b/.devcontainer/features/onnxruntime-gpu/devcontainer-feature.json @@ -0,0 +1,22 @@ +{ + "id": "onnxruntime-gpu", + "version": "0.0.1", + "name": "ONNX Runtime GPU (Nvidia)", + "description": "Installs ONNX Runtime for Nvidia GPUs.", + "documentationURL": "", + "options": 
{ + "version": { + "type": "string", + "proposals": [ + "latest", + "1.20.1", + "1.20.0" + ], + "default": "latest", + "description": "Version of ONNX Runtime to install" + } + }, + "installsAfter": [ + "ghcr.io/devcontainers/features/nvidia-cuda" + ] +} \ No newline at end of file diff --git a/.devcontainer/features/onnxruntime-gpu/install.sh b/.devcontainer/features/onnxruntime-gpu/install.sh new file mode 100644 index 000000000..0c090beec --- /dev/null +++ b/.devcontainer/features/onnxruntime-gpu/install.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -e + +VERSION=${VERSION} + +python3 -m pip config set global.break-system-packages true +# if VERSION == "latest" or VERSION is empty, install the latest version +if [ "$VERSION" == "latest" ] || [ -z "$VERSION" ]; then + python3 -m pip install onnxruntime-gpu +else + python3 -m pip install onnxruntime-gpu==$VERSION +fi + +echo "Done!" \ No newline at end of file diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 21ba46c2d..bc76779cb 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -10,25 +10,31 @@ title: Object Detectors Frigate supports multiple different detectors that work on different types of hardware: **Most Hardware** + - [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices. - [Hailo](#hailo-8l): The Hailo8 AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices. **AMD** + - [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection. - [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured. **Intel** + - [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection. - [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured. **Nvidia** + - [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models. - [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured. **Rockchip** + - [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs. **For Testing** + - [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results. ::: @@ -147,7 +153,6 @@ model: path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef ``` - ## OpenVINO Detector The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`. @@ -412,7 +417,7 @@ When using docker compose: ```yaml services: frigate: -... + environment: HSA_OVERRIDE_GFX_VERSION: "9.0.0" ``` @@ -555,6 +560,50 @@ model: Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
+#### D-FINE + +[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. + +To export as ONNX: + +1. Clone: https://github.com/Peterande/D-FINE and install all dependencies. +2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE). +3. Modify line 58 of `tools/deployment/export_onnx.py` and change batch size to 1: `data = torch.rand(1, 3, 640, 640)` +4. Run the export, making sure you select the right config for your checkpoint. + +Example: + +``` +python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth +``` + +:::tip + +Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually. + +Make sure you change the batch size to 1 before exporting. + +::: + +After placing the exported ONNX model in your config folder, you can use the following configuration: + +```yaml +detectors: + onnx: + type: onnx + +model: + model_type: dfine + width: 640 + height: 640 + input_tensor: nchw + input_dtype: float + path: /config/model_cache/dfine_m_obj2coco.onnx + labelmap_path: /labelmap/coco-80.txt +``` + +Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects. + ## CPU Detector (not recommended) The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`.
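For illustration, a minimal CPU-based detector configuration could look like the sketch below; the detector name `cpu1` and the `num_threads` value are placeholders to adjust for your system, not values mandated by this patch:

```yaml
detectors:
  cpu1:
    type: cpu
    # assumption: tune the thread count to the CPU cores you can spare
    num_threads: 3
```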
@@ -704,7 +753,7 @@ To convert a onnx model to the rknn format using the [rknn-toolkit2](https://git This is an example configuration file that you need to adjust to your specific onnx model: ```yaml -soc: ["rk3562","rk3566", "rk3568", "rk3576", "rk3588"] +soc: ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"] quantization: false output_name: "{input_basename}" diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index c8aea0a1d..16599b141 100644 --- a/frigate/detectors/detector_config.py +++ b/frigate/detectors/detector_config.py @@ -37,6 +37,7 @@ class ModelTypeEnum(str, Enum): yolox = "yolox" yolov9 = "yolov9" yolonas = "yolonas" + dfine = "dfine" class ModelConfig(BaseModel): diff --git a/frigate/detectors/plugins/onnx.py b/frigate/detectors/plugins/onnx.py index c8589145a..13a948de9 100644 --- a/frigate/detectors/plugins/onnx.py +++ b/frigate/detectors/plugins/onnx.py @@ -9,7 +9,11 @@ from frigate.detectors.detector_config import ( BaseDetectorConfig, ModelTypeEnum, ) -from frigate.util.model import get_ort_providers, post_process_yolov9 +from frigate.util.model import ( + get_ort_providers, + post_process_dfine, + post_process_yolov9, +) logger = logging.getLogger(__name__) @@ -41,6 +45,7 @@ class ONNXDetector(DetectionApi): providers, options = get_ort_providers( detector_config.device == "CPU", detector_config.device ) + self.model = ort.InferenceSession( path, providers=providers, provider_options=options ) @@ -55,6 +60,16 @@ class ONNXDetector(DetectionApi): logger.info(f"ONNX: {path} loaded") def detect_raw(self, tensor_input: np.ndarray): + if self.onnx_model_type == ModelTypeEnum.dfine: + tensor_output = self.model.run( + None, + { + "images": tensor_input, + "orig_target_sizes": np.array([[self.h, self.w]], dtype=np.int64), + }, + ) + return post_process_dfine(tensor_output, self.w, self.h) + model_input_name = self.model.get_inputs()[0].name tensor_output = self.model.run(None, {model_input_name: tensor_input}) diff --git a/frigate/util/model.py b/frigate/util/model.py index da7b1a50a..0428a42ff 100644 --- a/frigate/util/model.py +++ b/frigate/util/model.py @@ -9,7 +9,34 @@ import onnxruntime as ort logger = logging.getLogger(__name__) + ### Post Processing +def post_process_dfine(tensor_output: np.ndarray, width, height) -> np.ndarray: + class_ids = tensor_output[0][tensor_output[2] > 0.4] + boxes = tensor_output[1][tensor_output[2] > 0.4] + scores = tensor_output[2][tensor_output[2] > 0.4] + + input_shape = np.array([height, width, height, width]) + boxes = np.divide(boxes, input_shape, dtype=np.float32) + indices = cv2.dnn.NMSBoxes(boxes, scores, score_threshold=0.4, nms_threshold=0.4) + detections = np.zeros((20, 6), np.float32) + + for i, (bbox, confidence, class_id) in enumerate( + zip(boxes[indices], scores[indices], class_ids[indices]) + ): + if i == 20: + break + + detections[i] = [ + class_id, + confidence, + bbox[1], + bbox[0], + bbox[3], + bbox[2], + ] + + return detections def post_process_yolov9(predictions: np.ndarray, width, height) -> np.ndarray: From 7ce1b354cc8fc0811d8ca21d400af16fdd9e916f Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 25 Feb 2025 10:02:56 -0700 Subject: [PATCH 40/51] Use native arm runner for arm docker builds (#16804) * Try building jetpack on latest ubuntu version * Update ci.yml * run natively on arm * Run all arm builds using arm runner * Update ci.yml --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 398d7fc8c..9a666b897 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,7 +42,7 @@ jobs: tags: ${{ steps.setup.outputs.image-name }}-amd64 cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 arm64_build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-22.04-arm name: ARM Build steps: - name: Check out code @@ -107,7 +107,7 @@ jobs: *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5 *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max jetson_jp6_build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-22.04-arm name: Jetson Jetpack 6 steps: - name: Check out code @@ -177,7 +177,7 @@ jobs: rocm.tags=${{ steps.setup.outputs.image-name }}-rocm *.cache-from=type=gha arm64_extra_builds: - runs-on: ubuntu-22.04 + runs-on: ubuntu-22.04-arm name: ARM Extra Build needs: - arm64_build From 7eb3c87fa0d8a5737d866867bdbaea7b690b4642 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Tue, 25 Feb 2025 18:17:39 -0700 Subject: [PATCH 41/51] UI tweaks (#16813) * Add escape to close review details * Refresh review page automatically if there are currently no items to review --- .../components/overlay/detail/ReviewDetailDialog.tsx | 9 +++++++++ web/src/views/events/EventView.tsx | 10 ++++++++++ 2 files changed, 19 insertions(+) diff --git a/web/src/components/overlay/detail/ReviewDetailDialog.tsx b/web/src/components/overlay/detail/ReviewDetailDialog.tsx index 76234193c..2570fd033 100644 --- a/web/src/components/overlay/detail/ReviewDetailDialog.tsx +++ b/web/src/components/overlay/detail/ReviewDetailDialog.tsx @@ -41,6 +41,7 @@ import { useOverlayState } from "@/hooks/use-overlay-state"; import { DownloadVideoButton } from "@/components/button/DownloadVideoButton"; import { TooltipPortal } from "@radix-ui/react-tooltip"; import { LuSearch } from "react-icons/lu"; +import useKeyboardListener from "@/hooks/use-keyboard-listener"; type ReviewDetailDialogProps = { review?: ReviewSegment; @@ -133,6 +134,14 @@ export default function ReviewDetailDialog({ // eslint-disable-next-line react-hooks/exhaustive-deps }, [review]); + // keyboard listener + + useKeyboardListener(["Esc"], (key, modifiers) => { + if (key == "Esc" && modifiers.down && !modifiers.repeat) { + setIsOpen(false); + } + }); + const Overlay = isDesktop ? Sheet : MobilePage; const Content = isDesktop ? SheetContent : MobilePageContent; const Header = isDesktop ? 
SheetHeader : MobilePageHeader; diff --git a/web/src/views/events/EventView.tsx b/web/src/views/events/EventView.tsx index e8e864e32..583b47fe9 100644 --- a/web/src/views/events/EventView.tsx +++ b/web/src/views/events/EventView.tsx @@ -621,6 +621,16 @@ function DetectionReview({ // existing review item + useEffect(() => { + if (loading || currentItems == null || itemsToReview == undefined) { + return; + } + + if (currentItems.length == 0 && itemsToReview > 0) { + pullLatestData(); + } + }, [loading, currentItems, itemsToReview, pullLatestData]); + useEffect(() => { if (!startTime || !currentItems || currentItems.length == 0) { return; From 447f26e1b97db60dec0a225744428dbe77a1a658 Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 26 Feb 2025 08:29:34 -0600 Subject: [PATCH 42/51] Fix lpr metrics and add yolov9 plate detection metric (#16827) --- .../common/license_plate/mixin.py | 22 +++++++++++++++++++ frigate/data_processing/post/license_plate.py | 15 ++++--------- .../real_time/license_plate.py | 9 -------- frigate/data_processing/types.py | 2 ++ frigate/stats/util.py | 4 ++++ 5 files changed, 32 insertions(+), 20 deletions(-) diff --git a/frigate/data_processing/common/license_plate/mixin.py b/frigate/data_processing/common/license_plate/mixin.py index 1723d213e..aa03bc985 100644 --- a/frigate/data_processing/common/license_plate/mixin.py +++ b/frigate/data_processing/common/license_plate/mixin.py @@ -816,6 +816,20 @@ class LicensePlateProcessingMixin: # 5. Return True if we should keep the previous plate (i.e., if it scores higher) return prev_score > curr_score + def __update_yolov9_metrics(self, duration: float) -> None: + """ + Update inference metrics. + """ + self.metrics.yolov9_lpr_fps.value = ( + self.metrics.yolov9_lpr_fps.value * 9 + duration + ) / 10 + + def __update_lpr_metrics(self, duration: float) -> None: + """ + Update inference metrics. 
+ """ + self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10 + def lpr_process(self, obj_data: dict[str, any], frame: np.ndarray): """Look for license plates in image.""" @@ -843,6 +857,7 @@ class LicensePlateProcessingMixin: if self.requires_license_plate_detection: logger.debug("Running manual license_plate detection.") + car_box = obj_data.get("box") if not car_box: @@ -867,6 +882,9 @@ class LicensePlateProcessingMixin: logger.debug( f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms" ) + self.__update_yolov9_metrics( + datetime.datetime.now().timestamp() - yolov9_start + ) if not license_plate: logger.debug("Detected no license plates for car object.") @@ -945,11 +963,15 @@ class LicensePlateProcessingMixin: license_plate_frame, ) + start = datetime.datetime.now().timestamp() + # run detection, returns results sorted by confidence, best first license_plates, confidences, areas = self._process_license_plate( license_plate_frame ) + self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start) + logger.debug(f"Text boxes: {license_plates}") logger.debug(f"Confidences: {confidences}") logger.debug(f"Areas: {areas}") diff --git a/frigate/data_processing/post/license_plate.py b/frigate/data_processing/post/license_plate.py index 9a9974bc7..2c80418c7 100644 --- a/frigate/data_processing/post/license_plate.py +++ b/frigate/data_processing/post/license_plate.py @@ -40,12 +40,6 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): self.config = config super().__init__(config, metrics, model_runner) - def __update_metrics(self, duration: float) -> None: - """ - Update inference metrics. - """ - self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10 - def process_data( self, data: dict[str, any], data_type: PostProcessDataEnum ) -> None: @@ -57,8 +51,6 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): Returns: None. 
""" - start = datetime.datetime.now().timestamp() - event_id = data["event_id"] camera_name = data["camera"] @@ -128,7 +120,10 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): return if WRITE_DEBUG_IMAGES: - cv2.imwrite(f"debug/frames/lpr_post_{start}.jpg", image) + cv2.imwrite( + f"debug/frames/lpr_post_{datetime.datetime.now().timestamp()}.jpg", + image, + ) # convert to yuv for processing frame = cv2.cvtColor(image, cv2.COLOR_BGR2YUV_I420) @@ -210,8 +205,6 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi): logger.debug(f"Post processing plate: {event_id}, {frame_time}") self.lpr_process(keyframe_obj_data, frame) - self.__update_metrics(datetime.datetime.now().timestamp() - start) - def handle_request(self, topic, request_data) -> dict[str, any] | None: if topic == EmbeddingsRequestEnum.reprocess_plate.value: event = request_data["event"] diff --git a/frigate/data_processing/real_time/license_plate.py b/frigate/data_processing/real_time/license_plate.py index 2809e861f..c8f0efa11 100644 --- a/frigate/data_processing/real_time/license_plate.py +++ b/frigate/data_processing/real_time/license_plate.py @@ -1,6 +1,5 @@ """Handle processing images for face detection and recognition.""" -import datetime import logging import numpy as np @@ -33,17 +32,9 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess self.config = config super().__init__(config, metrics) - def __update_metrics(self, duration: float) -> None: - """ - Update inference metrics. - """ - self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10 - def process_frame(self, obj_data: dict[str, any], frame: np.ndarray): """Look for license plates in image.""" - start = datetime.datetime.now().timestamp() self.lpr_process(obj_data, frame) - self.__update_metrics(datetime.datetime.now().timestamp() - start) def handle_request(self, topic, request_data) -> dict[str, any] | None: return diff --git a/frigate/data_processing/types.py b/frigate/data_processing/types.py index 6f87f77f9..29abb22d1 100644 --- a/frigate/data_processing/types.py +++ b/frigate/data_processing/types.py @@ -10,12 +10,14 @@ class DataProcessorMetrics: text_embeddings_sps: Synchronized face_rec_fps: Synchronized alpr_pps: Synchronized + yolov9_lpr_fps: Synchronized def __init__(self): self.image_embeddings_fps = mp.Value("d", 0.01) self.text_embeddings_sps = mp.Value("d", 0.01) self.face_rec_fps = mp.Value("d", 0.01) self.alpr_pps = mp.Value("d", 0.01) + self.yolov9_lpr_fps = mp.Value("d", 0.01) class DataProcessorModelRunner: diff --git a/frigate/stats/util.py b/frigate/stats/util.py index 262cec3d2..3d836868e 100644 --- a/frigate/stats/util.py +++ b/frigate/stats/util.py @@ -302,6 +302,10 @@ def stats_snapshot( stats["embeddings"]["plate_recognition_speed"] = round( embeddings_metrics.alpr_pps.value * 1000, 2 ) + if "license_plate" not in config.objects.all_objects: + stats["embeddings"]["yolov9_plate_detection_speed"] = round( + embeddings_metrics.yolov9_lpr_fps.value * 1000, 2 + ) get_processing_stats(config, stats, hwaccel_errors) From d0e9bcbfdcffdcbe0c7eb45971bd16ef41ddce8b Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Wed, 26 Feb 2025 08:58:25 -0600 Subject: [PATCH 43/51] Add ability to use Jina CLIP V2 for semantic search (#16826) * add wheels * move extra index url to bottom * config model option * add postprocess * fix config * jina v2 embedding class * use jina v2 in embeddings * fix ov 
inference * frontend * update reference config * revert device * fix truncation * return np tensors * use correct embeddings from inference * manual preprocess * clean up * docs * lower batch size for v2 only * docs clarity * wording --- docker/main/requirements-wheels.txt | 1 - docs/docs/configuration/reference.md | 2 + docs/docs/configuration/semantic_search.md | 34 ++- frigate/config/classification.py | 12 +- frigate/embeddings/embeddings.py | 86 +++++-- frigate/embeddings/onnx/base_embedding.py | 7 +- frigate/embeddings/onnx/jina_v2_embedding.py | 231 +++++++++++++++++++ frigate/embeddings/onnx/runner.py | 9 +- web/src/pages/Explore.tsx | 45 +++- web/src/types/frigateConfig.ts | 2 + 10 files changed, 380 insertions(+), 49 deletions(-) create mode 100644 frigate/embeddings/onnx/jina_v2_embedding.py diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt index 320ce3334..25286617e 100644 --- a/docker/main/requirements-wheels.txt +++ b/docker/main/requirements-wheels.txt @@ -54,7 +54,6 @@ pywebpush == 2.0.* pyclipper == 1.3.* shapely == 2.0.* Levenshtein==0.26.* -prometheus-client == 0.21.* # HailoRT Wheels appdirs==1.4.* argcomplete==2.0.* diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index b791e708a..c64272214 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -536,6 +536,8 @@ semantic_search: enabled: False # Optional: Re-index embeddings database from historical tracked objects (default: shown below) reindex: False + # Optional: Set the model used for embeddings. (default: shown below) + model: "jinav1" # Optional: Set the model size used for embeddings. (default: shown below) # NOTE: small model runs on CPU and large model runs on GPU model_size: "small" diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md index bd3d79cae..07e2cbfb2 100644 --- a/docs/docs/configuration/semantic_search.md +++ b/docs/docs/configuration/semantic_search.md @@ -5,7 +5,7 @@ title: Semantic Search Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results. -Frigate uses [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create and save embeddings to Frigate's database. All of this runs locally. +Frigate uses models from [Jina AI](https://huggingface.co/jinaai) to create and save embeddings to Frigate's database. All of this runs locally. Semantic Search is accessed via the _Explore_ view in the Frigate UI. @@ -35,23 +35,47 @@ If you are enabling Semantic Search for the first time, be advised that Frigate ::: -### Jina AI CLIP +### Jina AI CLIP (version 1) -The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. 
When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails. +The [V1 model from Jina](https://huggingface.co/jinaai/jina-clip-v1) has a vision model which is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails. -The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions. +The V1 text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions. -Differently weighted versions of the Jina model are available and can be selected by setting the `model_size` config option as `small` or `large`: +Differently weighted versions of the Jina models are available and can be selected by setting the `model_size` config option as `small` or `large`: ```yaml semantic_search: enabled: True + model: "jinav1" model_size: small ``` - Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable. - Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality. +### Jina AI CLIP (version 2) + +Frigate also supports the [V2 model from Jina](https://huggingface.co/jinaai/jina-clip-v2), which introduces multilingual support (89 languages). In contrast, the V1 model only supports English. + +V2 offers only a 3% performance improvement over V1 in both text-image and text-text retrieval tasks, an upgrade that is unlikely to yield noticeable real-world benefits. Additionally, V2 has _significantly_ higher RAM and GPU requirements, leading to increased inference time and memory usage. If you plan to use V2, ensure your system has ample RAM and a discrete GPU. CPU inference (with the `small` model) using V2 is not recommended. + +To use the V2 model, update the `model` parameter in your config: + +```yaml +semantic_search: + enabled: True + model: "jinav2" + model_size: large +``` + +For most users, especially native English speakers, the V1 model remains the recommended choice. + +:::note + +Switching between V1 and V2 requires reindexing your embeddings. To do this, set `reindex: True` in your Semantic Search configuration and restart Frigate. The embeddings from V1 and V2 are incompatible, and failing to reindex will result in incorrect search results. + +::: + ### GPU Acceleration The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. 
This depends on the Docker build that is used. diff --git a/frigate/config/classification.py b/frigate/config/classification.py index 8a8e95861..f3416b009 100644 --- a/frigate/config/classification.py +++ b/frigate/config/classification.py @@ -1,3 +1,4 @@ +from enum import Enum from typing import Dict, List, Optional from pydantic import Field @@ -11,6 +12,11 @@ __all__ = [ ] +class SemanticSearchModelEnum(str, Enum): + jinav1 = "jinav1" + jinav2 = "jinav2" + + class BirdClassificationConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable bird classification.") threshold: float = Field( @@ -30,7 +36,11 @@ class ClassificationConfig(FrigateBaseModel): class SemanticSearchConfig(FrigateBaseModel): enabled: bool = Field(default=False, title="Enable semantic search.") reindex: Optional[bool] = Field( - default=False, title="Reindex all detections on startup." + default=False, title="Reindex all tracked objects on startup." + ) + model: Optional[SemanticSearchModelEnum] = Field( + default=SemanticSearchModelEnum.jinav1, + title="The CLIP model to use for semantic search.", ) model_size: str = Field( default="small", title="The size of the embeddings model used." diff --git a/frigate/embeddings/embeddings.py b/frigate/embeddings/embeddings.py index c06f46ba4..7e866d1fe 100644 --- a/frigate/embeddings/embeddings.py +++ b/frigate/embeddings/embeddings.py @@ -10,6 +10,7 @@ from playhouse.shortcuts import model_to_dict from frigate.comms.inter_process import InterProcessRequestor from frigate.config import FrigateConfig +from frigate.config.classification import SemanticSearchModelEnum from frigate.const import ( CONFIG_DIR, UPDATE_EMBEDDINGS_REINDEX_PROGRESS, @@ -23,6 +24,7 @@ from frigate.util.builtin import serialize from frigate.util.path import get_event_thumbnail_bytes from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding +from .onnx.jina_v2_embedding import JinaV2Embedding logger = logging.getLogger(__name__) @@ -75,18 +77,7 @@ class Embeddings: # Create tables if they don't exist self.db.create_embeddings_tables() - models = [ - "jinaai/jina-clip-v1-text_model_fp16.onnx", - "jinaai/jina-clip-v1-tokenizer", - "jinaai/jina-clip-v1-vision_model_fp16.onnx" - if config.semantic_search.model_size == "large" - else "jinaai/jina-clip-v1-vision_model_quantized.onnx", - "jinaai/jina-clip-v1-preprocessor_config.json", - "facenet-facenet.onnx", - "paddleocr-onnx-detection.onnx", - "paddleocr-onnx-classification.onnx", - "paddleocr-onnx-recognition.onnx", - ] + models = self.get_model_definitions() for model in models: self.requestor.send_data( @@ -97,17 +88,64 @@ class Embeddings: }, ) - self.text_embedding = JinaV1TextEmbedding( - model_size=config.semantic_search.model_size, - requestor=self.requestor, - device="CPU", + if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2: + # Single JinaV2Embedding instance for both text and vision + self.embedding = JinaV2Embedding( + model_size=self.config.semantic_search.model_size, + requestor=self.requestor, + device="GPU" + if self.config.semantic_search.model_size == "large" + else "CPU", + ) + self.text_embedding = lambda input_data: self.embedding( + input_data, embedding_type="text" + ) + self.vision_embedding = lambda input_data: self.embedding( + input_data, embedding_type="vision" + ) + else: # Default to jinav1 + self.text_embedding = JinaV1TextEmbedding( + model_size=config.semantic_search.model_size, + requestor=self.requestor, + device="CPU", + ) + self.vision_embedding = 
JinaV1ImageEmbedding( + model_size=config.semantic_search.model_size, + requestor=self.requestor, + device="GPU" if config.semantic_search.model_size == "large" else "CPU", + ) + + def get_model_definitions(self): + # Version-specific models + if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2: + models = [ + "jinaai/jina-clip-v2-tokenizer", + "jinaai/jina-clip-v2-model_fp16.onnx" + if self.config.semantic_search.model_size == "large" + else "jinaai/jina-clip-v2-model_quantized.onnx", + "jinaai/jina-clip-v2-preprocessor_config.json", + ] + else: # Default to jinav1 + models = [ + "jinaai/jina-clip-v1-text_model_fp16.onnx", + "jinaai/jina-clip-v1-tokenizer", + "jinaai/jina-clip-v1-vision_model_fp16.onnx" + if self.config.semantic_search.model_size == "large" + else "jinaai/jina-clip-v1-vision_model_quantized.onnx", + "jinaai/jina-clip-v1-preprocessor_config.json", + ] + + # Add common models + models.extend( + [ + "facenet-facenet.onnx", + "paddleocr-onnx-detection.onnx", + "paddleocr-onnx-classification.onnx", + "paddleocr-onnx-recognition.onnx", + ] ) - self.vision_embedding = JinaV1ImageEmbedding( - model_size=config.semantic_search.model_size, - requestor=self.requestor, - device="GPU" if config.semantic_search.model_size == "large" else "CPU", - ) + return models def embed_thumbnail( self, event_id: str, thumbnail: bytes, upsert: bool = True @@ -244,7 +282,11 @@ class Embeddings: # Get total count of events to process total_events = Event.select().count() - batch_size = 32 + batch_size = ( + 4 + if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2 + else 32 + ) current_page = 1 totals = { diff --git a/frigate/embeddings/onnx/base_embedding.py b/frigate/embeddings/onnx/base_embedding.py index 6f74afa2a..a2ea92674 100644 --- a/frigate/embeddings/onnx/base_embedding.py +++ b/frigate/embeddings/onnx/base_embedding.py @@ -72,6 +72,9 @@ class BaseEmbedding(ABC): return image + def _postprocess_outputs(self, outputs: any) -> any: + return outputs + def __call__( self, inputs: list[str] | list[Image.Image] | list[str] ) -> list[np.ndarray]: @@ -91,5 +94,7 @@ class BaseEmbedding(ABC): else: logger.warning(f"Expected input '{key}' not found in onnx_inputs") - embeddings = self.runner.run(onnx_inputs)[0] + outputs = self.runner.run(onnx_inputs)[0] + embeddings = self._postprocess_outputs(outputs) + return [embedding for embedding in embeddings] diff --git a/frigate/embeddings/onnx/jina_v2_embedding.py b/frigate/embeddings/onnx/jina_v2_embedding.py new file mode 100644 index 000000000..be6573e50 --- /dev/null +++ b/frigate/embeddings/onnx/jina_v2_embedding.py @@ -0,0 +1,231 @@ +"""JinaV2 Embeddings.""" + +import io +import logging +import os + +import numpy as np +from PIL import Image +from transformers import AutoTokenizer +from transformers.utils.logging import disable_progress_bar, set_verbosity_error + +from frigate.comms.inter_process import InterProcessRequestor +from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE +from frigate.types import ModelStatusTypesEnum +from frigate.util.downloader import ModelDownloader + +from .base_embedding import BaseEmbedding +from .runner import ONNXModelRunner + +# disables the progress bar and download logging for downloading tokenizers and image processors +disable_progress_bar() +set_verbosity_error() +logger = logging.getLogger(__name__) + + +class JinaV2Embedding(BaseEmbedding): + def __init__( + self, + model_size: str, + requestor: InterProcessRequestor, + device: str = "AUTO", + embedding_type: str = 
None, + ): + model_file = ( + "model_fp16.onnx" if model_size == "large" else "model_quantized.onnx" + ) + super().__init__( + model_name="jinaai/jina-clip-v2", + model_file=model_file, + download_urls={ + model_file: f"https://huggingface.co/jinaai/jina-clip-v2/resolve/main/onnx/{model_file}", + "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v2/resolve/main/preprocessor_config.json", + }, + ) + self.tokenizer_file = "tokenizer" + self.embedding_type = embedding_type + self.requestor = requestor + self.model_size = model_size + self.device = device + self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name) + self.tokenizer = None + self.image_processor = None + self.runner = None + files_names = list(self.download_urls.keys()) + [self.tokenizer_file] + if not all( + os.path.exists(os.path.join(self.download_path, n)) for n in files_names + ): + logger.debug(f"starting model download for {self.model_name}") + self.downloader = ModelDownloader( + model_name=self.model_name, + download_path=self.download_path, + file_names=files_names, + download_func=self._download_model, + ) + self.downloader.ensure_model_files() + else: + self.downloader = None + ModelDownloader.mark_files_state( + self.requestor, + self.model_name, + files_names, + ModelStatusTypesEnum.downloaded, + ) + self._load_model_and_utils() + logger.debug(f"models are already downloaded for {self.model_name}") + + def _download_model(self, path: str): + try: + file_name = os.path.basename(path) + + if file_name in self.download_urls: + ModelDownloader.download_from_url(self.download_urls[file_name], path) + elif file_name == self.tokenizer_file: + if not os.path.exists(os.path.join(path, self.model_name)): + logger.info(f"Downloading {self.model_name} tokenizer") + + tokenizer = AutoTokenizer.from_pretrained( + self.model_name, + trust_remote_code=True, + cache_dir=os.path.join( + MODEL_CACHE_DIR, self.model_name, "tokenizer" + ), + clean_up_tokenization_spaces=True, + ) + tokenizer.save_pretrained(path) + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": f"{self.model_name}-{file_name}", + "state": ModelStatusTypesEnum.downloaded, + }, + ) + except Exception: + self.requestor.send_data( + UPDATE_MODEL_STATE, + { + "model": f"{self.model_name}-{file_name}", + "state": ModelStatusTypesEnum.error, + }, + ) + + def _load_model_and_utils(self): + if self.runner is None: + if self.downloader: + self.downloader.wait_for_download() + + tokenizer_path = os.path.join( + f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer" + ) + self.tokenizer = AutoTokenizer.from_pretrained( + self.model_name, + cache_dir=tokenizer_path, + trust_remote_code=True, + clean_up_tokenization_spaces=True, + ) + + self.runner = ONNXModelRunner( + os.path.join(self.download_path, self.model_file), + self.device, + self.model_size, + ) + + def _preprocess_image(self, image_data: bytes | Image.Image) -> np.ndarray: + """ + Manually preprocess a single image from bytes or PIL.Image to (3, 512, 512). 
+ """ + if isinstance(image_data, bytes): + image = Image.open(io.BytesIO(image_data)) + else: + image = image_data + + if image.mode != "RGB": + image = image.convert("RGB") + + image = image.resize((512, 512), Image.Resampling.LANCZOS) + + # Convert to numpy array, normalize to [0, 1], and transpose to (channels, height, width) + image_array = np.array(image, dtype=np.float32) / 255.0 + image_array = np.transpose(image_array, (2, 0, 1)) # (H, W, C) -> (C, H, W) + + return image_array + + def _preprocess_inputs(self, raw_inputs): + """ + Preprocess inputs into a list of real input tensors (no dummies). + - For text: Returns list of input_ids. + - For vision: Returns list of pixel_values. + """ + if not isinstance(raw_inputs, list): + raw_inputs = [raw_inputs] + + processed = [] + if self.embedding_type == "text": + for text in raw_inputs: + input_ids = self.tokenizer([text], return_tensors="np")["input_ids"] + processed.append(input_ids) + elif self.embedding_type == "vision": + for img in raw_inputs: + pixel_values = self._preprocess_image(img) + processed.append( + pixel_values[np.newaxis, ...] + ) # Add batch dim: (1, 3, 512, 512) + else: + raise ValueError( + f"Invalid embedding_type: {self.embedding_type}. Must be 'text' or 'vision'." + ) + return processed + + def _postprocess_outputs(self, outputs): + """ + Process ONNX model outputs, truncating each embedding in the array to truncate_dim. + - outputs: NumPy array of embeddings. + - Returns: List of truncated embeddings. + """ + # size of vector in database + truncate_dim = 768 + + # jina v2 defaults to 1024 and uses Matryoshka representation, so + # truncating only causes an extremely minor decrease in retrieval accuracy + if outputs.shape[-1] > truncate_dim: + outputs = outputs[..., :truncate_dim] + + return outputs + + def __call__( + self, inputs: list[str] | list[Image.Image] | list[str], embedding_type=None + ) -> list[np.ndarray]: + self.embedding_type = embedding_type + if not self.embedding_type: + raise ValueError( + "embedding_type must be specified either in __init__ or __call__" + ) + + self._load_model_and_utils() + processed = self._preprocess_inputs(inputs) + batch_size = len(processed) + + # Prepare ONNX inputs with matching batch sizes + onnx_inputs = {} + if self.embedding_type == "text": + onnx_inputs["input_ids"] = np.stack([x[0] for x in processed]) + onnx_inputs["pixel_values"] = np.zeros( + (batch_size, 3, 512, 512), dtype=np.float32 + ) + elif self.embedding_type == "vision": + onnx_inputs["input_ids"] = np.zeros((batch_size, 16), dtype=np.int64) + onnx_inputs["pixel_values"] = np.stack([x[0] for x in processed]) + else: + raise ValueError("Invalid embedding type") + + # Run inference + outputs = self.runner.run(onnx_inputs) + if self.embedding_type == "text": + embeddings = outputs[2] # text embeddings + elif self.embedding_type == "vision": + embeddings = outputs[3] # image embeddings + else: + raise ValueError("Invalid embedding type") + + embeddings = self._postprocess_outputs(embeddings) + return [embedding for embedding in embeddings] diff --git a/frigate/embeddings/onnx/runner.py b/frigate/embeddings/onnx/runner.py index d380f45c1..c785c28f1 100644 --- a/frigate/embeddings/onnx/runner.py +++ b/frigate/embeddings/onnx/runner.py @@ -66,14 +66,9 @@ class ONNXModelRunner: def run(self, input: dict[str, Any]) -> Any: if self.type == "ov": infer_request = self.interpreter.create_infer_request() - input_tensor = list(input.values()) - if len(input_tensor) == 1: - input_tensor = 
ov.Tensor(array=input_tensor[0]) - else: - input_tensor = ov.Tensor(array=input_tensor) + outputs = infer_request.infer(input) - infer_request.infer(input_tensor) - return [infer_request.get_output_tensor().data] + return outputs elif self.type == "ort": return self.ort.run(None, input) diff --git a/web/src/pages/Explore.tsx b/web/src/pages/Explore.tsx index c005c43c2..af23c18f4 100644 --- a/web/src/pages/Explore.tsx +++ b/web/src/pages/Explore.tsx @@ -267,20 +267,41 @@ export default function Explore() { // model states - const { payload: textModelState } = useModelState( - "jinaai/jina-clip-v1-text_model_fp16.onnx", - ); - const { payload: textTokenizerState } = useModelState( - "jinaai/jina-clip-v1-tokenizer", - ); - const modelFile = - config?.semantic_search.model_size === "large" - ? "jinaai/jina-clip-v1-vision_model_fp16.onnx" - : "jinaai/jina-clip-v1-vision_model_quantized.onnx"; + const modelVersion = config?.semantic_search.model || "jinav1"; + const modelSize = config?.semantic_search.model_size || "small"; - const { payload: visionModelState } = useModelState(modelFile); + // Text model state + const { payload: textModelState } = useModelState( + modelVersion === "jinav1" + ? "jinaai/jina-clip-v1-text_model_fp16.onnx" + : modelSize === "large" + ? "jinaai/jina-clip-v2-model_fp16.onnx" + : "jinaai/jina-clip-v2-model_quantized.onnx", + ); + + // Tokenizer state + const { payload: textTokenizerState } = useModelState( + modelVersion === "jinav1" + ? "jinaai/jina-clip-v1-tokenizer" + : "jinaai/jina-clip-v2-tokenizer", + ); + + // Vision model state (same as text model for jinav2) + const visionModelFile = + modelVersion === "jinav1" + ? modelSize === "large" + ? "jinaai/jina-clip-v1-vision_model_fp16.onnx" + : "jinaai/jina-clip-v1-vision_model_quantized.onnx" + : modelSize === "large" + ? "jinaai/jina-clip-v2-model_fp16.onnx" + : "jinaai/jina-clip-v2-model_quantized.onnx"; + const { payload: visionModelState } = useModelState(visionModelFile); + + // Preprocessor/feature extractor state const { payload: visionFeatureExtractorState } = useModelState( - "jinaai/jina-clip-v1-preprocessor_config.json", + modelVersion === "jinav1" + ? 
"jinaai/jina-clip-v1-preprocessor_config.json" + : "jinaai/jina-clip-v2-preprocessor_config.json", ); const allModelsLoaded = useMemo(() => { diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts index 263883976..d021fde0f 100644 --- a/web/src/types/frigateConfig.ts +++ b/web/src/types/frigateConfig.ts @@ -20,6 +20,7 @@ export interface BirdseyeConfig { width: number; } +export type SearchModel = "jinav1" | "jinav2"; export type SearchModelSize = "small" | "large"; export interface CameraConfig { @@ -458,6 +459,7 @@ export interface FrigateConfig { semantic_search: { enabled: boolean; reindex: boolean; + model: SearchModel; model_size: SearchModelSize; }; From 4f855f82ea440d7a5f24eba0f93f5d633c251a86 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Wed, 26 Feb 2025 13:39:19 -0700 Subject: [PATCH 44/51] Simplify tensorrt (#16835) * Remove unneccessary trt wheels build * Cleanup * Try without local cuda * Keep specific cuda libs only * Cleanup * Add newer libcufft * remove target * Include more --- docker/tensorrt/Dockerfile.amd64 | 20 +++++++------------ docker/tensorrt/Dockerfile.base | 7 ++++++- .../etc/ld.so.conf.d/cuda_tensorrt.conf | 2 +- docker/tensorrt/requirements-amd64.txt | 1 + docker/tensorrt/trt.hcl | 1 - 5 files changed, 15 insertions(+), 16 deletions(-) diff --git a/docker/tensorrt/Dockerfile.amd64 b/docker/tensorrt/Dockerfile.amd64 index 6be11c210..e6429aa90 100644 --- a/docker/tensorrt/Dockerfile.amd64 +++ b/docker/tensorrt/Dockerfile.amd64 @@ -3,22 +3,16 @@ # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive -# Make this a separate target so it can be built/cached optionally -FROM wheels as trt-wheels -ARG DEBIAN_FRONTEND -ARG TARGETARCH -RUN python3 -m pip config set global.break-system-packages true - -# Add TensorRT wheels to another folder -COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt -RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt +# Globally set pip break-system-packages option to avoid having to specify it every time +ARG PIP_BREAK_SYSTEM_PACKAGES=1 FROM tensorrt-base AS frigate-tensorrt +ARG PIP_BREAK_SYSTEM_PACKAGES ENV TRT_VER=8.6.1 -RUN python3 -m pip config set global.break-system-packages true -RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ - pip3 install -U /deps/trt-wheels/*.whl && \ - ldconfig + +# Install TensorRT wheels +COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt +RUN pip3 install -U -r /requirements-tensorrt.txt && ldconfig WORKDIR /opt/frigate/ COPY --from=rootfs / / diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base index f9cdde587..6d8d9591b 100644 --- a/docker/tensorrt/Dockerfile.base +++ b/docker/tensorrt/Dockerfile.base @@ -22,9 +22,14 @@ FROM deps AS tensorrt-base #Disable S6 Global timeout ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 +# COPY TensorRT Model Generation Deps COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos -COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda + +# COPY Individual CUDA deps +COPY --from=trt-deps /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda/ +COPY --from=trt-deps /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda/ + COPY docker/tensorrt/detector/rootfs/ / ENV YOLO_MODELS="" diff --git 
a/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf index 561b7bcd4..72eec56e0 100644 --- a/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf +++ b/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf @@ -1,5 +1,5 @@ /usr/local/lib -/usr/local/cuda/lib64 +/usr/local/cuda /usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib /usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib /usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt index 8d520d9f9..0e003ca3d 100644 --- a/docker/tensorrt/requirements-amd64.txt +++ b/docker/tensorrt/requirements-amd64.txt @@ -11,6 +11,7 @@ nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64' nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64' nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64' nvidia-cufft-cu11==10.*; platform_machine == 'x86_64' +nvidia-cufft-cu12==11.*; platform_machine == 'x86_64' onnx==1.16.*; platform_machine == 'x86_64' onnxruntime-gpu==1.20.*; platform_machine == 'x86_64' protobuf==3.20.3; platform_machine == 'x86_64' diff --git a/docker/tensorrt/trt.hcl b/docker/tensorrt/trt.hcl index ba3b93244..80757ba6d 100644 --- a/docker/tensorrt/trt.hcl +++ b/docker/tensorrt/trt.hcl @@ -95,7 +95,6 @@ target "tensorrt" { wget = "target:wget", tensorrt-base = "target:tensorrt-base", rootfs = "target:rootfs" - wheels = "target:wheels" } target = "frigate-tensorrt" inherits = ["_build_args"] From 2b7b5e3f08e982c55eac890cc446bfc6db37c767 Mon Sep 17 00:00:00 2001 From: toperichvania Date: Thu, 27 Feb 2025 16:28:53 +0100 Subject: [PATCH 45/51] Fix incorrect storage usage per camera (#16825) (#16851) --- frigate/record/maintainer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frigate/record/maintainer.py b/frigate/record/maintainer.py index faa41f75f..1cabbfdda 100644 --- a/frigate/record/maintainer.py +++ b/frigate/record/maintainer.py @@ -473,7 +473,7 @@ class RecordingMaintainer(threading.Thread): # get the segment size of the cache file # file without faststart is same size segment_size = round( - float(os.path.getsize(cache_path)) / pow(2, 20), 1 + float(os.path.getsize(cache_path)) / pow(2, 20), 2 ) except OSError: segment_size = 0 From f221a7ae74573b60898a863de86c0161cfe0511b Mon Sep 17 00:00:00 2001 From: Jared Date: Thu, 27 Feb 2025 09:45:32 -0700 Subject: [PATCH 46/51] Quality of life documentation updates (#16852) * Update getting_started with full host:container syntax for hwacc * Update edgetpu.md Add a tip about the coral TPU not changing identification until after Frigate runs an inference on the TPU. --- docs/docs/guides/getting_started.md | 2 +- docs/docs/troubleshooting/edgetpu.md | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md index bb880b8f0..ed2cfb4f4 100644 --- a/docs/docs/guides/getting_started.md +++ b/docs/docs/guides/getting_started.md @@ -177,7 +177,7 @@ services: frigate: ... devices: - - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware + - /dev/dri/renderD128:/dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware ... 
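      # If your system exposes a different render node, map that node instead, for example:
      # - /dev/dri/renderD129:/dev/dri/renderD129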
``` diff --git a/docs/docs/troubleshooting/edgetpu.md b/docs/docs/troubleshooting/edgetpu.md index 2e10f0839..90006c41e 100644 --- a/docs/docs/troubleshooting/edgetpu.md +++ b/docs/docs/troubleshooting/edgetpu.md @@ -10,6 +10,12 @@ There are many possible causes for a USB coral not being detected and some are O 1. When the device is first plugged in and has not initialized it will appear as `1a6e:089a Global Unichip Corp.` when running `lsusb` or checking the hardware page in HA OS. 2. Once initialized, the device will appear as `18d1:9302 Google Inc.` when running `lsusb` or checking the hardware page in HA OS. +:::tip + +Using `lsusb` or checking the hardware page in HA OS will show the device as `1a6e:089a Global Unichip Corp.` until Frigate runs an inference using the Coral. So don't worry about the identification until after Frigate has attempted to run a detection on the Coral. + +::: + If the coral does not initialize then Frigate can not interface with it. Some common reasons for the USB based Coral not initializing are: ### Not Enough Power From db4152c4cab10decbc81eeaf3111f99a76e38e14 Mon Sep 17 00:00:00 2001 From: Nicolas Mowen Date: Thu, 27 Feb 2025 16:24:03 -0700 Subject: [PATCH 47/51] Fix jetson (#16854) * Fix jetson build * Update ci.yml * Update Dockerfile.base * Update Dockerfile.base * Update Dockerfile.base * Fix * Update ci.yml --- docker/tensorrt/Dockerfile.base | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base index 6d8d9591b..5ae018773 100644 --- a/docker/tensorrt/Dockerfile.base +++ b/docker/tensorrt/Dockerfile.base @@ -16,8 +16,16 @@ RUN apt-get update \ RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \ /tensorrt_libyolo.sh +# COPY required individual CUDA deps +RUN mkdir -p /usr/local/cuda-deps +RUN if [ "$TARGETARCH" = "amd64" ]; then \ + cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \ + cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ ; \ + fi + # Frigate w/ TensorRT Support as separate image FROM deps AS tensorrt-base +ARG TARGETARCH #Disable S6 Global timeout ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 @@ -26,9 +34,8 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos -# COPY Individual CUDA deps -COPY --from=trt-deps /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda/ -COPY --from=trt-deps /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda/ +# COPY Individual CUDA deps folder +COPY --from=trt-deps /usr/local/cuda-deps /usr/local/cuda COPY docker/tensorrt/detector/rootfs/ / ENV YOLO_MODELS="" From 8d2f461350ed6f1881eee15d7089014e24a9b0ed Mon Sep 17 00:00:00 2001 From: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com> Date: Fri, 28 Feb 2025 12:43:08 -0600 Subject: [PATCH 48/51] Embeddings tweaks (#16864) * make semantic search optional * config * frontend metrics * docs * tweak * fixes * also check genai cameras for embeddings context --- docs/docs/configuration/face_recognition.md | 5 +- docs/docs/configuration/genai.md | 6 -- docs/docs/configuration/reference.md | 1 - frigate/api/event.py | 5 +- frigate/app.py | 30 +++++++++- frigate/config/config.py | 11 ---- frigate/embeddings/__init__.py | 4 -- frigate/embeddings/maintainer.py | 65
++++++++++++--------- frigate/stats/util.py | 29 +++++---- web/src/pages/System.tsx | 6 +- web/src/types/frigateConfig.ts | 4 ++ 11 files changed, 95 insertions(+), 71 deletions(-) diff --git a/docs/docs/configuration/face_recognition.md b/docs/docs/configuration/face_recognition.md index aaab92e6d..4d934afce 100644 --- a/docs/docs/configuration/face_recognition.md +++ b/docs/docs/configuration/face_recognition.md @@ -9,7 +9,7 @@ Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize fa ## Configuration -Face recognition is disabled by default and requires semantic search to be enabled, face recognition must be enabled in your config file before it can be used. Semantic Search and face recognition are global configuration settings. +Face recognition is disabled by default and must be enabled in your config file before it can be used. Face recognition is a global configuration setting. ```yaml face_recognition: @@ -36,6 +36,7 @@ The accuracy of face recognition is heavily dependent on the quality of data giv :::tip When choosing images to include in the face training set it is recommended to always follow these recommendations: + - If it is difficult to make out details in a person's face, it will not be helpful in training. - Avoid images with under/over-exposure. - Avoid blurry / pixelated images. @@ -52,4 +53,4 @@ Then it is recommended to use the `Face Library` tab in Frigate to select and tr ### Step 2 - Expanding The Dataset -Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone. \ No newline at end of file +Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone. diff --git a/docs/docs/configuration/genai.md b/docs/docs/configuration/genai.md index 23f1c06be..e46107a82 100644 --- a/docs/docs/configuration/genai.md +++ b/docs/docs/configuration/genai.md @@ -7,12 +7,6 @@ Generative AI can be used to automatically generate descriptive text based on th Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI. -:::info - -Semantic Search must be enabled to use Generative AI. - -::: - ## Configuration Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below. diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md index c64272214..b53d9268f 100644 --- a/docs/docs/configuration/reference.md +++ b/docs/docs/configuration/reference.md @@ -570,7 +570,6 @@ lpr: known_plates: {} # Optional: Configuration for AI generated tracked object descriptions -# NOTE: Semantic Search must be enabled for this to do anything. # WARNING: Depending on the provider, this will send thumbnails over the internet # to Google or OpenAI's LLMs to generate descriptions. It can be overridden at # the camera level (enabled: False) to enhance privacy for indoor cameras.
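To make the camera-level override described above concrete, here is a minimal sketch; the camera name, provider, and model are assumptions for illustration, not part of this patch:

```yaml
genai:
  enabled: True
  provider: ollama                 # illustrative provider choice
  base_url: http://localhost:11434 # illustrative endpoint
  model: llava                     # illustrative model name

cameras:
  indoor_camera:
    genai:
      enabled: False # keeps this camera's thumbnails from being sent to the provider
```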
diff --git a/frigate/api/event.py b/frigate/api/event.py
index bb1bf7395..9a5578bae 100644
--- a/frigate/api/event.py
+++ b/frigate/api/event.py
@@ -1083,10 +1083,7 @@ def regenerate_description(
 
     camera_config = request.app.frigate_config.cameras[event.camera]
 
-    if (
-        request.app.frigate_config.semantic_search.enabled
-        and camera_config.genai.enabled
-    ):
+    if camera_config.genai.enabled:
         request.app.event_metadata_updater.publish((event.id, params.source))
 
     return JSONResponse(

diff --git a/frigate/app.py b/frigate/app.py
index 400d4bca0..8b63ab0a0 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -93,7 +93,13 @@ class FrigateApp:
         self.log_queue: Queue = mp.Queue()
         self.camera_metrics: dict[str, CameraMetrics] = {}
         self.embeddings_metrics: DataProcessorMetrics | None = (
-            DataProcessorMetrics() if config.semantic_search.enabled else None
+            DataProcessorMetrics()
+            if (
+                config.semantic_search.enabled
+                or config.lpr.enabled
+                or config.face_recognition.enabled
+            )
+            else None
         )
         self.ptz_metrics: dict[str, PTZMetrics] = {}
         self.processes: dict[str, int] = {}
@@ -236,7 +242,16 @@ class FrigateApp:
         logger.info(f"Review process started: {review_segment_process.pid}")
 
     def init_embeddings_manager(self) -> None:
-        if not self.config.semantic_search.enabled:
+        genai_cameras = [
+            c for c in self.config.cameras.values() if c.enabled and c.genai.enabled
+        ]
+
+        if (
+            not self.config.semantic_search.enabled
+            and not genai_cameras
+            and not self.config.lpr.enabled
+            and not self.config.face_recognition.enabled
+        ):
             return
 
         embedding_process = util.Process(
@@ -293,7 +308,16 @@ class FrigateApp:
             migrate_exports(self.config.ffmpeg, list(self.config.cameras.keys()))
 
     def init_embeddings_client(self) -> None:
-        if self.config.semantic_search.enabled:
+        genai_cameras = [
+            c for c in self.config.cameras.values() if c.enabled and c.genai.enabled
+        ]
+
+        if (
+            self.config.semantic_search.enabled
+            or self.config.lpr.enabled
+            or genai_cameras
+            or self.config.face_recognition.enabled
+        ):
             # Create a client for other processes to use
             self.embeddings = EmbeddingsContext(self.db)

diff --git a/frigate/config/config.py b/frigate/config/config.py
index 39ee31411..d2ca9a6f5 100644
--- a/frigate/config/config.py
+++ b/frigate/config/config.py
@@ -172,16 +172,6 @@ class RestreamConfig(BaseModel):
     model_config = ConfigDict(extra="allow")
 
 
-def verify_semantic_search_dependent_configs(config: FrigateConfig) -> None:
-    """Verify that semantic search is enabled if required features are enabled."""
-    if not config.semantic_search.enabled:
-        if config.genai.enabled:
-            raise ValueError("Genai requires semantic search to be enabled.")
-
-        if config.face_recognition.enabled:
-            raise ValueError("Face recognition requires semantic to be enabled.")
-
-
 def verify_config_roles(camera_config: CameraConfig) -> None:
     """Verify that roles are setup in the config correctly."""
     assigned_roles = list(
@@ -647,7 +637,6 @@ class FrigateConfig(FrigateBaseModel):
             detector_config.model = model
             self.detectors[key] = detector_config
 
-        verify_semantic_search_dependent_configs(self)
         return self
 
     @field_validator("cameras")

diff --git a/frigate/embeddings/__init__.py b/frigate/embeddings/__init__.py
index 18673c4e9..56bd097d6 100644
--- a/frigate/embeddings/__init__.py
+++ b/frigate/embeddings/__init__.py
@@ -28,10 +28,6 @@ logger = logging.getLogger(__name__)
 
 
 def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
-    # Only initialize embeddings if semantic search is enabled
-    if not config.semantic_search.enabled:
-        return
-
     stop_event = mp.Event()
 
     def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:

diff --git a/frigate/embeddings/maintainer.py b/frigate/embeddings/maintainer.py
index a18ca7a7f..c9b6062c9 100644
--- a/frigate/embeddings/maintainer.py
+++ b/frigate/embeddings/maintainer.py
@@ -71,11 +71,14 @@ class EmbeddingMaintainer(threading.Thread):
         super().__init__(name="embeddings_maintainer")
         self.config = config
         self.metrics = metrics
-        self.embeddings = Embeddings(config, db, metrics)
+        self.embeddings = None
 
-        # Check if we need to re-index events
-        if config.semantic_search.reindex:
-            self.embeddings.reindex()
+        if config.semantic_search.enabled:
+            self.embeddings = Embeddings(config, db, metrics)
+
+            # Check if we need to re-index events
+            if config.semantic_search.reindex:
+                self.embeddings.reindex()
 
         # create communication for updating event descriptions
         self.requestor = InterProcessRequestor()
@@ -152,30 +155,30 @@
 
         def _handle_request(topic: str, data: dict[str, any]) -> str:
             try:
-                if topic == EmbeddingsRequestEnum.embed_description.value:
-                    return serialize(
-                        self.embeddings.embed_description(
-                            data["id"], data["description"]
-                        ),
-                        pack=False,
-                    )
-                elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
-                    thumbnail = base64.b64decode(data["thumbnail"])
-                    return serialize(
-                        self.embeddings.embed_thumbnail(data["id"], thumbnail),
-                        pack=False,
-                    )
-                elif topic == EmbeddingsRequestEnum.generate_search.value:
-                    return serialize(
-                        self.embeddings.embed_description("", data, upsert=False),
-                        pack=False,
-                    )
-                else:
-                    processors = [self.realtime_processors, self.post_processors]
-                    for processor_list in processors:
-                        for processor in processor_list:
-                            resp = processor.handle_request(topic, data)
-
+                # First handle the embedding-specific topics when semantic search is enabled
+                if self.config.semantic_search.enabled:
+                    if topic == EmbeddingsRequestEnum.embed_description.value:
+                        return serialize(
+                            self.embeddings.embed_description(
+                                data["id"], data["description"]
+                            ),
+                            pack=False,
+                        )
+                    elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
+                        thumbnail = base64.b64decode(data["thumbnail"])
+                        return serialize(
+                            self.embeddings.embed_thumbnail(data["id"], thumbnail),
+                            pack=False,
+                        )
+                    elif topic == EmbeddingsRequestEnum.generate_search.value:
+                        return serialize(
+                            self.embeddings.embed_description("", data, upsert=False),
+                            pack=False,
+                        )
+
+                processors = [self.realtime_processors, self.post_processors]
+                for processor_list in processors:
+                    for processor in processor_list:
+                        resp = processor.handle_request(topic, data)
                         if resp is not None:
                             return resp
             except Exception as e:
@@ -432,6 +435,9 @@
 
     def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
         """Embed the thumbnail for an event."""
+        if not self.config.semantic_search.enabled:
+            return
+
         self.embeddings.embed_thumbnail(event_id, thumbnail)
 
     def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
@@ -457,7 +463,8 @@
         )
 
         # Embed the description
-        self.embeddings.embed_description(event.id, description)
+        if self.config.semantic_search.enabled:
+            self.embeddings.embed_description(event.id, description)
 
         logger.debug(
             "Generated description for %s (%d images): %s",

diff --git a/frigate/stats/util.py b/frigate/stats/util.py
index 3d836868e..287c384cd 100644
--- a/frigate/stats/util.py
+++ b/frigate/stats/util.py
@@ -282,16 +282,24 @@ def stats_snapshot(
     }
     stats["detection_fps"] = round(total_detection_fps, 2)
 
-    if config.semantic_search.enabled:
-        embeddings_metrics = stats_tracking["embeddings_metrics"]
-        stats["embeddings"] = {
-            "image_embedding_speed": round(
-                embeddings_metrics.image_embeddings_fps.value * 1000, 2
-            ),
-            "text_embedding_speed": round(
-                embeddings_metrics.text_embeddings_sps.value * 1000, 2
-            ),
-        }
+    stats["embeddings"] = {}
+
+    # Get metrics if available
+    embeddings_metrics = stats_tracking.get("embeddings_metrics")
+
+    if embeddings_metrics:
+        # Add metrics based on what's enabled
+        if config.semantic_search.enabled:
+            stats["embeddings"].update(
+                {
+                    "image_embedding_speed": round(
+                        embeddings_metrics.image_embeddings_fps.value * 1000, 2
+                    ),
+                    "text_embedding_speed": round(
+                        embeddings_metrics.text_embeddings_sps.value * 1000, 2
+                    ),
+                }
+            )
 
         if config.face_recognition.enabled:
             stats["embeddings"]["face_recognition_speed"] = round(
@@ -302,6 +310,7 @@
             stats["embeddings"]["plate_recognition_speed"] = round(
                 embeddings_metrics.alpr_pps.value * 1000, 2
             )
+
         if "license_plate" not in config.objects.all_objects:
             stats["embeddings"]["yolov9_plate_detection_speed"] = round(
                 embeddings_metrics.yolov9_lpr_fps.value * 1000, 2

diff --git a/web/src/pages/System.tsx b/web/src/pages/System.tsx
index 491149be2..05eed5b3e 100644
--- a/web/src/pages/System.tsx
+++ b/web/src/pages/System.tsx
@@ -28,7 +28,11 @@ function System() {
   const metrics = useMemo(() => {
     const metrics = [...allMetrics];
 
-    if (!config?.semantic_search.enabled) {
+    if (
+      !config?.semantic_search.enabled &&
+      !config?.lpr.enabled &&
+      !config?.face_recognition.enabled
+    ) {
       const index = metrics.indexOf("features");
       metrics.splice(index, 1);
     }

diff --git a/web/src/types/frigateConfig.ts b/web/src/types/frigateConfig.ts
index d021fde0f..4ec4de853 100644
--- a/web/src/types/frigateConfig.ts
+++ b/web/src/types/frigateConfig.ts
@@ -363,6 +363,10 @@ export interface FrigateConfig {
 
   camera_groups: { [groupName: string]: CameraGroupConfig };
 
+  lpr: {
+    enabled: boolean;
+  };
+
   logger: {
     default: string;
     logs: Record<string, string>;

From 06d6e21de813f3cd8665a82d004cd9d0138a2423 Mon Sep 17 00:00:00 2001
From: Nicolas Mowen
Date: Fri, 28 Feb 2025 13:48:08 -0700
Subject: [PATCH 49/51] Fix cuda targetarch (#16869)

---
 docker/tensorrt/Dockerfile.base | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base
index 5ae018773..4305f1d74 100644
--- a/docker/tensorrt/Dockerfile.base
+++ b/docker/tensorrt/Dockerfile.base
@@ -8,6 +8,7 @@ ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
 
 # Build TensorRT-specific library
 FROM ${TRT_BASE} AS trt-deps
+ARG TARGETARCH
 ARG COMPUTE_LEVEL
 
 RUN apt-get update \
@@ -25,7 +26,6 @@ RUN if [ "$TARGETARCH" = "amd64" ]; then \
 
 # Frigate w/ TensorRT Support as separate image
 FROM deps AS tensorrt-base
-ARG TARGETARCH
 
 #Disable S6 Global timeout
 ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
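Editorial note on the frigate/app.py changes earlier in this series: the same enablement test now appears in both init_embeddings_manager() and init_embeddings_client(). The sketch below condenses it into one predicate as a reading aid; the helper name is hypothetical and does not exist in the patch.

```python
from frigate.config import FrigateConfig


def embeddings_required(config: FrigateConfig) -> bool:
    """Hypothetical reading aid: mirrors the gating now done in both
    init_embeddings_manager() and init_embeddings_client()."""
    # GenAI only counts when at least one enabled camera has it turned on
    genai_cameras = [
        c for c in config.cameras.values() if c.enabled and c.genai.enabled
    ]
    return (
        config.semantic_search.enabled
        or bool(genai_cameras)
        or config.lpr.enabled
        or config.face_recognition.enabled
    )
```

Folding the check into a shared helper like this would keep the two call sites from drifting apart as more features start depending on the embeddings process.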
From 458134de5d02002084e65c0b5301a3450dbac9b8 Mon Sep 17 00:00:00 2001
From: Martin Weinelt
Date: Sat, 1 Mar 2025 05:35:09 +0100
Subject: [PATCH 50/51] Reuse constants (#16874)

---
 frigate/api/media.py                      |  7 +++++--
 frigate/api/preview.py                    |  4 ++--
 frigate/const.py                          |  1 +
 frigate/data_processing/real_time/face.py |  6 ++++--
 frigate/detectors/detector_config.py      |  4 ++--
 frigate/detectors/plugins/hailo8l.py      |  3 ++-
 frigate/detectors/plugins/openvino.py     |  7 +++++--
 frigate/detectors/plugins/rknn.py         |  3 ++-
 frigate/detectors/plugins/rocm.py         |  3 ++-
 frigate/embeddings/onnx/runner.py         |  4 +++-
 frigate/output/birdseye.py                |  6 ++++--
 frigate/test/http_api/base_http_test.py   |  7 ++++---
 frigate/test/test_config.py               |  4 ++--
 frigate/test/test_http.py                 |  7 ++++---
 frigate/util/config.py                    |  2 +-
 frigate/util/model.py                     | 17 ++++++++++++-----
 16 files changed, 55 insertions(+), 30 deletions(-)

diff --git a/frigate/api/media.py b/frigate/api/media.py
index 74e9e7aaa..e3f74ea98 100644
--- a/frigate/api/media.py
+++ b/frigate/api/media.py
@@ -31,6 +31,7 @@ from frigate.config import FrigateConfig
 from frigate.const import (
     CACHE_DIR,
     CLIPS_DIR,
+    INSTALL_DIR,
     MAX_SEGMENT_DURATION,
     PREVIEW_FRAME_TYPE,
     RECORD_DIR,
@@ -155,7 +156,9 @@ def latest_frame(
         frame_processor.get_current_frame_time(camera_name) + retry_interval
     ):
         if request.app.camera_error_image is None:
-            error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg")
+            error_image = glob.glob(
+                os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
+            )
 
             if len(error_image) > 0:
                 request.app.camera_error_image = cv2.imread(
@@ -550,7 +553,7 @@ def recording_clip(
     )
 
     file_name = sanitize_filename(f"playlist_{camera_name}_{start_ts}-{end_ts}.txt")
-    file_path = f"/tmp/cache/{file_name}"
+    file_path = os.path.join(CACHE_DIR, file_name)
 
     with open(file_path, "w") as file:
         clip: Recordings
         for clip in recordings:

diff --git a/frigate/api/preview.py b/frigate/api/preview.py
index d14a15ff1..2db2326ab 100644
--- a/frigate/api/preview.py
+++ b/frigate/api/preview.py
@@ -9,7 +9,7 @@ from fastapi import APIRouter
 from fastapi.responses import JSONResponse
 
 from frigate.api.defs.tags import Tags
-from frigate.const import CACHE_DIR, PREVIEW_FRAME_TYPE
+from frigate.const import BASE_DIR, CACHE_DIR, PREVIEW_FRAME_TYPE
 from frigate.models import Previews
 
 logger = logging.getLogger(__name__)
@@ -52,7 +52,7 @@ def preview_ts(camera_name: str, start_ts: float, end_ts: float):
         clips.append(
             {
                 "camera": preview["camera"],
-                "src": preview["path"].replace("/media/frigate", ""),
+                "src": preview["path"].replace(BASE_DIR, ""),
                 "type": "video/mp4",
                 "start": preview["start_time"],
                 "end": preview["end_time"],

diff --git a/frigate/const.py b/frigate/const.py
index 866fa3d29..ffd1ca406 100644
--- a/frigate/const.py
+++ b/frigate/const.py
@@ -1,6 +1,7 @@
 import os
 import re
 
+INSTALL_DIR = "/opt/frigate"
 CONFIG_DIR = "/config"
 DEFAULT_DB_PATH = f"{CONFIG_DIR}/frigate.db"
 MODEL_CACHE_DIR = f"{CONFIG_DIR}/model_cache"

diff --git a/frigate/data_processing/real_time/face.py b/frigate/data_processing/real_time/face.py
index d2b677653..e7cf622e9 100644
--- a/frigate/data_processing/real_time/face.py
+++ b/frigate/data_processing/real_time/face.py
@@ -76,14 +76,16 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
 
     def __build_detector(self) -> None:
         self.face_detector = cv2.FaceDetectorYN.create(
-            "/config/model_cache/facedet/facedet.onnx",
+            os.path.join(MODEL_CACHE_DIR, "facedet/facedet.onnx"),
             config="",
             input_size=(320, 320),
             score_threshold=0.8,
             nms_threshold=0.3,
         )
         self.landmark_detector = cv2.face.createFacemarkLBF()
-        self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
+        self.landmark_detector.loadModel(
+            os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml")
+        )
 
     def __build_classifier(self) -> None:
         if not self.landmark_detector:

diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py
index 16599b141..fceab5a19 100644
--- a/frigate/detectors/detector_config.py
+++ b/frigate/detectors/detector_config.py
@@ -9,7 +9,7 @@ import requests
 from pydantic import BaseModel, ConfigDict, Field
 from pydantic.fields import PrivateAttr
 
-from frigate.const import DEFAULT_ATTRIBUTE_LABEL_MAP
+from frigate.const import DEFAULT_ATTRIBUTE_LABEL_MAP, MODEL_CACHE_DIR
 from frigate.plus import PlusApi
 from frigate.util.builtin import generate_color_palette, load_labels
 
@@ -123,7 +123,7 @@ class ModelConfig(BaseModel):
             return
 
         model_id = self.path[7:]
-        self.path = f"/config/model_cache/{model_id}"
+        self.path = os.path.join(MODEL_CACHE_DIR, model_id)
         model_info_path = f"{self.path}.json"
 
         # download the model if it doesn't exist

diff --git a/frigate/detectors/plugins/hailo8l.py b/frigate/detectors/plugins/hailo8l.py
index b66d78bd6..69e86bc5b 100644
--- a/frigate/detectors/plugins/hailo8l.py
+++ b/frigate/detectors/plugins/hailo8l.py
@@ -22,6 +22,7 @@ except ModuleNotFoundError:
 from pydantic import BaseModel, Field
 from typing_extensions import Literal
 
+from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig
 
@@ -57,7 +58,7 @@ class HailoDetector(DetectionApi):
         self.h8l_tensor_format = detector_config.model.input_tensor
         self.h8l_pixel_format = detector_config.model.input_pixel_format
         self.model_url = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.11.0/hailo8l/ssd_mobilenet_v1.hef"
-        self.cache_dir = "/config/model_cache/h8l_cache"
+        self.cache_dir = os.path.join(MODEL_CACHE_DIR, "h8l_cache")
         self.expected_model_filename = "ssd_mobilenet_v1.hef"
         output_type = "FLOAT32"

diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py
index 27be6b9bd..0f0b99a1f 100644
--- a/frigate/detectors/plugins/openvino.py
+++ b/frigate/detectors/plugins/openvino.py
@@ -7,6 +7,7 @@ import openvino.properties as props
 from pydantic import Field
 from typing_extensions import Literal
 
+from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
 from frigate.util.model import post_process_yolov9
@@ -41,8 +42,10 @@ class OvDetector(DetectionApi):
             logger.error(f"OpenVino model file {detector_config.model.path} not found.")
             raise FileNotFoundError
 
-        os.makedirs("/config/model_cache/openvino", exist_ok=True)
-        self.ov_core.set_property({props.cache_dir: "/config/model_cache/openvino"})
+        os.makedirs(os.path.join(MODEL_CACHE_DIR, "openvino"), exist_ok=True)
+        self.ov_core.set_property(
+            {props.cache_dir: os.path.join(MODEL_CACHE_DIR, "openvino")}
+        )
         self.interpreter = self.ov_core.compile_model(
             model=detector_config.model.path, device_name=detector_config.device
         )

diff --git a/frigate/detectors/plugins/rknn.py b/frigate/detectors/plugins/rknn.py
index bfd7866e6..407c93917 100644
--- a/frigate/detectors/plugins/rknn.py
+++ b/frigate/detectors/plugins/rknn.py
@@ -6,6 +6,7 @@ from typing import Literal
 
 from pydantic import Field
 
+from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
 
@@ -17,7 +18,7 @@ supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
 
 supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}
 
-model_cache_dir = "/config/model_cache/rknn_cache/"
+model_cache_dir = os.path.join(MODEL_CACHE_DIR, "rknn_cache/")
 
 
 class RknnDetectorConfig(BaseDetectorConfig):

diff --git a/frigate/detectors/plugins/rocm.py b/frigate/detectors/plugins/rocm.py
index 60118d129..7c87edb50 100644
--- a/frigate/detectors/plugins/rocm.py
+++ b/frigate/detectors/plugins/rocm.py
@@ -9,6 +9,7 @@ import numpy as np
 from pydantic import Field
 from typing_extensions import Literal
 
+from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import (
     BaseDetectorConfig,
@@ -116,7 +117,7 @@ class ROCmDetector(DetectionApi):
 
             logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")
 
-            os.makedirs("/config/model_cache/rocm", exist_ok=True)
+            os.makedirs(os.path.join(MODEL_CACHE_DIR, "rocm"), exist_ok=True)
             migraphx.save(self.model, mxr_path)
 
         logger.info("AMD/ROCm: model loaded")

diff --git a/frigate/embeddings/onnx/runner.py b/frigate/embeddings/onnx/runner.py
index c785c28f1..7badae325 100644
--- a/frigate/embeddings/onnx/runner.py
+++ b/frigate/embeddings/onnx/runner.py
@@ -1,10 +1,12 @@
 """Convenience runner for onnx models."""
 
 import logging
+import os.path
 from typing import Any
 
 import onnxruntime as ort
 
+from frigate.const import MODEL_CACHE_DIR
 from frigate.util.model import get_ort_providers
 
 try:
@@ -32,7 +34,7 @@ class ONNXModelRunner:
                 self.type = "ov"
                 self.ov = ov.Core()
                 self.ov.set_property(
-                    {ov.properties.cache_dir: "/config/model_cache/openvino"}
+                    {ov.properties.cache_dir: os.path.join(MODEL_CACHE_DIR, "openvino")}
                 )
                 self.interpreter = self.ov.compile_model(
                     model=model_path, device_name=device
                 )

diff --git a/frigate/output/birdseye.py b/frigate/output/birdseye.py
index 00f17c8f4..8331eb64a 100644
--- a/frigate/output/birdseye.py
+++ b/frigate/output/birdseye.py
@@ -16,7 +16,7 @@ import numpy as np
 
 from frigate.comms.config_updater import ConfigSubscriber
 from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
-from frigate.const import BASE_DIR, BIRDSEYE_PIPE
+from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR
 from frigate.util.image import (
     SharedMemoryFrameManager,
     copy_yuv_to_position,
@@ -297,7 +297,9 @@ class BirdsEyeFrameManager:
             birdseye_logo = cv2.imread(custom_logo_files[0], cv2.IMREAD_UNCHANGED)
 
         if birdseye_logo is None:
-            logo_files = glob.glob("/opt/frigate/frigate/images/birdseye.png")
+            logo_files = glob.glob(
+                os.path.join(INSTALL_DIR, "frigate/images/birdseye.png")
+            )
 
             if len(logo_files) > 0:
                 birdseye_logo = cv2.imread(logo_files[0], cv2.IMREAD_UNCHANGED)

diff --git a/frigate/test/http_api/base_http_test.py b/frigate/test/http_api/base_http_test.py
index c16ab9926..f5a0aca3c 100644
--- a/frigate/test/http_api/base_http_test.py
+++ b/frigate/test/http_api/base_http_test.py
@@ -10,6 +10,7 @@ from pydantic import Json
 
 from frigate.api.fastapi_app import create_fastapi_app
 from frigate.config import FrigateConfig
+from frigate.const import BASE_DIR, CACHE_DIR
 from frigate.models import Event, Recordings, ReviewSegment
 from frigate.review.types import SeverityEnum
 from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
@@ -73,19 +74,19 @@ class BaseTestHttp(unittest.TestCase):
                     "total": 67.1,
                     "used": 16.6,
                 },
-                "/media/frigate/clips": {
+                os.path.join(BASE_DIR, "clips"): {
                     "free": 42429.9,
                     "mount_type": "ext4",
                     "total": 244529.7,
                     "used": 189607.0,
                 },
-                "/media/frigate/recordings": {
+                os.path.join(BASE_DIR, "recordings"): {
                     "free": 0.2,
                     "mount_type": "ext4",
                     "total": 8.0,
                     "used": 7.8,
                 },
-                "/tmp/cache": {
+                CACHE_DIR: {
                    "free": 976.8,
                    "mount_type": "tmpfs",
                    "total": 1000.0,
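A note on the pattern this patch applies throughout: hard-coded absolute paths are replaced with joins against the constants defined in frigate/const.py, so a relocated install or cache only needs const.py updated. A minimal sketch of the equivalence, using only the constant values visible in the diffs above:

```python
import os

from frigate.const import INSTALL_DIR, MODEL_CACHE_DIR  # "/opt/frigate", "/config/model_cache"

# Before: literal paths scattered through the codebase
error_image_old = "/opt/frigate/frigate/images/camera-error.jpg"
facedet_old = "/config/model_cache/facedet/facedet.onnx"

# After: one source of truth via the shared constants
error_image_new = os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
facedet_new = os.path.join(MODEL_CACHE_DIR, "facedet/facedet.onnx")

# The rewrite is behavior-preserving: the joined paths are identical
assert error_image_new == error_image_old
assert facedet_new == facedet_old
```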
diff --git a/frigate/test/test_config.py b/frigate/test/test_config.py
index e6cb1274e..5a3deefda 100644
--- a/frigate/test/test_config.py
+++ b/frigate/test/test_config.py
@@ -854,9 +854,9 @@ class TestConfig(unittest.TestCase):
         assert frigate_config.model.merged_labelmap[0] == "person"
 
     def test_plus_labelmap(self):
-        with open("/config/model_cache/test", "w") as f:
+        with open(os.path.join(MODEL_CACHE_DIR, "test"), "w") as f:
             json.dump(self.plus_model_info, f)
-        with open("/config/model_cache/test.json", "w") as f:
+        with open(os.path.join(MODEL_CACHE_DIR, "test.json"), "w") as f:
             json.dump(self.plus_model_info, f)
 
         config = {

diff --git a/frigate/test/test_http.py b/frigate/test/test_http.py
index 46de1307f..0238c766c 100644
--- a/frigate/test/test_http.py
+++ b/frigate/test/test_http.py
@@ -11,6 +11,7 @@ from playhouse.sqliteq import SqliteQueueDatabase
 
 from frigate.api.fastapi_app import create_fastapi_app
 from frigate.config import FrigateConfig
+from frigate.const import BASE_DIR, CACHE_DIR
 from frigate.models import Event, Recordings, Timeline
 from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
 
@@ -74,19 +75,19 @@ class TestHttp(unittest.TestCase):
                     "total": 67.1,
                     "used": 16.6,
                 },
-                "/media/frigate/clips": {
+                os.path.join(BASE_DIR, "clips"): {
                     "free": 42429.9,
                     "mount_type": "ext4",
                     "total": 244529.7,
                     "used": 189607.0,
                 },
-                "/media/frigate/recordings": {
+                os.path.join(BASE_DIR, "recordings"): {
                     "free": 0.2,
                     "mount_type": "ext4",
                     "total": 8.0,
                     "used": 7.8,
                 },
-                "/tmp/cache": {
+                CACHE_DIR: {
                    "free": 976.8,
                    "mount_type": "tmpfs",
                    "total": 1000.0,

diff --git a/frigate/util/config.py b/frigate/util/config.py
index 5b40fe37b..1ed82f802 100644
--- a/frigate/util/config.py
+++ b/frigate/util/config.py
@@ -14,7 +14,7 @@ from frigate.util.services import get_video_properties
 logger = logging.getLogger(__name__)
 
 CURRENT_CONFIG_VERSION = "0.16-0"
-DEFAULT_CONFIG_FILE = "/config/config.yml"
+DEFAULT_CONFIG_FILE = os.path.join(CONFIG_DIR, "config.yml")
 
 
 def find_config_file() -> str:

diff --git a/frigate/util/model.py b/frigate/util/model.py
index 0428a42ff..d96493ee6 100644
--- a/frigate/util/model.py
+++ b/frigate/util/model.py
@@ -7,6 +7,8 @@ import cv2
 import numpy as np
 import onnxruntime as ort
 
+from frigate.const import MODEL_CACHE_DIR
+
 logger = logging.getLogger(__name__)
 
@@ -105,7 +107,8 @@ def get_ort_providers(
             # so it is not enabled by default
             if device == "Tensorrt":
                 os.makedirs(
-                    "/config/model_cache/tensorrt/ort/trt-engines", exist_ok=True
+                    os.path.join(MODEL_CACHE_DIR, "tensorrt/ort/trt-engines"),
+                    exist_ok=True,
                 )
                 device_id = 0 if not device.isdigit() else int(device)
                 providers.append(provider)
@@ -116,19 +119,23 @@
                         and os.environ.get("USE_FP_16", "True") != "False",
                         "trt_timing_cache_enable": True,
                         "trt_engine_cache_enable": True,
-                        "trt_timing_cache_path": "/config/model_cache/tensorrt/ort",
-                        "trt_engine_cache_path": "/config/model_cache/tensorrt/ort/trt-engines",
+                        "trt_timing_cache_path": os.path.join(
+                            MODEL_CACHE_DIR, "tensorrt/ort"
+                        ),
+                        "trt_engine_cache_path": os.path.join(
+                            MODEL_CACHE_DIR, "tensorrt/ort/trt-engines"
+                        ),
                     }
                 )
             else:
                 continue
         elif provider == "OpenVINOExecutionProvider":
-            os.makedirs("/config/model_cache/openvino/ort", exist_ok=True)
+            os.makedirs(os.path.join(MODEL_CACHE_DIR, "openvino/ort"), exist_ok=True)
             providers.append(provider)
             options.append(
                 {
                     "arena_extend_strategy": "kSameAsRequested",
-                    "cache_dir": "/config/model_cache/openvino/ort",
+                    "cache_dir": os.path.join(MODEL_CACHE_DIR, "openvino/ort"),
                     "device_type": device,
                 }
             )
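Before the final documentation patch below, which reworks the D-FINE export instructions, a small sanity check may help reviewers: after running the export, the resulting ONNX graph should report a fixed batch size of 1. A sketch assuming onnxruntime is installed and the exported file is named model.onnx (a placeholder):

```python
import onnxruntime as ort

# Sketch: confirm an exported D-FINE model really was exported with batch
# size 1, per the export steps documented in the patch below.
session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])

for inp in session.get_inputs():
    print(inp.name, inp.shape)
    # Expect a shape like [1, 3, 640, 640] for the image input; a leading -1
    # or symbolic dimension means the batch size was not fixed before export.
```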
+ +::: + ### Downloading YOLO-NAS Model You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).