* Only check if an object is stationary to avoid mqtt snapshot

* docs heading tweak

* Add more API descriptions

* Add missing lib for new rocm onnxruntime whl

* Update inference times to reflect better rocm performance

* Cleanup resetting tracked object activity

* remove print

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
This commit is contained in:
Nicolas Mowen 2025-05-15 16:13:18 -06:00 committed by GitHub
parent 3538a1df3d
commit f48356cbee
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 46 additions and 35 deletions

View File

@ -22,7 +22,7 @@ RUN apt update && \
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && \
cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocsolver*.so* librocfft*.so* librocprofiler*.so* libroctx*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/ && \
mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib && \
cp -dpr migraphx/lib/* /opt/rocm-dist/opt/rocm-$ROCM/lib/migraphx/lib
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm

View File

@ -172,6 +172,6 @@ Face recognition does not run on the recording stream, this would be suboptimal
By default iOS devices will use HEIC (High Efficiency Image Container) for images, but this format is not supported for uploads. Choosing `large` as the format instead of `original` will use JPG which will work correctly.
## How can I delete the face database and start over?
### How can I delete the face database and start over?
Frigate does not store anything in its database related to face recognition. You can simply delete all of your faces through the Frigate UI or remove the contents of the `/media/frigate/clips/faces` directory.

View File

@ -145,7 +145,7 @@ With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detec
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
| --------- | --------------------- | ------------------------- |
| AMD 780M | ~ 14 ms | 320: ~ 30 ms 640: ~ 60 ms |
| AMD 780M | ~ 14 ms | 320: ~ 25 ms 640: ~ 50 ms |
| AMD 8700G | | 320: ~ 20 ms 640: ~ 40 ms |
## Community Supported Detectors

View File

@ -640,7 +640,10 @@ def recording_clip(
)
@router.get("/vod/{camera_name}/start/{start_ts}/end/{end_ts}")
@router.get(
"/vod/{camera_name}/start/{start_ts}/end/{end_ts}",
description="Returns an HLS playlist for the specified timestamp-range on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
def vod_ts(camera_name: str, start_ts: float, end_ts: float):
recordings = (
Recordings.select(
@ -714,7 +717,10 @@ def vod_ts(camera_name: str, start_ts: float, end_ts: float):
)
@router.get("/vod/{year_month}/{day}/{hour}/{camera_name}")
@router.get(
"/vod/{year_month}/{day}/{hour}/{camera_name}",
description="Returns an HLS playlist for the specified date-time on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str):
"""VOD for specific hour. Uses the default timezone (UTC)."""
return vod_hour(
@ -722,7 +728,10 @@ def vod_hour_no_timezone(year_month: str, day: int, hour: int, camera_name: str)
)
@router.get("/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}")
@router.get(
"/vod/{year_month}/{day}/{hour}/{camera_name}/{tz_name}",
description="Returns an HLS playlist for the specified date-time (with timezone) on the specified camera. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: str):
parts = year_month.split("-")
start_date = (
@ -736,7 +745,10 @@ def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: st
return vod_ts(camera_name, start_ts, end_ts)
@router.get("/vod/event/{event_id}")
@router.get(
"/vod/event/{event_id}",
description="Returns an HLS playlist for the specified object. Append /master.m3u8 or /index.m3u8 for HLS playback.",
)
def vod_event(event_id: str):
try:
event: Event = Event.get(Event.id == event_id)

View File

@ -1570,10 +1570,26 @@ class LicensePlateProcessingMixin:
def handle_request(self, topic, request_data) -> dict[str, Any] | None:
return
def expire_object(self, object_id: str, camera: str):
def lpr_expire(self, object_id: str, camera: str):
    """Expire a tracked license-plate object for a camera.

    Drops the object from the detected-plates map and from the camera's
    current-car list; when that list becomes empty, publishes a
    tracked_object_update clearing the LPR name/plate for the camera so
    subscribers reset their displayed plate state.
    """
    if object_id in self.detected_license_plates:
        self.detected_license_plates.pop(object_id)

    if object_id in self.camera_current_cars.get(camera, []):
        self.camera_current_cars[camera].remove(object_id)

        # Last car left this camera: broadcast a cleared LPR payload.
        # NOTE(review): nesting inferred — indentation was stripped by
        # extraction; the len() check presumably sits inside the remove
        # branch so camera is a known key. Confirm against upstream.
        if len(self.camera_current_cars[camera]) == 0:
            self.requestor.send_data(
                "tracked_object_update",
                json.dumps(
                    {
                        "type": TrackedObjectUpdateTypesEnum.lpr,
                        "name": None,
                        "plate": None,
                        "camera": camera,
                    }
                ),
            )
class CTCDecoder:
"""

View File

@ -293,10 +293,11 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
if camera not in self.camera_current_people:
self.camera_current_people[camera] = []
self.camera_current_people[camera].append(id)
self.person_face_history[id].append(
(sub_label, score, face_frame.shape[0] * face_frame.shape[1])
)
self.camera_current_people[camera].append(id)
(weighted_sub_label, weighted_score) = self.weighted_average(
self.person_face_history[id]
)

View File

@ -1,6 +1,5 @@
"""Handle processing images for face detection and recognition."""
import json
import logging
from typing import Any
@ -15,7 +14,6 @@ from frigate.data_processing.common.license_plate.mixin import (
from frigate.data_processing.common.license_plate.model import (
LicensePlateModelRunner,
)
from frigate.types import TrackedObjectUpdateTypesEnum
from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi
@ -55,21 +53,5 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
return
def expire_object(self, object_id: str, camera: str):
if object_id in self.detected_license_plates:
self.detected_license_plates.pop(object_id)
if object_id in self.camera_current_cars.get(camera, []):
self.camera_current_cars[camera].remove(object_id)
if len(self.camera_current_cars[camera]) == 0:
self.requestor.send_data(
"tracked_object_update",
json.dumps(
{
"type": TrackedObjectUpdateTypesEnum.lpr,
"name": None,
"plate": None,
"camera": camera,
}
),
)
"""Expire lpr objects."""
self.lpr_expire(object_id, camera)

View File

@ -249,7 +249,7 @@ class TrackedObjectProcessor(threading.Thread):
def should_mqtt_snapshot(self, camera, obj: TrackedObject):
# object never changed position
if obj.obj_data["position_changes"] == 0:
if obj.is_stationary():
return False
# if there are required zones and there is no overlap

View File

@ -384,16 +384,16 @@ class TrackedObject:
return event
def is_active(self):
def is_active(self) -> bool:
    """Return True while the object is not considered stationary."""
    return not self.is_stationary()
def is_stationary(self):
def is_stationary(self) -> bool:
    """Return True once the motionless frame count exceeds the camera's
    configured stationary threshold."""
    return (
        self.obj_data["motionless_count"]
        > self.camera_config.detect.stationary.threshold
    )
def get_thumbnail(self, ext: str):
def get_thumbnail(self, ext: str) -> bytes | None:
img_bytes = self.get_img_bytes(
ext, timestamp=False, bounding_box=False, crop=True, height=175
)
@ -404,7 +404,7 @@ class TrackedObject:
_, img = cv2.imencode(f".{ext}", np.zeros((175, 175, 3), np.uint8))
return img.tobytes()
def get_clean_png(self):
def get_clean_png(self) -> bytes | None:
if self.thumbnail_data is None:
return None
@ -433,7 +433,7 @@ class TrackedObject:
crop=False,
height: int | None = None,
quality: int | None = None,
):
) -> bytes | None:
if self.thumbnail_data is None:
return None