Merge branch 'dev' into addon_config
Commit a11eca74ce
@ -55,7 +55,7 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
|
||||
FROM scratch AS go2rtc
|
||||
ARG TARGETARCH
|
||||
WORKDIR /rootfs/usr/local/go2rtc/bin
|
||||
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.2/go2rtc_linux_${TARGETARCH}" go2rtc
|
||||
ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.9.9/go2rtc_linux_${TARGETARCH}" go2rtc
|
||||
|
||||
FROM wget AS tempio
|
||||
ARG TARGETARCH
|
||||
|
@ -10,7 +10,7 @@ slowapi == 0.1.*
|
||||
imutils == 0.5.*
|
||||
joserfc == 1.0.*
|
||||
pathvalidate == 3.2.*
|
||||
markupsafe == 2.1.*
|
||||
markupsafe == 3.0.*
|
||||
python-multipart == 0.0.12
|
||||
# General
|
||||
mypy == 1.6.1
|
||||
|
@ -69,10 +69,6 @@ elif go2rtc_config["log"].get("format") is None:
|
||||
if go2rtc_config.get("webrtc") is None:
|
||||
go2rtc_config["webrtc"] = {}
|
||||
|
||||
# go2rtc should listen on 8555 tcp & udp by default
|
||||
if go2rtc_config["webrtc"].get("listen") is None:
|
||||
go2rtc_config["webrtc"]["listen"] = ":8555"
|
||||
|
||||
if go2rtc_config["webrtc"].get("candidates") is None:
|
||||
default_candidates = []
|
||||
# use internal candidate if it was discovered when running through the add-on
|
||||
@ -84,33 +80,15 @@ if go2rtc_config["webrtc"].get("candidates") is None:
|
||||
|
||||
go2rtc_config["webrtc"]["candidates"] = default_candidates
|
||||
|
||||
# This prevents WebRTC from attempting to establish a connection to the internal
|
||||
# docker IPs which are not accessible from outside the container itself and just
|
||||
# wastes time during negotiation. Note that this is only necessary because
|
||||
# Frigate container doesn't run in host network mode.
|
||||
if go2rtc_config["webrtc"].get("filter") is None:
|
||||
go2rtc_config["webrtc"]["filter"] = {"candidates": []}
|
||||
elif go2rtc_config["webrtc"]["filter"].get("candidates") is None:
|
||||
go2rtc_config["webrtc"]["filter"]["candidates"] = []
|
||||
if go2rtc_config.get("rtsp", {}).get("username") is not None:
|
||||
go2rtc_config["rtsp"]["username"] = go2rtc_config["rtsp"]["username"].format(
|
||||
**FRIGATE_ENV_VARS
|
||||
)
|
||||
|
||||
# sets default RTSP response to be equivalent to ?video=h264,h265&audio=aac
|
||||
# this means user does not need to specify audio codec when using restream
|
||||
# as source for frigate and the integration supports HLS playback
|
||||
if go2rtc_config.get("rtsp") is None:
|
||||
go2rtc_config["rtsp"] = {"default_query": "mp4"}
|
||||
else:
|
||||
if go2rtc_config["rtsp"].get("default_query") is None:
|
||||
go2rtc_config["rtsp"]["default_query"] = "mp4"
|
||||
|
||||
if go2rtc_config["rtsp"].get("username") is not None:
|
||||
go2rtc_config["rtsp"]["username"] = go2rtc_config["rtsp"]["username"].format(
|
||||
**FRIGATE_ENV_VARS
|
||||
)
|
||||
|
||||
if go2rtc_config["rtsp"].get("password") is not None:
|
||||
go2rtc_config["rtsp"]["password"] = go2rtc_config["rtsp"]["password"].format(
|
||||
**FRIGATE_ENV_VARS
|
||||
)
|
||||
if go2rtc_config.get("rtsp", {}).get("password") is not None:
|
||||
go2rtc_config["rtsp"]["password"] = go2rtc_config["rtsp"]["password"].format(
|
||||
**FRIGATE_ENV_VARS
|
||||
)
|
||||
|
||||
# ensure ffmpeg path is set correctly
|
||||
path = config.get("ffmpeg", {}).get("path", "default")
|
||||
|
@ -86,6 +86,9 @@ RUN apt-get -qq update \
|
||||
libx264-163 libx265-199 libegl1 \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Fixes "Error loading shared libs"
|
||||
RUN mkdir -p /etc/ld.so.conf.d && echo /usr/lib/ffmpeg/jetson/lib/ > /etc/ld.so.conf.d/ffmpeg.conf
|
||||
|
||||
COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
|
||||
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
|
||||
--mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \
|
||||
|
@ -186,7 +186,7 @@ To do this:
|
||||
|
||||
### Custom go2rtc version
|
||||
|
||||
Frigate currently includes go2rtc v1.9.2, there may be certain cases where you want to run a different version of go2rtc.
|
||||
Frigate currently includes go2rtc v1.9.9, but there may be certain cases where you want to run a different version of go2rtc.
|
||||
|
||||
To do this:
|
||||
|
||||
|
@ -97,15 +97,35 @@ python3 -c 'import secrets; print(secrets.token_hex(64))'
|
||||
|
||||
### Header mapping
|
||||
|
||||
If you have disabled Frigate's authentication and your proxy supports passing a header with the authenticated username, you can use the `header_map` config to specify the header name so it is passed to Frigate. For example, the following will map the `X-Forwarded-User` value. Header names are not case sensitive.
|
||||
If you have disabled Frigate's authentication and your proxy supports passing headers with authenticated usernames and/or roles, you can use the `header_map` config to specify the header names so they are passed to Frigate. For example, the following will map the `X-Forwarded-User` and `X-Forwarded-Role` values. Header names are not case sensitive.
|
||||
|
||||
```yaml
|
||||
proxy:
|
||||
...
|
||||
header_map:
|
||||
user: x-forwarded-user
|
||||
role: x-forwarded-role
|
||||
```
|
||||
|
||||
Frigate supports both `admin` and `viewer` roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization.
|
||||
|
||||
#### Port Considerations
|
||||
|
||||
**Authenticated Port (8971)**
|
||||
|
||||
- Header mapping is **fully supported**.
|
||||
- The `remote-role` header determines the user’s privileges:
|
||||
- **admin** → Full access (user management, configuration changes).
|
||||
- **viewer** → Read-only access.
|
||||
- Ensure your **proxy sends both user and role headers** for proper role enforcement.
|
||||
|
||||
**Unauthenticated Port (5000)**
|
||||
|
||||
- Headers are **ignored** for role enforcement.
|
||||
- All requests are treated as **anonymous**.
|
||||
- The `remote-role` value is **overridden** to **admin-level access**.
|
||||
- This design ensures **unauthenticated internal use** within a trusted network.
|
||||
|
||||
Note that only the following headers are permitted by default:
|
||||
|
||||
```
|
||||
@ -126,8 +146,6 @@ X-authentik-uid
|
||||
|
||||
If you would like to add more options, you can overwrite the default file with a docker bind mount at `/usr/local/nginx/conf/proxy_trusted_headers.conf`. Reference the source code for the default file formatting.
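A minimal sketch of such a bind mount in a docker-compose file might look like this (the local filename and the rest of the service definition are placeholders; adapt them to your setup):

```yaml
services:
  frigate:
    # ...rest of your existing Frigate service definition...
    volumes:
      # hypothetical local file listing the additional trusted header names
      - ./proxy_trusted_headers.conf:/usr/local/nginx/conf/proxy_trusted_headers.conf:ro
```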
|
||||
|
||||
Future versions of Frigate may leverage group and role headers for authorization in Frigate as well.
|
||||
|
||||
### Login page redirection
|
||||
|
||||
Frigate gracefully performs login page redirection that should work with most authentication proxies. If your reverse proxy returns a `Location` header on `401`, `302`, or `307` unauthorized responses, Frigate's frontend will automatically detect it and redirect to that URL.
|
||||
@ -135,3 +153,31 @@ Frigate gracefully performs login page redirection that should work with most au
|
||||
### Custom logout url
|
||||
|
||||
If your reverse proxy has a dedicated logout URL, you can specify it with the `logout_url` config option. This will update the `Logout` link in the UI.
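For example, assuming your proxy exposes its logout endpoint at `/oauth2/sign_out` (an illustrative path), the config would look like:

```yaml
proxy:
  logout_url: /oauth2/sign_out
```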
|
||||
|
||||
## User Roles
|
||||
|
||||
Frigate supports user roles to control access to certain features in the UI and API, such as managing users or modifying configuration settings. Roles are assigned to users in the database or through proxy headers and are enforced when accessing the UI or API through the authenticated port (`8971`).
|
||||
|
||||
### Supported Roles
|
||||
|
||||
- **admin**: Full access to all features, including user management and configuration.
|
||||
- **viewer**: Read-only access to the UI and API, including viewing cameras, review items, and historical footage. Configuration editor and settings in the UI are inaccessible.
|
||||
|
||||
### Role Enforcement
|
||||
|
||||
When using the authenticated port (`8971`), roles are validated via the JWT token or proxy headers (e.g., `remote-role`).
|
||||
|
||||
On the internal **unauthenticated** port (`5000`), roles are **not enforced**. All requests are treated as **anonymous**, granting access equivalent to the **admin** role without restrictions.
|
||||
|
||||
To use role-based access control, you must connect to Frigate via the **authenticated port (`8971`)** directly or through a reverse proxy.
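As a sketch, a docker-compose port mapping that publishes only the authenticated port could look like the following (the host port choice is an example; leaving `5000` unpublished keeps it internal to the Docker network):

```yaml
services:
  frigate:
    ports:
      - "8971:8971" # authenticated UI/API with role enforcement
      # port 5000 intentionally not published
```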
|
||||
|
||||
### Role Visibility in the UI
|
||||
|
||||
- When logged in via port `8971`, your **username and role** are displayed in the **account menu** (bottom corner).
|
||||
- When using port `5000`, the UI will always display "anonymous" for the username and "admin" for the role.
|
||||
|
||||
### Managing User Roles
|
||||
|
||||
1. Log in as an **admin** user via port `8971`.
|
||||
2. Navigate to **Settings > Users**.
|
||||
3. Edit a user’s role by selecting **admin** or **viewer**.
|
||||
|
@ -219,7 +219,7 @@ go2rtc:
|
||||
- rtspx://192.168.1.1:7441/abcdefghijk
|
||||
```
|
||||
|
||||
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-rtsp)
|
||||
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-rtsp)
|
||||
|
||||
In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.
|
||||
|
||||
|
@ -3,16 +3,16 @@ id: license_plate_recognition
|
||||
title: License Plate Recognition (LPR)
|
||||
---
|
||||
|
||||
Frigate can recognize license plates on vehicles and automatically add the detected characters or recognized name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
|
||||
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
|
||||
|
||||
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles.
|
||||
|
||||
When a plate is recognized, the detected characters or recognized name is:
|
||||
When a plate is recognized, the detected characters or recognized name are:
|
||||
|
||||
- Added as a `sub_label` to the `car` tracked object.
|
||||
- Added to the `car` tracked object as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown)
|
||||
- Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore.
|
||||
- Filterable through the More Filters menu in Explore.
|
||||
- Published via the `frigate/events` MQTT topic as a `sub_label` for the tracked object.
|
||||
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the tracked object.
|
||||
|
||||
## Model Requirements
|
||||
|
||||
@ -71,6 +71,7 @@ Fine-tune the LPR feature using these optional parameters:
|
||||
|
||||
- **`known_plates`**: List of strings or regular expressions that assign a custom `sub_label` to `car` objects when a recognized plate matches a known value (see the example after this list).
|
||||
- These labels appear in the UI, filters, and notifications.
|
||||
- Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`.
|
||||
- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate.
|
||||
- For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
|
||||
- This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.
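A hedged example combining these options (the names and plate values are placeholders):

```yaml
lpr:
  enabled: True
  # map a friendly name to one or more plate strings or regular expressions
  known_plates:
    Delivery Van:
      - "ABC1234"
    Neighbor:
      - "1[A-Z]{3}[0-9]{3}" # regex entries are not subject to match_distance
  # allow one character of variation when matching non-regex known plates
  match_distance: 1
```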
|
||||
|
@ -12,7 +12,7 @@ Frigate supports multiple different detectors that work on different types of ha
|
||||
**Most Hardware**
|
||||
|
||||
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
|
||||
- [Hailo](#hailo-8l): The Hailo8 AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
|
||||
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration modules are available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
|
||||
|
||||
**AMD**
|
||||
|
||||
@ -129,15 +129,58 @@ detectors:
|
||||
type: edgetpu
|
||||
device: pci
|
||||
```
|
||||
---
|
||||
|
||||
## Hailo-8l
|
||||
|
||||
This detector is available for use with Hailo-8 AI Acceleration Module.
|
||||
## Hailo-8
|
||||
|
||||
See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the hailo8.
|
||||
This detector is available for use with both Hailo-8 and Hailo-8L AI Acceleration Modules. The integration automatically detects your hardware architecture via the Hailo CLI and selects the appropriate default model if no custom model is specified.
|
||||
|
||||
See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the Hailo hardware.
|
||||
|
||||
### Configuration
|
||||
|
||||
When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**.
|
||||
If both are provided, the detector will first check for the model at the given local path. If the file is not found, it will download the model from the specified URL. The model file is cached under `/config/model_cache/hailo`.
|
||||
|
||||
#### YOLO
|
||||
|
||||
Use this configuration for YOLO-based models. When no custom model path or URL is provided, the detector automatically downloads the default model based on the detected hardware:
|
||||
- **Hailo-8 hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)
|
||||
- **Hailo-8L hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
hailo8l:
|
||||
type: hailo8l
|
||||
device: PCIe
|
||||
|
||||
model:
|
||||
width: 320
|
||||
height: 320
|
||||
input_tensor: nhwc
|
||||
input_pixel_format: rgb
|
||||
input_dtype: int
|
||||
model_type: yolo-generic
|
||||
|
||||
# The detector automatically selects the default model based on your hardware:
|
||||
# - For Hailo-8 hardware: YOLOv6n (default: yolov6n.hef)
|
||||
# - For Hailo-8L hardware: YOLOv6n (default: yolov6n.hef)
|
||||
#
|
||||
# Optionally, you can specify a local model path to override the default.
|
||||
# If a local path is provided and the file exists, it will be used instead of downloading.
|
||||
# Example:
|
||||
# path: /config/model_cache/hailo/yolov6n.hef
|
||||
#
|
||||
# You can also override using a custom URL:
|
||||
# path: https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8/yolov6n.hef
|
||||
# just make sure to use the right configuration for the model you choose
|
||||
```
|
||||
|
||||
#### SSD
|
||||
|
||||
For SSD-based models, provide either a model path or URL to your compiled SSD model. The integration will first check the local path before downloading if necessary.
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
hailo8l:
|
||||
@ -148,11 +191,50 @@ model:
|
||||
width: 300
|
||||
height: 300
|
||||
input_tensor: nhwc
|
||||
input_pixel_format: bgr
|
||||
input_pixel_format: rgb
|
||||
model_type: ssd
|
||||
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
|
||||
# Specify the local model path (if available) or URL for SSD MobileNet v1.
|
||||
# Example with a local path:
|
||||
# path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
|
||||
#
|
||||
# Or override using a custom URL:
|
||||
# path: https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8l/ssd_mobilenet_v1.hef
|
||||
```
|
||||
|
||||
#### Custom Models
|
||||
|
||||
The Hailo detector supports all YOLO models compiled for Hailo hardware that include post-processing. You can specify a custom URL or a local path to download or use your model directly. If both are provided, the detector checks the local path first.
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
hailo8l:
|
||||
type: hailo8l
|
||||
device: PCIe
|
||||
|
||||
model:
|
||||
width: 640
|
||||
height: 640
|
||||
input_tensor: nhwc
|
||||
input_pixel_format: rgb
|
||||
input_dtype: int
|
||||
model_type: yolo-generic
|
||||
# Optional: Specify a local model path.
|
||||
# path: /config/model_cache/hailo/custom_model.hef
|
||||
#
|
||||
# Alternatively, or as a fallback, provide a custom URL:
|
||||
# path: https://custom-model-url.com/path/to/model.hef
|
||||
```
|
||||
For additional ready-to-use models, please visit: https://github.com/hailo-ai/hailo_model_zoo
|
||||
|
||||
Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation.
|
||||
|
||||
> **Note:**
|
||||
> The `config.path` parameter can accept either a local file path or a URL ending with `.hef`. When provided, the detector will first check if the path is a local file path. If the file exists locally, it will use it directly. If the file is not found locally or if a URL was provided, it will attempt to download the model from the specified URL.
|
||||
|
||||
---
|
||||
|
||||
|
||||
|
||||
## OpenVINO Detector
|
||||
|
||||
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
|
||||
|
@ -591,7 +591,7 @@ genai:
|
||||
person: "My special person prompt."
|
||||
|
||||
# Optional: Restream configuration
|
||||
# Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
|
||||
# Uses https://github.com/AlexxIT/go2rtc (v1.9.9)
|
||||
# NOTE: The default go2rtc API port (1984) must be used,
|
||||
# changing this port for the integrated go2rtc instance is not supported.
|
||||
go2rtc:
|
||||
|
@ -7,7 +7,7 @@ title: Restream
|
||||
|
||||
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
|
||||
|
||||
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.2) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration) for more advanced configurations and features.
|
||||
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.9) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is defined under the `go2rtc` key in the Frigate config; see the [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration) for more advanced configurations and features.
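As a minimal sketch (the stream name and RTSP URL are placeholders), a stream defined under the `go2rtc` key is then restreamed at `rtsp://<frigate_host>:8554/back_yard`:

```yaml
go2rtc:
  streams:
    back_yard:
      - rtsp://user:password@192.168.1.10:554/stream1
```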
|
||||
|
||||
:::note
|
||||
|
||||
@ -134,7 +134,7 @@ cameras:
|
||||
|
||||
## Handling Complex Passwords
|
||||
|
||||
go2rtc expects URL-encoded passwords in the config, [urlencoder.org](https://urlencoder.org) can be used for this purpose.
|
||||
go2rtc expects URL-encoded passwords in the config; [urlencoder.org](https://urlencoder.org) can be used for this purpose.
|
||||
|
||||
For example:
|
||||
|
||||
@ -156,7 +156,7 @@ See [this comment(https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-224
|
||||
|
||||
## Advanced Restream Configurations
|
||||
|
||||
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
|
||||
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
|
||||
|
||||
NOTE: The output will need to be passed with two curly braces `{{output}}`
|
||||
|
||||
|
@ -21,23 +21,77 @@ I may earn a small commission for my endorsement, recommendation, testimonial, o
|
||||
|
||||
## Server
|
||||
|
||||
My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with an M.2 or PCIe slot that is compatible with the Hailo8 or Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
|
||||
| Name | Coral Inference Speed | Coral Compatibility | Notes |
|
||||
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ----------------------------------------------------------------------------------------- |
|
||||
| Beelink EQ13 (<a href="https://amzn.to/4iQaBKu" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
|
||||
| Name | Notes |
|
||||
| ------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------- |
|
||||
| Beelink EQ13 (<a href="https://amzn.to/4iQaBKu" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
|
||||
|
||||
## Detectors
|
||||
|
||||
A detector is a device which is optimized for running inferences efficiently to detect objects. Using a recommended detector means there will be less latency between detections and more detections can be run per second. Frigate is designed around the expectation that a detector is used to achieve very low inference speeds. Offloading TensorFlow to a detector is an order of magnitude faster and will reduce your CPU load dramatically. As of 0.12, Frigate supports a handful of different detector types with varying inference speeds and performance.
|
||||
A detector is a device which is optimized for running inferences efficiently to detect objects. Using a recommended detector means there will be less latency between detections and more detections can be run per second. Frigate is designed around the expectation that a detector is used to achieve very low inference speeds. Offloading TensorFlow to a detector is an order of magnitude faster and will reduce your CPU load dramatically.
|
||||
|
||||
:::info
|
||||
|
||||
Frigate supports multiple different detectors that work on different types of hardware:
|
||||
|
||||
**Most Hardware**
|
||||
|
||||
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration modules are available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
|
||||
- [Supports many model architectures](../../configuration/object_detectors#configuration)
|
||||
- Runs best with tiny or small size models
|
||||
|
||||
- [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
|
||||
- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)
|
||||
|
||||
**AMD**
|
||||
|
||||
- [ROCm](#amd-gpus): ROCm can run on AMD Discrete GPUs to provide efficient object detection
|
||||
- [Supports limited model architectures](../../configuration/object_detectors#supported-models-1)
|
||||
- Runs best on discrete AMD GPUs
|
||||
|
||||
**Intel**
|
||||
|
||||
- [OpenVino](#openvino): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
|
||||
- [Supports majority of model architectures](../../configuration/object_detectors#supported-models)
|
||||
- Runs best with tiny, small, or medium models
|
||||
|
||||
**Nvidia**
|
||||
|
||||
- [TensorRT](#tensorrt---nvidia-gpu): TensorRT can run on Nvidia GPUs and Jetson devices.
|
||||
- [Supports majority of model architectures via ONNX](../../configuration/object_detectors#supported-models-2)
|
||||
- Runs well with any size models including large
|
||||
|
||||
**Rockchip**
|
||||
|
||||
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs to provide efficient object detection.
|
||||
- [Supports limited model architectures](../../configuration/object_detectors#choosing-a-model)
|
||||
- Runs best with tiny or small size models
|
||||
- Runs efficiently on low power hardware
|
||||
|
||||
:::
|
||||
|
||||
### Hailo-8
|
||||
|
||||
|
||||
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms—including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn’t provided.
|
||||
|
||||
**Default Model Configuration:**
|
||||
- **Hailo-8L:** Default model is **YOLOv6n**.
|
||||
- **Hailo-8:** Default model is **YOLOv6n**.
|
||||
|
||||
In real-world deployments, even with multiple cameras running concurrently, Frigate has demonstrated consistent performance. Testing on x86 platforms—with dual PCIe lanes—yields further improvements in FPS, throughput, and latency compared to the Raspberry Pi setup.
|
||||
|
||||
| Name | Hailo‑8 Inference Time | Hailo‑8L Inference Time |
|
||||
| ---------------- | ---------------------- | ----------------------- |
|
||||
| ssd mobilenet v1 | ~ 6 ms | ~ 10 ms |
|
||||
| yolov6n | ~ 7 ms | ~ 11 ms |
|
||||
|
||||
### Google Coral TPU
|
||||
|
||||
It is strongly recommended to use a Google Coral. A $60 device will outperform a $2000 CPU. Frigate should work with any supported Coral device from https://coral.ai
|
||||
|
||||
The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
|
||||
|
||||
The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
|
||||
Frigate supports both the USB and M.2 versions of the Google Coral.
|
||||
- The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
|
||||
- The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
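For reference, a minimal Frigate detector config for the USB version looks roughly like this (the detector name is arbitrary; see the object detector docs for the PCIe/M.2 `device` values):

```yaml
detectors:
  coral:
    type: edgetpu
    device: usb
```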
|
||||
|
||||
A single Coral can handle many cameras using the default model and will be sufficient for the majority of users. You can calculate the maximum performance of your Coral based on the inference speed reported by Frigate. With an inference speed of 10, your Coral will top out at `1000/10=100`, or 100 frames per second. If your detection fps is regularly getting close to that, you should first consider tuning motion masks. If those are already properly configured, a second Coral may be needed.
|
||||
|
||||
@ -92,11 +146,9 @@ Inference speeds will vary greatly depending on the GPU and the model used.
|
||||
|
||||
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
|
||||
|
||||
### Hailo-8l PCIe
|
||||
|
||||
Frigate supports the Hailo-8l M.2 card on any hardware but currently it is only tested on the Raspberry Pi5 PCIe hat from the AI kit.
|
||||
|
||||
The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model.
|
||||
| Name | YoloV9 Inference Time | YOLO-NAS Inference Time |
|
||||
| --------------- | --------------------- | ------------------------- |
|
||||
| AMD 780M | ~ 14 ms | ~ 60 ms |
|
||||
|
||||
## Community Supported Detectors
|
||||
|
||||
|
@ -6,7 +6,7 @@ slug: /
|
||||
|
||||
A complete and local NVR designed for Home Assistant with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras.
|
||||
|
||||
Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but strongly recommended. CPU detection should only be used for testing purposes. The Coral will outperform even the best CPUs and can process 100+ FPS with very little overhead.
|
||||
Use of a [Recommended Detector](/frigate/hardware#detectors) is optional, but strongly recommended. CPU detection should only be used for testing purposes.
|
||||
|
||||
- Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration)
|
||||
- Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary
|
||||
|
@ -100,9 +100,9 @@ By default, the Raspberry Pi limits the amount of memory available to the GPU. I
|
||||
|
||||
Additionally, the USB Coral draws a considerable amount of power. If using any other USB devices such as an SSD, you will experience instability due to the Pi not providing enough power to USB devices. You will need to purchase an external USB hub with its own power supply. Some have reported success with <a href="https://amzn.to/3a2mH0P" target="_blank" rel="nofollow noopener sponsored">this</a> (affiliate link).
|
||||
|
||||
### Hailo-8L
|
||||
### Hailo-8
|
||||
|
||||
The Hailo-8L is an M.2 card typically connected to a carrier board for PCIe, which then connects to the Raspberry Pi 5 as part of the AI Kit. However, it can also be used on other boards equipped with an M.2 M key edge connector.
|
||||
The Hailo-8 and Hailo-8L AI accelerators are available in both M.2 and HAT form factors for the Raspberry Pi. The M.2 version typically connects to a carrier board for PCIe, which then interfaces with the Raspberry Pi 5 as part of the AI Kit. The HAT version can be mounted directly onto compatible Raspberry Pi models. Both form factors have been successfully tested on x86 platforms as well, making them versatile options for various computing environments.
|
||||
|
||||
#### Installation
|
||||
|
||||
|
@ -13,7 +13,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect
|
||||
|
||||
# Setup a go2rtc stream
|
||||
|
||||
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#module-streams), not just rtsp.
|
||||
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#module-streams), not just rtsp.
|
||||
|
||||
:::tip
|
||||
|
||||
@ -32,69 +32,74 @@ go2rtc:
|
||||
|
||||
After adding this to the config, restart Frigate and try to watch the live stream for a single camera by clicking on it from the dashboard. It should look much clearer and more fluent than the original jsmpeg stream.
|
||||
|
||||
|
||||
### What if my video doesn't play?
|
||||
|
||||
- Check Logs:
|
||||
- Access the go2rtc logs in the Frigate UI under Logs in the sidebar.
|
||||
- If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log.
|
||||
|
||||
- Access the go2rtc logs in the Frigate UI under Logs in the sidebar.
|
||||
- If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log.
|
||||
|
||||
- Check go2rtc Web Interface: if you don't see any errors in the logs, try viewing the camera through go2rtc's web interface.
|
||||
- Navigate to port 1984 in your browser to access go2rtc's web interface.
|
||||
- If using Frigate through Home Assistant, enable the web interface at port 1984.
|
||||
- If using Docker, forward port 1984 before accessing the web interface.
|
||||
- Click `stream` for the specific camera to see if the camera's stream is being received.
|
||||
|
||||
- Navigate to port 1984 in your browser to access go2rtc's web interface.
|
||||
- If using Frigate through Home Assistant, enable the web interface at port 1984.
|
||||
- If using Docker, forward port 1984 before accessing the web interface.
|
||||
- Click `stream` for the specific camera to see if the camera's stream is being received.
|
||||
|
||||
- Check Video Codec:
|
||||
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
|
||||
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#codecs-madness) in go2rtc documentation.
|
||||
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
- "ffmpeg:back#video=h264#hardware"
|
||||
```
|
||||
|
||||
- Switch to FFmpeg if needed:
|
||||
- Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types.
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
```
|
||||
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
|
||||
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#codecs-madness) in go2rtc documentation.
|
||||
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.9#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
- "ffmpeg:back#video=h264#hardware"
|
||||
```
|
||||
|
||||
- If you can see the video but do not have audio, this is most likely because your camera's audio stream codec is not AAC.
|
||||
- If possible, update your camera's audio settings to AAC in your camera's firmware.
|
||||
- If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows:
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
- "ffmpeg:back#audio=aac"
|
||||
```
|
||||
- Switch to FFmpeg if needed:
|
||||
|
||||
If you need to convert **both** the audio and video streams, you can use the following:
|
||||
- Some camera streams may need to use the ffmpeg module in go2rtc. This has the downside of slower startup times, but has compatibility with more stream types.
|
||||
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
- "ffmpeg:back#video=h264#audio=aac#hardware"
|
||||
```
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
```
|
||||
|
||||
When using the ffmpeg module, you would add AAC audio like this:
|
||||
- If you can see the video but do not have audio, this is most likely because your camera's audio stream codec is not AAC.
|
||||
- If possible, update your camera's audio settings to AAC in your camera's firmware.
|
||||
- If your cameras do not support AAC audio, you will need to tell go2rtc to re-encode the audio to AAC on demand if you want audio. This will use additional CPU and add some latency. To add AAC audio on demand, you can update your go2rtc config as follows:
|
||||
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac#hardware"
|
||||
```
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
- "ffmpeg:back#audio=aac"
|
||||
```
|
||||
|
||||
If you need to convert **both** the audio and video streams, you can use the following:
|
||||
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
|
||||
- "ffmpeg:back#video=h264#audio=aac#hardware"
|
||||
```
|
||||
|
||||
When using the ffmpeg module, you would add AAC audio like this:
|
||||
|
||||
```yaml
|
||||
go2rtc:
|
||||
streams:
|
||||
back:
|
||||
- "ffmpeg:rtsp://user:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2#video=copy#audio=copy#audio=aac#hardware"
|
||||
```
|
||||
|
||||
:::warning
|
||||
|
||||
|
@ -54,7 +54,9 @@ Message published for each changed tracked object. The first message is publishe
|
||||
}, // attributes with top score that have been identified on the object at any point
|
||||
"current_attributes": [], // detailed data about the current attributes in this frame
|
||||
"current_estimated_speed": 0.71, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
|
||||
"velocity_angle": 180 // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
|
||||
"velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
|
||||
"recognized_license_plate": "ABC12345", // a recognized license plate for car objects
|
||||
"recognized_license_plate_score": 0.933451
|
||||
},
|
||||
"after": {
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
@ -93,7 +95,9 @@ Message published for each changed tracked object. The first message is publishe
|
||||
}
|
||||
],
|
||||
"current_estimated_speed": 0.77, // current estimated speed (mph or kph) for objects moving through zones with speed estimation enabled
|
||||
"velocity_angle": 180 // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
|
||||
"velocity_angle": 180, // direction of travel relative to the frame for objects moving through zones with speed estimation enabled
|
||||
"recognized_license_plate": "ABC12345", // a recognized license plate for car objects
|
||||
"recognized_license_plate_score": 0.933451
|
||||
}
|
||||
}
|
||||
```
|
||||
|
docs/sidebars.ts (138 lines changed)
@ -1,106 +1,106 @@
|
||||
import type { SidebarsConfig, } from '@docusaurus/plugin-content-docs';
|
||||
import { PropSidebarItemLink } from '@docusaurus/plugin-content-docs';
|
||||
import frigateHttpApiSidebar from './docs/integrations/api/sidebar';
|
||||
import type { SidebarsConfig } from "@docusaurus/plugin-content-docs";
|
||||
import { PropSidebarItemLink } from "@docusaurus/plugin-content-docs";
|
||||
import frigateHttpApiSidebar from "./docs/integrations/api/sidebar";
|
||||
|
||||
const sidebars: SidebarsConfig = {
|
||||
docs: {
|
||||
Frigate: [
|
||||
'frigate/index',
|
||||
'frigate/hardware',
|
||||
'frigate/installation',
|
||||
'frigate/camera_setup',
|
||||
'frigate/video_pipeline',
|
||||
'frigate/glossary',
|
||||
"frigate/index",
|
||||
"frigate/hardware",
|
||||
"frigate/installation",
|
||||
"frigate/camera_setup",
|
||||
"frigate/video_pipeline",
|
||||
"frigate/glossary",
|
||||
],
|
||||
Guides: [
|
||||
'guides/getting_started',
|
||||
'guides/configuring_go2rtc',
|
||||
'guides/ha_notifications',
|
||||
'guides/ha_network_storage',
|
||||
'guides/reverse_proxy',
|
||||
"guides/getting_started",
|
||||
"guides/configuring_go2rtc",
|
||||
"guides/ha_notifications",
|
||||
"guides/ha_network_storage",
|
||||
"guides/reverse_proxy",
|
||||
],
|
||||
Configuration: {
|
||||
'Configuration Files': [
|
||||
'configuration/index',
|
||||
'configuration/reference',
|
||||
"Configuration Files": [
|
||||
"configuration/index",
|
||||
"configuration/reference",
|
||||
{
|
||||
type: 'link',
|
||||
label: 'Go2RTC Configuration Reference',
|
||||
href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration',
|
||||
type: "link",
|
||||
label: "Go2RTC Configuration Reference",
|
||||
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.9#configuration",
|
||||
} as PropSidebarItemLink,
|
||||
],
|
||||
Detectors: [
|
||||
'configuration/object_detectors',
|
||||
'configuration/audio_detectors',
|
||||
"configuration/object_detectors",
|
||||
"configuration/audio_detectors",
|
||||
],
|
||||
Classifiers: [
|
||||
'configuration/semantic_search',
|
||||
'configuration/genai',
|
||||
'configuration/face_recognition',
|
||||
'configuration/license_plate_recognition',
|
||||
"configuration/semantic_search",
|
||||
"configuration/genai",
|
||||
"configuration/face_recognition",
|
||||
"configuration/license_plate_recognition",
|
||||
],
|
||||
Cameras: [
|
||||
'configuration/cameras',
|
||||
'configuration/review',
|
||||
'configuration/record',
|
||||
'configuration/snapshots',
|
||||
'configuration/motion_detection',
|
||||
'configuration/birdseye',
|
||||
'configuration/live',
|
||||
'configuration/restream',
|
||||
'configuration/autotracking',
|
||||
'configuration/camera_specific',
|
||||
"configuration/cameras",
|
||||
"configuration/review",
|
||||
"configuration/record",
|
||||
"configuration/snapshots",
|
||||
"configuration/motion_detection",
|
||||
"configuration/birdseye",
|
||||
"configuration/live",
|
||||
"configuration/restream",
|
||||
"configuration/autotracking",
|
||||
"configuration/camera_specific",
|
||||
],
|
||||
Objects: [
|
||||
'configuration/object_filters',
|
||||
'configuration/masks',
|
||||
'configuration/zones',
|
||||
'configuration/objects',
|
||||
'configuration/stationary_objects',
|
||||
"configuration/object_filters",
|
||||
"configuration/masks",
|
||||
"configuration/zones",
|
||||
"configuration/objects",
|
||||
"configuration/stationary_objects",
|
||||
],
|
||||
'Extra Configuration': [
|
||||
'configuration/authentication',
|
||||
'configuration/notifications',
|
||||
'configuration/hardware_acceleration',
|
||||
'configuration/ffmpeg_presets',
|
||||
"Extra Configuration": [
|
||||
"configuration/authentication",
|
||||
"configuration/notifications",
|
||||
"configuration/hardware_acceleration",
|
||||
"configuration/ffmpeg_presets",
|
||||
"configuration/pwa",
|
||||
'configuration/tls',
|
||||
'configuration/advanced',
|
||||
"configuration/tls",
|
||||
"configuration/advanced",
|
||||
],
|
||||
},
|
||||
Integrations: [
|
||||
'integrations/plus',
|
||||
'integrations/home-assistant',
|
||||
"integrations/plus",
|
||||
"integrations/home-assistant",
|
||||
// This is the HTTP API generated by OpenAPI
|
||||
{
|
||||
type: 'category',
|
||||
label: 'HTTP API',
|
||||
type: "category",
|
||||
label: "HTTP API",
|
||||
link: {
|
||||
type: 'generated-index',
|
||||
title: 'Frigate HTTP API',
|
||||
description: 'HTTP API',
|
||||
slug: '/integrations/api/frigate-http-api',
|
||||
type: "generated-index",
|
||||
title: "Frigate HTTP API",
|
||||
description: "HTTP API",
|
||||
slug: "/integrations/api/frigate-http-api",
|
||||
},
|
||||
items: frigateHttpApiSidebar,
|
||||
},
|
||||
'integrations/mqtt',
|
||||
'configuration/metrics',
|
||||
'integrations/third_party_extensions',
|
||||
"integrations/mqtt",
|
||||
"configuration/metrics",
|
||||
"integrations/third_party_extensions",
|
||||
],
|
||||
'Frigate+': [
|
||||
'plus/index',
|
||||
'plus/first_model',
|
||||
'plus/improving_model',
|
||||
'plus/faq',
|
||||
"Frigate+": [
|
||||
"plus/index",
|
||||
"plus/first_model",
|
||||
"plus/improving_model",
|
||||
"plus/faq",
|
||||
],
|
||||
Troubleshooting: [
|
||||
'troubleshooting/faqs',
|
||||
'troubleshooting/recordings',
|
||||
'troubleshooting/edgetpu',
|
||||
"troubleshooting/faqs",
|
||||
"troubleshooting/recordings",
|
||||
"troubleshooting/edgetpu",
|
||||
],
|
||||
Development: [
|
||||
'development/contributing',
|
||||
'development/contributing-boards',
|
||||
"development/contributing",
|
||||
"development/contributing-boards",
|
||||
],
|
||||
},
|
||||
};
|
||||
|
@ -619,6 +619,41 @@ def get_sub_labels(split_joined: Optional[int] = None):
|
||||
return JSONResponse(content=sub_labels)
|
||||
|
||||
|
||||
@router.get("/recognized_license_plates")
|
||||
def get_recognized_license_plates(split_joined: Optional[int] = None):
|
||||
try:
|
||||
events = Event.select(Event.data).distinct()
|
||||
except Exception:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{"success": False, "message": "Failed to get recognized license plates"}
|
||||
),
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
recognized_license_plates = []
|
||||
for e in events:
|
||||
if e.data is not None and "recognized_license_plate" in e.data:
|
||||
recognized_license_plates.append(e.data["recognized_license_plate"])
|
||||
|
||||
while None in recognized_license_plates:
|
||||
recognized_license_plates.remove(None)
|
||||
|
||||
if split_joined:
|
||||
original_recognized_license_plates = recognized_license_plates.copy()
|
||||
for recognized_license_plate in original_recognized_license_plates:
|
||||
if recognized_license_plate and "," in recognized_license_plate:
|
||||
recognized_license_plates.remove(recognized_license_plate)
|
||||
parts = recognized_license_plate.split(",")
|
||||
for part in parts:
|
||||
if part.strip() not in recognized_license_plates:
|
||||
recognized_license_plates.append(part.strip())
|
||||
|
||||
recognized_license_plates = list(set(recognized_license_plates))
|
||||
recognized_license_plates.sort()
|
||||
return JSONResponse(content=recognized_license_plates)
|
||||
|
||||
|
||||
@router.get("/timeline")
|
||||
def timeline(camera: str = "all", limit: int = 100, source_id: Optional[str] = None):
|
||||
clauses = []
|
||||
|
@ -136,7 +136,7 @@ def get_jwt_secret() -> str:
|
||||
logger.debug("Using jwt secret from .jwt_secret file in config directory.")
|
||||
with open(jwt_secret_file) as f:
|
||||
try:
|
||||
jwt_secret = f.readline()
|
||||
jwt_secret = f.readline().strip()
|
||||
except Exception:
|
||||
logger.warning(
|
||||
"Unable to read jwt token from .jwt_secret file in config directory. A new jwt token will be created at each startup."
|
||||
@ -259,17 +259,24 @@ def auth(request: Request):
|
||||
# pass the user header value from the upstream proxy if a mapping is specified
|
||||
# or use anonymous if none are specified
|
||||
user_header = proxy_config.header_map.user
|
||||
role_header = proxy_config.header_map.get("role", "Remote-Role")
|
||||
role_header = proxy_config.header_map.role
|
||||
success_response.headers["remote-user"] = (
|
||||
request.headers.get(user_header, default="anonymous")
|
||||
if user_header
|
||||
else "anonymous"
|
||||
)
|
||||
success_response.headers["remote-role"] = (
|
||||
role_header = proxy_config.header_map.role
|
||||
role = (
|
||||
request.headers.get(role_header, default="viewer")
|
||||
if role_header
|
||||
else "viewer"
|
||||
)
|
||||
|
||||
# if comma-separated with "admin", use "admin", else "viewer"
|
||||
success_response.headers["remote-role"] = (
|
||||
"admin" if role and "admin" in role else "viewer"
|
||||
)
|
||||
|
||||
return success_response
|
||||
|
||||
# now apply authentication
|
||||
@ -359,14 +366,8 @@ def auth(request: Request):
|
||||
@router.get("/profile")
|
||||
def profile(request: Request):
|
||||
username = request.headers.get("remote-user", "anonymous")
|
||||
if username != "anonymous":
|
||||
try:
|
||||
user = User.get_by_id(username)
|
||||
role = getattr(user, "role", "viewer")
|
||||
except DoesNotExist:
|
||||
role = "viewer" # Fallback if user deleted
|
||||
else:
|
||||
role = None
|
||||
role = request.headers.get("remote-role", "viewer")
|
||||
|
||||
return JSONResponse(content={"username": username, "role": role})
|
||||
|
||||
|
||||
|
@ -27,6 +27,7 @@ class EventsQueryParams(BaseModel):
|
||||
max_score: Optional[float] = None
|
||||
min_speed: Optional[float] = None
|
||||
max_speed: Optional[float] = None
|
||||
recognized_license_plate: Optional[str] = "all"
|
||||
is_submitted: Optional[int] = None
|
||||
min_length: Optional[float] = None
|
||||
max_length: Optional[float] = None
|
||||
@ -55,6 +56,7 @@ class EventsSearchQueryParams(BaseModel):
|
||||
max_score: Optional[float] = None
|
||||
min_speed: Optional[float] = None
|
||||
max_speed: Optional[float] = None
|
||||
recognized_license_plate: Optional[str] = "all"
|
||||
sort: Optional[str] = None
|
||||
|
||||
|
||||
|
@ -3,6 +3,8 @@
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import string
|
||||
from functools import reduce
|
||||
from pathlib import Path
|
||||
from urllib.parse import unquote
|
||||
@ -40,11 +42,11 @@ from frigate.api.defs.response.event_response import (
|
||||
)
|
||||
from frigate.api.defs.response.generic_response import GenericResponse
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
|
||||
from frigate.const import CLIPS_DIR
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.events.external import ExternalEventProcessor
|
||||
from frigate.models import Event, ReviewSegment, Timeline
|
||||
from frigate.object_processing import TrackedObject, TrackedObjectProcessor
|
||||
from frigate.track.object_processing import TrackedObject
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -99,6 +101,7 @@ def events(params: EventsQueryParams = Depends()):
|
||||
min_length = params.min_length
|
||||
max_length = params.max_length
|
||||
event_id = params.event_id
|
||||
recognized_license_plate = params.recognized_license_plate
|
||||
|
||||
sort = params.sort
|
||||
|
||||
@ -156,6 +159,45 @@ def events(params: EventsQueryParams = Depends()):
|
||||
sub_label_clause = reduce(operator.or_, sub_label_clauses)
|
||||
clauses.append((sub_label_clause))
|
||||
|
||||
if recognized_license_plate != "all":
|
||||
# use matching so joined recognized_license_plates are included
|
||||
# for example a recognized license plate 'ABC123' would get events
|
||||
# with recognized license plates 'ABC123' and 'ABC123, XYZ789'
|
||||
recognized_license_plate_clauses = []
|
||||
filtered_recognized_license_plates = recognized_license_plate.split(",")
|
||||
|
||||
if "None" in filtered_recognized_license_plates:
|
||||
filtered_recognized_license_plates.remove("None")
|
||||
recognized_license_plate_clauses.append(
|
||||
(Event.data["recognized_license_plate"].is_null())
|
||||
)
|
||||
|
||||
for recognized_license_plate in filtered_recognized_license_plates:
|
||||
# Exact matching plus list inclusion
|
||||
recognized_license_plate_clauses.append(
|
||||
(
|
||||
Event.data["recognized_license_plate"].cast("text")
|
||||
== recognized_license_plate
|
||||
)
|
||||
)
|
||||
recognized_license_plate_clauses.append(
|
||||
(
|
||||
Event.data["recognized_license_plate"].cast("text")
|
||||
% f"*{recognized_license_plate},*"
|
||||
)
|
||||
)
|
||||
recognized_license_plate_clauses.append(
|
||||
(
|
||||
Event.data["recognized_license_plate"].cast("text")
|
||||
% f"*, {recognized_license_plate}*"
|
||||
)
|
||||
)
|
||||
|
||||
recognized_license_plate_clause = reduce(
|
||||
operator.or_, recognized_license_plate_clauses
|
||||
)
|
||||
clauses.append((recognized_license_plate_clause))
|
||||
|
||||
if zones != "all":
|
||||
# use matching so events with multiple zones
|
||||
# still match on a search where any zone matches
|
||||
@ -338,6 +380,8 @@ def events_explore(limit: int = 10):
|
||||
"average_estimated_speed",
|
||||
"velocity_angle",
|
||||
"path_data",
|
||||
"recognized_license_plate",
|
||||
"recognized_license_plate_score",
|
||||
]
|
||||
},
|
||||
"event_count": label_counts[event.label],
|
||||
@ -395,6 +439,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
|
||||
has_clip = params.has_clip
|
||||
has_snapshot = params.has_snapshot
|
||||
is_submitted = params.is_submitted
|
||||
recognized_license_plate = params.recognized_license_plate
|
||||
|
||||
# for similarity search
|
||||
event_id = params.event_id
|
||||
@ -464,6 +509,45 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
|
||||
|
||||
event_filters.append((reduce(operator.or_, zone_clauses)))
|
||||
|
||||
if recognized_license_plate != "all":
|
||||
# use matching so joined recognized_license_plates are included
|
||||
# for example an recognized_license_plate 'ABC123' would get events
|
||||
# with recognized_license_plates 'ABC123' and 'ABC123, XYZ789'
|
||||
recognized_license_plate_clauses = []
|
||||
filtered_recognized_license_plates = recognized_license_plate.split(",")
|
||||
|
||||
if "None" in filtered_recognized_license_plates:
|
||||
filtered_recognized_license_plates.remove("None")
|
||||
recognized_license_plate_clauses.append(
|
||||
(Event.data["recognized_license_plate"].is_null())
|
||||
)
|
||||
|
||||
for recognized_license_plate in filtered_recognized_license_plates:
|
||||
# Exact matching plus list inclusion
|
||||
recognized_license_plate_clauses.append(
|
||||
(
|
||||
Event.data["recognized_license_plate"].cast("text")
|
||||
== recognized_license_plate
|
||||
)
|
||||
)
|
||||
recognized_license_plate_clauses.append(
|
||||
(
|
||||
Event.data["recognized_license_plate"].cast("text")
|
||||
% f"*{recognized_license_plate},*"
|
||||
)
|
||||
)
|
||||
recognized_license_plate_clauses.append(
|
||||
(
|
||||
Event.data["recognized_license_plate"].cast("text")
|
||||
% f"*, {recognized_license_plate}*"
|
||||
)
|
||||
)
|
||||
|
||||
recognized_license_plate_clause = reduce(
|
||||
operator.or_, recognized_license_plate_clauses
|
||||
)
|
||||
event_filters.append((recognized_license_plate_clause))
|
||||
|
||||
if after:
|
||||
event_filters.append((Event.start_time > after))
|
||||
|
||||
@ -625,6 +709,8 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
|
||||
"average_estimated_speed",
|
||||
"velocity_angle",
|
||||
"path_data",
|
||||
"recognized_license_plate",
|
||||
"recognized_license_plate_score",
|
||||
]
|
||||
}
|
||||
|
||||
@ -679,6 +765,7 @@ def events_summary(params: EventsSummaryQueryParams = Depends()):
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
fn.strftime(
|
||||
"%Y-%m-%d",
|
||||
fn.datetime(
|
||||
@ -693,6 +780,7 @@ def events_summary(params: EventsSummaryQueryParams = Depends()):
|
||||
Event.camera,
|
||||
Event.label,
|
||||
Event.sub_label,
|
||||
Event.data,
|
||||
(Event.start_time + seconds_offset).cast("int") / (3600 * 24),
|
||||
Event.zones,
|
||||
)
|
||||
@ -969,27 +1057,16 @@ def set_sub_label(
|
||||
try:
|
||||
event: Event = Event.get(Event.id == event_id)
|
||||
except DoesNotExist:
|
||||
if not body.camera:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": False,
|
||||
"message": "Event "
|
||||
+ event_id
|
||||
+ " not found and camera is not provided.",
|
||||
}
|
||||
),
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
event = None
|
||||
|
||||
if request.app.detected_frames_processor:
|
||||
tracked_obj: TrackedObject = (
|
||||
request.app.detected_frames_processor.camera_states[
|
||||
event.camera if event else body.camera
|
||||
].tracked_objects.get(event_id)
|
||||
)
|
||||
tracked_obj: TrackedObject = None
|
||||
|
||||
for state in request.app.detected_frames_processor.camera_states.values():
|
||||
tracked_obj = state.tracked_objects.get(event_id)
|
||||
|
||||
if tracked_obj is not None:
|
||||
break
|
||||
else:
|
||||
tracked_obj = None
|
||||
|
||||
@ -1008,23 +1085,9 @@ def set_sub_label(
|
||||
new_sub_label = None
|
||||
new_score = None
|
||||
|
||||
if tracked_obj:
|
||||
tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)
|
||||
|
||||
# update timeline items
|
||||
Timeline.update(
|
||||
data=Timeline.data.update({"sub_label": (new_sub_label, new_score)})
|
||||
).where(Timeline.source_id == event_id).execute()
|
||||
|
||||
if event:
|
||||
event.sub_label = new_sub_label
|
||||
data = event.data
|
||||
if new_sub_label is None:
|
||||
data["sub_label_score"] = None
|
||||
elif new_score is not None:
|
||||
data["sub_label_score"] = new_score
|
||||
event.data = data
|
||||
event.save()
|
||||
request.app.event_metadata_updater.publish(
|
||||
EventMetadataTypeEnum.sub_label, (event_id, new_sub_label, new_score)
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
content={
|
||||
@ -1105,7 +1168,9 @@ def regenerate_description(
|
||||
camera_config = request.app.frigate_config.cameras[event.camera]
|
||||
|
||||
if camera_config.genai.enabled:
|
||||
request.app.event_metadata_updater.publish((event.id, params.source))
|
||||
request.app.event_metadata_updater.publish(
|
||||
EventMetadataTypeEnum.regenerate_description, (event.id, params.source)
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
content=(
|
||||
@ -1224,28 +1289,25 @@ def create_event(
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
try:
|
||||
frame_processor: TrackedObjectProcessor = request.app.detected_frames_processor
|
||||
external_processor: ExternalEventProcessor = request.app.external_processor
|
||||
now = datetime.datetime.now().timestamp()
|
||||
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
|
||||
event_id = f"{now}-{rand_id}"
|
||||
|
||||
frame = frame_processor.get_current_frame(camera_name)
|
||||
event_id = external_processor.create_manual_event(
|
||||
request.app.event_metadata_updater.publish(
|
||||
EventMetadataTypeEnum.manual_event_create,
|
||||
(
|
||||
now,
|
||||
camera_name,
|
||||
label,
|
||||
body.source_type,
|
||||
body.sub_label,
|
||||
body.score,
|
||||
body.duration,
|
||||
event_id,
|
||||
body.include_recording,
|
||||
body.score,
|
||||
body.sub_label,
|
||||
body.duration,
|
||||
body.source_type,
|
||||
body.draw,
|
||||
frame,
|
||||
)
|
||||
except Exception as e:
|
||||
logger.error(e)
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": "An unknown error occurred"}),
|
||||
status_code=500,
|
||||
)
|
||||
),
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
content=(
|
||||
@ -1267,7 +1329,9 @@ def create_event(
|
||||
def end_event(request: Request, event_id: str, body: EventsEndBody):
|
||||
try:
|
||||
end_time = body.end_time or datetime.datetime.now().timestamp()
|
||||
request.app.external_processor.finish_manual_event(event_id, end_time)
|
||||
request.app.event_metadata_updater.publish(
|
||||
EventMetadataTypeEnum.manual_event_end, (event_id, end_time)
|
||||
)
|
||||
except Exception:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
|
@ -27,7 +27,6 @@ from frigate.comms.event_metadata_updater import (
|
||||
)
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.events.external import ExternalEventProcessor
|
||||
from frigate.ptz.onvif import OnvifController
|
||||
from frigate.stats.emitter import StatsEmitter
|
||||
from frigate.storage import StorageMaintainer
|
||||
@ -56,7 +55,6 @@ def create_fastapi_app(
|
||||
detected_frames_processor,
|
||||
storage_maintainer: StorageMaintainer,
|
||||
onvif: OnvifController,
|
||||
external_processor: ExternalEventProcessor,
|
||||
stats_emitter: StatsEmitter,
|
||||
event_metadata_updater: EventMetadataPublisher,
|
||||
):
|
||||
@ -129,7 +127,6 @@ def create_fastapi_app(
|
||||
app.onvif = onvif
|
||||
app.stats_emitter = stats_emitter
|
||||
app.event_metadata_updater = event_metadata_updater
|
||||
app.external_processor = external_processor
|
||||
app.jwt_token = get_jwt_secret() if frigate_config.auth.enabled else None
|
||||
|
||||
return app
|
||||
|
@ -37,7 +37,7 @@ from frigate.const import (
|
||||
RECORD_DIR,
|
||||
)
|
||||
from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
|
||||
from frigate.object_processing import TrackedObjectProcessor
|
||||
from frigate.track.object_processing import TrackedObjectProcessor
|
||||
from frigate.util.builtin import get_tz_modifiers
|
||||
from frigate.util.image import get_image_from_recording
|
||||
from frigate.util.path import get_event_thumbnail_bytes
|
||||
|
@ -20,10 +20,7 @@ from frigate.camera import CameraMetrics, PTZMetrics
|
||||
from frigate.comms.base_communicator import Communicator
|
||||
from frigate.comms.config_updater import ConfigPublisher
|
||||
from frigate.comms.dispatcher import Dispatcher
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
EventMetadataPublisher,
|
||||
EventMetadataTypeEnum,
|
||||
)
|
||||
from frigate.comms.event_metadata_updater import EventMetadataPublisher
|
||||
from frigate.comms.inter_process import InterProcessCommunicator
|
||||
from frigate.comms.mqtt import MqttClient
|
||||
from frigate.comms.webpush import WebPushClient
|
||||
@ -46,7 +43,6 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
|
||||
from frigate.embeddings import EmbeddingsContext, manage_embeddings
|
||||
from frigate.events.audio import AudioProcessor
|
||||
from frigate.events.cleanup import EventCleanup
|
||||
from frigate.events.external import ExternalEventProcessor
|
||||
from frigate.events.maintainer import EventProcessor
|
||||
from frigate.models import (
|
||||
Event,
|
||||
@ -60,7 +56,6 @@ from frigate.models import (
|
||||
User,
|
||||
)
|
||||
from frigate.object_detection import ObjectDetectProcess
|
||||
from frigate.object_processing import TrackedObjectProcessor
|
||||
from frigate.output.output import output_frames
|
||||
from frigate.ptz.autotrack import PtzAutoTrackerThread
|
||||
from frigate.ptz.onvif import OnvifController
|
||||
@ -72,6 +67,7 @@ from frigate.stats.emitter import StatsEmitter
|
||||
from frigate.stats.util import stats_init
|
||||
from frigate.storage import StorageMaintainer
|
||||
from frigate.timeline import TimelineProcessor
|
||||
from frigate.track.object_processing import TrackedObjectProcessor
|
||||
from frigate.util.builtin import empty_and_close_queue
|
||||
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
|
||||
from frigate.util.object import get_camera_regions_grid
|
||||
@ -321,15 +317,10 @@ class FrigateApp:
|
||||
# Create a client for other processes to use
|
||||
self.embeddings = EmbeddingsContext(self.db)
|
||||
|
||||
def init_external_event_processor(self) -> None:
|
||||
self.external_event_processor = ExternalEventProcessor(self.config)
|
||||
|
||||
def init_inter_process_communicator(self) -> None:
|
||||
self.inter_process_communicator = InterProcessCommunicator()
|
||||
self.inter_config_updater = ConfigPublisher()
|
||||
self.event_metadata_updater = EventMetadataPublisher(
|
||||
EventMetadataTypeEnum.regenerate_description
|
||||
)
|
||||
self.event_metadata_updater = EventMetadataPublisher()
|
||||
self.inter_zmq_proxy = ZmqProxy()
|
||||
|
||||
def init_onvif(self) -> None:
|
||||
@ -600,6 +591,7 @@ class FrigateApp:
|
||||
User.insert(
|
||||
{
|
||||
User.username: "admin",
|
||||
User.role: "admin",
|
||||
User.password_hash: password_hash,
|
||||
User.notification_tokens: [],
|
||||
}
|
||||
@ -661,7 +653,6 @@ class FrigateApp:
|
||||
self.start_camera_capture_processes()
|
||||
self.start_audio_processor()
|
||||
self.start_storage_maintainer()
|
||||
self.init_external_event_processor()
|
||||
self.start_stats_emitter()
|
||||
self.start_timeline_processor()
|
||||
self.start_event_processor()
|
||||
@ -680,7 +671,6 @@ class FrigateApp:
|
||||
self.detected_frames_processor,
|
||||
self.storage_maintainer,
|
||||
self.onvif_controller,
|
||||
self.external_event_processor,
|
||||
self.stats_emitter,
|
||||
self.event_metadata_updater,
|
||||
),
|
||||
@ -752,7 +742,6 @@ class FrigateApp:
|
||||
self.review_segment_process.terminate()
|
||||
self.review_segment_process.join()
|
||||
|
||||
self.external_event_processor.stop()
|
||||
self.dispatcher.stop()
|
||||
self.ptz_autotracker_thread.join()
|
||||
|
||||
|
@ -1,29 +1,20 @@
|
||||
"""Maintains state of camera."""
|
||||
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import queue
|
||||
import os
|
||||
import threading
|
||||
from collections import defaultdict
|
||||
from multiprocessing.synchronize import Event as MpEvent
|
||||
from typing import Callable, Optional
|
||||
from typing import Callable
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
from frigate.comms.config_updater import ConfigSubscriber
|
||||
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
|
||||
from frigate.comms.dispatcher import Dispatcher
|
||||
from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import (
|
||||
CameraMqttConfig,
|
||||
FrigateConfig,
|
||||
RecordConfig,
|
||||
SnapshotsConfig,
|
||||
ZoomingModeEnum,
|
||||
)
|
||||
from frigate.const import UPDATE_CAMERA_ACTIVITY
|
||||
from frigate.events.types import EventStateEnum, EventTypeEnum
|
||||
from frigate.const import CLIPS_DIR, THUMB_DIR
|
||||
from frigate.ptz.autotrack import PtzAutoTrackerThread
|
||||
from frigate.track.tracked_object import TrackedObject
|
||||
from frigate.util.image import (
|
||||
@ -37,7 +28,6 @@ from frigate.util.image import (
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Maintains the state of a camera
|
||||
class CameraState:
|
||||
def __init__(
|
||||
self,
|
||||
@ -147,12 +137,16 @@ class CameraState:
|
||||
# draw the bounding boxes on the frame
|
||||
box = obj["box"]
|
||||
text = (
|
||||
obj["label"]
|
||||
obj["sub_label"][0]
|
||||
if (
|
||||
not obj.get("sub_label")
|
||||
or not is_label_printable(obj["sub_label"][0])
|
||||
obj.get("sub_label") and is_label_printable(obj["sub_label"][0])
|
||||
)
|
||||
else obj["sub_label"][0]
|
||||
else obj.get("recognized_license_plate", [None])[0]
|
||||
if (
|
||||
obj.get("recognized_license_plate")
|
||||
and obj["recognized_license_plate"][0]
|
||||
)
|
||||
else obj["label"]
|
||||
)
|
||||
draw_box_with_label(
|
||||
frame_copy,
|
||||
@ -415,390 +409,60 @@ class CameraState:
|
||||
|
||||
self.previous_frame_id = frame_name
|
||||
|
||||
def save_manual_event_image(
|
||||
self, event_id: str, label: str, draw: dict[str, list[dict]]
|
||||
) -> None:
|
||||
img_frame = self.get_current_frame()
|
||||
|
||||
# write clean snapshot if enabled
|
||||
if self.camera_config.snapshots.clean_copy:
|
||||
ret, png = cv2.imencode(".png", img_frame)
|
||||
|
||||
if ret:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR,
|
||||
f"{self.camera_config.name}-{event_id}-clean.png",
|
||||
),
|
||||
"wb",
|
||||
) as p:
|
||||
p.write(png.tobytes())
|
||||
|
||||
# write jpg snapshot with optional annotations
|
||||
if draw.get("boxes") and isinstance(draw.get("boxes"), list):
|
||||
for box in draw.get("boxes"):
|
||||
x = int(box["box"][0] * self.camera_config.detect.width)
|
||||
y = int(box["box"][1] * self.camera_config.detect.height)
|
||||
width = int(box["box"][2] * self.camera_config.detect.width)
|
||||
height = int(box["box"][3] * self.camera_config.detect.height)
|
||||
|
||||
draw_box_with_label(
|
||||
img_frame,
|
||||
x,
|
||||
y,
|
||||
x + width,
|
||||
y + height,
|
||||
label,
|
||||
f"{box.get('score', '-')}% {int(width * height)}",
|
||||
thickness=2,
|
||||
color=box.get("color", (255, 0, 0)),
|
||||
)
|
||||
|
||||
ret, jpg = cv2.imencode(".jpg", img_frame)
|
||||
with open(
|
||||
os.path.join(CLIPS_DIR, f"{self.camera_config.name}-{event_id}.jpg"),
|
||||
"wb",
|
||||
) as j:
|
||||
j.write(jpg.tobytes())
|
||||
|
||||
# create thumbnail with max height of 175 and save
|
||||
width = int(175 * img_frame.shape[1] / img_frame.shape[0])
|
||||
thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA)
|
||||
cv2.imwrite(
|
||||
os.path.join(THUMB_DIR, self.camera_config.name, f"{event_id}.webp"), thumb
|
||||
)
|
||||
|
||||
def shutdown(self) -> None:
|
||||
for obj in self.tracked_objects.values():
|
||||
if not obj.obj_data.get("end_time"):
|
||||
obj.write_thumbnail_to_disk()
|
||||
|
||||
|
||||
class TrackedObjectProcessor(threading.Thread):
|
||||
def __init__(
|
||||
self,
|
||||
config: FrigateConfig,
|
||||
dispatcher: Dispatcher,
|
||||
tracked_objects_queue,
|
||||
ptz_autotracker_thread,
|
||||
stop_event,
|
||||
):
|
||||
super().__init__(name="detected_frames_processor")
|
||||
self.config = config
|
||||
self.dispatcher = dispatcher
|
||||
self.tracked_objects_queue = tracked_objects_queue
|
||||
self.stop_event: MpEvent = stop_event
|
||||
self.camera_states: dict[str, CameraState] = {}
|
||||
self.frame_manager = SharedMemoryFrameManager()
|
||||
self.last_motion_detected: dict[str, float] = {}
|
||||
self.ptz_autotracker_thread = ptz_autotracker_thread
|
||||
|
||||
self.config_enabled_subscriber = ConfigSubscriber("config/enabled/")
|
||||
|
||||
self.requestor = InterProcessRequestor()
|
||||
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.video)
|
||||
self.event_sender = EventUpdatePublisher()
|
||||
self.event_end_subscriber = EventEndSubscriber()
|
||||
|
||||
self.camera_activity: dict[str, dict[str, any]] = {}
|
||||
|
||||
# {
|
||||
# 'zone_name': {
|
||||
# 'person': {
|
||||
# 'camera_1': 2,
|
||||
# 'camera_2': 1
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
self.zone_data = defaultdict(lambda: defaultdict(dict))
|
||||
self.active_zone_data = defaultdict(lambda: defaultdict(dict))
|
||||
|
||||
def start(camera: str, obj: TrackedObject, frame_name: str):
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.tracked_object,
|
||||
EventStateEnum.start,
|
||||
camera,
|
||||
frame_name,
|
||||
obj.to_dict(),
|
||||
)
|
||||
)
|
||||
|
||||
def update(camera: str, obj: TrackedObject, frame_name: str):
|
||||
obj.has_snapshot = self.should_save_snapshot(camera, obj)
|
||||
obj.has_clip = self.should_retain_recording(camera, obj)
|
||||
after = obj.to_dict()
|
||||
message = {
|
||||
"before": obj.previous,
|
||||
"after": after,
|
||||
"type": "new" if obj.previous["false_positive"] else "update",
|
||||
}
|
||||
self.dispatcher.publish("events", json.dumps(message), retain=False)
|
||||
obj.previous = after
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.tracked_object,
|
||||
EventStateEnum.update,
|
||||
camera,
|
||||
frame_name,
|
||||
obj.to_dict(),
|
||||
)
|
||||
)
|
||||
|
||||
def autotrack(camera: str, obj: TrackedObject, frame_name: str):
|
||||
self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)
|
||||
|
||||
def end(camera: str, obj: TrackedObject, frame_name: str):
|
||||
# populate has_snapshot
|
||||
obj.has_snapshot = self.should_save_snapshot(camera, obj)
|
||||
obj.has_clip = self.should_retain_recording(camera, obj)
|
||||
|
||||
# write thumbnail to disk if it will be saved as an event
|
||||
if obj.has_snapshot or obj.has_clip:
|
||||
obj.write_thumbnail_to_disk()
|
||||
|
||||
# write the snapshot to disk
|
||||
if obj.has_snapshot:
|
||||
obj.write_snapshot_to_disk()
|
||||
|
||||
if not obj.false_positive:
|
||||
message = {
|
||||
"before": obj.previous,
|
||||
"after": obj.to_dict(),
|
||||
"type": "end",
|
||||
}
|
||||
self.dispatcher.publish("events", json.dumps(message), retain=False)
|
||||
self.ptz_autotracker_thread.ptz_autotracker.end_object(camera, obj)
|
||||
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.tracked_object,
|
||||
EventStateEnum.end,
|
||||
camera,
|
||||
frame_name,
|
||||
obj.to_dict(),
|
||||
)
|
||||
)
|
||||
|
||||
def snapshot(camera, obj: TrackedObject, frame_name: str):
|
||||
mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt
|
||||
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
|
||||
jpg_bytes = obj.get_img_bytes(
|
||||
ext="jpg",
|
||||
timestamp=mqtt_config.timestamp,
|
||||
bounding_box=mqtt_config.bounding_box,
|
||||
crop=mqtt_config.crop,
|
||||
height=mqtt_config.height,
|
||||
quality=mqtt_config.quality,
|
||||
)
|
||||
|
||||
if jpg_bytes is None:
|
||||
logger.warning(
|
||||
f"Unable to send mqtt snapshot for {obj.obj_data['id']}."
|
||||
)
|
||||
else:
|
||||
self.dispatcher.publish(
|
||||
f"{camera}/{obj.obj_data['label']}/snapshot",
|
||||
jpg_bytes,
|
||||
retain=True,
|
||||
)
|
||||
|
||||
def camera_activity(camera, activity):
|
||||
last_activity = self.camera_activity.get(camera)
|
||||
|
||||
if not last_activity or activity != last_activity:
|
||||
self.camera_activity[camera] = activity
|
||||
self.requestor.send_data(UPDATE_CAMERA_ACTIVITY, self.camera_activity)
|
||||
|
||||
for camera in self.config.cameras.keys():
|
||||
camera_state = CameraState(
|
||||
camera, self.config, self.frame_manager, self.ptz_autotracker_thread
|
||||
)
|
||||
camera_state.on("start", start)
|
||||
camera_state.on("autotrack", autotrack)
|
||||
camera_state.on("update", update)
|
||||
camera_state.on("end", end)
|
||||
camera_state.on("snapshot", snapshot)
|
||||
camera_state.on("camera_activity", camera_activity)
|
||||
self.camera_states[camera] = camera_state
|
||||
|
||||
def should_save_snapshot(self, camera, obj: TrackedObject):
|
||||
if obj.false_positive:
|
||||
return False
|
||||
|
||||
snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots
|
||||
|
||||
if not snapshot_config.enabled:
|
||||
return False
|
||||
|
||||
# object never changed position
|
||||
if obj.obj_data["position_changes"] == 0:
|
||||
return False
|
||||
|
||||
# if there are required zones and there is no overlap
|
||||
required_zones = snapshot_config.required_zones
|
||||
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
|
||||
logger.debug(
|
||||
f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones"
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def should_retain_recording(self, camera: str, obj: TrackedObject):
|
||||
if obj.false_positive:
|
||||
return False
|
||||
|
||||
record_config: RecordConfig = self.config.cameras[camera].record
|
||||
|
||||
# Recording is disabled
|
||||
if not record_config.enabled:
|
||||
return False
|
||||
|
||||
# object never changed position
|
||||
if obj.obj_data["position_changes"] == 0:
|
||||
return False
|
||||
|
||||
# If the object is not considered an alert or detection
|
||||
if obj.max_severity is None:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def should_mqtt_snapshot(self, camera, obj: TrackedObject):
|
||||
# object never changed position
|
||||
if obj.obj_data["position_changes"] == 0:
|
||||
return False
|
||||
|
||||
# if there are required zones and there is no overlap
|
||||
required_zones = self.config.cameras[camera].mqtt.required_zones
|
||||
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
|
||||
logger.debug(
|
||||
f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones"
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def update_mqtt_motion(self, camera, frame_time, motion_boxes):
|
||||
# publish if motion is currently being detected
|
||||
if motion_boxes:
|
||||
# only send ON if motion isn't already active
|
||||
if self.last_motion_detected.get(camera, 0) == 0:
|
||||
self.dispatcher.publish(
|
||||
f"{camera}/motion",
|
||||
"ON",
|
||||
retain=False,
|
||||
)
|
||||
|
||||
# always update latest motion
|
||||
self.last_motion_detected[camera] = frame_time
|
||||
elif self.last_motion_detected.get(camera, 0) > 0:
|
||||
mqtt_delay = self.config.cameras[camera].motion.mqtt_off_delay
|
||||
|
||||
# If no motion, make sure the off_delay has passed
|
||||
if frame_time - self.last_motion_detected.get(camera, 0) >= mqtt_delay:
|
||||
self.dispatcher.publish(
|
||||
f"{camera}/motion",
|
||||
"OFF",
|
||||
retain=False,
|
||||
)
|
||||
# reset the last_motion so redundant `off` commands aren't sent
|
||||
self.last_motion_detected[camera] = 0
|
||||
|
||||
def get_best(self, camera, label):
|
||||
# TODO: need a lock here
|
||||
camera_state = self.camera_states[camera]
|
||||
if label in camera_state.best_objects:
|
||||
best_obj = camera_state.best_objects[label]
|
||||
best = best_obj.thumbnail_data.copy()
|
||||
best["frame"] = camera_state.frame_cache.get(
|
||||
best_obj.thumbnail_data["frame_time"]
|
||||
)
|
||||
return best
|
||||
else:
|
||||
return {}
|
||||
|
||||
def get_current_frame(
|
||||
self, camera: str, draw_options: dict[str, any] = {}
|
||||
) -> Optional[np.ndarray]:
|
||||
if camera == "birdseye":
|
||||
return self.frame_manager.get(
|
||||
"birdseye",
|
||||
(self.config.birdseye.height * 3 // 2, self.config.birdseye.width),
|
||||
)
|
||||
|
||||
if camera not in self.camera_states:
|
||||
return None
|
||||
|
||||
return self.camera_states[camera].get_current_frame(draw_options)
|
||||
|
||||
def get_current_frame_time(self, camera) -> int:
|
||||
"""Returns the latest frame time for a given camera."""
|
||||
return self.camera_states[camera].current_frame_time
|
||||
|
||||
def force_end_all_events(self, camera: str, camera_state: CameraState):
|
||||
"""Ends all active events on camera when disabling."""
|
||||
last_frame_name = camera_state.previous_frame_id
|
||||
for obj_id, obj in list(camera_state.tracked_objects.items()):
|
||||
if "end_time" not in obj.obj_data:
|
||||
logger.debug(f"Camera {camera} disabled, ending active event {obj_id}")
|
||||
obj.obj_data["end_time"] = datetime.datetime.now().timestamp()
|
||||
# end callbacks
|
||||
for callback in camera_state.callbacks["end"]:
|
||||
callback(camera, obj, last_frame_name)
|
||||
|
||||
# camera activity callbacks
|
||||
for callback in camera_state.callbacks["camera_activity"]:
|
||||
callback(
|
||||
camera,
|
||||
{"enabled": False, "motion": 0, "objects": []},
|
||||
)
|
||||
|
||||
def run(self):
|
||||
while not self.stop_event.is_set():
|
||||
# check for config updates
|
||||
while True:
|
||||
(
|
||||
updated_enabled_topic,
|
||||
updated_enabled_config,
|
||||
) = self.config_enabled_subscriber.check_for_update()
|
||||
|
||||
if not updated_enabled_topic:
|
||||
break
|
||||
|
||||
camera_name = updated_enabled_topic.rpartition("/")[-1]
|
||||
self.config.cameras[
|
||||
camera_name
|
||||
].enabled = updated_enabled_config.enabled
|
||||
|
||||
if self.camera_states[camera_name].prev_enabled is None:
|
||||
self.camera_states[
|
||||
camera_name
|
||||
].prev_enabled = updated_enabled_config.enabled
|
||||
|
||||
# manage camera disabled state
|
||||
for camera, config in self.config.cameras.items():
|
||||
if not config.enabled_in_config:
|
||||
continue
|
||||
|
||||
current_enabled = config.enabled
|
||||
camera_state = self.camera_states[camera]
|
||||
|
||||
if camera_state.prev_enabled and not current_enabled:
|
||||
logger.debug(f"Not processing objects for disabled camera {camera}")
|
||||
self.force_end_all_events(camera, camera_state)
|
||||
|
||||
camera_state.prev_enabled = current_enabled
|
||||
|
||||
if not current_enabled:
|
||||
continue
|
||||
|
||||
try:
|
||||
(
|
||||
camera,
|
||||
frame_name,
|
||||
frame_time,
|
||||
current_tracked_objects,
|
||||
motion_boxes,
|
||||
regions,
|
||||
) = self.tracked_objects_queue.get(True, 1)
|
||||
except queue.Empty:
|
||||
continue
|
||||
|
||||
if not self.config.cameras[camera].enabled:
|
||||
logger.debug(f"Camera {camera} disabled, skipping update")
|
||||
continue
|
||||
|
||||
camera_state = self.camera_states[camera]
|
||||
|
||||
camera_state.update(
|
||||
frame_name, frame_time, current_tracked_objects, motion_boxes, regions
|
||||
)
|
||||
|
||||
self.update_mqtt_motion(camera, frame_time, motion_boxes)
|
||||
|
||||
tracked_objects = [
|
||||
o.to_dict() for o in camera_state.tracked_objects.values()
|
||||
]
|
||||
|
||||
# publish info on this frame
|
||||
self.detection_publisher.publish(
|
||||
(
|
||||
camera,
|
||||
frame_name,
|
||||
frame_time,
|
||||
tracked_objects,
|
||||
motion_boxes,
|
||||
regions,
|
||||
)
|
||||
)
|
||||
|
||||
# cleanup event finished queue
|
||||
while not self.stop_event.is_set():
|
||||
update = self.event_end_subscriber.check_for_update(timeout=0.01)
|
||||
|
||||
if not update:
|
||||
break
|
||||
|
||||
event_id, camera, _ = update
|
||||
self.camera_states[camera].finished(event_id)
|
||||
|
||||
# shut down camera states
|
||||
for state in self.camera_states.values():
|
||||
state.shutdown()
|
||||
|
||||
self.requestor.stop()
|
||||
self.detection_publisher.stop()
|
||||
self.event_sender.stop()
|
||||
self.event_end_subscriber.stop()
|
||||
self.config_enabled_subscriber.stop()
|
||||
|
||||
logger.info("Exiting object processor...")
|
@ -2,9 +2,6 @@
|
||||
|
||||
import logging
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
from frigate.events.types import RegenerateDescriptionEnum
|
||||
|
||||
from .zmq_proxy import Publisher, Subscriber
|
||||
|
||||
@ -13,7 +10,11 @@ logger = logging.getLogger(__name__)


class EventMetadataTypeEnum(str, Enum):
    all = ""
    manual_event_create = "manual_event_create"
    manual_event_end = "manual_event_end"
    regenerate_description = "regenerate_description"
    sub_label = "sub_label"
    recognized_license_plate = "recognized_license_plate"


class EventMetadataPublisher(Publisher):
@ -21,12 +22,11 @@ class EventMetadataPublisher(Publisher):

    topic_base = "event_metadata/"

    def __init__(self, topic: EventMetadataTypeEnum) -> None:
        topic = topic.value
        super().__init__(topic)
    def __init__(self) -> None:
        super().__init__()

    def publish(self, payload: tuple[str, RegenerateDescriptionEnum]) -> None:
        super().publish(payload)
    def publish(self, topic: EventMetadataTypeEnum, payload: any) -> None:
        super().publish(payload, topic.value)


class EventMetadataSubscriber(Subscriber):
@ -35,17 +35,14 @@ class EventMetadataSubscriber(Subscriber):
    topic_base = "event_metadata/"

    def __init__(self, topic: EventMetadataTypeEnum) -> None:
        topic = topic.value
        super().__init__(topic)
        super().__init__(topic.value)

    def check_for_update(
        self, timeout: float = 1
    ) -> Optional[tuple[EventMetadataTypeEnum, str, RegenerateDescriptionEnum]]:
    def check_for_update(self, timeout: float = 1) -> tuple | None:
        return super().check_for_update(timeout)

    def _return_object(self, topic: str, payload: any) -> any:
    def _return_object(self, topic: str, payload: tuple) -> tuple:
        if payload is None:
            return (None, None, None)
            return (None, None)

        topic = EventMetadataTypeEnum[topic[len(self.topic_base) :]]
        event_id, source = payload
        return (topic, event_id, RegenerateDescriptionEnum(source))
        return (topic, payload)
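With this change the topic travels with each publish call instead of being fixed at construction. A minimal usage sketch of the reworked API (illustrative; the event id, sub label, and score are made up, and a running ZMQ proxy is assumed):

publisher = EventMetadataPublisher()
publisher.publish(
    EventMetadataTypeEnum.sub_label, ("1718000000.123456-abcdef", "Bob", 0.87)
)

subscriber = EventMetadataSubscriber(EventMetadataTypeEnum.sub_label)
topic, payload = subscriber.check_for_update(timeout=1)
if topic is not None:
    event_id, sub_label, score = payload
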
@ -21,7 +21,7 @@ __all__ = [
|
||||
FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning", "-threads", "2"]
|
||||
FFMPEG_INPUT_ARGS_DEFAULT = "preset-rtsp-generic"
|
||||
|
||||
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic"
|
||||
RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = "preset-record-generic-audio-aac"
|
||||
DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = [
|
||||
"-threads",
|
||||
"2",
|
||||
|
@ -67,6 +67,9 @@ class FaceRecognitionConfig(FrigateBaseModel):
|
||||
save_attempts: bool = Field(
|
||||
default=True, title="Save images of face detections for training."
|
||||
)
|
||||
blur_confidence_filter: bool = Field(
|
||||
default=True, title="Apply blur quality filter to face confidence."
|
||||
)
|
||||
|
||||
|
||||
class LicensePlateRecognitionConfig(FrigateBaseModel):
|
||||
|
@ -37,3 +37,5 @@ class LoggerConfig(FrigateBaseModel):
|
||||
|
||||
for log, level in log_levels.items():
|
||||
logging.getLogger(log).setLevel(level.value.upper())
|
||||
|
||||
return self
|
||||
|
@ -8,12 +8,11 @@ from typing import List, Optional, Tuple
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import requests
|
||||
from Levenshtein import distance
|
||||
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
|
||||
from shapely.geometry import Polygon
|
||||
|
||||
from frigate.const import FRIGATE_LOCALHOST
|
||||
from frigate.comms.event_metadata_updater import EventMetadataTypeEnum
|
||||
from frigate.util.image import area
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@ -34,10 +33,10 @@ class LicensePlateProcessingMixin:
|
||||
self.batch_size = 6
|
||||
|
||||
# Detection specific parameters
|
||||
self.min_size = 3
|
||||
self.min_size = 8
|
||||
self.max_size = 960
|
||||
self.box_thresh = 0.8
|
||||
self.mask_thresh = 0.8
|
||||
self.box_thresh = 0.6
|
||||
self.mask_thresh = 0.6
|
||||
|
||||
def _detect(self, image: np.ndarray) -> List[np.ndarray]:
|
||||
"""
|
||||
@ -158,47 +157,40 @@ class LicensePlateProcessingMixin:
|
||||
logger.debug("Model runners not loaded")
|
||||
return [], [], []
|
||||
|
||||
plate_points = self._detect(image)
|
||||
if len(plate_points) == 0:
|
||||
logger.debug("No points found by OCR detector model")
|
||||
boxes = self._detect(image)
|
||||
if len(boxes) == 0:
|
||||
logger.debug("No boxes found by OCR detector model")
|
||||
return [], [], []
|
||||
|
||||
plate_points = self._sort_polygon(list(plate_points))
|
||||
plate_images = [self._crop_license_plate(image, x) for x in plate_points]
|
||||
rotated_images, _ = self._classify(plate_images)
|
||||
boxes = self._sort_boxes(list(boxes))
|
||||
plate_images = [self._crop_license_plate(image, x) for x in boxes]
|
||||
|
||||
# debug rotated and classification result
|
||||
if WRITE_DEBUG_IMAGES:
|
||||
current_time = int(datetime.datetime.now().timestamp())
|
||||
for i, img in enumerate(plate_images):
|
||||
cv2.imwrite(
|
||||
f"debug/frames/license_plate_rotated_{current_time}_{i + 1}.jpg",
|
||||
img,
|
||||
)
|
||||
for i, img in enumerate(rotated_images):
|
||||
cv2.imwrite(
|
||||
f"debug/frames/license_plate_classified_{current_time}_{i + 1}.jpg",
|
||||
f"debug/frames/license_plate_cropped_{current_time}_{i + 1}.jpg",
|
||||
img,
|
||||
)
|
||||
|
||||
# keep track of the index of each image for correct area calc later
|
||||
sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in rotated_images])
|
||||
sorted_indices = np.argsort([x.shape[1] / x.shape[0] for x in plate_images])
|
||||
reverse_mapping = {
|
||||
idx: original_idx for original_idx, idx in enumerate(sorted_indices)
|
||||
}
|
||||
|
||||
results, confidences = self._recognize(rotated_images)
|
||||
results, confidences = self._recognize(plate_images)
|
||||
|
||||
if results:
|
||||
license_plates = [""] * len(rotated_images)
|
||||
average_confidences = [[0.0]] * len(rotated_images)
|
||||
areas = [0] * len(rotated_images)
|
||||
license_plates = [""] * len(plate_images)
|
||||
average_confidences = [[0.0]] * len(plate_images)
|
||||
areas = [0] * len(plate_images)
|
||||
|
||||
# map results back to original image order
|
||||
for i, (plate, conf) in enumerate(zip(results, confidences)):
|
||||
original_idx = reverse_mapping[i]
|
||||
|
||||
height, width = rotated_images[original_idx].shape[:2]
|
||||
height, width = plate_images[original_idx].shape[:2]
|
||||
area = height * width
|
||||
|
||||
average_confidence = conf
|
||||
@ -206,7 +198,7 @@ class LicensePlateProcessingMixin:
|
||||
# set to True to write each cropped image for debugging
|
||||
if False:
|
||||
save_image = cv2.cvtColor(
|
||||
rotated_images[original_idx], cv2.COLOR_RGB2BGR
|
||||
plate_images[original_idx], cv2.COLOR_RGB2BGR
|
||||
)
|
||||
filename = f"debug/frames/plate_{original_idx}_{plate}_{area}.jpg"
|
||||
cv2.imwrite(filename, save_image)
|
||||
@ -328,7 +320,7 @@ class LicensePlateProcessingMixin:
|
||||
# Use pyclipper to shrink the polygon slightly based on the computed distance.
|
||||
offset = PyclipperOffset()
|
||||
offset.AddPath(points, JT_ROUND, ET_CLOSEDPOLYGON)
|
||||
points = np.array(offset.Execute(distance * 1.5)).reshape((-1, 1, 2))
|
||||
points = np.array(offset.Execute(distance * 1.75)).reshape((-1, 1, 2))
|
||||
|
||||
# get the minimum bounding box around the shrunken polygon.
|
||||
box, min_side = self._get_min_boxes(points)
|
||||
@ -453,46 +445,64 @@ class LicensePlateProcessingMixin:
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _clockwise_order(point: np.ndarray) -> np.ndarray:
|
||||
def _clockwise_order(pts: np.ndarray) -> np.ndarray:
|
||||
"""
|
||||
Arrange the points of a polygon in clockwise order based on their angular positions
|
||||
around the polygon's center.
|
||||
Arrange the points of a polygon in order: top-left, top-right, bottom-right, bottom-left.
|
||||
taken from https://github.com/PyImageSearch/imutils/blob/master/imutils/perspective.py
|
||||
|
||||
Args:
|
||||
point (np.ndarray): Array of points of the polygon.
|
||||
pts (np.ndarray): Array of points of the polygon.
|
||||
|
||||
Returns:
|
||||
np.ndarray: Points ordered in clockwise direction.
|
||||
np.ndarray: Points ordered clockwise starting from top-left.
|
||||
"""
|
||||
center = point.mean(axis=0)
|
||||
return point[
|
||||
np.argsort(np.arctan2(point[:, 1] - center[1], point[:, 0] - center[0]))
|
||||
]
|
||||
# Sort the points based on their x-coordinates
|
||||
x_sorted = pts[np.argsort(pts[:, 0]), :]
|
||||
|
||||
# Separate the left-most and right-most points
|
||||
left_most = x_sorted[:2, :]
|
||||
right_most = x_sorted[2:, :]
|
||||
|
||||
# Sort the left-most coordinates by y-coordinates
|
||||
left_most = left_most[np.argsort(left_most[:, 1]), :]
|
||||
(tl, bl) = left_most # Top-left and bottom-left
|
||||
|
||||
# Use the top-left as an anchor to calculate distances to right points
|
||||
# The further point will be the bottom-right
|
||||
distances = np.sqrt(
|
||||
((tl[0] - right_most[:, 0]) ** 2) + ((tl[1] - right_most[:, 1]) ** 2)
|
||||
)
|
||||
|
||||
# Sort right points by distance (descending)
|
||||
right_idx = np.argsort(distances)[::-1]
|
||||
(br, tr) = right_most[right_idx, :] # Bottom-right and top-right
|
||||
|
||||
return np.array([tl, tr, br, bl])
|
||||
|
||||
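A quick check of the reordered corner logic above on a made-up quadrilateral (illustrative; assumes LicensePlateProcessingMixin is importable as referenced elsewhere in this change):

import numpy as np

pts = np.array([[10, 80], [10, 10], [90, 10], [90, 80]])  # scrambled corners
ordered = LicensePlateProcessingMixin._clockwise_order(pts)
# top-left, top-right, bottom-right, bottom-left
assert ordered.tolist() == [[10, 10], [90, 10], [90, 80], [10, 80]]
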
@staticmethod
|
||||
def _sort_polygon(points):
|
||||
def _sort_boxes(boxes):
|
||||
"""
|
||||
Sort polygons based on their position in the image. If polygons are close in vertical
|
||||
Sort polygons based on their position in the image. If boxes are close in vertical
|
||||
position (within 5 pixels), sort them by horizontal position.
|
||||
|
||||
Args:
|
||||
points: List of polygons to sort.
|
||||
points: detected text boxes with shape [4, 2]
|
||||
|
||||
Returns:
|
||||
List: Sorted list of polygons.
|
||||
List: sorted boxes(array) with shape [4, 2]
|
||||
"""
|
||||
points.sort(key=lambda x: (x[0][1], x[0][0]))
|
||||
for i in range(len(points) - 1):
|
||||
boxes.sort(key=lambda x: (x[0][1], x[0][0]))
|
||||
for i in range(len(boxes) - 1):
|
||||
for j in range(i, -1, -1):
|
||||
if abs(points[j + 1][0][1] - points[j][0][1]) < 5 and (
|
||||
points[j + 1][0][0] < points[j][0][0]
|
||||
if abs(boxes[j + 1][0][1] - boxes[j][0][1]) < 5 and (
|
||||
boxes[j + 1][0][0] < boxes[j][0][0]
|
||||
):
|
||||
temp = points[j]
|
||||
points[j] = points[j + 1]
|
||||
points[j + 1] = temp
|
||||
temp = boxes[j]
|
||||
boxes[j] = boxes[j + 1]
|
||||
boxes[j + 1] = temp
|
||||
else:
|
||||
break
|
||||
return points
|
||||
return boxes
|
||||
|
||||
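A small illustration of the box ordering above (made-up coordinates; assumes the mixin's staticmethod is callable directly): boxes on roughly the same row are ordered left to right, and lower rows come last.

import numpy as np

a = np.array([[50, 12], [90, 12], [90, 30], [50, 30]])  # right box, top row
b = np.array([[10, 10], [45, 10], [45, 28], [10, 28]])  # left box, top row
c = np.array([[10, 60], [60, 60], [60, 80], [10, 80]])  # bottom row

ordered = LicensePlateProcessingMixin._sort_boxes([a, c, b])
assert [box[0].tolist() for box in ordered] == [[10, 10], [50, 12], [10, 60]]
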
@staticmethod
|
||||
def _zero_pad(image: np.ndarray) -> np.ndarray:
|
||||
@ -583,9 +593,11 @@ class LicensePlateProcessingMixin:
|
||||
for j in range(len(outputs)):
|
||||
label, score = outputs[j]
|
||||
results[indices[i + j]] = [label, score]
|
||||
# make sure we have high confidence if we need to flip a box, this will be rare in lpr
|
||||
if "180" in label and score >= 0.9:
|
||||
images[indices[i + j]] = cv2.rotate(images[indices[i + j]], 1)
|
||||
# make sure we have high confidence if we need to flip a box
|
||||
if "180" in label and score >= 0.7:
|
||||
images[indices[i + j]] = cv2.rotate(
|
||||
images[indices[i + j]], cv2.ROTATE_180
|
||||
)
|
||||
|
||||
return images, results
|
||||
|
||||
@ -682,7 +694,7 @@ class LicensePlateProcessingMixin:
|
||||
)
|
||||
height, width = image.shape[0:2]
|
||||
if height * 1.0 / width >= 1.5:
|
||||
image = np.rot90(image, k=3)
|
||||
image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
|
||||
return image
|
||||
|
||||
def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
|
||||
@ -942,9 +954,23 @@ class LicensePlateProcessingMixin:
|
||||
return
|
||||
|
||||
license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
|
||||
|
||||
# Expand the license_plate_box by 30%
|
||||
box_array = np.array(license_plate_box)
|
||||
expansion = (box_array[2:] - box_array[:2]) * 0.30
|
||||
expanded_box = np.array(
|
||||
[
|
||||
license_plate_box[0] - expansion[0],
|
||||
license_plate_box[1] - expansion[1],
|
||||
license_plate_box[2] + expansion[0],
|
||||
license_plate_box[3] + expansion[1],
|
||||
]
|
||||
).clip(0, [license_plate_frame.shape[1], license_plate_frame.shape[0]] * 2)
|
||||
|
||||
# Crop using the expanded box
|
||||
license_plate_frame = license_plate_frame[
|
||||
license_plate_box[1] : license_plate_box[3],
|
||||
license_plate_box[0] : license_plate_box[2],
|
||||
int(expanded_box[1]) : int(expanded_box[3]),
|
||||
int(expanded_box[0]) : int(expanded_box[2]),
|
||||
]
|
||||
|
||||
# double the size of the license plate frame for better OCR
|
||||
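To make the 30% expansion above concrete, a short worked example with made-up box and frame sizes (not taken from the change itself):

import numpy as np

# a 200x80 plate box on a 1920x1080 frame
license_plate_box = (100, 200, 300, 280)
frame_w, frame_h = 1920, 1080

box_array = np.array(license_plate_box)
expansion = (box_array[2:] - box_array[:2]) * 0.30  # 30% of width/height: (60, 24)
expanded_box = np.array(
    [
        license_plate_box[0] - expansion[0],
        license_plate_box[1] - expansion[1],
        license_plate_box[2] + expansion[0],
        license_plate_box[3] + expansion[1],
    ]
).clip(0, [frame_w, frame_h] * 2)
assert expanded_box.tolist() == [40.0, 176.0, 360.0, 304.0]
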
@ -1028,26 +1054,26 @@ class LicensePlateProcessingMixin:
|
||||
for plate in plates
|
||||
)
|
||||
),
|
||||
top_plate,
|
||||
None,
|
||||
)
|
||||
|
||||
# Send the result to the API
|
||||
resp = requests.post(
|
||||
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
|
||||
json={
|
||||
"camera": obj_data.get("camera"),
|
||||
"subLabel": sub_label,
|
||||
"subLabelScore": avg_confidence,
|
||||
},
|
||||
# If it's a known plate, publish to sub_label
|
||||
if sub_label is not None:
|
||||
self.sub_label_publisher.publish(
|
||||
EventMetadataTypeEnum.sub_label, (id, sub_label, avg_confidence)
|
||||
)
|
||||
|
||||
self.sub_label_publisher.publish(
|
||||
EventMetadataTypeEnum.recognized_license_plate,
|
||||
(id, top_plate, avg_confidence),
|
||||
)
|
||||
|
||||
if resp.status_code == 200:
|
||||
self.detected_license_plates[id] = {
|
||||
"plate": top_plate,
|
||||
"char_confidences": top_char_confidences,
|
||||
"area": top_area,
|
||||
"obj_data": obj_data,
|
||||
}
|
||||
self.detected_license_plates[id] = {
|
||||
"plate": top_plate,
|
||||
"char_confidences": top_char_confidences,
|
||||
"area": top_area,
|
||||
"obj_data": obj_data,
|
||||
}
|
||||
|
||||
def handle_request(self, topic, request_data) -> dict[str, any] | None:
|
||||
return
|
||||
|
@ -8,6 +8,7 @@ import numpy as np
|
||||
from peewee import DoesNotExist
|
||||
|
||||
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
|
||||
from frigate.comms.event_metadata_updater import EventMetadataPublisher
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.data_processing.common.license_plate.mixin import (
|
||||
WRITE_DEBUG_IMAGES,
|
||||
@ -30,6 +31,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
|
||||
def __init__(
|
||||
self,
|
||||
config: FrigateConfig,
|
||||
sub_label_publisher: EventMetadataPublisher,
|
||||
metrics: DataProcessorMetrics,
|
||||
model_runner: LicensePlateModelRunner,
|
||||
detected_license_plates: dict[str, dict[str, any]],
|
||||
@ -38,6 +40,7 @@ class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
|
||||
self.model_runner = model_runner
|
||||
self.lpr_config = config.lpr
|
||||
self.config = config
|
||||
self.sub_label_publisher = sub_label_publisher
|
||||
super().__init__(config, metrics, model_runner)
|
||||
|
||||
def process_data(
|
||||
|
@ -5,10 +5,13 @@ import os
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import requests
|
||||
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
EventMetadataPublisher,
|
||||
EventMetadataTypeEnum,
|
||||
)
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.const import FRIGATE_LOCALHOST, MODEL_CACHE_DIR
|
||||
from frigate.const import MODEL_CACHE_DIR
|
||||
from frigate.util.object import calculate_region
|
||||
|
||||
from ..types import DataProcessorMetrics
|
||||
@ -23,9 +26,15 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class BirdRealTimeProcessor(RealTimeProcessorApi):
|
||||
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
|
||||
def __init__(
|
||||
self,
|
||||
config: FrigateConfig,
|
||||
sub_label_publisher: EventMetadataPublisher,
|
||||
metrics: DataProcessorMetrics,
|
||||
):
|
||||
super().__init__(config, metrics)
|
||||
self.interpreter: Interpreter = None
|
||||
self.sub_label_publisher = sub_label_publisher
|
||||
self.tensor_input_details: dict[str, any] = None
|
||||
self.tensor_output_details: dict[str, any] = None
|
||||
self.detected_birds: dict[str, float] = {}
|
||||
@ -134,17 +143,10 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
|
||||
logger.debug(f"Score {score} is worse than previous score {previous_score}")
|
||||
return
|
||||
|
||||
resp = requests.post(
|
||||
f"{FRIGATE_LOCALHOST}/api/events/{obj_data['id']}/sub_label",
|
||||
json={
|
||||
"camera": obj_data.get("camera"),
|
||||
"subLabel": self.labelmap[best_id],
|
||||
"subLabelScore": score,
|
||||
},
|
||||
self.sub_label_publisher.publish(
|
||||
EventMetadataTypeEnum.sub_label, (id, self.labelmap[best_id], score)
|
||||
)
|
||||
|
||||
if resp.status_code == 200:
|
||||
self.detected_birds[obj_data["id"]] = score
|
||||
self.detected_birds[obj_data["id"]] = score
|
||||
|
||||
def handle_request(self, topic, request_data):
|
||||
return None
|
||||
|
@ -11,11 +11,14 @@ from typing import Optional
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
import requests
|
||||
|
||||
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
EventMetadataPublisher,
|
||||
EventMetadataTypeEnum,
|
||||
)
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.const import FACE_DIR, FRIGATE_LOCALHOST, MODEL_CACHE_DIR
|
||||
from frigate.const import FACE_DIR, MODEL_CACHE_DIR
|
||||
from frigate.util.image import area
|
||||
|
||||
from ..types import DataProcessorMetrics
|
||||
@ -28,9 +31,15 @@ MIN_MATCHING_FACES = 2
|
||||
|
||||
|
||||
class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
|
||||
def __init__(
|
||||
self,
|
||||
config: FrigateConfig,
|
||||
sub_label_publisher: EventMetadataPublisher,
|
||||
metrics: DataProcessorMetrics,
|
||||
):
|
||||
super().__init__(config, metrics)
|
||||
self.face_config = config.face_recognition
|
||||
self.sub_label_publisher = sub_label_publisher
|
||||
self.face_detector: cv2.FaceDetectorYN = None
|
||||
self.landmark_detector: cv2.face.FacemarkLBF = None
|
||||
self.recognizer: cv2.face.LBPHFaceRecognizer = None
|
||||
@ -183,6 +192,22 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
image, M, (output_width, output_height), flags=cv2.INTER_CUBIC
|
||||
)
|
||||
|
||||
    def __get_blur_factor(self, input: np.ndarray) -> float:
        """Calculates the factor for the confidence based on the blur of the image."""
        if not self.face_config.blur_confidence_filter:
            return 1.0

        variance = cv2.Laplacian(input, cv2.CV_64F).var()

        if variance < 60:  # image is very blurry
            return 0.96
        elif variance < 70:  # image is moderately blurry
            return 0.98
        elif variance < 80:  # image is slightly blurry
            return 0.99
        else:
            return 1.0

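As a worked example (illustrative numbers): a face crop whose Laplacian variance is 45 falls in the "very blurry" band and gets a 0.96 factor, so a recognizer distance of 300 later in this change yields (1.0 - 300 / 1000) * 0.96 = 0.672 instead of 0.70, while a sharp crop (variance above 80) keeps its score unchanged.
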
def __clear_classifier(self) -> None:
|
||||
self.face_recognizer = None
|
||||
self.label_map = {}
|
||||
@ -223,14 +248,21 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
if not self.recognizer:
|
||||
return None
|
||||
|
||||
# face recognition is best run on grayscale images
|
||||
img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
|
||||
|
||||
# get blur factor before aligning face
|
||||
blur_factor = self.__get_blur_factor(img)
|
||||
logger.debug(f"face detected with bluriness {blur_factor}")
|
||||
|
||||
# align face and run recognition
|
||||
img = self.__align_face(img, img.shape[1], img.shape[0])
|
||||
index, distance = self.recognizer.predict(img)
|
||||
|
||||
if index == -1:
|
||||
return None
|
||||
|
||||
score = 1.0 - (distance / 1000)
|
||||
score = (1.0 - (distance / 1000)) * blur_factor
|
||||
return self.label_map[index], round(score, 2)
|
||||
|
||||
def __update_metrics(self, duration: float) -> None:
|
||||
@ -349,18 +381,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
|
||||
self.__update_metrics(datetime.datetime.now().timestamp() - start)
|
||||
return
|
||||
|
||||
resp = requests.post(
|
||||
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
|
||||
json={
|
||||
"camera": obj_data.get("camera"),
|
||||
"subLabel": sub_label,
|
||||
"subLabelScore": score,
|
||||
},
|
||||
self.sub_label_publisher.publish(
|
||||
EventMetadataTypeEnum.sub_label, (id, sub_label, score)
|
||||
)
|
||||
|
||||
if resp.status_code == 200:
|
||||
self.detected_faces[id] = face_score
|
||||
|
||||
self.detected_faces[id] = face_score
|
||||
self.__update_metrics(datetime.datetime.now().timestamp() - start)
|
||||
|
||||
def handle_request(self, topic, request_data) -> dict[str, any] | None:
|
||||
|
@ -4,6 +4,7 @@ import logging
|
||||
|
||||
import numpy as np
|
||||
|
||||
from frigate.comms.event_metadata_updater import EventMetadataPublisher
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.data_processing.common.license_plate.mixin import (
|
||||
LicensePlateProcessingMixin,
|
||||
@ -22,6 +23,7 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
|
||||
def __init__(
|
||||
self,
|
||||
config: FrigateConfig,
|
||||
sub_label_publisher: EventMetadataPublisher,
|
||||
metrics: DataProcessorMetrics,
|
||||
model_runner: LicensePlateModelRunner,
|
||||
detected_license_plates: dict[str, dict[str, any]],
|
||||
@ -30,6 +32,7 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
|
||||
self.model_runner = model_runner
|
||||
self.lpr_config = config.lpr
|
||||
self.config = config
|
||||
self.sub_label_publisher = sub_label_publisher
|
||||
super().__init__(config, metrics)
|
||||
|
||||
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
|
||||
|
@ -38,6 +38,7 @@ class ModelTypeEnum(str, Enum):
|
||||
yolov9 = "yolov9"
|
||||
yolonas = "yolonas"
|
||||
dfine = "dfine"
|
||||
yologeneric = "yolo-generic"
|
||||
|
||||
|
||||
class ModelConfig(BaseModel):
|
||||
|
616
frigate/detectors/plugins/hailo8l.py
Normal file → Executable file
@ -1,286 +1,450 @@
|
||||
import logging
|
||||
import os
|
||||
import queue
|
||||
import subprocess
|
||||
import threading
|
||||
import urllib.request
|
||||
from functools import partial
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
|
||||
try:
|
||||
from hailo_platform import (
|
||||
HEF,
|
||||
ConfigureParams,
|
||||
FormatType,
|
||||
HailoRTException,
|
||||
HailoStreamInterface,
|
||||
InferVStreams,
|
||||
InputVStreamParams,
|
||||
OutputVStreamParams,
|
||||
HailoSchedulingAlgorithm,
|
||||
VDevice,
|
||||
)
|
||||
except ModuleNotFoundError:
|
||||
pass
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic import Field
|
||||
from typing_extensions import Literal
|
||||
|
||||
from frigate.const import MODEL_CACHE_DIR
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig
|
||||
from frigate.detectors.detector_config import (
|
||||
BaseDetectorConfig,
|
||||
)
|
||||
|
||||
# Set up logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Define the detector key for Hailo
|
||||
|
||||
# ----------------- ResponseStore Class ----------------- #
class ResponseStore:
    """
    A thread-safe hash-based response store that maps request IDs
    to their results. Threads can wait on the condition variable until
    their request's result appears.
    """

    def __init__(self):
        self.responses = {}  # Maps request_id -> (original_input, infer_results)
        self.lock = threading.Lock()
        self.cond = threading.Condition(self.lock)

    def put(self, request_id, response):
        with self.cond:
            self.responses[request_id] = response
            self.cond.notify_all()

    def get(self, request_id, timeout=None):
        with self.cond:
            if not self.cond.wait_for(
                lambda: request_id in self.responses, timeout=timeout
            ):
                raise TimeoutError(f"Timeout waiting for response {request_id}")
            return self.responses.pop(request_id)

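A minimal usage sketch for the store above (illustrative; the request id and payload are made up):

import threading

store = ResponseStore()

def worker(request_id: int) -> None:
    # pretend async inference completed for this request
    store.put(request_id, ("original_frame", {"output": [0.9]}))

t = threading.Thread(target=worker, args=(42,))
t.start()
original, result = store.get(42, timeout=5.0)  # blocks until the worker responds
t.join()
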
# ----------------- Utility Functions ----------------- #
|
||||
|
||||
|
||||
def preprocess_tensor(image: np.ndarray, model_w: int, model_h: int) -> np.ndarray:
    """
    Resize an image with unchanged aspect ratio using padding.
    Assumes input image shape is (H, W, 3).
    """
    if image.ndim == 4 and image.shape[0] == 1:
        image = image[0]

    h, w = image.shape[:2]

    if (w, h) == (320, 320) and (model_w, model_h) == (640, 640):
        return cv2.resize(image, (model_w, model_h), interpolation=cv2.INTER_LINEAR)

    scale = min(model_w / w, model_h / h)
    new_w, new_h = int(w * scale), int(h * scale)
    resized_image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
    padded_image = np.full((model_h, model_w, 3), 114, dtype=image.dtype)
    x_offset = (model_w - new_w) // 2
    y_offset = (model_h - new_h) // 2
    padded_image[y_offset : y_offset + new_h, x_offset : x_offset + new_w] = (
        resized_image
    )
    return padded_image
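As a concrete illustration of the letterboxing above (made-up frame size; assumes preprocess_tensor is importable): a 1280x720 frame scaled into a 640x640 input keeps its aspect ratio and is padded vertically.

import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)
padded = preprocess_tensor(frame, 640, 640)
# scale = min(640/1280, 640/720) = 0.5 -> resized to 640x360,
# then 140 rows of the pad value 114 are added above and below
assert padded.shape == (640, 640, 3)
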
|
||||
|
||||
# ----------------- Global Constants ----------------- #
|
||||
DETECTOR_KEY = "hailo8l"
|
||||
ARCH = None
|
||||
H8_DEFAULT_MODEL = "yolov6n.hef"
|
||||
H8L_DEFAULT_MODEL = "yolov6n.hef"
|
||||
H8_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8/yolov6n.hef"
|
||||
H8L_DEFAULT_URL = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.14.0/hailo8l/yolov6n.hef"
|
||||
|
||||
|
||||
# Configuration class for model settings
|
||||
class ModelConfig(BaseModel):
|
||||
path: str = Field(default=None, title="Model Path") # Path to the HEF file
|
||||
def detect_hailo_arch():
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["hailortcli", "fw-control", "identify"], capture_output=True, text=True
|
||||
)
|
||||
if result.returncode != 0:
|
||||
logger.error(f"Inference error: {result.stderr}")
|
||||
return None
|
||||
for line in result.stdout.split("\n"):
|
||||
if "Device Architecture" in line:
|
||||
if "HAILO8L" in line:
|
||||
return "hailo8l"
|
||||
elif "HAILO8" in line:
|
||||
return "hailo8"
|
||||
logger.error("Inference error: Could not determine Hailo architecture.")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger.error(f"Inference error: {e}")
|
||||
return None
|
||||
|
||||
|
||||
# Configuration class for Hailo detector
|
||||
class HailoDetectorConfig(BaseDetectorConfig):
|
||||
type: Literal[DETECTOR_KEY] # Type of the detector
|
||||
device: str = Field(default="PCIe", title="Device Type") # Device type (e.g., PCIe)
|
||||
# ----------------- HailoAsyncInference Class ----------------- #
|
||||
class HailoAsyncInference:
|
||||
def __init__(
|
||||
self,
|
||||
hef_path: str,
|
||||
input_queue: queue.Queue,
|
||||
output_store: ResponseStore,
|
||||
batch_size: int = 1,
|
||||
input_type: Optional[str] = None,
|
||||
output_type: Optional[Dict[str, str]] = None,
|
||||
send_original_frame: bool = False,
|
||||
) -> None:
|
||||
self.input_queue = input_queue
|
||||
self.output_store = output_store
|
||||
|
||||
params = VDevice.create_params()
|
||||
params.scheduling_algorithm = HailoSchedulingAlgorithm.ROUND_ROBIN
|
||||
|
||||
self.hef = HEF(hef_path)
|
||||
self.target = VDevice(params)
|
||||
self.infer_model = self.target.create_infer_model(hef_path)
|
||||
self.infer_model.set_batch_size(batch_size)
|
||||
if input_type is not None:
|
||||
self._set_input_type(input_type)
|
||||
if output_type is not None:
|
||||
self._set_output_type(output_type)
|
||||
self.output_type = output_type
|
||||
self.send_original_frame = send_original_frame
|
||||
|
||||
def _set_input_type(self, input_type: Optional[str] = None) -> None:
|
||||
self.infer_model.input().set_format_type(getattr(FormatType, input_type))
|
||||
|
||||
def _set_output_type(
|
||||
self, output_type_dict: Optional[Dict[str, str]] = None
|
||||
) -> None:
|
||||
for output_name, output_type in output_type_dict.items():
|
||||
self.infer_model.output(output_name).set_format_type(
|
||||
getattr(FormatType, output_type)
|
||||
)
|
||||
|
||||
def callback(
|
||||
self,
|
||||
completion_info,
|
||||
bindings_list: List,
|
||||
input_batch: List,
|
||||
request_ids: List[int],
|
||||
):
|
||||
if completion_info.exception:
|
||||
logger.error(f"Inference error: {completion_info.exception}")
|
||||
else:
|
||||
for i, bindings in enumerate(bindings_list):
|
||||
if len(bindings._output_names) == 1:
|
||||
result = bindings.output().get_buffer()
|
||||
else:
|
||||
result = {
|
||||
name: np.expand_dims(bindings.output(name).get_buffer(), axis=0)
|
||||
for name in bindings._output_names
|
||||
}
|
||||
self.output_store.put(request_ids[i], (input_batch[i], result))
|
||||
|
||||
def _create_bindings(self, configured_infer_model) -> object:
|
||||
if self.output_type is None:
|
||||
output_buffers = {
|
||||
output_info.name: np.empty(
|
||||
self.infer_model.output(output_info.name).shape,
|
||||
dtype=getattr(
|
||||
np, str(output_info.format.type).split(".")[1].lower()
|
||||
),
|
||||
)
|
||||
for output_info in self.hef.get_output_vstream_infos()
|
||||
}
|
||||
else:
|
||||
output_buffers = {
|
||||
name: np.empty(
|
||||
self.infer_model.output(name).shape,
|
||||
dtype=getattr(np, self.output_type[name].lower()),
|
||||
)
|
||||
for name in self.output_type
|
||||
}
|
||||
return configured_infer_model.create_bindings(output_buffers=output_buffers)
|
||||
|
||||
def get_input_shape(self) -> Tuple[int, ...]:
|
||||
return self.hef.get_input_vstream_infos()[0].shape
|
||||
|
||||
def run(self) -> None:
|
||||
with self.infer_model.configure() as configured_infer_model:
|
||||
while True:
|
||||
batch_data = self.input_queue.get()
|
||||
if batch_data is None:
|
||||
break
|
||||
request_id, frame_data = batch_data
|
||||
preprocessed_batch = [frame_data]
|
||||
request_ids = [request_id]
|
||||
input_batch = preprocessed_batch # non-send_original_frame mode
|
||||
|
||||
bindings_list = []
|
||||
for frame in preprocessed_batch:
|
||||
bindings = self._create_bindings(configured_infer_model)
|
||||
bindings.input().set_buffer(np.array(frame))
|
||||
bindings_list.append(bindings)
|
||||
configured_infer_model.wait_for_async_ready(timeout_ms=10000)
|
||||
job = configured_infer_model.run_async(
|
||||
bindings_list,
|
||||
partial(
|
||||
self.callback,
|
||||
input_batch=input_batch,
|
||||
request_ids=request_ids,
|
||||
bindings_list=bindings_list,
|
||||
),
|
||||
)
|
||||
job.wait(100)
|
||||
|
||||
|
||||
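run() and callback() form a producer/consumer pair: the detector puts (request_id, tensor) tuples on input_queue, and results come back keyed by request id through the ResponseStore, which is not shown in this hunk. A minimal thread-safe sketch of the interface it is assumed to provide:

import threading


class ResponseStore:
    """Assumed interface: put(request_id, result) and get(request_id, timeout)."""

    def __init__(self) -> None:
        self._responses: dict[int, object] = {}
        self._cond = threading.Condition()

    def put(self, request_id: int, result: object) -> None:
        with self._cond:
            self._responses[request_id] = result
            self._cond.notify_all()

    def get(self, request_id: int, timeout: float = 10.0) -> object:
        with self._cond:
            if not self._cond.wait_for(
                lambda: request_id in self._responses, timeout=timeout
            ):
                raise TimeoutError(f"no result for request {request_id}")
            return self._responses.pop(request_id)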
# Hailo detector class implementation
|
||||
# ----------------- HailoDetector Class ----------------- #
|
||||
class HailoDetector(DetectionApi):
|
||||
    type_key = DETECTOR_KEY  # Set the type key to the Hailo detector key
|
||||
|
||||
def __init__(self, detector_config: HailoDetectorConfig):
|
||||
# Initialize device type and model path from the configuration
|
||||
self.h8l_device_type = detector_config.device
|
||||
self.h8l_model_path = detector_config.model.path
|
||||
self.h8l_model_height = detector_config.model.height
|
||||
self.h8l_model_width = detector_config.model.width
|
||||
self.h8l_model_type = detector_config.model.model_type
|
||||
self.h8l_tensor_format = detector_config.model.input_tensor
|
||||
self.h8l_pixel_format = detector_config.model.input_pixel_format
|
||||
self.model_url = "https://hailo-model-zoo.s3.eu-west-2.amazonaws.com/ModelZoo/Compiled/v2.11.0/hailo8l/ssd_mobilenet_v1.hef"
|
||||
self.cache_dir = os.path.join(MODEL_CACHE_DIR, "h8l_cache")
|
||||
self.expected_model_filename = "ssd_mobilenet_v1.hef"
|
||||
output_type = "FLOAT32"
|
||||
def __init__(self, detector_config: "HailoDetectorConfig"):
|
||||
global ARCH
|
||||
ARCH = detect_hailo_arch()
|
||||
self.cache_dir = MODEL_CACHE_DIR
|
||||
self.device_type = detector_config.device
|
||||
self.model_height = (
|
||||
detector_config.model.height
|
||||
if hasattr(detector_config.model, "height")
|
||||
else None
|
||||
)
|
||||
self.model_width = (
|
||||
detector_config.model.width
|
||||
if hasattr(detector_config.model, "width")
|
||||
else None
|
||||
)
|
||||
self.model_type = (
|
||||
detector_config.model.model_type
|
||||
if hasattr(detector_config.model, "model_type")
|
||||
else None
|
||||
)
|
||||
self.tensor_format = (
|
||||
detector_config.model.input_tensor
|
||||
if hasattr(detector_config.model, "input_tensor")
|
||||
else None
|
||||
)
|
||||
self.pixel_format = (
|
||||
detector_config.model.input_pixel_format
|
||||
if hasattr(detector_config.model, "input_pixel_format")
|
||||
else None
|
||||
)
|
||||
self.input_dtype = (
|
||||
detector_config.model.input_dtype
|
||||
if hasattr(detector_config.model, "input_dtype")
|
||||
else None
|
||||
)
|
||||
self.output_type = "FLOAT32"
|
||||
self.set_path_and_url(detector_config.model.path)
|
||||
self.working_model_path = self.check_and_prepare()
|
||||
|
||||
self.batch_size = 1
|
||||
self.input_queue = queue.Queue()
|
||||
self.response_store = ResponseStore()
|
||||
self.request_counter = 0
|
||||
self.request_counter_lock = threading.Lock()
|
||||
|
||||
logger.info(f"Initializing Hailo device as {self.h8l_device_type}")
|
||||
self.check_and_prepare_model()
|
||||
try:
|
||||
# Validate device type
|
||||
if self.h8l_device_type not in ["PCIe", "M.2"]:
|
||||
raise ValueError(f"Unsupported device type: {self.h8l_device_type}")
|
||||
|
||||
# Initialize the Hailo device
|
||||
self.target = VDevice()
|
||||
# Load the HEF (Hailo's binary format for neural networks)
|
||||
self.hef = HEF(self.h8l_model_path)
|
||||
# Create configuration parameters from the HEF
|
||||
self.configure_params = ConfigureParams.create_from_hef(
|
||||
hef=self.hef, interface=HailoStreamInterface.PCIe
|
||||
logger.debug(f"[INIT] Loading HEF model from {self.working_model_path}")
|
||||
self.inference_engine = HailoAsyncInference(
|
||||
self.working_model_path,
|
||||
self.input_queue,
|
||||
self.response_store,
|
||||
self.batch_size,
|
||||
)
|
||||
# Configure the device with the HEF
|
||||
self.network_groups = self.target.configure(self.hef, self.configure_params)
|
||||
self.network_group = self.network_groups[0]
|
||||
self.network_group_params = self.network_group.create_params()
|
||||
|
||||
# Create input and output virtual stream parameters
|
||||
self.input_vstream_params = InputVStreamParams.make(
|
||||
self.network_group,
|
||||
format_type=self.hef.get_input_vstream_infos()[0].format.type,
|
||||
self.input_shape = self.inference_engine.get_input_shape()
|
||||
logger.debug(f"[INIT] Model input shape: {self.input_shape}")
|
||||
self.inference_thread = threading.Thread(
|
||||
target=self.inference_engine.run, daemon=True
|
||||
)
|
||||
self.output_vstream_params = OutputVStreamParams.make(
|
||||
self.network_group, format_type=getattr(FormatType, output_type)
|
||||
)
|
||||
|
||||
# Get input and output stream information from the HEF
|
||||
self.input_vstream_info = self.hef.get_input_vstream_infos()
|
||||
self.output_vstream_info = self.hef.get_output_vstream_infos()
|
||||
|
||||
logger.info("Hailo device initialized successfully")
|
||||
logger.debug(f"[__init__] Model Path: {self.h8l_model_path}")
|
||||
logger.debug(f"[__init__] Input Tensor Format: {self.h8l_tensor_format}")
|
||||
logger.debug(f"[__init__] Input Pixel Format: {self.h8l_pixel_format}")
|
||||
logger.debug(f"[__init__] Input VStream Info: {self.input_vstream_info[0]}")
|
||||
logger.debug(
|
||||
f"[__init__] Output VStream Info: {self.output_vstream_info[0]}"
|
||||
)
|
||||
except HailoRTException as e:
|
||||
logger.error(f"HailoRTException during initialization: {e}")
|
||||
raise
|
||||
self.inference_thread.start()
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to initialize Hailo device: {e}")
|
||||
logger.error(f"[INIT] Failed to initialize HailoAsyncInference: {e}")
|
||||
raise
|
||||
|
||||
def check_and_prepare_model(self):
|
||||
# Ensure cache directory exists
|
||||
def set_path_and_url(self, path: str = None):
|
||||
if not path:
|
||||
self.model_path = None
|
||||
self.url = None
|
||||
return
|
||||
if self.is_url(path):
|
||||
self.url = path
|
||||
self.model_path = None
|
||||
else:
|
||||
self.model_path = path
|
||||
self.url = None
|
||||
|
||||
def is_url(self, url: str) -> bool:
|
||||
return (
|
||||
url.startswith("http://")
|
||||
or url.startswith("https://")
|
||||
or url.startswith("www.")
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def extract_model_name(path: str = None, url: str = None) -> str:
|
||||
if path and path.endswith(".hef"):
|
||||
return os.path.basename(path)
|
||||
elif url and url.endswith(".hef"):
|
||||
return os.path.basename(url)
|
||||
else:
|
||||
if ARCH == "hailo8":
|
||||
return H8_DEFAULT_MODEL
|
||||
else:
|
||||
return H8L_DEFAULT_MODEL
|
||||
|
||||
@staticmethod
|
||||
def download_model(url: str, destination: str):
|
||||
if not url.endswith(".hef"):
|
||||
raise ValueError("Invalid model URL. Only .hef files are supported.")
|
||||
try:
|
||||
urllib.request.urlretrieve(url, destination)
|
||||
logger.debug(f"Downloaded model to {destination}")
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Failed to download model from {url}: {str(e)}")
|
||||
|
||||
def check_and_prepare(self) -> str:
|
||||
if not os.path.exists(self.cache_dir):
|
||||
os.makedirs(self.cache_dir)
|
||||
model_name = self.extract_model_name(self.model_path, self.url)
|
||||
cached_model_path = os.path.join(self.cache_dir, model_name)
|
||||
if not self.model_path and not self.url:
|
||||
if os.path.exists(cached_model_path):
|
||||
logger.debug(f"Model found in cache: {cached_model_path}")
|
||||
return cached_model_path
|
||||
else:
|
||||
logger.debug(f"Downloading default model: {model_name}")
|
||||
if ARCH == "hailo8":
|
||||
self.download_model(H8_DEFAULT_URL, cached_model_path)
|
||||
else:
|
||||
self.download_model(H8L_DEFAULT_URL, cached_model_path)
|
||||
elif self.url:
|
||||
logger.debug(f"Downloading model from URL: {self.url}")
|
||||
self.download_model(self.url, cached_model_path)
|
||||
elif self.model_path:
|
||||
if os.path.exists(self.model_path):
|
||||
logger.debug(f"Using existing model at: {self.model_path}")
|
||||
return self.model_path
|
||||
else:
|
||||
raise FileNotFoundError(f"Model file not found at: {self.model_path}")
|
||||
return cached_model_path
|
||||
|
||||
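A usage sketch of the resolution order implemented above (no model configured, remote URL, or local path), assuming an initialized HailoDetector instance named detector; paths are illustrative only:

# no path or URL configured -> the default model for the detected arch is
# downloaded into the cache directory on first run, then reused
detector.set_path_and_url(None)
model = detector.check_and_prepare()  # e.g. <MODEL_CACHE_DIR>/<default>.hef

# explicit URL -> downloaded into the cache under its basename
detector.set_path_and_url("https://example.com/models/custom.hef")
model = detector.check_and_prepare()  # <MODEL_CACHE_DIR>/custom.hef

# local path -> used as-is if present, otherwise FileNotFoundError is raised
detector.set_path_and_url("/config/model_cache/custom.hef")
model = detector.check_and_prepare()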
# Check for the expected model file
|
||||
model_file_path = os.path.join(self.cache_dir, self.expected_model_filename)
|
||||
if not os.path.isfile(model_file_path):
|
||||
logger.info(
|
||||
f"A model file was not found at {model_file_path}, Downloading one from {self.model_url}."
|
||||
)
|
||||
urllib.request.urlretrieve(self.model_url, model_file_path)
|
||||
logger.info(f"A model file was downloaded to {model_file_path}.")
|
||||
else:
|
||||
logger.info(
|
||||
f"A model file already exists at {model_file_path} not downloading one."
|
||||
)
|
||||
def _get_request_id(self) -> int:
|
||||
with self.request_counter_lock:
|
||||
request_id = self.request_counter
|
||||
self.request_counter += 1
|
||||
if self.request_counter > 1000000:
|
||||
self.request_counter = 0
|
||||
return request_id
|
||||
|
||||
def detect_raw(self, tensor_input):
|
||||
logger.debug("[detect_raw] Entering function")
|
||||
logger.debug(
|
||||
f"[detect_raw] The `tensor_input` = {tensor_input} tensor_input shape = {tensor_input.shape}"
|
||||
)
|
||||
request_id = self._get_request_id()
|
||||
|
||||
if tensor_input is None:
|
||||
raise ValueError(
|
||||
"[detect_raw] The 'tensor_input' argument must be provided"
|
||||
)
|
||||
|
||||
# Ensure tensor_input is a numpy array
|
||||
if isinstance(tensor_input, list):
|
||||
tensor_input = np.array(tensor_input)
|
||||
logger.debug(
|
||||
f"[detect_raw] Converted tensor_input to numpy array: shape {tensor_input.shape}"
|
||||
)
|
||||
|
||||
input_data = tensor_input
|
||||
logger.debug(
|
||||
f"[detect_raw] Input data for inference shape: {tensor_input.shape}, dtype: {tensor_input.dtype}"
|
||||
)
|
||||
tensor_input = self.preprocess(tensor_input)
|
||||
if isinstance(tensor_input, np.ndarray) and len(tensor_input.shape) == 3:
|
||||
tensor_input = np.expand_dims(tensor_input, axis=0)
|
||||
|
||||
self.input_queue.put((request_id, tensor_input))
|
||||
try:
|
||||
with InferVStreams(
|
||||
self.network_group,
|
||||
self.input_vstream_params,
|
||||
self.output_vstream_params,
|
||||
) as infer_pipeline:
|
||||
input_dict = {}
|
||||
if isinstance(input_data, dict):
|
||||
input_dict = input_data
|
||||
logger.debug("[detect_raw] it a dictionary.")
|
||||
elif isinstance(input_data, (list, tuple)):
|
||||
for idx, layer_info in enumerate(self.input_vstream_info):
|
||||
input_dict[layer_info.name] = input_data[idx]
|
||||
logger.debug("[detect_raw] converted from list/tuple.")
|
||||
else:
|
||||
if len(input_data.shape) == 3:
|
||||
input_data = np.expand_dims(input_data, axis=0)
|
||||
logger.debug("[detect_raw] converted from an array.")
|
||||
input_dict[self.input_vstream_info[0].name] = input_data
|
||||
original_input, infer_results = self.response_store.get(
|
||||
request_id, timeout=10.0
|
||||
)
|
||||
except TimeoutError:
|
||||
logger.error(
|
||||
f"Timeout waiting for inference results for request {request_id}"
|
||||
)
|
||||
return np.zeros((20, 6), dtype=np.float32)
|
||||
|
||||
logger.debug(
|
||||
f"[detect_raw] Input dictionary for inference keys: {input_dict.keys()}"
|
||||
)
|
||||
if isinstance(infer_results, list) and len(infer_results) == 1:
|
||||
infer_results = infer_results[0]
|
||||
|
||||
with self.network_group.activate(self.network_group_params):
|
||||
raw_output = infer_pipeline.infer(input_dict)
|
||||
logger.debug(f"[detect_raw] Raw inference output: {raw_output}")
|
||||
|
||||
if self.output_vstream_info[0].name not in raw_output:
|
||||
logger.error(
|
||||
f"[detect_raw] Missing output stream {self.output_vstream_info[0].name} in inference results"
|
||||
)
|
||||
return np.zeros((20, 6), np.float32)
|
||||
|
||||
raw_output = raw_output[self.output_vstream_info[0].name][0]
|
||||
logger.debug(
|
||||
f"[detect_raw] Raw output for stream {self.output_vstream_info[0].name}: {raw_output}"
|
||||
)
|
||||
|
||||
# Process the raw output
|
||||
detections = self.process_detections(raw_output)
|
||||
if len(detections) == 0:
|
||||
logger.debug(
|
||||
"[detect_raw] No detections found after processing. Setting default values."
|
||||
)
|
||||
return np.zeros((20, 6), np.float32)
|
||||
else:
|
||||
formatted_detections = detections
|
||||
if (
|
||||
formatted_detections.shape[1] != 6
|
||||
): # Ensure the formatted detections have 6 columns
|
||||
logger.error(
|
||||
f"[detect_raw] Unexpected shape for formatted detections: {formatted_detections.shape}. Expected (20, 6)."
|
||||
)
|
||||
return np.zeros((20, 6), np.float32)
|
||||
return formatted_detections
|
||||
except HailoRTException as e:
|
||||
logger.error(f"[detect_raw] HailoRTException during inference: {e}")
|
||||
return np.zeros((20, 6), np.float32)
|
||||
except Exception as e:
|
||||
logger.error(f"[detect_raw] Exception during inference: {e}")
|
||||
return np.zeros((20, 6), np.float32)
|
||||
finally:
|
||||
logger.debug("[detect_raw] Exiting function")
|
||||
|
||||
def process_detections(self, raw_detections, threshold=0.5):
|
||||
boxes, scores, classes = [], [], []
|
||||
num_detections = 0
|
||||
|
||||
logger.debug(f"[process_detections] Raw detections: {raw_detections}")
|
||||
|
||||
for i, detection_set in enumerate(raw_detections):
|
||||
threshold = 0.4
|
||||
all_detections = []
|
||||
for class_id, detection_set in enumerate(infer_results):
|
||||
if not isinstance(detection_set, np.ndarray) or detection_set.size == 0:
|
||||
logger.debug(
|
||||
f"[process_detections] Detection set {i} is empty or not an array, skipping."
|
||||
)
|
||||
continue
|
||||
|
||||
logger.debug(
|
||||
f"[process_detections] Detection set {i} shape: {detection_set.shape}"
|
||||
)
|
||||
|
||||
for detection in detection_set:
|
||||
if detection.shape[0] == 0:
|
||||
logger.debug(
|
||||
f"[process_detections] Detection in set {i} is empty, skipping."
|
||||
)
|
||||
for det in detection_set:
|
||||
if det.shape[0] < 5:
|
||||
continue
|
||||
|
||||
ymin, xmin, ymax, xmax = detection[:4]
|
||||
score = np.clip(detection[4], 0, 1) # Use np.clip for clarity
|
||||
|
||||
score = float(det[4])
|
||||
if score < threshold:
|
||||
logger.debug(
|
||||
f"[process_detections] Detection in set {i} has a score {score} below threshold {threshold}. Skipping."
|
||||
)
|
||||
continue
|
||||
all_detections.append([class_id, score, det[0], det[1], det[2], det[3]])
|
||||
|
||||
logger.debug(
|
||||
f"[process_detections] Adding detection with coordinates: ({xmin}, {ymin}), ({xmax}, {ymax}) and score: {score}"
|
||||
)
|
||||
boxes.append([ymin, xmin, ymax, xmax])
|
||||
scores.append(score)
|
||||
classes.append(i)
|
||||
num_detections += 1
|
||||
if len(all_detections) == 0:
|
||||
detections_array = np.zeros((20, 6), dtype=np.float32)
|
||||
else:
|
||||
detections_array = np.array(all_detections, dtype=np.float32)
|
||||
if detections_array.shape[0] > 20:
|
||||
detections_array = detections_array[:20, :]
|
||||
elif detections_array.shape[0] < 20:
|
||||
pad = np.zeros((20 - detections_array.shape[0], 6), dtype=np.float32)
|
||||
detections_array = np.vstack((detections_array, pad))
|
||||
|
||||
logger.debug(
|
||||
f"[process_detections] Boxes: {boxes}, Scores: {scores}, Classes: {classes}, Num detections: {num_detections}"
|
||||
)
|
||||
return detections_array
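Frigate's detector API expects a fixed-size (20, 6) float32 array of [class_id, score, ymin, xmin, ymax, xmax] rows, which is why the branch above pads or truncates before returning. A self-contained sketch of that step:

import numpy as np


def to_fixed_output(rows: list[list[float]], size: int = 20) -> np.ndarray:
    # each row is [class_id, score, ymin, xmin, ymax, xmax]
    if not rows:
        return np.zeros((size, 6), dtype=np.float32)
    arr = np.array(rows, dtype=np.float32)[:size]
    if arr.shape[0] < size:
        pad = np.zeros((size - arr.shape[0], 6), dtype=np.float32)
        arr = np.vstack((arr, pad))
    return arr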
|
||||
|
||||
if num_detections == 0:
|
||||
logger.debug("[process_detections] No valid detections found.")
|
||||
return np.zeros((20, 6), np.float32)
|
||||
|
||||
combined = np.hstack(
|
||||
(
|
||||
np.array(classes)[:, np.newaxis],
|
||||
np.array(scores)[:, np.newaxis],
|
||||
np.array(boxes),
|
||||
def preprocess(self, image):
|
||||
if isinstance(image, np.ndarray):
|
||||
processed = preprocess_tensor(
|
||||
image, self.input_shape[1], self.input_shape[0]
|
||||
)
|
||||
)
|
||||
return np.expand_dims(processed, axis=0)
|
||||
else:
|
||||
raise ValueError("Unsupported image format for preprocessing")
|
||||
|
||||
if combined.shape[0] < 20:
|
||||
padding = np.zeros(
|
||||
(20 - combined.shape[0], combined.shape[1]), dtype=combined.dtype
|
||||
)
|
||||
combined = np.vstack((combined, padding))
|
||||
def close(self):
|
||||
"""Properly shuts down the inference engine and releases the VDevice."""
|
||||
logger.debug("[CLOSE] Closing HailoDetector")
|
||||
try:
|
||||
if hasattr(self, "inference_engine"):
|
||||
if hasattr(self.inference_engine, "target"):
|
||||
self.inference_engine.target.release()
|
||||
logger.debug("Hailo VDevice released successfully")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to close Hailo device: {e}")
|
||||
raise
|
||||
|
||||
logger.debug(
|
||||
f"[process_detections] Combined detections (padded to 20 if necessary): {np.array_str(combined, precision=4, suppress_small=True)}"
|
||||
)
|
||||
def __del__(self):
|
||||
"""Destructor to ensure cleanup when the object is deleted."""
|
||||
self.close()
|
||||
|
||||
return combined[:20, :6]
|
||||
|
||||
# ----------------- HailoDetectorConfig Class ----------------- #
|
||||
class HailoDetectorConfig(BaseDetectorConfig):
|
||||
type: Literal[DETECTOR_KEY]
|
||||
device: str = Field(default="PCIe", title="Device Type")
|
||||
|
@ -15,6 +15,7 @@ from playhouse.sqliteq import SqliteQueueDatabase
|
||||
|
||||
from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsResponder
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
EventMetadataPublisher,
|
||||
EventMetadataSubscriber,
|
||||
EventMetadataTypeEnum,
|
||||
)
|
||||
@ -43,7 +44,7 @@ from frigate.data_processing.real_time.license_plate import (
|
||||
LicensePlateRealTimeProcessor,
|
||||
)
|
||||
from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum
|
||||
from frigate.events.types import EventTypeEnum
|
||||
from frigate.events.types import EventTypeEnum, RegenerateDescriptionEnum
|
||||
from frigate.genai import get_genai_client
|
||||
from frigate.models import Event
|
||||
from frigate.types import TrackedObjectUpdateTypesEnum
|
||||
@ -89,6 +90,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
|
||||
self.event_subscriber = EventUpdateSubscriber()
|
||||
self.event_end_subscriber = EventEndSubscriber()
|
||||
self.event_metadata_publisher = EventMetadataPublisher()
|
||||
self.event_metadata_subscriber = EventMetadataSubscriber(
|
||||
EventMetadataTypeEnum.regenerate_description
|
||||
)
|
||||
@ -108,15 +110,27 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
self.realtime_processors: list[RealTimeProcessorApi] = []
|
||||
|
||||
if self.config.face_recognition.enabled:
|
||||
self.realtime_processors.append(FaceRealTimeProcessor(self.config, metrics))
|
||||
self.realtime_processors.append(
|
||||
FaceRealTimeProcessor(
|
||||
self.config, self.event_metadata_publisher, metrics
|
||||
)
|
||||
)
|
||||
|
||||
if self.config.classification.bird.enabled:
|
||||
self.realtime_processors.append(BirdRealTimeProcessor(self.config, metrics))
|
||||
self.realtime_processors.append(
|
||||
BirdRealTimeProcessor(
|
||||
self.config, self.event_metadata_publisher, metrics
|
||||
)
|
||||
)
|
||||
|
||||
if self.config.lpr.enabled:
|
||||
self.realtime_processors.append(
|
||||
LicensePlateRealTimeProcessor(
|
||||
self.config, metrics, lpr_model_runner, self.detected_license_plates
|
||||
self.config,
|
||||
self.event_metadata_publisher,
|
||||
metrics,
|
||||
lpr_model_runner,
|
||||
self.detected_license_plates,
|
||||
)
|
||||
)
|
||||
|
||||
@ -126,7 +140,11 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
if self.config.lpr.enabled:
|
||||
self.post_processors.append(
|
||||
LicensePlatePostProcessor(
|
||||
self.config, metrics, lpr_model_runner, self.detected_license_plates
|
||||
self.config,
|
||||
self.event_metadata_publisher,
|
||||
metrics,
|
||||
lpr_model_runner,
|
||||
self.detected_license_plates,
|
||||
)
|
||||
)
|
||||
|
||||
@ -150,6 +168,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
self.event_subscriber.stop()
|
||||
self.event_end_subscriber.stop()
|
||||
self.recordings_subscriber.stop()
|
||||
self.event_metadata_publisher.stop()
|
||||
self.event_metadata_subscriber.stop()
|
||||
self.embeddings_responder.stop()
|
||||
self.requestor.stop()
|
||||
@ -375,15 +394,17 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
|
||||
def _process_event_metadata(self):
|
||||
# Check for regenerate description requests
|
||||
(topic, event_id, source) = self.event_metadata_subscriber.check_for_update(
|
||||
timeout=0.01
|
||||
)
|
||||
(topic, payload) = self.event_metadata_subscriber.check_for_update(timeout=0.01)
|
||||
|
||||
if topic is None:
|
||||
return
|
||||
|
||||
event_id, source = payload
|
||||
|
||||
if event_id:
|
||||
self.handle_regenerate_description(event_id, source)
|
||||
self.handle_regenerate_description(
|
||||
event_id, RegenerateDescriptionEnum(source)
|
||||
)
|
||||
|
||||
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
|
||||
"""Return jpg thumbnail of a region of the frame."""
|
||||
|
@ -2,17 +2,22 @@
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
import random
|
||||
import string
|
||||
import threading
|
||||
import time
|
||||
from typing import Tuple
|
||||
|
||||
import numpy as np
|
||||
import requests
|
||||
|
||||
import frigate.util as util
|
||||
from frigate.camera import CameraMetrics
|
||||
from frigate.comms.config_updater import ConfigSubscriber
|
||||
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
EventMetadataPublisher,
|
||||
EventMetadataTypeEnum,
|
||||
)
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import CameraConfig, CameraInput, FfmpegConfig
|
||||
from frigate.const import (
|
||||
@ -21,7 +26,6 @@ from frigate.const import (
|
||||
AUDIO_MAX_BIT_RANGE,
|
||||
AUDIO_MIN_CONFIDENCE,
|
||||
AUDIO_SAMPLE_RATE,
|
||||
FRIGATE_LOCALHOST,
|
||||
)
|
||||
from frigate.ffmpeg_presets import parse_preset_input
|
||||
from frigate.log import LogPipe
|
||||
@ -139,6 +143,7 @@ class AudioEventMaintainer(threading.Thread):
|
||||
f"config/enabled/{camera.name}", True
|
||||
)
|
||||
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.audio)
|
||||
self.event_metadata_publisher = EventMetadataPublisher()
|
||||
|
||||
self.was_enabled = camera.enabled
|
||||
|
||||
@ -207,24 +212,33 @@ class AudioEventMaintainer(threading.Thread):
|
||||
datetime.datetime.now().timestamp()
|
||||
)
|
||||
else:
|
||||
now = datetime.datetime.now().timestamp()
|
||||
rand_id = "".join(
|
||||
random.choices(string.ascii_lowercase + string.digits, k=6)
|
||||
)
|
||||
event_id = f"{now}-{rand_id}"
|
||||
self.requestor.send_data(f"{self.config.name}/audio/{label}", "ON")
|
||||
|
||||
resp = requests.post(
|
||||
f"{FRIGATE_LOCALHOST}/api/events/{self.config.name}/{label}/create",
|
||||
json={"duration": None, "score": score, "source_type": "audio"},
|
||||
self.event_metadata_publisher.publish(
|
||||
EventMetadataTypeEnum.manual_event_create,
|
||||
(
|
||||
now,
|
||||
self.config.name,
|
||||
label,
|
||||
event_id,
|
||||
True,
|
||||
score,
|
||||
None,
|
||||
None,
|
||||
"audio",
|
||||
{},
|
||||
),
|
||||
)
|
||||
|
||||
if resp.status_code == 200:
|
||||
event_id = resp.json()["event_id"]
|
||||
self.detections[label] = {
|
||||
"id": event_id,
|
||||
"label": label,
|
||||
"last_detection": datetime.datetime.now().timestamp(),
|
||||
}
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"Failed to create audio event with status code {resp.status_code}"
|
||||
)
|
||||
self.detections[label] = {
|
||||
"id": event_id,
|
||||
"label": label,
|
||||
"last_detection": now,
|
||||
}
|
||||
|
||||
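The manual_event_create payload published above is positional; TrackedObjectProcessor.create_manual_event (added later in this diff) unpacks it in this order, which is why the audio path fills the unused slots with None and an empty draw dict:

        # payload order expected by TrackedObjectProcessor.create_manual_event:
        # (frame_time, camera_name, label, event_id, include_recording, score,
        #  sub_label, duration, source_type, draw)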
def expire_detections(self) -> None:
|
||||
now = datetime.datetime.now().timestamp()
|
||||
@ -241,17 +255,11 @@ class AudioEventMaintainer(threading.Thread):
|
||||
f"{self.config.name}/audio/{detection['label']}", "OFF"
|
||||
)
|
||||
|
||||
resp = requests.put(
|
||||
f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
|
||||
json={"end_time": detection["last_detection"]},
|
||||
self.event_metadata_publisher.publish(
|
||||
EventMetadataTypeEnum.manual_event_end,
|
||||
(detection["id"], detection["last_detection"]),
|
||||
)
|
||||
|
||||
if resp.status_code == 200:
|
||||
self.detections[detection["label"]] = None
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
|
||||
)
|
||||
self.detections[detection["label"]] = None
|
||||
|
||||
def expire_all_detections(self) -> None:
|
||||
"""Immediately end all current detections"""
|
||||
@ -259,16 +267,11 @@ class AudioEventMaintainer(threading.Thread):
|
||||
for label, detection in list(self.detections.items()):
|
||||
if detection:
|
||||
self.requestor.send_data(f"{self.config.name}/audio/{label}", "OFF")
|
||||
resp = requests.put(
|
||||
f"{FRIGATE_LOCALHOST}/api/events/{detection['id']}/end",
|
||||
json={"end_time": now},
|
||||
self.event_metadata_publisher.publish(
|
||||
EventMetadataTypeEnum.manual_event_end,
|
||||
(detection["id"], now),
|
||||
)
|
||||
if resp.status_code == 200:
|
||||
self.detections[label] = None
|
||||
else:
|
||||
self.logger.warning(
|
||||
f"Failed to end audio event {detection['id']} with status code {resp.status_code}"
|
||||
)
|
||||
self.detections[label] = None
|
||||
|
||||
def start_or_restart_ffmpeg(self) -> None:
|
||||
self.audio_listener = start_or_restart_ffmpeg(
|
||||
@ -351,7 +354,8 @@ class AudioEventMaintainer(threading.Thread):
|
||||
|
||||
self.read_audio()
|
||||
|
||||
stop_ffmpeg(self.audio_listener, self.logger)
|
||||
if self.audio_listener:
|
||||
stop_ffmpeg(self.audio_listener, self.logger)
|
||||
self.logpipe.close()
|
||||
self.requestor.stop()
|
||||
self.config_subscriber.stop()
|
||||
|
@ -1,187 +0,0 @@
|
||||
"""Handle external events created by the user."""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import string
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
from numpy import ndarray
|
||||
|
||||
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
|
||||
from frigate.comms.events_updater import EventUpdatePublisher
|
||||
from frigate.config import CameraConfig, FrigateConfig
|
||||
from frigate.const import CLIPS_DIR, THUMB_DIR
|
||||
from frigate.events.types import EventStateEnum, EventTypeEnum
|
||||
from frigate.util.image import draw_box_with_label
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ManualEventState(str, Enum):
|
||||
complete = "complete"
|
||||
start = "start"
|
||||
end = "end"
|
||||
|
||||
|
||||
class ExternalEventProcessor:
|
||||
def __init__(self, config: FrigateConfig) -> None:
|
||||
self.config = config
|
||||
self.default_thumbnail = None
|
||||
self.event_sender = EventUpdatePublisher()
|
||||
self.detection_updater = DetectionPublisher(DetectionTypeEnum.api)
|
||||
self.event_camera = {}
|
||||
|
||||
def create_manual_event(
|
||||
self,
|
||||
camera: str,
|
||||
label: str,
|
||||
source_type: str,
|
||||
sub_label: Optional[str],
|
||||
score: int,
|
||||
duration: Optional[int],
|
||||
include_recording: bool,
|
||||
draw: dict[str, any],
|
||||
snapshot_frame: Optional[ndarray],
|
||||
) -> str:
|
||||
now = datetime.datetime.now().timestamp()
|
||||
camera_config = self.config.cameras.get(camera)
|
||||
|
||||
# create event id and start frame time
|
||||
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
|
||||
event_id = f"{now}-{rand_id}"
|
||||
|
||||
self._write_images(camera_config, label, event_id, draw, snapshot_frame)
|
||||
end = now + duration if duration is not None else None
|
||||
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.api,
|
||||
EventStateEnum.start,
|
||||
camera,
|
||||
"",
|
||||
{
|
||||
"id": event_id,
|
||||
"label": label,
|
||||
"sub_label": sub_label,
|
||||
"score": score,
|
||||
"camera": camera,
|
||||
"start_time": now - camera_config.record.event_pre_capture,
|
||||
"end_time": end,
|
||||
"has_clip": camera_config.record.enabled and include_recording,
|
||||
"has_snapshot": True,
|
||||
"type": source_type,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
if source_type == "api":
|
||||
self.event_camera[event_id] = camera
|
||||
self.detection_updater.publish(
|
||||
(
|
||||
camera,
|
||||
now,
|
||||
{
|
||||
"state": (
|
||||
ManualEventState.complete if end else ManualEventState.start
|
||||
),
|
||||
"label": f"{label}: {sub_label}" if sub_label else label,
|
||||
"event_id": event_id,
|
||||
"end_time": end,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
return event_id
|
||||
|
||||
def finish_manual_event(self, event_id: str, end_time: float) -> None:
|
||||
"""Finish external event with indeterminate duration."""
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.api,
|
||||
EventStateEnum.end,
|
||||
None,
|
||||
"",
|
||||
{"id": event_id, "end_time": end_time},
|
||||
)
|
||||
)
|
||||
|
||||
if event_id in self.event_camera:
|
||||
self.detection_updater.publish(
|
||||
(
|
||||
self.event_camera[event_id],
|
||||
end_time,
|
||||
{
|
||||
"state": ManualEventState.end,
|
||||
"event_id": event_id,
|
||||
"end_time": end_time,
|
||||
},
|
||||
)
|
||||
)
|
||||
self.event_camera.pop(event_id)
|
||||
|
||||
def _write_images(
|
||||
self,
|
||||
camera_config: CameraConfig,
|
||||
label: str,
|
||||
event_id: str,
|
||||
draw: dict[str, any],
|
||||
img_frame: Optional[ndarray],
|
||||
) -> None:
|
||||
if img_frame is None:
|
||||
return
|
||||
|
||||
# write clean snapshot if enabled
|
||||
if camera_config.snapshots.clean_copy:
|
||||
ret, png = cv2.imencode(".png", img_frame)
|
||||
|
||||
if ret:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR,
|
||||
f"{camera_config.name}-{event_id}-clean.png",
|
||||
),
|
||||
"wb",
|
||||
) as p:
|
||||
p.write(png.tobytes())
|
||||
|
||||
# write jpg snapshot with optional annotations
|
||||
if draw.get("boxes") and isinstance(draw.get("boxes"), list):
|
||||
for box in draw.get("boxes"):
|
||||
x = int(box["box"][0] * camera_config.detect.width)
|
||||
y = int(box["box"][1] * camera_config.detect.height)
|
||||
width = int(box["box"][2] * camera_config.detect.width)
|
||||
height = int(box["box"][3] * camera_config.detect.height)
|
||||
|
||||
draw_box_with_label(
|
||||
img_frame,
|
||||
x,
|
||||
y,
|
||||
x + width,
|
||||
y + height,
|
||||
label,
|
||||
f"{box.get('score', '-')}% {int(width * height)}",
|
||||
thickness=2,
|
||||
color=box.get("color", (255, 0, 0)),
|
||||
)
|
||||
|
||||
ret, jpg = cv2.imencode(".jpg", img_frame)
|
||||
with open(
|
||||
os.path.join(CLIPS_DIR, f"{camera_config.name}-{event_id}.jpg"),
|
||||
"wb",
|
||||
) as j:
|
||||
j.write(jpg.tobytes())
|
||||
|
||||
# create thumbnail with max height of 175 and save
|
||||
width = int(175 * img_frame.shape[1] / img_frame.shape[0])
|
||||
thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA)
|
||||
cv2.imwrite(
|
||||
os.path.join(THUMB_DIR, camera_config.name, f"{event_id}.webp"), thumb
|
||||
)
|
||||
|
||||
def stop(self):
|
||||
self.event_sender.stop()
|
||||
self.detection_updater.stop()
|
@ -27,6 +27,8 @@ def should_update_db(prev_event: Event, current_event: Event) -> bool:
|
||||
or prev_event["average_estimated_speed"]
|
||||
!= current_event["average_estimated_speed"]
|
||||
or prev_event["velocity_angle"] != current_event["velocity_angle"]
|
||||
or prev_event["recognized_license_plate"]
|
||||
!= current_event["recognized_license_plate"]
|
||||
or prev_event["path_data"] != current_event["path_data"]
|
||||
):
|
||||
return True
|
||||
@ -226,6 +228,15 @@ class EventProcessor(threading.Thread):
|
||||
event[Event.sub_label] = event_data["sub_label"][0]
|
||||
event[Event.data]["sub_label_score"] = event_data["sub_label"][1]
|
||||
|
||||
# only overwrite the recognized_license_plate in the database if it's set
|
||||
if event_data.get("recognized_license_plate") is not None:
|
||||
event[Event.data]["recognized_license_plate"] = event_data[
|
||||
"recognized_license_plate"
|
||||
][0]
|
||||
event[Event.data]["recognized_license_plate_score"] = event_data[
|
||||
"recognized_license_plate"
|
||||
][1]
|
||||
|
||||
(
|
||||
Event.insert(event)
|
||||
.on_conflict(
|
||||
|
@ -119,7 +119,7 @@ class User(Model): # type: ignore[misc]
|
||||
username = CharField(null=False, primary_key=True, max_length=30)
|
||||
role = CharField(
|
||||
max_length=20,
|
||||
default="viewer",
|
||||
default="admin",
|
||||
)
|
||||
password_hash = CharField(null=False, max_length=120)
|
||||
notification_tokens = JSONField()
|
||||
|
@ -22,7 +22,7 @@ from frigate.ffmpeg_presets import (
|
||||
parse_preset_hardware_acceleration_encode,
|
||||
)
|
||||
from frigate.models import Previews
|
||||
from frigate.object_processing import TrackedObject
|
||||
from frigate.track.object_processing import TrackedObject
|
||||
from frigate.util.image import copy_yuv_to_position, get_blank_yuv_frame, get_yuv_crop
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
@ -23,10 +23,9 @@ from frigate.const import (
|
||||
CLIPS_DIR,
|
||||
UPSERT_REVIEW_SEGMENT,
|
||||
)
|
||||
from frigate.events.external import ManualEventState
|
||||
from frigate.models import ReviewSegment
|
||||
from frigate.object_processing import TrackedObject
|
||||
from frigate.review.types import SeverityEnum
|
||||
from frigate.track.object_processing import ManualEventState, TrackedObject
|
||||
from frigate.util.image import SharedMemoryFrameManager, calculate_16_9_crop
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
@ -117,7 +117,6 @@ class BaseTestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
stats,
|
||||
None,
|
||||
)
|
||||
|
@ -2,6 +2,7 @@ import datetime
|
||||
import logging
|
||||
import os
|
||||
import unittest
|
||||
from unittest.mock import Mock
|
||||
|
||||
from fastapi.testclient import TestClient
|
||||
from peewee_migrate import Router
|
||||
@ -10,6 +11,7 @@ from playhouse.sqlite_ext import SqliteExtDatabase
|
||||
from playhouse.sqliteq import SqliteQueueDatabase
|
||||
|
||||
from frigate.api.fastapi_app import create_fastapi_app
|
||||
from frigate.comms.event_metadata_updater import EventMetadataPublisher
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.const import BASE_DIR, CACHE_DIR
|
||||
from frigate.models import Event, Recordings, Timeline
|
||||
@ -120,7 +122,6 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
id = "123456.random"
|
||||
|
||||
@ -142,7 +143,6 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
id = "123456.random"
|
||||
bad_id = "654321.other"
|
||||
@ -163,7 +163,6 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
id = "123456.random"
|
||||
|
||||
@ -186,7 +185,6 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
id = "123456.random"
|
||||
|
||||
@ -213,7 +211,6 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
morning_id = "123456.random"
|
||||
evening_id = "654321.random"
|
||||
@ -243,6 +240,7 @@ class TestHttp(unittest.TestCase):
|
||||
assert len(events) == 1
|
||||
|
||||
def test_set_delete_sub_label(self):
|
||||
mock_event_updater = Mock(spec=EventMetadataPublisher)
|
||||
app = create_fastapi_app(
|
||||
FrigateConfig(**self.minimal_config),
|
||||
self.db,
|
||||
@ -251,12 +249,18 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
mock_event_updater,
|
||||
)
|
||||
id = "123456.random"
|
||||
sub_label = "sub"
|
||||
|
||||
def update_event(topic, payload):
|
||||
event = Event.get(id=id)
|
||||
event.sub_label = payload[1]
|
||||
event.save()
|
||||
|
||||
mock_event_updater.publish.side_effect = update_event
|
||||
|
||||
with TestClient(app) as client:
|
||||
_insert_mock_event(id)
|
||||
new_sub_label_response = client.post(
|
||||
@ -281,6 +285,7 @@ class TestHttp(unittest.TestCase):
|
||||
assert event["sub_label"] == None
|
||||
|
||||
def test_sub_label_list(self):
|
||||
mock_event_updater = Mock(spec=EventMetadataPublisher)
|
||||
app = create_fastapi_app(
|
||||
FrigateConfig(**self.minimal_config),
|
||||
self.db,
|
||||
@ -289,12 +294,18 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
mock_event_updater,
|
||||
)
|
||||
id = "123456.random"
|
||||
sub_label = "sub"
|
||||
|
||||
def update_event(topic, payload):
|
||||
event = Event.get(id=id)
|
||||
event.sub_label = payload[1]
|
||||
event.save()
|
||||
|
||||
mock_event_updater.publish.side_effect = update_event
|
||||
|
||||
with TestClient(app) as client:
|
||||
_insert_mock_event(id)
|
||||
client.post(
|
||||
@ -316,7 +327,6 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
|
||||
with TestClient(app) as client:
|
||||
@ -334,7 +344,6 @@ class TestHttp(unittest.TestCase):
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
None,
|
||||
)
|
||||
id = "123456.random"
|
||||
|
||||
|
frigate/track/object_processing.py (new file, 620 lines)
@ -0,0 +1,620 @@
|
||||
import datetime
|
||||
import json
|
||||
import logging
|
||||
import queue
|
||||
import threading
|
||||
from collections import defaultdict
|
||||
from enum import Enum
|
||||
from multiprocessing.synchronize import Event as MpEvent
|
||||
|
||||
import numpy as np
|
||||
from peewee import DoesNotExist
|
||||
|
||||
from frigate.camera.state import CameraState
|
||||
from frigate.comms.config_updater import ConfigSubscriber
|
||||
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
|
||||
from frigate.comms.dispatcher import Dispatcher
|
||||
from frigate.comms.event_metadata_updater import (
|
||||
EventMetadataSubscriber,
|
||||
EventMetadataTypeEnum,
|
||||
)
|
||||
from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import (
|
||||
CameraMqttConfig,
|
||||
FrigateConfig,
|
||||
RecordConfig,
|
||||
SnapshotsConfig,
|
||||
)
|
||||
from frigate.const import UPDATE_CAMERA_ACTIVITY
|
||||
from frigate.events.types import EventStateEnum, EventTypeEnum
|
||||
from frigate.models import Event, Timeline
|
||||
from frigate.track.tracked_object import TrackedObject
|
||||
from frigate.util.image import SharedMemoryFrameManager
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ManualEventState(str, Enum):
|
||||
complete = "complete"
|
||||
start = "start"
|
||||
end = "end"
|
||||
|
||||
|
||||
class TrackedObjectProcessor(threading.Thread):
|
||||
def __init__(
|
||||
self,
|
||||
config: FrigateConfig,
|
||||
dispatcher: Dispatcher,
|
||||
tracked_objects_queue,
|
||||
ptz_autotracker_thread,
|
||||
stop_event,
|
||||
):
|
||||
super().__init__(name="detected_frames_processor")
|
||||
self.config = config
|
||||
self.dispatcher = dispatcher
|
||||
self.tracked_objects_queue = tracked_objects_queue
|
||||
self.stop_event: MpEvent = stop_event
|
||||
self.camera_states: dict[str, CameraState] = {}
|
||||
self.frame_manager = SharedMemoryFrameManager()
|
||||
self.last_motion_detected: dict[str, float] = {}
|
||||
self.ptz_autotracker_thread = ptz_autotracker_thread
|
||||
|
||||
self.config_enabled_subscriber = ConfigSubscriber("config/enabled/")
|
||||
|
||||
self.requestor = InterProcessRequestor()
|
||||
self.detection_publisher = DetectionPublisher(DetectionTypeEnum.all)
|
||||
self.event_sender = EventUpdatePublisher()
|
||||
self.event_end_subscriber = EventEndSubscriber()
|
||||
self.sub_label_subscriber = EventMetadataSubscriber(EventMetadataTypeEnum.all)
|
||||
|
||||
self.camera_activity: dict[str, dict[str, any]] = {}
|
||||
self.ongoing_manual_events: dict[str, str] = {}
|
||||
|
||||
# {
|
||||
# 'zone_name': {
|
||||
# 'person': {
|
||||
# 'camera_1': 2,
|
||||
# 'camera_2': 1
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
self.zone_data = defaultdict(lambda: defaultdict(dict))
|
||||
self.active_zone_data = defaultdict(lambda: defaultdict(dict))
|
||||
|
||||
def start(camera: str, obj: TrackedObject, frame_name: str):
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.tracked_object,
|
||||
EventStateEnum.start,
|
||||
camera,
|
||||
frame_name,
|
||||
obj.to_dict(),
|
||||
)
|
||||
)
|
||||
|
||||
def update(camera: str, obj: TrackedObject, frame_name: str):
|
||||
obj.has_snapshot = self.should_save_snapshot(camera, obj)
|
||||
obj.has_clip = self.should_retain_recording(camera, obj)
|
||||
after = obj.to_dict()
|
||||
message = {
|
||||
"before": obj.previous,
|
||||
"after": after,
|
||||
"type": "new" if obj.previous["false_positive"] else "update",
|
||||
}
|
||||
self.dispatcher.publish("events", json.dumps(message), retain=False)
|
||||
obj.previous = after
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.tracked_object,
|
||||
EventStateEnum.update,
|
||||
camera,
|
||||
frame_name,
|
||||
obj.to_dict(),
|
||||
)
|
||||
)
|
||||
|
||||
def autotrack(camera: str, obj: TrackedObject, frame_name: str):
|
||||
self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)
|
||||
|
||||
def end(camera: str, obj: TrackedObject, frame_name: str):
|
||||
# populate has_snapshot
|
||||
obj.has_snapshot = self.should_save_snapshot(camera, obj)
|
||||
obj.has_clip = self.should_retain_recording(camera, obj)
|
||||
|
||||
# write thumbnail to disk if it will be saved as an event
|
||||
if obj.has_snapshot or obj.has_clip:
|
||||
obj.write_thumbnail_to_disk()
|
||||
|
||||
# write the snapshot to disk
|
||||
if obj.has_snapshot:
|
||||
obj.write_snapshot_to_disk()
|
||||
|
||||
if not obj.false_positive:
|
||||
message = {
|
||||
"before": obj.previous,
|
||||
"after": obj.to_dict(),
|
||||
"type": "end",
|
||||
}
|
||||
self.dispatcher.publish("events", json.dumps(message), retain=False)
|
||||
self.ptz_autotracker_thread.ptz_autotracker.end_object(camera, obj)
|
||||
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.tracked_object,
|
||||
EventStateEnum.end,
|
||||
camera,
|
||||
frame_name,
|
||||
obj.to_dict(),
|
||||
)
|
||||
)
|
||||
|
||||
def snapshot(camera, obj: TrackedObject, frame_name: str):
|
||||
mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt
|
||||
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
|
||||
jpg_bytes = obj.get_img_bytes(
|
||||
ext="jpg",
|
||||
timestamp=mqtt_config.timestamp,
|
||||
bounding_box=mqtt_config.bounding_box,
|
||||
crop=mqtt_config.crop,
|
||||
height=mqtt_config.height,
|
||||
quality=mqtt_config.quality,
|
||||
)
|
||||
|
||||
if jpg_bytes is None:
|
||||
logger.warning(
|
||||
f"Unable to send mqtt snapshot for {obj.obj_data['id']}."
|
||||
)
|
||||
else:
|
||||
self.dispatcher.publish(
|
||||
f"{camera}/{obj.obj_data['label']}/snapshot",
|
||||
jpg_bytes,
|
||||
retain=True,
|
||||
)
|
||||
|
||||
def camera_activity(camera, activity):
|
||||
last_activity = self.camera_activity.get(camera)
|
||||
|
||||
if not last_activity or activity != last_activity:
|
||||
self.camera_activity[camera] = activity
|
||||
self.requestor.send_data(UPDATE_CAMERA_ACTIVITY, self.camera_activity)
|
||||
|
||||
for camera in self.config.cameras.keys():
|
||||
camera_state = CameraState(
|
||||
camera, self.config, self.frame_manager, self.ptz_autotracker_thread
|
||||
)
|
||||
camera_state.on("start", start)
|
||||
camera_state.on("autotrack", autotrack)
|
||||
camera_state.on("update", update)
|
||||
camera_state.on("end", end)
|
||||
camera_state.on("snapshot", snapshot)
|
||||
camera_state.on("camera_activity", camera_activity)
|
||||
self.camera_states[camera] = camera_state
|
||||
|
||||
def should_save_snapshot(self, camera, obj: TrackedObject):
|
||||
if obj.false_positive:
|
||||
return False
|
||||
|
||||
snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots
|
||||
|
||||
if not snapshot_config.enabled:
|
||||
return False
|
||||
|
||||
# object never changed position
|
||||
if obj.obj_data["position_changes"] == 0:
|
||||
return False
|
||||
|
||||
# if there are required zones and there is no overlap
|
||||
required_zones = snapshot_config.required_zones
|
||||
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
|
||||
logger.debug(
|
||||
f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones"
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def should_retain_recording(self, camera: str, obj: TrackedObject):
|
||||
if obj.false_positive:
|
||||
return False
|
||||
|
||||
record_config: RecordConfig = self.config.cameras[camera].record
|
||||
|
||||
# Recording is disabled
|
||||
if not record_config.enabled:
|
||||
return False
|
||||
|
||||
# object never changed position
|
||||
if obj.obj_data["position_changes"] == 0:
|
||||
return False
|
||||
|
||||
# If the object is not considered an alert or detection
|
||||
if obj.max_severity is None:
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def should_mqtt_snapshot(self, camera, obj: TrackedObject):
|
||||
# object never changed position
|
||||
if obj.obj_data["position_changes"] == 0:
|
||||
return False
|
||||
|
||||
# if there are required zones and there is no overlap
|
||||
required_zones = self.config.cameras[camera].mqtt.required_zones
|
||||
if len(required_zones) > 0 and not set(obj.entered_zones) & set(required_zones):
|
||||
logger.debug(
|
||||
f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones"
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
def update_mqtt_motion(self, camera, frame_time, motion_boxes):
|
||||
# publish if motion is currently being detected
|
||||
if motion_boxes:
|
||||
# only send ON if motion isn't already active
|
||||
if self.last_motion_detected.get(camera, 0) == 0:
|
||||
self.dispatcher.publish(
|
||||
f"{camera}/motion",
|
||||
"ON",
|
||||
retain=False,
|
||||
)
|
||||
|
||||
            # always update latest motion
|
||||
self.last_motion_detected[camera] = frame_time
|
||||
elif self.last_motion_detected.get(camera, 0) > 0:
|
||||
mqtt_delay = self.config.cameras[camera].motion.mqtt_off_delay
|
||||
|
||||
# If no motion, make sure the off_delay has passed
|
||||
if frame_time - self.last_motion_detected.get(camera, 0) >= mqtt_delay:
|
||||
self.dispatcher.publish(
|
||||
f"{camera}/motion",
|
||||
"OFF",
|
||||
retain=False,
|
||||
)
|
||||
# reset the last_motion so redundant `off` commands aren't sent
|
||||
self.last_motion_detected[camera] = 0
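A compact restatement of the ON/OFF debounce above, assuming epoch-second timestamps and the camera's mqtt_off_delay in seconds (sketch only, not the shipped helper):

def motion_payload(
    last_motion: float, frame_time: float, has_motion: bool, off_delay: float
) -> str | None:
    """Return "ON", "OFF", or None when nothing should be published."""
    if has_motion:
        # only publish ON on the transition from idle to motion
        return "ON" if last_motion == 0 else None
    if last_motion > 0 and frame_time - last_motion >= off_delay:
        # motion stopped long enough ago; publish OFF once
        return "OFF"
    return None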
|
||||
|
||||
def get_best(self, camera, label):
|
||||
# TODO: need a lock here
|
||||
camera_state = self.camera_states[camera]
|
||||
if label in camera_state.best_objects:
|
||||
best_obj = camera_state.best_objects[label]
|
||||
best = best_obj.thumbnail_data.copy()
|
||||
best["frame"] = camera_state.frame_cache.get(
|
||||
best_obj.thumbnail_data["frame_time"]
|
||||
)
|
||||
return best
|
||||
else:
|
||||
return {}
|
||||
|
||||
def get_current_frame(
|
||||
self, camera: str, draw_options: dict[str, any] = {}
|
||||
) -> np.ndarray | None:
|
||||
if camera == "birdseye":
|
||||
return self.frame_manager.get(
|
||||
"birdseye",
|
||||
(self.config.birdseye.height * 3 // 2, self.config.birdseye.width),
|
||||
)
|
||||
|
||||
if camera not in self.camera_states:
|
||||
return None
|
||||
|
||||
return self.camera_states[camera].get_current_frame(draw_options)
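The (height * 3 // 2, width) shape used for the birdseye frame above reflects that frames are shared as planar YUV 4:2:0, where the U and V planes together add half the luma height. A small sizing sketch:

import numpy as np


def yuv420_shape(width: int, height: int) -> tuple[int, int]:
    # Y plane: height rows; U + V planes: height // 2 rows combined, at full width
    return (height * 3 // 2, width)


birdseye = np.zeros(yuv420_shape(1280, 720), dtype=np.uint8)  # shape (1080, 1280)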
|
||||
|
||||
def get_current_frame_time(self, camera) -> int:
|
||||
"""Returns the latest frame time for a given camera."""
|
||||
return self.camera_states[camera].current_frame_time
|
||||
|
||||
def set_sub_label(
|
||||
self, event_id: str, sub_label: str | None, score: float | None
|
||||
) -> None:
|
||||
"""Update sub label for given event id."""
|
||||
tracked_obj: TrackedObject = None
|
||||
|
||||
for state in self.camera_states.values():
|
||||
tracked_obj = state.tracked_objects.get(event_id)
|
||||
|
||||
if tracked_obj is not None:
|
||||
break
|
||||
|
||||
try:
|
||||
event: Event = Event.get(Event.id == event_id)
|
||||
except DoesNotExist:
|
||||
event = None
|
||||
|
||||
if not tracked_obj and not event:
|
||||
return
|
||||
|
||||
if tracked_obj:
|
||||
tracked_obj.obj_data["sub_label"] = (sub_label, score)
|
||||
|
||||
if event:
|
||||
event.sub_label = sub_label
|
||||
data = event.data
|
||||
if sub_label is None:
|
||||
data["sub_label_score"] = None
|
||||
elif score is not None:
|
||||
data["sub_label_score"] = score
|
||||
event.data = data
|
||||
event.save()
|
||||
|
||||
# update timeline items
|
||||
Timeline.update(
|
||||
data=Timeline.data.update({"sub_label": (sub_label, score)})
|
||||
).where(Timeline.source_id == event_id).execute()
|
||||
|
||||
return True
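set_sub_label is driven by EventMetadataSubscriber messages handled in run() below, where the payload is (event_id, sub_label, score). A hedged sketch of how a producer is assumed to publish one, using the two-argument publish pattern shown elsewhere in this diff (identifiers and score are illustrative):

from frigate.comms.event_metadata_updater import (
    EventMetadataPublisher,
    EventMetadataTypeEnum,
)

publisher = EventMetadataPublisher()
publisher.publish(
    EventMetadataTypeEnum.sub_label,
    ("1718000000.123456-ab12cd", "delivery driver", 0.87),
)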
|
||||
|
||||
def set_recognized_license_plate(
|
||||
self, event_id: str, recognized_license_plate: str | None, score: float | None
|
||||
) -> None:
|
||||
"""Update recognized license plate for given event id."""
|
||||
tracked_obj: TrackedObject = None
|
||||
|
||||
for state in self.camera_states.values():
|
||||
tracked_obj = state.tracked_objects.get(event_id)
|
||||
|
||||
if tracked_obj is not None:
|
||||
break
|
||||
|
||||
try:
|
||||
event: Event = Event.get(Event.id == event_id)
|
||||
except DoesNotExist:
|
||||
event = None
|
||||
|
||||
if not tracked_obj and not event:
|
||||
return
|
||||
|
||||
if tracked_obj:
|
||||
tracked_obj.obj_data["recognized_license_plate"] = (
|
||||
recognized_license_plate,
|
||||
score,
|
||||
)
|
||||
|
||||
if event:
|
||||
data = event.data
|
||||
data["recognized_license_plate"] = recognized_license_plate
|
||||
if recognized_license_plate is None:
|
||||
data["recognized_license_plate_score"] = None
|
||||
elif score is not None:
|
||||
data["recognized_license_plate_score"] = score
|
||||
event.data = data
|
||||
event.save()
|
||||
|
||||
return True
|
||||
|
||||
def create_manual_event(self, payload: tuple) -> None:
|
||||
(
|
||||
frame_time,
|
||||
camera_name,
|
||||
label,
|
||||
event_id,
|
||||
include_recording,
|
||||
score,
|
||||
sub_label,
|
||||
duration,
|
||||
source_type,
|
||||
draw,
|
||||
) = payload
|
||||
|
||||
# save the snapshot image
|
||||
self.camera_states[camera_name].save_manual_event_image(event_id, label, draw)
|
||||
end_time = frame_time + duration if duration is not None else None
|
||||
|
||||
# send event to event maintainer
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.api,
|
||||
EventStateEnum.start,
|
||||
camera_name,
|
||||
"",
|
||||
{
|
||||
"id": event_id,
|
||||
"label": label,
|
||||
"sub_label": sub_label,
|
||||
"score": score,
|
||||
"camera": camera_name,
|
||||
"start_time": frame_time
|
||||
- self.config.cameras[camera_name].record.event_pre_capture,
|
||||
"end_time": end_time,
|
||||
"has_clip": self.config.cameras[camera_name].record.enabled
|
||||
and include_recording,
|
||||
"has_snapshot": True,
|
||||
"type": source_type,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
if source_type == "api":
|
||||
self.ongoing_manual_events[event_id] = camera_name
|
||||
self.detection_publisher.publish(
|
||||
(
|
||||
camera_name,
|
||||
frame_time,
|
||||
{
|
||||
"state": (
|
||||
ManualEventState.complete
|
||||
if end_time
|
||||
else ManualEventState.start
|
||||
),
|
||||
"label": f"{label}: {sub_label}" if sub_label else label,
|
||||
"event_id": event_id,
|
||||
"end_time": end_time,
|
||||
},
|
||||
),
|
||||
DetectionTypeEnum.api.value,
|
||||
)
|
||||
|
||||
def end_manual_event(self, payload: tuple) -> None:
|
||||
(event_id, end_time) = payload
|
||||
|
||||
self.event_sender.publish(
|
||||
(
|
||||
EventTypeEnum.api,
|
||||
EventStateEnum.end,
|
||||
None,
|
||||
"",
|
||||
{"id": event_id, "end_time": end_time},
|
||||
)
|
||||
)
|
||||
|
||||
if event_id in self.ongoing_manual_events:
|
||||
self.detection_publisher.publish(
|
||||
(
|
||||
self.ongoing_manual_events[event_id],
|
||||
end_time,
|
||||
{
|
||||
"state": ManualEventState.end,
|
||||
"event_id": event_id,
|
||||
"end_time": end_time,
|
||||
},
),
DetectionTypeEnum.api.value,
)
self.ongoing_manual_events.pop(event_id)

def force_end_all_events(self, camera: str, camera_state: CameraState):
"""Ends all active events on camera when disabling."""
last_frame_name = camera_state.previous_frame_id
for obj_id, obj in list(camera_state.tracked_objects.items()):
if "end_time" not in obj.obj_data:
logger.debug(f"Camera {camera} disabled, ending active event {obj_id}")
obj.obj_data["end_time"] = datetime.datetime.now().timestamp()
# end callbacks
for callback in camera_state.callbacks["end"]:
callback(camera, obj, last_frame_name)

# camera activity callbacks
for callback in camera_state.callbacks["camera_activity"]:
callback(
camera,
{"enabled": False, "motion": 0, "objects": []},
)

def run(self):
while not self.stop_event.is_set():
# check for config updates
while True:
(
updated_enabled_topic,
updated_enabled_config,
) = self.config_enabled_subscriber.check_for_update()

if not updated_enabled_topic:
break

camera_name = updated_enabled_topic.rpartition("/")[-1]
self.config.cameras[
camera_name
].enabled = updated_enabled_config.enabled

if self.camera_states[camera_name].prev_enabled is None:
self.camera_states[
camera_name
].prev_enabled = updated_enabled_config.enabled

# manage camera disabled state
for camera, config in self.config.cameras.items():
if not config.enabled_in_config:
continue

current_enabled = config.enabled
camera_state = self.camera_states[camera]

if camera_state.prev_enabled and not current_enabled:
logger.debug(f"Not processing objects for disabled camera {camera}")
self.force_end_all_events(camera, camera_state)

camera_state.prev_enabled = current_enabled

if not current_enabled:
continue

# check for sub label updates
while True:
(raw_topic, payload) = self.sub_label_subscriber.check_for_update(
timeout=0
)

if not raw_topic:
break

topic = str(raw_topic)

if topic.endswith(EventMetadataTypeEnum.sub_label.value):
(event_id, sub_label, score) = payload
self.set_sub_label(event_id, sub_label, score)
if topic.endswith(EventMetadataTypeEnum.recognized_license_plate.value):
(event_id, recognized_license_plate, score) = payload
self.set_recognized_license_plate(
event_id, recognized_license_plate, score
)
elif topic.endswith(EventMetadataTypeEnum.manual_event_create.value):
self.create_manual_event(payload)
elif topic.endswith(EventMetadataTypeEnum.manual_event_end.value):
self.end_manual_event(payload)

try:
(
camera,
frame_name,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = self.tracked_objects_queue.get(True, 1)
except queue.Empty:
continue

if not self.config.cameras[camera].enabled:
logger.debug(f"Camera {camera} disabled, skipping update")
continue

camera_state = self.camera_states[camera]

camera_state.update(
frame_name, frame_time, current_tracked_objects, motion_boxes, regions
)

self.update_mqtt_motion(camera, frame_time, motion_boxes)

tracked_objects = [
o.to_dict() for o in camera_state.tracked_objects.values()
]

# publish info on this frame
self.detection_publisher.publish(
(
camera,
frame_name,
frame_time,
tracked_objects,
motion_boxes,
regions,
),
DetectionTypeEnum.video.value,
)

# cleanup event finished queue
while not self.stop_event.is_set():
update = self.event_end_subscriber.check_for_update(timeout=0.01)

if not update:
break

event_id, camera, _ = update
self.camera_states[camera].finished(event_id)

# shut down camera states
for state in self.camera_states.values():
state.shutdown()

self.requestor.stop()
self.detection_publisher.stop()
self.event_sender.stop()
self.event_end_subscriber.stop()
self.sub_label_subscriber.stop()
self.config_enabled_subscriber.stop()

logger.info("Exiting object processor...")
@ -153,6 +153,12 @@ class TrackedObject:
"current_estimated_speed": self.current_estimated_speed,
"velocity_angle": self.velocity_angle,
"path_data": self.path_data,
"recognized_license_plate": obj_data.get(
"recognized_license_plate"
),
"recognized_license_plate_score": obj_data.get(
"recognized_license_plate_score"
),
}
thumb_update = True
@ -365,6 +371,7 @@ class TrackedObject:
"average_estimated_speed": self.average_estimated_speed,
"velocity_angle": self.velocity_angle,
"path_data": self.path_data,
"recognized_license_plate": self.obj_data.get("recognized_license_plate"),
}

return event
@ -606,23 +606,24 @@ def process_frames(
startup_scan = True
stationary_frame_counter = 0
camera_enabled = True

region_min_size = get_min_region_size(model_config)

prev_enabled = None

while not stop_event.is_set():
_, enabled_config = enabled_config_subscriber.check_for_update()
current_enabled = (
enabled_config.enabled
if enabled_config
else (prev_enabled if prev_enabled is not None else True)
)
if prev_enabled is None:
prev_enabled = current_enabled
_, updated_enabled_config = enabled_config_subscriber.check_for_update()

if prev_enabled and not current_enabled and camera_metrics.frame_queue.empty():
if updated_enabled_config:
prev_enabled = camera_enabled
camera_enabled = updated_enabled_config.enabled

if (
not camera_enabled
and prev_enabled != camera_enabled
and camera_metrics.frame_queue.empty()
):
logger.debug(f"Camera {camera_name} disabled, clearing tracked objects")
prev_enabled = camera_enabled

# Clear norfair's dictionaries
object_tracker.tracked_objects.clear()
@ -638,9 +639,7 @@ def process_frames(
for tracker in object_tracker.default_tracker.values():
tracker.tracked_objects = []

prev_enabled = current_enabled

if not current_enabled:
if not camera_enabled:
time.sleep(0.1)
continue
@ -15,8 +15,8 @@ sys.path.append("/workspace/frigate")
from frigate.config import FrigateConfig # noqa: E402
from frigate.motion import MotionDetector # noqa: E402
from frigate.object_detection import LocalObjectDetector # noqa: E402
from frigate.object_processing import CameraState # noqa: E402
from frigate.track.centroid_tracker import CentroidTracker # noqa: E402
from frigate.track.object_processing import CameraState # noqa: E402
from frigate.util import ( # noqa: E402
EventsPerSecond,
SharedMemoryFrameManager,
@ -87,7 +87,7 @@ export function UserAuthForm({ className, ...props }: UserAuthFormProps) {
return (
<div className={cn("grid gap-6", className)} {...props}>
<Form {...form}>
<form onSubmit={form.handleSubmit(onSubmit)}>
<form onSubmit={form.handleSubmit(onSubmit)} className="space-y-4">
<FormField
name="user"
render={({ field }) => (
@ -333,6 +333,23 @@ function ObjectDetailsTab({
}
}, [search]);

const recognizedLicensePlateScore = useMemo(() => {
if (!search) {
return undefined;
}

if (
search.data.recognized_license_plate &&
search.data?.recognized_license_plate_score
) {
return Math.round(
(search.data?.recognized_license_plate_score ?? 0) * 100,
);
} else {
return undefined;
}
}, [search]);

const averageEstimatedSpeed = useMemo(() => {
if (!search || !search.data?.average_estimated_speed) {
return undefined;
@ -538,6 +555,20 @@ function ObjectDetailsTab({
</Tooltip>
</div>
</div>
{search?.data.recognized_license_plate && (
<div className="flex flex-col gap-1.5">
<div className="text-sm text-primary/40">
Recognized License Plate
</div>
<div className="flex flex-col space-y-0.5 text-sm">
<div className="flex flex-row items-center gap-2">
{search.data.recognized_license_plate}{" "}
{recognizedLicensePlateScore &&
` (${recognizedLicensePlateScore}%)`}
</div>
</div>
</div>
)}
<div className="flex flex-col gap-1.5">
<div className="text-sm text-primary/40">
<div className="flex flex-row items-center gap-1">
@ -33,6 +33,14 @@ import {
TooltipProvider,
TooltipTrigger,
} from "@/components/ui/tooltip";
import {
Command,
CommandEmpty,
CommandInput,
CommandItem,
CommandList,
} from "@/components/ui/command";
import { LuCheck } from "react-icons/lu";

type SearchFilterDialogProps = {
config?: FrigateConfig;
@ -77,7 +85,8 @@ export default function SearchFilterDialog({
(currentFilter.max_score ?? 1) < 1 ||
(currentFilter.max_speed ?? 150) < 150 ||
(currentFilter.zones?.length ?? 0) > 0 ||
(currentFilter.sub_labels?.length ?? 0) > 0),
(currentFilter.sub_labels?.length ?? 0) > 0 ||
(currentFilter.recognized_license_plate?.length ?? 0) > 0),
[currentFilter],
);
@ -119,6 +128,15 @@ export default function SearchFilterDialog({
setCurrentFilter({ ...currentFilter, sub_labels: newSubLabels })
}
/>
<RecognizedLicensePlatesFilterContent
recognizedLicensePlates={currentFilter.recognized_license_plate}
setRecognizedLicensePlates={(plate) =>
setCurrentFilter({
...currentFilter,
recognized_license_plate: plate,
})
}
/>
<ScoreFilterContent
minScore={currentFilter.min_score}
maxScore={currentFilter.max_score}
@ -192,6 +210,7 @@ export default function SearchFilterDialog({
max_speed: undefined,
has_snapshot: undefined,
has_clip: undefined,
recognized_license_plate: undefined,
}));
}}
>
@ -830,3 +849,130 @@ export function SnapshotClipFilterContent({
</div>
);
}

type RecognizedLicensePlatesFilterContentProps = {
recognizedLicensePlates: string[] | undefined;
setRecognizedLicensePlates: (
recognizedLicensePlates: string[] | undefined,
) => void;
};

export function RecognizedLicensePlatesFilterContent({
recognizedLicensePlates,
setRecognizedLicensePlates,
}: RecognizedLicensePlatesFilterContentProps) {
const { data: allRecognizedLicensePlates, error } = useSWR<string[]>(
"recognized_license_plates",
{
revalidateOnFocus: false,
},
);

const [selectedRecognizedLicensePlates, setSelectedRecognizedLicensePlates] =
useState<string[]>(recognizedLicensePlates || []);
const [inputValue, setInputValue] = useState("");

useEffect(() => {
if (recognizedLicensePlates) {
setSelectedRecognizedLicensePlates(recognizedLicensePlates);
} else {
setSelectedRecognizedLicensePlates([]);
}
}, [recognizedLicensePlates]);

const handleSelect = (recognizedLicensePlate: string) => {
const newSelected = selectedRecognizedLicensePlates.includes(
recognizedLicensePlate,
)
? selectedRecognizedLicensePlates.filter(
(id) => id !== recognizedLicensePlate,
) // Deselect
: [...selectedRecognizedLicensePlates, recognizedLicensePlate]; // Select

setSelectedRecognizedLicensePlates(newSelected);
if (newSelected.length === 0) {
setRecognizedLicensePlates(undefined); // Clear filter if no plates selected
} else {
setRecognizedLicensePlates(newSelected);
}
};

if (!allRecognizedLicensePlates || allRecognizedLicensePlates.length === 0) {
return null;
}

const filteredRecognizedLicensePlates =
allRecognizedLicensePlates?.filter((id) =>
id.toLowerCase().includes(inputValue.toLowerCase()),
) || [];

return (
<div className="overflow-x-hidden">
<DropdownMenuSeparator className="mb-3" />
<div className="mb-3 text-lg">Recognized License Plates</div>
{error ? (
<p className="text-sm text-red-500">
Failed to load recognized license plates.
</p>
) : !allRecognizedLicensePlates ? (
<p className="text-sm text-muted-foreground">
Loading recognized license plates...
</p>
) : (
<>
<Command className="border border-input bg-background">
<CommandInput
placeholder="Type to search license plates..."
value={inputValue}
onValueChange={setInputValue}
/>
<CommandList className="max-h-[200px] overflow-auto">
{filteredRecognizedLicensePlates.length === 0 && inputValue && (
<CommandEmpty>No license plates found.</CommandEmpty>
)}
{filteredRecognizedLicensePlates.map((plate) => (
<CommandItem
key={plate}
value={plate}
onSelect={() => handleSelect(plate)}
className="cursor-pointer"
>
<LuCheck
className={cn(
"mr-2 h-4 w-4",
selectedRecognizedLicensePlates.includes(plate)
? "opacity-100"
: "opacity-0",
)}
/>
{plate}
</CommandItem>
))}
</CommandList>
</Command>
{selectedRecognizedLicensePlates.length > 0 && (
<div className="mt-2 flex flex-wrap gap-2">
{selectedRecognizedLicensePlates.map((id) => (
<span
key={id}
className="inline-flex items-center rounded bg-selected px-2 py-1 text-sm text-white"
>
{id}
<button
onClick={() => handleSelect(id)}
className="ml-1 text-white hover:text-gray-200"
>
×
</button>
</span>
))}
</div>
)}
</>
)}
<p className="mt-1 text-sm text-muted-foreground">
Select one or more plates from the list.
</p>
</div>
);
}
@ -301,22 +301,6 @@ export default function LivePlayer({
player = <ActivityIndicator />;
}

// if (cameraConfig.name == "lpr")
// console.log(
// cameraConfig.name,
// "enabled",
// cameraEnabled,
// "prev enabled",
// prevCameraEnabledRef.current,
// "offline",
// offline,
// "show still",
// showStillWithoutActivity,
// "live ready",
// liveReady,
// player,
// );

return (
<div
ref={cameraRef ?? internalContainerRef}
@ -378,7 +362,9 @@ export default function LivePlayer({
{[
...new Set([
...(objects || []).map(({ label, sub_label }) =>
label.endsWith("verified") ? sub_label : label,
label.endsWith("verified")
? sub_label
: label.replaceAll("_", " "),
),
]),
]
@ -411,7 +397,7 @@ export default function LivePlayer({
/>
</div>

{offline && !showStillWithoutActivity && (
{offline && !showStillWithoutActivity && cameraEnabled && (
<div className="absolute inset-0 left-1/2 top-1/2 flex h-96 w-96 -translate-x-1/2 -translate-y-1/2">
<div className="flex flex-col items-center justify-center rounded-lg bg-background/50 p-5">
<p className="my-5 text-lg">Stream offline</p>
@ -105,6 +105,8 @@ export default function Explore() {
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["sub_labels"],
recognized_license_plate:
searchSearchParams["recognized_license_plate"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
@ -140,6 +142,8 @@ export default function Explore() {
cameras: searchSearchParams["cameras"],
labels: searchSearchParams["labels"],
sub_labels: searchSearchParams["sub_labels"],
recognized_license_plate:
searchSearchParams["recognized_license_plate"],
zones: searchSearchParams["zones"],
before: searchSearchParams["before"],
after: searchSearchParams["after"],
@ -19,6 +19,7 @@ import {
TooltipContent,
TooltipTrigger,
} from "@/components/ui/tooltip";
import useKeyboardListener from "@/hooks/use-keyboard-listener";
import useOptimisticState from "@/hooks/use-optimistic-state";
import { cn } from "@/lib/utils";
import { FrigateConfig } from "@/types/frigateConfig";
@ -141,6 +142,73 @@ export default function FaceLibrary() {
[refreshFaces],
);

// face multiselect

const [selectedFaces, setSelectedFaces] = useState<string[]>([]);

const onClickFace = useCallback(
(imageId: string) => {
const index = selectedFaces.indexOf(imageId);

if (index != -1) {
if (selectedFaces.length == 1) {
setSelectedFaces([]);
} else {
const copy = [
...selectedFaces.slice(0, index),
...selectedFaces.slice(index + 1),
];
setSelectedFaces(copy);
}
} else {
const copy = [...selectedFaces];
copy.push(imageId);
setSelectedFaces(copy);
}
},
[selectedFaces, setSelectedFaces],
);

const onDelete = useCallback(() => {
axios
.post(`/faces/train/delete`, { ids: selectedFaces })
.then((resp) => {
setSelectedFaces([]);

if (resp.status == 200) {
toast.success(`Successfully deleted face.`, {
position: "top-center",
});
refreshFaces();
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Failed to delete: ${errorMessage}`, {
position: "top-center",
});
});
}, [selectedFaces, refreshFaces]);

// keyboard

useKeyboardListener(["a"], (key, modifiers) => {
if (modifiers.repeat || !modifiers.down) {
return;
}

switch (key) {
case "a":
if (modifiers.ctrl) {
setSelectedFaces([...trainImages]);
}
break;
}
});

if (!config) {
return <ActivityIndicator />;
}
@ -210,16 +278,27 @@ export default function FaceLibrary() {
<ScrollBar orientation="horizontal" className="h-0" />
</div>
</ScrollArea>
<div className="flex items-center justify-center gap-2">
<Button className="flex gap-2" onClick={() => setAddFace(true)}>
<LuScanFace className="size-7 rounded-md p-1 text-secondary-foreground" />
Add Face
</Button>
<Button className="flex gap-2" onClick={() => setUpload(true)}>
<LuImagePlus className="size-7 rounded-md p-1 text-secondary-foreground" />
Upload Image
</Button>
</div>
{selectedFaces?.length > 0 ? (
<div className="flex items-center justify-center gap-2">
<Button className="flex gap-2" onClick={() => onDelete()}>
<LuTrash2 className="size-7 rounded-md p-1 text-secondary-foreground" />
Delete Face Attempts
</Button>
</div>
) : (
<div className="flex items-center justify-center gap-2">
<Button className="flex gap-2" onClick={() => setAddFace(true)}>
<LuScanFace className="size-7 rounded-md p-1 text-secondary-foreground" />
Add Face
</Button>
{pageToggle != "train" && (
<Button className="flex gap-2" onClick={() => setUpload(true)}>
<LuImagePlus className="size-7 rounded-md p-1 text-secondary-foreground" />
Upload Image
</Button>
)}
</div>
)}
</div>
{pageToggle &&
(pageToggle == "train" ? (
@ -227,6 +306,8 @@ export default function FaceLibrary() {
config={config}
attemptImages={trainImages}
faceNames={faces}
selectedFaces={selectedFaces}
onClickFace={onClickFace}
onRefresh={refreshFaces}
/>
) : (
@ -244,22 +325,28 @@ type TrainingGridProps = {
config: FrigateConfig;
attemptImages: string[];
faceNames: string[];
selectedFaces: string[];
onClickFace: (image: string) => void;
onRefresh: () => void;
};
function TrainingGrid({
config,
attemptImages,
faceNames,
selectedFaces,
onClickFace,
onRefresh,
}: TrainingGridProps) {
return (
<div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll">
<div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll p-1">
{attemptImages.map((image: string) => (
<FaceAttempt
key={image}
image={image}
faceNames={faceNames}
threshold={config.face_recognition.threshold}
selected={selectedFaces.includes(image)}
onClick={() => onClickFace(image)}
onRefresh={onRefresh}
/>
))}
@ -271,12 +358,16 @@ type FaceAttemptProps = {
image: string;
faceNames: string[];
threshold: number;
selected: boolean;
onClick: () => void;
onRefresh: () => void;
};
function FaceAttempt({
image,
faceNames,
threshold,
selected,
onClick,
onRefresh,
}: FaceAttemptProps) {
const data = useMemo(() => {
@ -336,30 +427,16 @@ function FaceAttempt({
});
}, [image, onRefresh]);

const onDelete = useCallback(() => {
axios
.post(`/faces/train/delete`, { ids: [image] })
.then((resp) => {
if (resp.status == 200) {
toast.success(`Successfully deleted face.`, {
position: "top-center",
});
onRefresh();
}
})
.catch((error) => {
const errorMessage =
error.response?.data?.message ||
error.response?.data?.detail ||
"Unknown error";
toast.error(`Failed to delete: ${errorMessage}`, {
position: "top-center",
});
});
}, [image, onRefresh]);

return (
<div className="relative flex flex-col rounded-lg">
<div
className={cn(
"relative flex cursor-pointer flex-col rounded-lg outline outline-[3px]",
selected
? "shadow-selected outline-selected"
: "outline-transparent duration-500",
)}
onClick={onClick}
>
<div className="w-full overflow-hidden rounded-t-lg border border-t-0 *:text-card-foreground">
<img className="size-40" src={`${baseUrl}clips/faces/train/${image}`} />
</div>
@ -409,15 +486,6 @@ function FaceAttempt({
</TooltipTrigger>
<TooltipContent>Reprocess Face</TooltipContent>
</Tooltip>
<Tooltip>
<TooltipTrigger>
<LuTrash2
className="size-5 cursor-pointer text-primary-variant hover:text-primary"
onClick={onDelete}
/>
</TooltipTrigger>
<TooltipContent>Delete Face Attempt</TooltipContent>
</Tooltip>
</div>
</div>
</div>
@ -155,15 +155,20 @@ export interface CameraConfig {
record: {
enabled: boolean;
enabled_in_config: boolean;
events: {
objects: string[] | null;
alerts: {
post_capture: number;
pre_capture: number;
required_zones: string[];
retain: {
default: number;
days: number;
mode: string;
};
};
detections: {
post_capture: number;
pre_capture: number;
retain: {
days: number;
mode: string;
objects: Record<string, unknown>;
};
};
expire_interval: number;
@ -58,6 +58,8 @@ export type SearchResult = {
average_estimated_speed: number;
velocity_angle: number;
path_data: [number[], number][];
recognized_license_plate?: string;
recognized_license_plate_score?: number;
};
};
@ -66,6 +68,7 @@ export type SearchFilter = {
cameras?: string[];
labels?: string[];
sub_labels?: string[];
recognized_license_plate?: string[];
zones?: string[];
before?: number;
after?: number;
@ -89,6 +92,7 @@ export type SearchQueryParams = {
cameras?: string[];
labels?: string[];
sub_labels?: string[];
recognized_license_plate?: string[];
zones?: string[];
before?: string;
after?: string;
@ -757,7 +757,12 @@ function DetectionReview({
/>
</div>
<div
className={`review-item-ring pointer-events-none absolute inset-0 z-10 size-full rounded-lg outline outline-[3px] -outline-offset-[2.8px] ${selected ? `outline-severity_${value.severity} shadow-severity_${value.severity}` : "outline-transparent duration-500"}`}
className={cn(
"review-item-ring pointer-events-none absolute inset-0 z-10 size-full rounded-lg outline outline-[3px] -outline-offset-[2.8px]",
selected
? `outline-severity_${value.severity} shadow-severity_${value.severity}`
: "outline-transparent duration-500",
)}
/>
</div>
);
@ -1013,7 +1013,7 @@ function FrigateCameraFeatures({
<div className="font-semibold">
Started manual on-demand recording.
</div>
{!camera.record.enabled || camera.record.retain.days == 0 ? (
{!camera.record.enabled || camera.record.alerts.retain.days == 0 ? (
<div>
Since recording is disabled or restricted in the config for this
camera, only a snapshot will be saved.
@ -121,6 +121,9 @@ export default function SearchView({
}, [config, searchFilter]);

const { data: allSubLabels } = useSWR("sub_labels");
const { data: allRecognizedLicensePlates } = useSWR(
"recognized_license_plates",
);

const allZones = useMemo<string[]>(() => {
if (!config) {
@ -160,12 +163,20 @@ export default function SearchView({
max_score: ["100"],
min_speed: ["1"],
max_speed: ["150"],
recognized_license_plate: allRecognizedLicensePlates,
has_clip: ["yes", "no"],
has_snapshot: ["yes", "no"],
...(config?.plus?.enabled &&
searchFilter?.has_snapshot && { is_submitted: ["yes", "no"] }),
}),
[config, allLabels, allZones, allSubLabels, searchFilter],
[
config,
allLabels,
allZones,
allSubLabels,
allRecognizedLicensePlates,
searchFilter,
],
);

// remove duplicate event ids