mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-08-04 13:47:37 +02:00)

Merged local branch

This commit is contained in: f6856a9673
@@ -108,7 +108,6 @@ imagestream
 imdecode
 imencode
 imread
-imutils
 imwrite
 interp
 iostat
@@ -73,7 +73,7 @@ body:
     attributes:
       label: Operating system
       options:
-        - HassOS
+        - Home Assistant OS
         - Debian
         - Other Linux
         - Proxmox
@@ -87,7 +87,7 @@ body:
     attributes:
       label: Install method
       options:
-        - HassOS Addon
+        - Home Assistant Add-on
         - Docker Compose
         - Docker CLI
         - Proxmox via Docker
@@ -59,7 +59,7 @@ body:
     attributes:
       label: Operating system
       options:
-        - HassOS
+        - Home Assistant OS
         - Debian
         - Other Linux
         - Proxmox
@@ -73,7 +73,7 @@ body:
     attributes:
       label: Install method
       options:
-        - HassOS Addon
+        - Home Assistant Add-on
         - Docker Compose
         - Docker CLI
         - Proxmox via Docker
@@ -53,7 +53,7 @@ body:
     attributes:
       label: Install method
       options:
-        - HassOS Addon
+        - Home Assistant Add-on
         - Docker Compose
         - Docker CLI
         - Proxmox via Docker
@@ -73,7 +73,7 @@ body:
     attributes:
       label: Install method
       options:
-        - HassOS Addon
+        - Home Assistant Add-on
         - Docker Compose
         - Docker CLI
         - Proxmox via Docker
@@ -69,7 +69,7 @@ body:
     attributes:
       label: Install method
       options:
-        - HassOS Addon
+        - Home Assistant Add-on
        - Docker Compose
        - Docker CLI
        - Proxmox via Docker
.github/DISCUSSION_TEMPLATE/report-a-bug.yml (vendored, 4 changes)
@@ -97,7 +97,7 @@ body:
     attributes:
       label: Operating system
       options:
-        - HassOS
+        - Home Assistant OS
         - Debian
         - Other Linux
         - Proxmox
@@ -111,7 +111,7 @@ body:
     attributes:
       label: Install method
       options:
-        - HassOS Addon
+        - Home Assistant Add-on
         - Docker Compose
         - Docker CLI
     validations:
.github/pull_request_template.md (vendored, 9 changes)
@@ -2,12 +2,12 @@
 <!--
 Thank you!

 If you're introducing a new feature or significantly refactoring existing functionality,
 we encourage you to start a discussion first. This helps ensure your idea aligns with
 Frigate's development goals.

 Describe what this pull request does and how it will benefit users of Frigate.
 Please describe in detail any considerations, breaking changes, etc. that are
 made in this pull request.
 -->
@@ -24,7 +24,7 @@
 ## Additional information

 - This PR fixes or closes issue: fixes #
 - This PR is related to issue:

 ## Checklist
@@ -35,4 +35,5 @@
 - [ ] The code change is tested and works locally.
 - [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
 - [ ] There is no commented out code in this PR.
+- [ ] UI changes including text have used i18n keys and have been added to the `en` locale.
 - [ ] The code has been formatted using Ruff (`ruff format frigate`)
.github/workflows/release.yml (vendored, 4 changes)
@@ -39,14 +39,14 @@ jobs:
           STABLE_TAG=${BASE}:stable
           PULL_TAG=${BASE}:${BUILD_TAG}
           docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
-          for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
+          for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do
             docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
           done

           # stable tag
           if [[ "${BUILD_TYPE}" == "stable" ]]; then
             docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
-            for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
+            for variant in standard-arm64 tensorrt tensorrt-jp6 rk rocm; do
               docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
             done
           fi
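The release job promotes already-built multi-arch images by retagging them with skopeo instead of rebuilding, once for the version tag and once for the stable tag. A quick way to confirm that a promoted tag still carries every architecture is to inspect its manifest list; a hedged spot-check, with the image reference assumed rather than taken from the workflow:

```bash
# Print one architecture entry per platform in the manifest list.
# The registry path below is an assumed example, not from this workflow.
docker run --rm quay.io/skopeo/stable:latest inspect --raw \
  docker://ghcr.io/blakeblackshear/frigate:stable |
  grep -o '"architecture": *"[^"]*"'
```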
README.md (20 changes)
@@ -4,9 +4,15 @@

 # Frigate - NVR With Realtime Object Detection for IP Cameras

+<a href="https://hosted.weblate.org/engage/frigate-nvr/">
+  <img src="https://hosted.weblate.org/widget/frigate-nvr/language-badge.svg" alt="Translation status" />
+</a>
+
+\[English\] | [简体中文](https://github.com/blakeblackshear/frigate/blob/dev/README_CN.md)
+
 A complete and local NVR designed for [Home Assistant](https://www.home-assistant.io) with AI object detection. Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras.

-Use of a [Google Coral Accelerator](https://coral.ai/products/) is optional, but highly recommended. The Coral will outperform even the best CPUs and can process 100+ FPS with very little overhead.
+Use of a GPU or AI accelerator such as a [Google Coral](https://coral.ai/products/) or [Hailo](https://hailo.ai/) is highly recommended. AI accelerators will outperform even the best CPUs with very little overhead.

 - Tight integration with Home Assistant via a [custom component](https://github.com/blakeblackshear/frigate-hass-integration)
 - Designed to minimize resource use and maximize performance by only looking for objects when and where it is necessary
@@ -30,21 +36,33 @@ If you would like to make a donation to support development, please use [Github
 ## Screenshots

 ### Live dashboard

 <div>
 <img width="800" alt="Live dashboard" src="https://github.com/blakeblackshear/frigate/assets/569905/5e713cb9-9db5-41dc-947a-6937c3bc376e">
 </div>

 ### Streamlined review workflow

 <div>
 <img width="800" alt="Streamlined review workflow" src="https://github.com/blakeblackshear/frigate/assets/569905/6fed96e8-3b18-40e5-9ddc-31e6f3c9f2ff">
 </div>

 ### Multi-camera scrubbing

 <div>
 <img width="800" alt="Multi-camera scrubbing" src="https://github.com/blakeblackshear/frigate/assets/569905/d6788a15-0eeb-4427-a8d4-80b93cae3d74">
 </div>

 ### Built-in mask and zone editor

 <div>
 <img width="800" alt="Built-in mask and zone editor" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5">
 </div>

+## Translations
+
+We use [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) to support language translations. Contributions are always welcome.
+
+<a href="https://hosted.weblate.org/engage/frigate-nvr/">
+  <img src="https://hosted.weblate.org/widget/frigate-nvr/multi-auto.svg" alt="Translation status" />
+</a>
README_CN.md (new file, 64 lines)
@@ -0,0 +1,64 @@
+<p align="center">
+  <img align="center" alt="logo" src="docs/static/img/frigate.png">
+</p>
+
+# Frigate - 一个具有实时目标检测的本地NVR
+
+[English](https://github.com/blakeblackshear/frigate) | \[简体中文\]
+
+<a href="https://hosted.weblate.org/engage/frigate-nvr/-/zh_Hans/">
+  <img src="https://hosted.weblate.org/widget/frigate-nvr/-/zh_Hans/svg-badge.svg" alt="翻译状态" />
+</a>
+
+一个完整的本地网络视频录像机(NVR),专为[Home Assistant](https://www.home-assistant.io)设计,具备AI物体检测功能。使用OpenCV和TensorFlow在本地为IP摄像头执行实时物体检测。
+
+强烈推荐使用GPU或者AI加速器(例如[Google Coral加速器](https://coral.ai/products/) 或者 [Hailo](https://hailo.ai/))。它们的性能甚至超过目前的顶级CPU,并且可以以极低的耗电实现更优的性能。
+
+- 通过[自定义组件](https://github.com/blakeblackshear/frigate-hass-integration)与Home Assistant紧密集成
+- 设计上通过仅在必要时和必要地点寻找物体,最大限度地减少资源使用并最大化性能
+- 大量利用多进程处理,强调实时性而非处理每一帧
+- 使用非常低开销的运动检测来确定运行物体检测的位置
+- 使用TensorFlow进行物体检测,运行在单独的进程中以达到最大FPS
+- 通过MQTT进行通信,便于集成到其他系统中
+- 根据检测到的物体设置保留时间进行视频录制
+- 24/7全天候录制
+- 通过RTSP重新流传输以减少摄像头的连接数
+- 支持WebRTC和MSE,实现低延迟的实时观看
+
+## 社区中文翻译文档
+
+你可以在这里查看文档:https://docs.frigate-cn.video
+
+## 赞助
+
+如果您想通过捐赠支持开发,请使用 [Github Sponsors](https://github.com/sponsors/blakeblackshear)。
+
+## 截图
+
+### 实时监控面板
+<div>
+<img width="800" alt="实时监控面板" src="https://github.com/blakeblackshear/frigate/assets/569905/5e713cb9-9db5-41dc-947a-6937c3bc376e">
+</div>
+
+### 简单的审查工作流程
+<div>
+<img width="800" alt="简单的审查工作流程" src="https://github.com/blakeblackshear/frigate/assets/569905/6fed96e8-3b18-40e5-9ddc-31e6f3c9f2ff">
+</div>
+
+### 多摄像头可按时间轴查看
+<div>
+<img width="800" alt="多摄像头可按时间轴查看" src="https://github.com/blakeblackshear/frigate/assets/569905/d6788a15-0eeb-4427-a8d4-80b93cae3d74">
+</div>
+
+### 内置遮罩和区域编辑器
+<div>
+<img width="800" alt="内置遮罩和区域编辑器" src="https://github.com/blakeblackshear/frigate/assets/569905/d7885fc3-bfe6-452f-b7d0-d957cb3e31f5">
+</div>
+
+## 翻译
+我们使用 [Weblate](https://hosted.weblate.org/projects/frigate-nvr/) 平台提供翻译支持,欢迎参与进来一起完善。
+
+## 非官方中文讨论社区
+欢迎加入中文讨论QQ群:1043861059
+Bilibili:https://space.bilibili.com/3546894915602564
@@ -6,7 +6,7 @@ import numpy as np

 import frigate.util as util
 from frigate.config import DetectorTypeEnum
-from frigate.object_detection import (
+from frigate.object_detection.base import (
     ObjectDetectProcess,
     RemoteObjectDetector,
     load_labels,
@@ -1,8 +1,8 @@
 version: "3"
 services:
   devcontainer:
     container_name: frigate-devcontainer
-    # add groups from host for render, plugdev, video
+    # Check host system's actual render/video/plugdev group IDs with 'getent group render', 'getent group video', and 'getent group plugdev'
+    # Must add these exact IDs in container's group_add section or OpenVINO GPU acceleration will fail
     group_add:
       - "109" # render
       - "110" # render
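The group IDs above are host-specific, which is why the new comment tells you to verify them with `getent`. A small sketch for collecting the IDs to place under `group_add` (assumes the groups exist on the host):

```bash
# Print "name:gid" for each group the devcontainer needs to join;
# a group missing on the host simply prints nothing.
for grp in render video plugdev; do
  getent group "$grp" | cut -d: -f1,3
done
```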
@@ -24,8 +24,8 @@ services:
     #           capabilities: [gpu]
     environment:
       YOLO_MODELS: ""
-    devices:
-      - /dev/bus/usb:/dev/bus/usb
+    # devices:
+    #   - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
+    #   - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
     volumes:
       - .:/workspace/frigate:cached
@@ -33,9 +33,10 @@ services:
       - /etc/localtime:/etc/localtime:ro
       - ./config:/config
       - ./debug:/media/frigate
-      - /dev/bus/usb:/dev/bus/usb
+      # - /dev/bus/usb:/dev/bus/usb # Uncomment for Google Coral USB
   mqtt:
     container_name: mqtt
-    image: eclipse-mosquitto:1.6
+    image: eclipse-mosquitto:2.0
+    command: mosquitto -c /mosquitto-no-auth.conf # enable no-auth mode
     ports:
       - "1883:1883"
@@ -4,7 +4,7 @@
 sudo apt-get update
 sudo apt-get install -y build-essential cmake git wget

-hailo_version="4.20.0"
+hailo_version="4.20.1"
 arch=$(uname -m)

 if [[ $arch == "x86_64" ]]; then
@@ -78,8 +78,9 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
 RUN apt-get -qq update \
     && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
     && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+    && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
     && python3 get-pip.py "pip" \
-    && pip install -r /requirements-ov.txt
+    && pip3 install -r /requirements-ov.txt

 # Get OpenVino Model
 RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@@ -172,6 +173,7 @@ RUN apt-get -qq update \
 RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1

 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+    && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
     && python3 get-pip.py "pip"

 COPY docker/main/requirements.txt /requirements.txt
@@ -235,6 +237,7 @@ ENV DEFAULT_FFMPEG_VERSION="7.0"
 ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"

 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+    && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
     && python3 get-pip.py "pip"

 RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
@@ -257,12 +260,12 @@ ENTRYPOINT ["/init"]
 CMD []

 HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
-    CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
+    CMD test -f /dev/shm/.frigate-is-stopping && exit 0; curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1

 # Frigate deps with Node.js and NPM for devcontainer
 FROM deps AS devcontainer

-# Do not start the actual Frigate service on devcontainer as it will be started by VSCode
+# Do not start the actual Frigate service on devcontainer as it will be started by VS Code
 # But start a fake service for simulating the logs
 COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run
@@ -2,7 +2,7 @@

 set -euxo pipefail

-NGINX_VERSION="1.25.3"
+NGINX_VERSION="1.27.4"
 VOD_MODULE_VERSION="1.31"
 SECURE_TOKEN_MODULE_VERSION="1.5"
 SET_MISC_MODULE_VERSION="v0.33"
@@ -2,7 +2,7 @@

 set -euxo pipefail

-hailo_version="4.20.0"
+hailo_version="4.20.1"

 if [[ "${TARGETARCH}" == "amd64" ]]; then
     arch="x86_64"
@@ -10,5 +10,5 @@ elif [[ "${TARGETARCH}" == "arm64" ]]; then
     arch="aarch64"
 fi

-wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" | tar -C / -xzf -
+wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-debian12-${TARGETARCH}.tar.gz" | tar -C / -xzf -
 wget -P /wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp311-cp311-linux_${arch}.whl"
@@ -7,7 +7,6 @@ starlette-context == 0.3.6
 fastapi == 0.115.*
 uvicorn == 0.30.*
 slowapi == 0.1.*
-imutils == 0.5.*
 joserfc == 1.0.*
 pathvalidate == 3.2.*
 markupsafe == 3.0.*
@@ -25,4 +25,7 @@ elif [[ "${exit_code_service}" -ne 0 ]]; then
   fi
 fi

+# used by the docker healthcheck
+touch /dev/shm/.frigate-is-stopping
+
 exec /run/s6/basedir/bin/halt
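Together with the HEALTHCHECK change in the main Dockerfile above, this forms a small shutdown handshake: the halt script drops a marker file in `/dev/shm`, and the healthcheck treats the container as healthy while the marker exists, so a deliberate stop is never reported as a failure. The check the container runs is effectively:

```bash
# Expanded form of the healthcheck CMD added in this commit:
# report healthy during an intentional stop, otherwise probe the API.
if test -f /dev/shm/.frigate-is-stopping; then
  exit 0
fi
curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
```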
@@ -4,44 +4,16 @@

 set -o errexit -o nounset -o pipefail

 # opt out of openvino telemetry
 if [ -e /usr/local/bin/opt_in_out ]; then
     /usr/local/bin/opt_in_out --opt_out
 fi

 # Logs should be sent to stdout so that s6 can collect them

 # Tell S6-Overlay not to restart this service
 s6-svc -O .

-function migrate_db_path() {
-    # Find config file in yaml or yml, but prefer yaml
-    local config_file="${CONFIG_FILE:-"/config/config.yml"}"
-    local config_file_yaml="${config_file//.yml/.yaml}"
-    if [[ -f "${config_file_yaml}" ]]; then
-        config_file="${config_file_yaml}"
-    elif [[ ! -f "${config_file}" ]]; then
-        # Frigate will create the config file on startup
-        return 0
-    fi
-    unset config_file_yaml
-
-    # Use yq to check if database.path is set
-    local user_db_path
-    user_db_path=$(yq eval '.database.path' "${config_file}")
-
-    if [[ "${user_db_path}" == "null" ]]; then
-        local previous_db_path="/media/frigate/frigate.db"
-        local new_db_dir="/config"
-        if [[ -f "${previous_db_path}" ]]; then
-            if mountpoint --quiet "${new_db_dir}"; then
-                # /config is a mount point, move the db
-                echo "[INFO] Moving db from '${previous_db_path}' to the '${new_db_dir}' dir..."
-                # Move all files that starts with frigate.db to the new directory
-                mv -vf "${previous_db_path}"* "${new_db_dir}"
-            else
-                echo "[ERROR] Trying to migrate the db path from '${previous_db_path}' to the '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir"
-                return 1
-            fi
-        fi
-    fi
-}
-
 function set_libva_version() {
     local ffmpeg_path
     ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
@@ -50,8 +22,8 @@ function set_libva_version() {
 }

 echo "[INFO] Preparing Frigate..."
-migrate_db_path
 set_libva_version

 echo "[INFO] Starting Frigate..."

 cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
@@ -61,7 +61,7 @@ if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then
     echo "[INFO] Preparing new go2rtc config..."

     if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then
-        # Running as a Home Assistant add-on, infer the IP address and port
+        # Running as a Home Assistant Add-on, infer the IP address and port
         get_ip_and_port_from_supervisor
     fi
@@ -79,6 +79,11 @@ if [ ! \( -f "$letsencrypt_path/privkey.pem" -a -f "$letsencrypt_path/fullchain.
         -keyout "$letsencrypt_path/privkey.pem" -out "$letsencrypt_path/fullchain.pem" 2>/dev/null
 fi

+# build templates for optional FRIGATE_BASE_PATH environment variable
+python3 /usr/local/nginx/get_base_path.py | \
+    tempio -template /usr/local/nginx/templates/base_path.gotmpl \
+    -out /usr/local/nginx/conf/base_path.conf
+
 # build templates for optional TLS support
 python3 /usr/local/nginx/get_tls_settings.py | \
     tempio -template /usr/local/nginx/templates/listen.gotmpl \
docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/run (new executable file, 146 lines)
@@ -0,0 +1,146 @@
+#!/command/with-contenv bash
+# shellcheck shell=bash
+# Do preparation tasks before starting the main services
+
+set -o errexit -o nounset -o pipefail
+
+function migrate_addon_config_dir() {
+    local home_assistant_config_dir="/homeassistant"
+
+    if ! mountpoint --quiet "${home_assistant_config_dir}"; then
+        # Not running as a Home Assistant Add-on
+        return 0
+    fi
+
+    local config_dir="/config"
+    local new_config_file="${config_dir}/config.yml"
+    local new_config_file_yaml="${new_config_file//.yml/.yaml}"
+    if [[ -f "${new_config_file_yaml}" || -f "${new_config_file}" ]]; then
+        # Already migrated
+        return 0
+    fi
+
+    local old_config_file="${home_assistant_config_dir}/frigate.yml"
+    local old_config_file_yaml="${old_config_file//.yml/.yaml}"
+    if [[ -f "${old_config_file}" ]]; then
+        :
+    elif [[ -f "${old_config_file_yaml}" ]]; then
+        old_config_file="${old_config_file_yaml}"
+        new_config_file="${new_config_file_yaml}"
+    else
+        # Nothing to migrate
+        return 0
+    fi
+    unset old_config_file_yaml new_config_file_yaml
+
+    echo "[INFO] Starting migration from Home Assistant config dir to Add-on config dir..." >&2
+
+    local db_path
+    db_path=$(yq -r '.database.path' "${old_config_file}")
+    if [[ "${db_path}" == "null" ]]; then
+        db_path="${config_dir}/frigate.db"
+    fi
+    if [[ "${db_path}" == "${config_dir}/"* ]]; then
+        # replace /config/ prefix with /homeassistant/
+        local old_db_path="${home_assistant_config_dir}/${db_path:8}"
+
+        if [[ -f "${old_db_path}" ]]; then
+            local new_db_dir
+            new_db_dir="$(dirname "${db_path}")"
+            echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2
+            mkdir -vp "${new_db_dir}"
+            mv -vf "${old_db_path}" "${new_db_dir}"
+            local db_file
+            for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do
+                if [[ -f "${db_file}" ]]; then
+                    mv -vf "${db_file}" "${new_db_dir}"
+                fi
+            done
+            unset db_file
+        fi
+    fi
+
+    local config_entry
+    for config_entry in .model.path .model.labelmap_path .ffmpeg.path .mqtt.tls_ca_certs .mqtt.tls_client_cert .mqtt.tls_client_key; do
+        local config_entry_path
+        config_entry_path=$(yq -r "${config_entry}" "${old_config_file}")
+        if [[ "${config_entry_path}" == "${config_dir}/"* ]]; then
+            # replace /config/ prefix with /homeassistant/
+            local old_config_entry_path="${home_assistant_config_dir}/${config_entry_path:8}"
+
+            if [[ -f "${old_config_entry_path}" ]]; then
+                local new_config_entry_entry
+                new_config_entry_entry="$(dirname "${config_entry_path}")"
+                echo "[INFO] Migrating ${config_entry} from '${old_config_entry_path}' to '${config_entry_path}'..." >&2
+                mkdir -vp "${new_config_entry_entry}"
+                mv -vf "${old_config_entry_path}" "${config_entry_path}"
+            fi
+        fi
+    done
+
+    local old_model_cache_path="${home_assistant_config_dir}/model_cache"
+    if [[ -d "${old_model_cache_path}" ]]; then
+        echo "[INFO] Migrating '${old_model_cache_path}' to '${config_dir}'..." >&2
+        mv -f "${old_model_cache_path}" "${config_dir}"
+    fi
+
+    echo "[INFO] Migrating other files from '${home_assistant_config_dir}' to '${config_dir}'..." >&2
+    local file
+    for file in .exports .jwt_secret .timeline .vacuum go2rtc; do
+        file="${home_assistant_config_dir}/${file}"
+        if [[ -f "${file}" ]]; then
+            mv -vf "${file}" "${config_dir}"
+        fi
+    done
+
+    echo "[INFO] Migrating config file from '${old_config_file}' to '${new_config_file}'..." >&2
+    mv -vf "${old_config_file}" "${new_config_file}"
+
+    echo "[INFO] Migration from Home Assistant config dir to Add-on config dir completed." >&2
+}
+
+function migrate_db_from_media_to_config() {
+    # Find config file in yml or yaml, but prefer yml
+    local config_file="${CONFIG_FILE:-"/config/config.yml"}"
+    local config_file_yaml="${config_file//.yml/.yaml}"
+    if [[ -f "${config_file}" ]]; then
+        :
+    elif [[ -f "${config_file_yaml}" ]]; then
+        config_file="${config_file_yaml}"
+    else
+        # Frigate will create the config file on startup
+        return 0
+    fi
+    unset config_file_yaml
+
+    local user_db_path
+    user_db_path=$(yq -r '.database.path' "${config_file}")
+    if [[ "${user_db_path}" == "null" ]]; then
+        local old_db_path="/media/frigate/frigate.db"
+        local new_db_dir="/config"
+        if [[ -f "${old_db_path}" ]]; then
+            echo "[INFO] Migrating database from '${old_db_path}' to '${new_db_dir}' dir..." >&2
+            if mountpoint --quiet "${new_db_dir}"; then
+                # /config is a mount point, move the db
+                mv -vf "${old_db_path}" "${new_db_dir}"
+                local db_file
+                for db_file in "${old_db_path}"-shm "${old_db_path}"-wal; do
+                    if [[ -f "${db_file}" ]]; then
+                        mv -vf "${db_file}" "${new_db_dir}"
+                    fi
+                done
+                unset db_file
+            else
+                echo "[ERROR] Trying to migrate the database path from '${old_db_path}' to '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" >&2
+                return 1
+            fi
+        fi
+    fi
+}
+
+# remove leftover from last run, not normally needed, but just in case
+# used by the docker healthcheck
+rm -f /dev/shm/.frigate-is-stopping
+
+migrate_addon_config_dir
+migrate_db_from_media_to_config
docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/type (new file, 1 line)

@@ -0,0 +1 @@
+oneshot

docker/main/rootfs/etc/s6-overlay/s6-rc.d/prepare/up (new file, 1 line)

@@ -0,0 +1 @@
+/etc/s6-overlay/s6-rc.d/prepare/run
@@ -1,5 +1,4 @@
 import json
 import os
 import sys

 from ruamel.yaml import YAML
@@ -9,17 +8,13 @@ from frigate.const import (
     DEFAULT_FFMPEG_VERSION,
     INCLUDED_FFMPEG_VERSIONS,
 )
+from frigate.util.config import find_config_file

 sys.path.remove("/opt/frigate")

 yaml = YAML()

-config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
-
-# Check if we can use .yaml instead of .yml
-config_file_yaml = config_file.replace(".yml", ".yaml")
-if os.path.isfile(config_file_yaml):
-    config_file = config_file_yaml
+config_file = find_config_file()

 try:
     with open(config_file) as f:
@@ -15,6 +15,7 @@ from frigate.const import (
     LIBAVFORMAT_VERSION_MAJOR,
 )
 from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
+from frigate.util.config import find_config_file

 sys.path.remove("/opt/frigate")

@@ -29,12 +30,7 @@ if os.path.isdir("/run/secrets"):
             Path(os.path.join("/run/secrets", secret_file)).read_text().strip()
         )

-config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
-
-# Check if we can use .yaml instead of .yml
-config_file_yaml = config_file.replace(".yml", ".yaml")
-if os.path.isfile(config_file_yaml):
-    config_file = config_file_yaml
+config_file = find_config_file()

 try:
     with open(config_file) as f:
@@ -57,7 +53,7 @@ elif go2rtc_config["api"].get("origin") is None:

 # Need to set default location for HA config
 if go2rtc_config.get("hass") is None:
-    go2rtc_config["hass"] = {"config": "/config"}
+    go2rtc_config["hass"] = {"config": "/homeassistant"}

 # we want to ensure that logs are easy to read
 if go2rtc_config.get("log") is None:
@@ -106,7 +102,7 @@ elif go2rtc_config["ffmpeg"].get("bin") is None:

 # need to replace ffmpeg command when using ffmpeg4
 if LIBAVFORMAT_VERSION_MAJOR < 59:
-    rtsp_args = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
+    rtsp_args = "-fflags nobuffer -flags low_delay -stimeout 10000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
     if go2rtc_config.get("ffmpeg") is None:
         go2rtc_config["ffmpeg"] = {"rtsp": rtsp_args}
     elif go2rtc_config["ffmpeg"].get("rtsp") is None:
@@ -30,7 +30,7 @@ http {

     gzip on;
     gzip_comp_level 6;
-    gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp image/png image/gif image/jpeg image/jpg;
+    gzip_types text/plain text/css application/json application/x-javascript application/javascript text/javascript image/svg+xml image/x-icon image/bmp;
     gzip_proxied no-cache no-store private expired auth;
     gzip_vary on;

@@ -82,7 +82,7 @@ http {
         aio on;

         # file upload size
-        client_max_body_size 10M;
+        client_max_body_size 20M;

         # https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
         vod_open_file_thread_pool default;
@@ -96,6 +96,7 @@ http {
         gzip_types application/vnd.apple.mpegurl;

         include auth_location.conf;
+        include base_path.conf;

         location /vod/ {
             include auth_request.conf;
@@ -299,11 +300,29 @@ http {
             add_header Cache-Control "public";
         }

+        location /locales/ {
+            access_log off;
+            add_header Cache-Control "public";
+        }
+
+        location ~ ^/.*-([A-Za-z0-9]+)\.webmanifest$ {
+            access_log off;
+            expires 1y;
+            add_header Cache-Control "public";
+            default_type application/json;
+            proxy_set_header Accept-Encoding "";
+            sub_filter_once off;
+            sub_filter_types application/json;
+            sub_filter '"start_url": "/BASE_PATH/"' '"start_url" : "$http_x_ingress_path/"';
+            sub_filter '"src": "/BASE_PATH/' '"src": "$http_x_ingress_path/';
+        }
+
         sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
         sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
         sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
         sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
         sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
         sub_filter '"/BASE_PATH/locales/' '"$http_x_ingress_path/locales/';
         sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
         sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
         sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>';
docker/main/rootfs/usr/local/nginx/get_base_path.py (new file, 10 lines)

@@ -0,0 +1,10 @@
+"""Prints the base path as json to stdout."""
+
+import json
+import os
+
+base_path = os.environ.get("FRIGATE_BASE_PATH", "")
+
+result: dict[str, any] = {"base_path": base_path}
+
+print(json.dumps(result))
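Because the script only reads a single environment variable and prints JSON, its behavior is easy to verify in isolation:

```bash
# With the variable set, the script emits the path that tempio templates
# into base_path.conf; unset, it emits an empty base path.
FRIGATE_BASE_PATH=/frigate python3 get_base_path.py   # {"base_path": "/frigate"}
python3 get_base_path.py                              # {"base_path": ""}
```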
@@ -1,18 +1,18 @@
 """Prints the tls config as json to stdout."""

 import json
 import os
 import sys

 from ruamel.yaml import YAML

+sys.path.insert(0, "/opt/frigate")
+from frigate.util.config import find_config_file
+
+sys.path.remove("/opt/frigate")
+
 yaml = YAML()

-config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
-
-# Check if we can use .yaml instead of .yml
-config_file_yaml = config_file.replace(".yml", ".yaml")
-if os.path.isfile(config_file_yaml):
-    config_file = config_file_yaml
+config_file = find_config_file()

 try:
     with open(config_file) as f:
docker/main/rootfs/usr/local/nginx/templates/base_path.gotmpl (new file, 19 lines)

@@ -0,0 +1,19 @@
+{{ if .base_path }}
+location = {{ .base_path }} {
+    return 302 {{ .base_path }}/;
+}
+
+location ^~ {{ .base_path }}/ {
+    # remove base_url from the path before passing upstream
+    rewrite ^{{ .base_path }}/(.*) /$1 break;
+
+    proxy_pass $scheme://127.0.0.1:8971;
+    proxy_http_version 1.1;
+    proxy_set_header Upgrade $http_upgrade;
+    proxy_set_header Connection "upgrade";
+    proxy_set_header Host $host;
+    proxy_set_header X-Ingress-Path {{ .base_path }};
+
+    access_log off;
+}
+{{ end }}
@@ -26,7 +26,7 @@ COPY --from=rootfs / /
 COPY docker/rockchip/COCO /COCO
 COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py

-ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/
+ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.2/librknnrt.so /usr/lib/

 ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
 ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/
@@ -14,7 +14,7 @@ try:
     with open("/config/conv2rknn.yaml", "r") as config_file:
         configuration = yaml.safe_load(config_file)
 except FileNotFoundError:
-    raise Exception("Please place a config.yaml file in /config/conv2rknn.yaml")
+    raise Exception("Please place a config file at /config/conv2rknn.yaml")

 if configuration["config"] != None:
     rknn_config = configuration["config"]
@@ -1,2 +1,2 @@
-rknn-toolkit2 == 2.3.0
-rknn-toolkit-lite2 == 2.3.0
+rknn-toolkit2 == 2.3.2
+rknn-toolkit-lite2 == 2.3.2
@@ -39,6 +39,7 @@ WORKDIR /opt/frigate
 COPY --from=rootfs / /

 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+    && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
     && python3 get-pip.py "pip" --break-system-packages
 RUN python3 -m pip config set global.break-system-packages true
@@ -9,9 +9,9 @@ ARG DEBIAN_FRONTEND

 # Add deadsnakes PPA for python3.11
 RUN apt-get -qq update && \
     apt-get -qq install -y --no-install-recommends \
     software-properties-common \
     && add-apt-repository ppa:deadsnakes/ppa

 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \
@@ -24,6 +24,7 @@ RUN apt-get -qq update \
 RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1

 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+    && sed -i 's/args.append("setuptools")/args.append("setuptools==77.0.3")/' get-pip.py \
     && python3 get-pip.py "pip"

 FROM build-wheels AS trt-wheels
@@ -21,7 +21,20 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target
 RUN mkdir -p /usr/local/cuda-deps
 RUN if [ "$TARGETARCH" = "amd64" ]; then \
         cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \
-        cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ ; \
+        cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ && \
+        cd /usr/local/cuda-deps/ && \
+        for lib in libnvrtc.so.*; do \
+            if [[ "$lib" =~ libnvrtc.so\.([0-9]+\.[0-9]+\.[0-9]+) ]]; then \
+                version="${BASH_REMATCH[1]}"; \
+                ln -sf "libnvrtc.so.$version" libnvrtc.so; \
+            fi; \
+        done && \
+        for lib in libcurand.so.*; do \
+            if [[ "$lib" =~ libcurand.so\.([0-9]+\.[0-9]+\.[0-9]+\.[0-9]+) ]]; then \
+                version="${BASH_REMATCH[1]}"; \
+                ln -sf "libcurand.so.$version" libcurand.so; \
+            fi; \
+        done; \
     fi

 # Frigate w/ TensorRT Support as separate image
@@ -1,8 +1,7 @@
 /usr/local/lib
-/usr/local/cuda
-/usr/local/lib/python3.11/dist-packages/tensorrt
 /usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
 /usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
 /usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
 /usr/local/lib/python3.11/dist-packages/nvidia/cuda_nvrtc/lib
+/usr/local/lib/python3.11/dist-packages/tensorrt
 /usr/local/lib/python3.11/dist-packages/nvidia/cufft/lib
@@ -44,7 +44,7 @@ go2rtc:

 ### `environment_vars`

-This section can be used to set environment variables for those unable to modify the environment of the container (ie. within HassOS)
+This section can be used to set environment variables for those unable to modify the environment of the container, like within Home Assistant OS.

 Example:
@@ -172,6 +172,38 @@ listen [::]:8971 ipv6only=off ssl;
 listen [::]:5000 ipv6only=off;
 ```

+## Base path
+
+By default, Frigate runs at the root path (`/`). However, some setups need to run Frigate under a custom path prefix (e.g. `/frigate`), especially when Frigate is located behind a reverse proxy that requires path-based routing.
+
+### Set Base Path via HTTP Header
+
+The preferred way to configure the base path is through the `X-Ingress-Path` HTTP header, which needs to be set to the desired base path in an upstream reverse proxy.
+
+For example, in Nginx:
+
+```
+location /frigate {
+    proxy_set_header X-Ingress-Path /frigate;
+    proxy_pass http://frigate_backend;
+}
+```
+
+### Set Base Path via Environment Variable
+
+When it is not feasible to set the base path via an HTTP header, it can also be set via the `FRIGATE_BASE_PATH` environment variable in the Docker Compose file.
+
+For example:
+
+```
+services:
+  frigate:
+    image: blakeblackshear/frigate:latest
+    environment:
+      - FRIGATE_BASE_PATH=/frigate
+```
+
+This can be used, for example, to access Frigate via a Tailscale agent (https) by simply forwarding all requests to the base path (http):
+
+```
+tailscale serve --https=443 --bg --set-path /frigate http://localhost:5000/frigate
+```
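Once a proxy sets the header, the prefix can be checked end to end; a minimal sketch, assuming the host, port, and `/frigate` prefix from the examples above:

```bash
# Request the UI through the base path while supplying the header the
# nginx templates key off of; a 2xx/3xx status means the generated
# base_path.conf location block is being hit.
curl -s -o /dev/null -w '%{http_code}\n' \
  -H 'X-Ingress-Path: /frigate' http://localhost:5000/frigate/
```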
 ## Custom Dependencies

 ### Custom ffmpeg build
@@ -43,13 +43,13 @@ Restarting Frigate will reset the rate limits.

 If you are running Frigate behind a proxy, you will want to set `trusted_proxies` or these rate limits will apply to the upstream proxy IP address. This means that a brute force attack will rate limit login attempts from other devices and could temporarily lock you out of your instance. In order to ensure rate limits only apply to the actual IP address where the requests are coming from, you will need to list the upstream networks that you want to trust. These trusted proxies are checked against the `X-Forwarded-For` header when looking for the IP address where the request originated.

-If you are running a reverse proxy in the same docker compose file as Frigate, here is an example of how your auth config might look:
+If you are running a reverse proxy in the same Docker Compose file as Frigate, here is an example of how your auth config might look:

 ```yaml
 auth:
   failed_login_rate_limit: "1/second;5/minute;20/hour"
   trusted_proxies:
-    - 172.18.0.0/16 # <---- this is the subnet for the internal docker compose network
+    - 172.18.0.0/16 # <---- this is the subnet for the internal Docker Compose network
 ```
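The `172.18.0.0/16` subnet in the example is just whatever Docker assigned to the Compose project's default network, so it is worth looking up rather than copying; one way to do that (the network name is assumed to follow the default `<project>_default` pattern):

```bash
# Print the subnet of the Compose network so it can be listed under
# trusted_proxies; replace frigate_default with your actual network name.
docker network inspect frigate_default \
  --format '{{range .IPAM.Config}}{{.Subnet}}{{end}}'
```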
 ## JWT Token Secret

@@ -66,7 +66,7 @@ Frigate looks for a JWT token secret in the following order:

 1. An environment variable named `FRIGATE_JWT_SECRET`
 2. A docker secret named `FRIGATE_JWT_SECRET` in `/run/secrets/`
-3. A `jwt_secret` option from the Home Assistant Addon options
+3. A `jwt_secret` option from the Home Assistant Add-on options
 4. A `.jwt_secret` file in the config directory

 If no secret is found on startup, Frigate generates one and stores it in a `.jwt_secret` file in the config directory.
@@ -77,7 +77,7 @@ Changing the secret will invalidate current tokens.

 Frigate can be configured to leverage features of common upstream authentication proxies such as Authelia, Authentik, oauth2_proxy, or traefik-forward-auth.

-If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret.
+If you are leveraging the authentication of an upstream proxy, you likely want to disable Frigate's authentication as there is no correspondence between users in Frigate's database and users authenticated via the proxy. Optionally, if communication between the reverse proxy and Frigate is over an untrusted network, you should set an `auth_secret` in the `proxy` config and configure the proxy to send the secret value as a header named `X-Proxy-Secret`. Assuming this is an untrusted network, you will also want to [configure a real TLS certificate](tls.md) to ensure the traffic can't simply be sniffed to steal the secret.

 Here is an example of how to disable Frigate's authentication and also ensure the requests come only from your known proxy.

@@ -109,6 +109,14 @@ proxy:

 Frigate supports both `admin` and `viewer` roles (see below). When using port `8971`, Frigate validates these headers and subsequent requests use the headers `remote-user` and `remote-role` for authorization.

+A default role can be provided. Any value in the mapped `role` header will override the default.
+
+```yaml
+proxy:
+  ...
+  default_role: viewer
+```
+
 #### Port Considerations

 **Authenticated Port (8971)**
docs/docs/configuration/bird_classification.md (new file, 31 lines)
@@ -0,0 +1,31 @@
+---
+id: bird_classification
+title: Bird Classification
+---
+
+Bird classification identifies known birds using a quantized Tensorflow model. When a known bird is recognized, its common name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
+
+## Minimum System Requirements
+
+Bird classification runs a lightweight tflite model on the CPU, so there are no significantly different system requirements than running Frigate itself.
+
+## Model
+
+The classification model used is the MobileNet INat Bird Classification; [available identifiers can be found here.](https://raw.githubusercontent.com/google-coral/test_data/master/inat_bird_labels.txt)
+
+## Configuration
+
+Bird classification is disabled by default, so it must be enabled in your config file before it can be used. Bird classification is a global configuration setting.
+
+```yaml
+classification:
+  bird:
+    enabled: true
+```
+
+## Advanced Configuration
+
+Fine-tune bird classification with these optional parameters:
+
+- `threshold`: Classification confidence score required to set the sub label on the object.
+  - Default: `0.9`.
@@ -4,7 +4,7 @@ In addition to Frigate's Live camera dashboard, Birdseye allows a portable heads

 Birdseye can be viewed by adding the "Birdseye" camera to a Camera Group in the Web UI. Add a Camera Group by pressing the "+" icon on the Live page, and choose "Birdseye" as one of the cameras.

-Birdseye can also be used in HomeAssistant dashboards, cast to media devices, etc.
+Birdseye can also be used in Home Assistant dashboards, cast to media devices, etc.

 ## Birdseye Behavior
@@ -15,6 +15,17 @@ Many cameras support encoding options which greatly affect the live view experie

 :::

+## H.265 Cameras via Safari
+
+Some cameras support H.265 with different formats, but Safari only supports the annexb format. When using H.265 camera streams for recording with devices that use the Safari browser, the `apple_compatibility` option should be used.
+
+```yaml
+cameras:
+  h265_cam: # <------ Doesn't matter what the camera is called
+    ffmpeg:
+      apple_compatibility: true # <- Adds compatibility with macOS and iPhone
+```
+
 ## MJPEG Cameras

 Note that mjpeg cameras require encoding the video into h264 for recording, and restream roles. This will use significantly more CPU than if the cameras supported h264 feeds directly. It is recommended to use the restream role to create an h264 restream and then use that as the source for ffmpeg.
@ -3,25 +3,38 @@ id: face_recognition
|
||||
title: Face Recognition
|
||||
---
|
||||
|
||||
Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known person is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
|
||||
Face recognition identifies known individuals by matching detected faces with previously learned facial data. When a known `person` is recognized, their name will be added as a `sub_label`. This information is included in the UI, filters, as well as in notifications.
|
||||
|
||||
## Model Requirements
|
||||
|
||||
Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally. A lightweight face landmark detection model is also used to align faces before running them through the face recognizer.
|
||||
### Face Detection
|
||||
|
||||
Users running a Frigate+ model (or any custom model that natively detects faces) should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient.
|
||||
When running a Frigate+ model (or any custom model that natively detects faces) should ensure that `face` is added to the [list of objects to track](../plus/#available-label-types) either globally or for a specific camera. This will allow face detection to run at the same time as object detection and be more efficient.
|
||||
|
||||
Users without a model that detects faces can still run face recognition. Frigate uses a lightweight DNN face detection model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track.
|
||||
When running a default COCO model or another model that does not include `face` as a detectable label, face detection will run via CV2 using a lightweight DNN model that runs on the CPU. In this case, you should _not_ define `face` in your list of objects to track.
|
||||
|
||||
:::note
|
||||
|
||||
Frigate needs to first detect a `face` before it can recognize a face.
|
||||
Frigate needs to first detect a `person` before it can detect and recognize a face.
|
||||
|
||||
:::
|
||||
|
||||
### Face Recognition
|
||||
|
||||
Frigate has support for two face recognition model types:
|
||||
|
||||
- **small**: Frigate will run a FaceNet embedding model to recognize faces, which runs locally on the CPU. This model is optimized for efficiency and is not as accurate.
|
||||
- **large**: Frigate will run a large ArcFace embedding model that is optimized for accuracy. It is only recommended to be run when an integrated or dedicated GPU is available.
|
||||
|
||||
In both cases, a lightweight face landmark detection model is also used to align faces before running recognition.
|
||||
|
||||
All of these features run locally on your system.
|
||||
|
||||
## Minimum System Requirements
|
||||
|
||||
Face recognition is lightweight and runs on the CPU, there are no significantly different system requirements than running Frigate itself.
|
||||
The `small` model is optimized for efficiency and runs on the CPU, most CPUs should run the model efficiently.
|
||||
|
||||
The `large` model is optimized for accuracy, an integrated or discrete GPU is required. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
|
||||
|
||||
## Configuration
|
||||
|
||||
@ -34,7 +47,7 @@ face_recognition:
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
Fine-tune face recognition with these optional parameters:
|
||||
Fine-tune face recognition with these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled` and `min_area`.
|
||||
|
||||
### Detection
|
||||
|
||||
@ -47,12 +60,24 @@ Fine-tune face recognition with these optional parameters:
|
||||
|
||||
### Recognition
|
||||
|
||||
- `model_size`: Which model size to use, options are `small` or `large`
|
||||
- `unknown_score`: Min score to mark a person as a potential match, matches at or below this will be marked as unknown.
|
||||
- Default: `0.8`.
|
||||
- `recognition_threshold`: Recognition confidence score required to add the face to the object as a sub label.
|
||||
- Default: `0.9`.
|
||||
- `save_attempts`: Number of images of recognized faces to save for training.
|
||||
- Default: `100`.
|
||||
- `blur_confidence_filter`: Enables a filter that calculates how blurry the face is and adjusts the confidence based on this.
|
||||
- Default: `True`.
|
||||
|
||||
## Dataset
|
||||
## Usage
|
||||
|
||||
1. **Enable face recognition** in your configuration file and restart Frigate.
|
||||
2. **Upload your face** using the **Add Face** button's wizard in the Face Library section of the Frigate UI.
|
||||
3. When Frigate detects and attempts to recognize a face, it will appear in the **Train** tab of the Face Library, along with its associated recognition confidence.
|
||||
4. From the **Train** tab, you can **assign the face** to a new or existing person to improve recognition accuracy for the future.
|
||||
|
||||
## Creating a Robust Training Set
|
||||
|
||||
The number of images needed for a sufficient training set for face recognition varies depending on several factors:
|
||||
|
||||
@ -61,11 +86,9 @@ The number of images needed for a sufficient training set for face recognition v
|
||||
|
||||
However, here are some general guidelines:
|
||||
|
||||
- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended.
|
||||
- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point.
|
||||
- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial.
|
||||
|
||||
## Creating a Robust Training Set
|
||||
- Minimum: For basic face recognition tasks, a minimum of 5-10 images per person is often recommended.
|
||||
- Recommended: For more robust and accurate systems, 20-30 images per person is a good starting point.
|
||||
- Ideal: For optimal performance, especially in challenging conditions, 50-100 images per person can be beneficial.
|
||||
|
||||
The accuracy of face recognition is heavily dependent on the quality of data given to it for training. It is recommended to build the face training library in phases.
|
||||
|
||||
@ -74,19 +97,75 @@ The accuracy of face recognition is heavily dependent on the quality of data giv
|
||||
When choosing images to include in the face training set it is recommended to always follow these recommendations:
|
||||
|
||||
- If it is difficult to make out details in a persons face it will not be helpful in training.
|
||||
- Avoid images with under/over-exposure.
|
||||
- Avoid images with extreme under/over-exposure.
|
||||
- Avoid blurry / pixelated images.
|
||||
- Be careful when uploading images of people when they are wearing clothing that covers a lot of their face as this may confuse the training.
|
||||
- Do not upload too many images at the same time, it is recommended to train 4-6 images for each person each day so it is easier to know if the previously added images helped or hurt performance.
|
||||
- Avoid training on infrared (gray-scale). The models are trained on color images and will be able to extract features from gray-scale images.
|
||||
- Using images of people wearing hats / sunglasses may confuse the model.
|
||||
- Do not upload too many similar images at the same time, it is recommended to train no more than 4-6 similar images for each person to avoid over-fitting.
|
||||
|
||||
:::
|
||||
|
||||
### Step 1 - Building a Strong Foundation
|
||||
|
||||
When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-2 photos taken by a smartphone for each person. It is important that the person's face in the photo is straight-on and not turned which will ensure a good starting point.
|
||||
When first enabling face recognition it is important to build a foundation of strong images. It is recommended to start by uploading 1-5 photos containing just this person's face. It is important that the person's face in the photo is front-facing and not turned, this will ensure a good starting point.
|
||||
|
||||
Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are straight-on. Ignore images from cameras that recognize faces from an angle. Once a person starts to be consistently recognized correctly on images that are straight-on, it is time to move on to the next step.
|
||||
Then it is recommended to use the `Face Library` tab in Frigate to select and train images for each person as they are detected. When building a strong foundation it is strongly recommended to only train on images that are front-facing. Ignore images from cameras that recognize faces from an angle.
|
||||
|
||||
Aim to strike a balance between the quality of images while also having a range of conditions (day / night, different weather conditions, different times of day, etc.) in order to have diversity in the images used for each person and not have over-fitting.
|
||||
|
||||
Once a person starts to be consistently recognized correctly on images that are front-facing, it is time to move on to the next step.
|
||||
|
||||
### Step 2 - Expanding The Dataset
|
||||
|
||||
Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
|
||||
Once front-facing images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
|
||||
|
||||
## FAQ
|
||||
|
||||
### Why can't I bulk upload photos?
|
||||
|
||||
It is important to methodically add photos to the library, bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance.
|
||||
|
||||
### Why can't I bulk reprocess faces?
|
||||
|
||||
Face embedding models work by breaking apart faces into different features. This means that when reprocessing an image, only images from a similar angle will have its score affected.

### Why do unknown people score similarly to known people?

This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to over-fitting:

- If you train with only a few images per person, especially if those images are very similar, the recognition model becomes overly specialized to those specific images.
- When you provide images with different poses, lighting, and expressions, the algorithm extracts features that are consistent across those variations.
- By training on a diverse set of images, the algorithm becomes less sensitive to minor variations and noise in the input image.

Review your face collections and remove most of the unclear or low-quality images. Then, use the **Reprocess** button on each face in the **Train** tab to evaluate how the changes affect recognition scores.

Avoid training on images that already score highly, as this can lead to over-fitting. Instead, focus on relatively clear images that score lower, ideally with different lighting, angles, and conditions, to help the model generalize more effectively.

### Frigate misidentified a face. Can I tell it that a face is "not" a specific person?

No, face recognition does not support negative training (i.e., explicitly telling it who someone is _not_). Instead, the best approach is to improve the training data by using a more diverse and representative set of images for each person.
For more guidance, refer to the section above on improving recognition accuracy.

### I see scores above the threshold in the train tab, but a sub label wasn't assigned?

Frigate considers the recognition scores across all recognition attempts for each person object. The scores are continually weighted based on the area of the face, and a sub label will only be assigned to a person object if it is recognized confidently and consistently. This avoids cases where a single high confidence recognition would throw off the results.

### Can I use other face recognition software like DoubleTake at the same time as the built in face recognition?

No, using another face recognition service will interfere with Frigate's built-in face recognition. When using Double Take, the sub_label feature must be disabled if the built-in face recognition is also desired.

### Does face recognition run on the recording stream?

Face recognition does not run on the recording stream; this would be suboptimal for many reasons:

1. The latency of accessing the recordings means the notifications would not include the names of recognized people because recognition would not complete until after.
2. The embedding models used run on a set image size, so larger images will be scaled down to match this anyway.
3. Motion clarity is much more important than extra pixels; over-compression and motion blur are much more detrimental to results than resolution.

### I get an unknown error when taking a photo directly with my iPhone

By default iOS devices will use HEIC (High Efficiency Image Container) for images, but this format is not supported for uploads. Choosing `large` as the format instead of `original` will use JPG which will work correctly.

### How can I delete the face database and start over?

Frigate does not store anything in its database related to face recognition. You can simply delete all of your faces through the Frigate UI or remove the contents of the `/media/frigate/clips/faces` directory.

@ -9,7 +9,7 @@ Some presets of FFmpeg args are provided by default to make the configuration ea

It is highly recommended to use hwaccel presets in the config. These presets not only replace the longer args, but they also give Frigate hints of what hardware is available and allow Frigate to make other optimizations using the GPU such as when encoding the birdseye restream or when scaling a stream that has a size different than the native stream size.

See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on how to setup hwaccel for your GPU / iGPU.
See [the hwaccel docs](/configuration/hardware_acceleration_video.md) for more info on how to set up hwaccel for your GPU / iGPU.
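
For example, a minimal sketch applying one of the presets from the table below (here the VAAPI preset listed later in this document for Intel GPUs):

```yaml
ffmpeg:
  hwaccel_args: preset-vaapi # pick the preset from the table that matches your hardware
```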

| Preset                | Usage                          | Other Notes                                            |
| --------------------- | ------------------------------ | ------------------------------------------------------ |

32
docs/docs/configuration/hardware_acceleration_enrichments.md
Normal file
@ -0,0 +1,32 @@
---
id: hardware_acceleration_enrichments
title: Enrichments
---

# Enrichments

Some of Frigate's enrichments can use a discrete GPU for accelerated processing.

## Requirements

Object detection and enrichments (like Semantic Search, Face Recognition, and License Plate Recognition) are independent features. To use a GPU for object detection, see the [Object Detectors](/configuration/object_detectors.md) documentation. If you want to use your GPU for any supported enrichments, you must choose the appropriate Frigate Docker image for your GPU and configure the enrichment according to its specific documentation.

- **AMD**

  - ROCm will automatically be detected and used for enrichments in the `-rocm` Frigate image.

- **Intel**

  - OpenVINO will automatically be detected and used for enrichments in the default Frigate image.

- **Nvidia**
  - Nvidia GPUs will automatically be detected and used for enrichments in the `-tensorrt` Frigate image.
  - Jetson devices will automatically be detected and used for enrichments in the `-tensorrt-jp6` Frigate image.

Utilizing a GPU for enrichments does not require you to use the same GPU for object detection. For example, you can run the `tensorrt` Docker image for enrichments and still use other dedicated hardware for object detection.
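
As a sketch, choosing the image is only a matter of the Docker tag; the compose keys mirror the Jetson example later in this document, and the exact `stable-*` tag is an assumption that should match your hardware as listed above:

```yaml
services:
  frigate:
    # assumed tag for Nvidia GPUs; use stable-rocm for AMD, the default image for Intel
    image: ghcr.io/blakeblackshear/frigate:stable-tensorrt
```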

:::note

A Google Coral is a TPU (Tensor Processing Unit), not a dedicated GPU (Graphics Processing Unit) and therefore does not provide any kind of acceleration for Frigate's enrichments.

:::

@ -1,20 +1,20 @@
---
id: hardware_acceleration
title: Hardware Acceleration
id: hardware_acceleration_video
title: Video Decoding
---

# Hardware Acceleration
# Video Decoding

It is highly recommended to use a GPU for hardware acceleration in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.
It is highly recommended to use a GPU for hardware accelerated video decoding in Frigate. Some types of hardware acceleration are detected and used automatically, but you may need to update your configuration to enable hardware accelerated decoding in ffmpeg.

Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro

# Officially Supported
# Object Detection

## Raspberry Pi 3/4

Ensure you increase the allocated RAM for your GPU to at least 128 (`raspi-config` > Performance Options > GPU Memory).
If you are using the HA addon, you may need to use the full access variant and turn off `Protection mode` for hardware acceleration.
If you are using the HA Add-on, you may need to use the full access variant and turn off _Protection mode_ for hardware acceleration.

```yaml
# if you want to decode a h264 stream
@ -28,8 +28,8 @@ ffmpeg:

:::note

If running Frigate in Docker, you either need to run in privileged mode or
map the `/dev/video*` devices to Frigate. With Docker compose add:
If running Frigate through Docker, you either need to run in privileged mode or
map the `/dev/video*` devices to Frigate. With Docker Compose add:

```yaml
services:
@ -69,18 +69,18 @@ Or map in all the `/dev/video*` devices.

**Recommended hwaccel Preset**

| CPU Generation | Intel Driver | Recommended Preset | Notes                               |
| -------------- | ------------ | ------------------ | ----------------------------------- |
| gen1 - gen7    | i965         | preset-vaapi       | qsv is not supported                |
| gen8 - gen12   | iHD          | preset-vaapi       | preset-intel-qsv-* can also be used |
| gen13+         | iHD / Xe     | preset-intel-qsv-* |                                     |
| Intel Arc GPU  | iHD / Xe     | preset-intel-qsv-* |                                     |
| CPU Generation | Intel Driver | Recommended Preset  | Notes                                |
| -------------- | ------------ | ------------------- | ------------------------------------ |
| gen1 - gen7    | i965         | preset-vaapi        | qsv is not supported                 |
| gen8 - gen12   | iHD          | preset-vaapi        | preset-intel-qsv-\* can also be used |
| gen13+         | iHD / Xe     | preset-intel-qsv-\* |                                      |
| Intel Arc GPU  | iHD / Xe     | preset-intel-qsv-\* |                                      |

:::

:::note

The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).
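
A minimal sketch of setting that variable in a compose file (the service name is an assumption):

```yaml
services:
  frigate:
    environment:
      - LIBVA_DRIVER_NAME=i965 # force the legacy i965 VAAPI driver
```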

See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is.

@ -191,7 +191,7 @@ VAAPI supports automatic profile selection so it will work automatically with bo

:::note

You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `config.yml` for HA Add-on users](advanced.md#environment_vars).

:::

@ -295,8 +295,7 @@ These instructions were originally based on the [Jellyfin documentation](https:/
## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano)

A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build
with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 5.0+ use the `stable-tensorrt-jp5`
tagged image, or if your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.
with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so Frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.

You will need to use the image with the nvidia container runtime:

@ -306,17 +305,16 @@ You will need to use the image with the nvidia container runtime:
docker run -d \
...
--runtime nvidia
ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp5
ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6
```

### Docker Compose - Jetson

```yaml
version: '2.4'
services:
  frigate:
    ...
    image: ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp5
    image: ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6
    runtime: nvidia # Add this
```

@ -3,10 +3,12 @@ id: index
title: Frigate Configuration
---

For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored.
For Home Assistant Add-on installations, the config file should be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](#accessing-add-on-config-dir).

For all other installation types, the config file should be mapped to `/config/config.yml` inside the container.

It can be named `config.yml` or `config.yaml`, but if both files exist `config.yml` will be preferred and `config.yaml` will be ignored.

It is recommended to start with a minimal configuration, add to it as described in [this guide](../guides/getting_started.md), and use the built-in configuration editor in Frigate's UI, which supports validation.

```yaml
@ -23,9 +25,24 @@ cameras:
            - detect
```

## VSCode Configuration Schema
## Accessing the Home Assistant Add-on configuration directory {#accessing-add-on-config-dir}

VSCode supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VSCode and Frigate as an add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VSCode on another machine.
When running Frigate through the HA Add-on, the Frigate `/config` directory is mapped to `/addon_configs/<addon_directory>` in the host, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running.

| Add-on Variant             | Configuration directory                   |
| -------------------------- | ----------------------------------------- |
| Frigate                    | `/addon_configs/ccab4aaf_frigate`         |
| Frigate (Full Access)      | `/addon_configs/ccab4aaf_frigate-fa`      |
| Frigate Beta               | `/addon_configs/ccab4aaf_frigate-beta`    |
| Frigate Beta (Full Access) | `/addon_configs/ccab4aaf_frigate-fa-beta` |

**Whenever you see `/config` in the documentation, it refers to this directory.**

If, for example, you are running the standard Add-on variant and use the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) to browse your files, you can click _File_ > _Open folder..._ and navigate to `/addon_configs/ccab4aaf_frigate` to access the Frigate `/config` directory and edit the `config.yaml` file. You can also use the built-in file editor in the Frigate UI to edit the configuration file.

## VS Code Configuration Schema

VS Code supports JSON schemas for automatically validating configuration files. You can enable this feature by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the beginning of the configuration file. Replace `frigate_host` with the IP address or hostname of your Frigate server. If you're using both VS Code and Frigate as an Add-on, you should use `ccab4aaf-frigate` instead. Make sure to expose the internal unauthenticated port `5000` when accessing the config from VS Code on another machine.
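
For example, the very first line of the config file would be:

```yaml
# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json
```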

## Environment Variable Substitution

@ -65,10 +82,10 @@ genai:

Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values.

### Raspberry Pi Home Assistant Addon with USB Coral
### Raspberry Pi Home Assistant Add-on with USB Coral

- Single camera with 720p, 5fps stream for detect
- MQTT connected to home assistant mosquitto addon
- MQTT connected to the Home Assistant Mosquitto Add-on
- Hardware acceleration for decoding video
- USB Coral detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not

@ -3,32 +3,34 @@ id: license_plate_recognition
title: License Plate Recognition (LPR)
---

Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
Frigate can recognize license plates on vehicles and automatically add the detected characters to the `recognized_license_plate` field or a known name as a `sub_label` to tracked objects of type `car` or `motorcycle`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.

LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles.

When a plate is recognized, the recognized name is:
When a plate is recognized, the details are:

- Added to the `car` tracked object as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown)
- Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore.
- Added as a `sub_label` (if known) or the `recognized_license_plate` field (if unknown) to a tracked object.
- Viewable in the Review Item Details pane in Review (sub labels).
- Viewable in the Tracked Object Details pane in Explore (sub labels and recognized license plates).
- Filterable through the More Filters menu in Explore.
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the tracked object.
- Published via the `frigate/events` MQTT topic as a `sub_label` (known) or `recognized_license_plate` (unknown) for the `car` or `motorcycle` tracked object.
- Published via the `frigate/tracked_object_update` MQTT topic with `name` (if known) and `plate`.

## Model Requirements

Users running a Frigate+ model (or any custom model that natively detects license plates) should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.

Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that runs on your CPU. In this case, you should _not_ define `license_plate` in your list of objects to track.
Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that can be configured to run on your CPU or GPU. In this case, you should _not_ define `license_plate` in your list of objects to track.

:::note

Frigate needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera or have a zoomed-in view, make sure the camera captures enough of the `car` for Frigate to detect it reliably.
In the default mode, Frigate's LPR needs to first detect a `car` or `motorcycle` before it can recognize a license plate. If you're using a dedicated LPR camera and have a zoomed-in view where a `car` or `motorcycle` will not be detected, you can still run LPR, but the configuration parameters will differ from the default mode. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section below.

:::

## Minimum System Requirements

License plate recognition works by running AI models locally on your system. The models are relatively lightweight and run on your CPU. At least 4GB of RAM is required.
License plate recognition works by running AI models locally on your system. The models are relatively lightweight and can run on your CPU or GPU, depending on your configuration. At least 4GB of RAM is required.

## Configuration

@ -39,22 +41,38 @@ lpr:
  enabled: True
```

Ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run.
Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You should disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras:

```yaml
cameras:
  garage:
    ...
    lpr:
      enabled: False
```

For non-dedicated LPR cameras, ensure that your camera is configured to detect objects of type `car` or `motorcycle`, and that a car or motorcycle is actually being detected by Frigate. Otherwise, LPR will not run.

Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements.
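
As a sketch, assigning the `detect` role looks like this (the camera name and RTSP URL are placeholders):

```yaml
cameras:
  driveway:
    ffmpeg:
      inputs:
        - path: rtsp://192.168.1.10:554/sub # assumed substream URL
          roles:
            - detect # LPR runs on this stream
```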

## Advanced Configuration

Fine-tune the LPR feature using these optional parameters:
Fine-tune the LPR feature using these optional parameters at the global level of your config. The only optional parameters that can be set at the camera level are `enabled`, `min_area`, and `enhancement`.

### Detection

- **`detection_threshold`**: License plate object detection confidence score required before recognition runs.
  - Default: `0.7`
  - Note: This is field only applies to the standalone license plate detection model, `min_score` should be used to filter for models that have license plate detection built in.
- **`min_area`**: Defines the minimum size (in pixels) a license plate must be before recognition runs.
  - Default: `1000` pixels.
  - Note: This field only applies to the standalone license plate detection model; `threshold` and `min_score` object filters should be used for models like Frigate+ that have license plate detection built in.
- **`min_area`**: Defines the minimum area (in pixels) a license plate must be before recognition runs.
  - Default: `1000` pixels. Note: this is intentionally set very low as it is an _area_ measurement (length x width). For reference, 1000 pixels represents a ~32x32 pixel square in your camera image.
  - Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
- **`device`**: Device to use to run license plate recognition models.
  - Default: `CPU`
  - This can be `CPU` or `GPU`. For users without a model that detects license plates natively, using a GPU may increase performance of the models, especially the YOLOv9 license plate detector model. See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
- **`model_size`**: The size of the model used to detect text on plates.
  - Default: `small`
  - This can be `small` or `large`. The `large` model uses an enhanced text detector and is more accurate at finding text on plates but slower than the `small` model. For most users, the small model is recommended. For users in countries with multiple lines of text on plates, the large model is recommended. Note that using the large model does not improve _text recognition_, but it may improve _text detection_.

### Recognition

@ -69,19 +87,35 @@ Fine-tune the LPR feature using these optional parameters:

### Matching

- **`known_plates`**: List of strings or regular expressions that assign custom a `sub_label` to `car` objects when a recognized plate matches a known value.
- **`known_plates`**: List of strings or regular expressions that assign a custom `sub_label` to `car` and `motorcycle` objects when a recognized plate matches a known value.
  - These labels appear in the UI, filters, and notifications.
  - Unknown plates are still saved but are added to the `recognized_license_plate` field rather than the `sub_label`.
- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate.
  - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
  - This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.

### Image Enhancement

- **`enhancement`**: A value between 0 and 10 that adjusts the level of image enhancement applied to captured license plates before they are processed for recognition. This preprocessing step can sometimes improve accuracy but may also have the opposite effect.
  - Default: `0` (no enhancement)
  - Higher values increase contrast, sharpen details, and reduce noise, but excessive enhancement can blur or distort characters, actually making them much harder for Frigate to recognize.
  - This setting is best adjusted at the camera level if running LPR on multiple cameras.
  - If Frigate is already recognizing plates correctly, leave this setting at the default of `0`. However, if you're experiencing frequent character issues or incomplete plates and you can already easily read the plates yourself, try increasing the value gradually, starting at 5 and adjusting as needed. To see how different enhancement levels affect your plates, use the `debug_save_plates` configuration option (see below).

### Debugging

- **`debug_save_plates`**: Set to `True` to save captured text on plates for debugging. These images are stored in `/media/frigate/clips/lpr`, organized into subdirectories by `<camera>/<event_id>`, and named based on the capture timestamp.
  - These saved images are not full plates but rather the specific areas of text detected on the plates. It is normal for the text detection model to sometimes find multiple areas of text on the plate. Use them to analyze what text Frigate recognized and how image enhancement affects detection.
  - **Note:** Frigate does **not** automatically delete these debug images. Once LPR is functioning correctly, you should disable this option and manually remove the saved files to free up storage.
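
A minimal sketch of enabling this while tuning:

```yaml
lpr:
  enabled: True
  debug_save_plates: True # disable and clean up /media/frigate/clips/lpr once tuned
```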

## Configuration Examples

These configuration parameters are available at the global level of your config. The only optional parameters that should be set at the camera level are `enabled`, `min_area`, and `enhancement`.

```yaml
lpr:
  enabled: True
  min_area: 1500 # Ignore plates smaller than 1500 pixels
  min_area: 1500 # Ignore plates with an area (length x width) smaller than 1500 pixels
  min_plate_length: 4 # Only recognize plates with 4 or more characters
  known_plates:
    Wife's Car:
@ -98,7 +132,7 @@ lpr:
```yaml
lpr:
  enabled: True
  min_area: 4000 # Run recognition on larger plates only
  min_area: 4000 # Run recognition on larger plates only (4000 pixels represents a 63x63 pixel square in your image)
  recognition_threshold: 0.85
  format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers
  match_distance: 1 # Allow one character variation in plate matching
@ -110,22 +144,176 @@ lpr:
    - "MN D3163"
```

:::note

If you want to detect cars on cameras but don't want to use resources to run LPR on those cars, you should disable LPR for those specific cameras.

```yaml
cameras:
  side_yard:
    lpr:
      enabled: False
    ...
```

:::

## Dedicated LPR Cameras

Dedicated LPR cameras are single-purpose cameras with powerful optical zoom to capture license plates on distant vehicles, often with fine-tuned settings to capture plates at night.

To mark a camera as a dedicated LPR camera, add `type: "lpr"` to the camera configuration.

Users can configure Frigate's dedicated LPR mode in two different ways depending on whether a Frigate+ (or native `license_plate` detecting) model is used:

### Using a Frigate+ (or Native `license_plate` Detecting) Model

Users running a Frigate+ model (or any model that natively detects `license_plate`) can take advantage of `license_plate` detection. This allows license plates to be treated as standard objects in dedicated LPR mode, meaning that alerts, detections, snapshots, zones, and other Frigate features work as usual, and plates are detected efficiently through your configured object detector.

An example configuration for a dedicated LPR camera using a `license_plate`-detecting model:

```yaml
# LPR global configuration
lpr:
  enabled: True
  device: CPU # can also be GPU if available

# Dedicated LPR camera configuration
cameras:
  dedicated_lpr_camera:
    type: "lpr" # required to use dedicated LPR camera mode
    ffmpeg: ... # add your streams
    detect:
      enabled: True
      fps: 5 # increase to 10 if vehicles move quickly across your frame. Higher than 10 is unnecessary and is not recommended.
      min_initialized: 2
      width: 1920
      height: 1080
    objects:
      track:
        - license_plate
      filters:
        license_plate:
          threshold: 0.7
    motion:
      threshold: 30
      contour_area: 60 # use an increased value to tune out small motion changes
      improve_contrast: false
      mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked
    record:
      enabled: True # disable recording if you only want snapshots
    snapshots:
      enabled: True
    review:
      detections:
        labels:
          - license_plate
```

With this setup:

- License plates are treated as normal objects in Frigate.
- Scores, alerts, detections, snapshots, zones, and object masks work as expected.
- Snapshots will have license plate bounding boxes on them.
- The `frigate/events` MQTT topic will publish tracked object updates.
- Debug view will display `license_plate` bounding boxes.
- If you are using a Frigate+ model and want to submit images from your dedicated LPR camera for model training and fine-tuning, annotate both the `car` / `motorcycle` and the `license_plate` in the snapshots on the Frigate+ website, even if the car is barely visible.

### Using the Secondary LPR Pipeline (Without Frigate+)

If you are not running a Frigate+ model, you can use Frigate's built-in secondary dedicated LPR pipeline. In this mode, Frigate bypasses the standard object detection pipeline and runs a local license plate detector model on the full frame whenever motion activity occurs.

An example configuration for a dedicated LPR camera using the secondary pipeline:

```yaml
# LPR global configuration
lpr:
  enabled: True
  device: CPU # can also be GPU if available and correct Docker image is used
  detection_threshold: 0.7 # change if necessary

# Dedicated LPR camera configuration
cameras:
  dedicated_lpr_camera:
    type: "lpr" # required to use dedicated LPR camera mode
    lpr:
      enabled: True
      enhancement: 3 # optional, enhance the image before trying to recognize characters
    ffmpeg: ... # add your streams
    detect:
      enabled: False # disable Frigate's standard object detection pipeline
      fps: 5 # increase if necessary, though high values may slow down Frigate's enrichments pipeline and use considerable CPU
      width: 1920
      height: 1080
    objects:
      track: [] # required when not using a Frigate+ model for dedicated LPR mode
    motion:
      threshold: 30
      contour_area: 60 # use an increased value here to tune out small motion changes
      improve_contrast: false
      mask: 0.704,0.007,0.709,0.052,0.989,0.055,0.993,0.001 # ensure your camera's timestamp is masked
    record:
      enabled: True # disable recording if you only want snapshots
    review:
      detections:
        enabled: True
        retain:
          default: 7
```

With this setup:

- The standard object detection pipeline is bypassed. Any detected license plates on dedicated LPR cameras are treated similarly to manual events in Frigate. You must **not** specify `license_plate` as an object to track.
- The license plate detector runs on the full frame whenever motion is detected and processes frames according to your detect `fps` setting.
- Review items will always be classified as a `detection`.
- Snapshots will always be saved.
- Zones and object masks are **not** used.
- The `frigate/events` MQTT topic will **not** publish tracked object updates with the license plate bounding box and score, though `frigate/reviews` will publish if recordings are enabled. If a plate is recognized as a known plate, publishing will occur with an updated `sub_label` field. If characters are recognized, publishing will occur with an updated `recognized_license_plate` field.
- License plate snapshots are saved at the highest-scoring moment and appear in Explore.
- Debug view will not show `license_plate` bounding boxes.

### Summary

| Feature                 | Native `license_plate` detecting Model (like Frigate+) | Secondary Pipeline (without native model or Frigate+)           |
| ----------------------- | ------------------------------------------------------ | --------------------------------------------------------------- |
| License Plate Detection | Uses `license_plate` as a tracked object               | Runs a dedicated LPR pipeline                                    |
| FPS Setting             | 5 (increase for fast-moving cars)                      | 5 (increase for fast-moving cars, but it may use much more CPU)  |
| Object Detection        | Standard Frigate+ detection applies                    | Bypasses standard object detection                               |
| Zones & Object Masks    | Supported                                               | Not supported                                                    |
| Debug View              | May show `license_plate` bounding boxes                 | May **not** show `license_plate` bounding boxes                  |
| MQTT `frigate/events`   | Publishes tracked object updates                        | Publishes limited updates                                        |
| Explore                 | Recognized plates available in More Filters             | Recognized plates available in More Filters                      |

By selecting the appropriate configuration, users can optimize their dedicated LPR cameras based on whether they are using a Frigate+ model or the secondary LPR pipeline.

### Best practices for using Dedicated LPR camera mode

- Tune your motion detection and increase the `contour_area` until you see only larger motion boxes being created as cars pass through the frame (likely somewhere between 50-90 for a 1920x1080 detect stream). Increasing the `contour_area` filters out small areas of motion and will prevent excessive resource use from looking for license plates in frames that don't even have a car passing through it.
- Disable the `improve_contrast` motion setting, especially if you are running LPR at night and the frame is mostly dark. This will prevent small pixel changes and smaller areas of motion from triggering license plate detection.
- Ensure your camera's timestamp is covered with a motion mask so that it's not incorrectly detected as a license plate.
- For non-Frigate+ users, you may need to change your camera settings for a clearer image or decrease your global `recognition_threshold` config if your plates are not being accurately recognized at night.
- The secondary pipeline mode runs a local AI model on your CPU or GPU (depending on how `device` is configured) to detect plates. Increasing detect `fps` will increase resource usage proportionally.

## FAQ

### Why isn't my license plate being detected and recognized?

Ensure that:

- Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate, Frigate certainly won't be able to. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling.
- Your camera has a clear, human-readable, well-lit view of the plate. If you can't read the plate's characters, Frigate certainly won't be able to, even if the model is recognizing a `license_plate`. This may require changing video size, quality, or frame rate settings on your camera, depending on your scene and how fast the vehicles are traveling.
- The plate is large enough in the image; try adjusting `min_area` or increasing the resolution of your camera's stream.
- A `car` is detected first, as LPR only runs on recognized vehicles.
- Your `enhancement` level (if you've changed it from the default of `0`) is not too high. Too much enhancement will run too much denoising and cause the plate characters to become blurry and unreadable.

If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track.
If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track.

### Can I run LPR without detecting `car` objects?
Recognized plates will show as object labels in the debug view and will appear in the "Recognized License Plates" select box in the More Filters popout in Explore.

No, Frigate requires a `car` to be detected first before recognizing a license plate.
If you are still having issues detecting plates, start with a basic configuration and see the debugging tips below.

### Can I run LPR without detecting `car` or `motorcycle` objects?

In normal LPR mode, Frigate requires a `car` or `motorcycle` to be detected first before recognizing a license plate. If you have a dedicated LPR camera, you can change the camera `type` to `"lpr"` to use the Dedicated LPR Camera algorithm. This comes with important caveats, though. See the [Dedicated LPR Cameras](#dedicated-lpr-cameras) section above.

### How can I improve detection accuracy?

@ -137,6 +325,10 @@ No, Frigate requires a `car` to be detected first before recognizing a license p

Yes, but performance depends on camera quality, lighting, and infrared capabilities. Make sure your camera can capture clear images of plates at night.

### Can I limit LPR to specific zones?

LPR, like other Frigate enrichments, runs at the camera level rather than the zone level. While you can't restrict LPR to specific zones directly, you can control when recognition runs by setting a `min_area` value to filter out smaller detections.

### How can I match known plates with minor variations?

Use `match_distance` to allow small character mismatches. Alternatively, define multiple variations in `known_plates`.
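
For example, a sketch combining both approaches described above (plate values are hypothetical):

```yaml
lpr:
  match_distance: 1 # tolerate one missing/incorrect character
  known_plates:
    My Car:
      - "ABC123"
      - "A8C123" # an explicit variation for a common OCR misread
```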

@ -144,10 +336,37 @@ Use `match_distance` to allow small character mismatches. Alternatively, define
### How do I debug LPR issues?

- View MQTT messages for `frigate/events` to verify detected plates.
- Adjust `detection_threshold` and `recognition_threshold` settings.
- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`.
- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car` or `motorcycle`.
- Watch the debug view to see plates recognized in real-time. For non-dedicated LPR cameras, the `car` or `motorcycle` label will change to the recognized plate when LPR is enabled and working.
- Adjust `detection_threshold` and `recognition_threshold` settings per the suggestions [above](#advanced-configuration).
- Enable `debug_save_plates` to save images of detected text on plates to the clips directory (`/media/frigate/clips/lpr`). Ensure these images are readable and the text is clear.
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration. These logs are _very_ verbose, so only enable this when necessary.

```yaml
logger:
  default: info
  logs:
    frigate.data_processing.common.license_plate: debug
```

### Will LPR slow down my system?

LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results.
LPR's performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU or GPU for optimal results. If you are running the Dedicated LPR Camera mode, resource usage will be higher compared to users who run a model that natively detects license plates. Tune your motion detection settings for your dedicated LPR camera so that the license plate detection model runs only when necessary.

### I am seeing a YOLOv9 plate detection metric in Enrichment Metrics, but I have a Frigate+ or custom model that detects `license_plate`. Why is the YOLOv9 model running?

The YOLOv9 license plate detector model will run (and the metric will appear) if you've enabled LPR but haven't defined `license_plate` as an object to track, either at the global or camera level.

If you are detecting `car` or `motorcycle` on cameras where you don't want to run LPR, make sure you disable LPR at the camera level. If you do want to run LPR on those cameras, make sure you define `license_plate` as an object to track.

### It looks like Frigate picked up my camera's timestamp or overlay text as the license plate. How can I prevent this?

This could happen if cars or motorcycles travel close to your camera's timestamp or overlay text. You could either move the text through your camera's firmware, or apply a mask to it in Frigate.

If you are using a model that natively detects `license_plate`, add an _object mask_ of type `license_plate` and a _motion mask_ over your text.

If you are not using a model that natively detects `license_plate` or you are using dedicated LPR camera mode, only a _motion mask_ over your text is required.

### I see "Error running ... model" in my logs. How can I fix this?

This usually happens when your GPU is unable to compile or use one of the LPR models. Set your `device` to `CPU` and try again. GPU acceleration only provides a slight performance increase, and the models are lightweight enough to run without issue on most CPUs.
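
A minimal sketch of that fallback:

```yaml
lpr:
  enabled: True
  device: CPU # fall back to CPU if the GPU fails to compile the LPR models
```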

@ -42,6 +42,16 @@ go2rtc:
      - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus)
```

If your camera does not support AAC audio or you are having problems with Live view, try transcoding to AAC audio directly:

```yaml
go2rtc:
  streams:
    rtsp_cam: # <- for RTSP streams
      - "ffmpeg:rtsp://192.168.1.5:554/live0#video=copy#audio=aac" # <- copies video stream and transcodes to aac audio
      - "ffmpeg:rtsp_cam#audio=opus" # <- provides support for WebRTC
```

If your camera does not have audio and you are having problems with Live view, you should have go2rtc send video only:

```yaml
@ -104,9 +114,9 @@ cameras:

WebRTC works by creating a TCP or UDP connection on port `8555`. However, it requires additional configuration:

- For external access, over the internet, set up your router to forward port `8555` to port `8555` on the Frigate device, for both TCP and UDP.
- For internal/local access, unless you are running through the add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate:
- For internal/local access, unless you are running through the HA Add-on, you will also need to set the WebRTC candidates list in the go2rtc config. For example, if `192.168.1.10` is the local IP of the device running Frigate:

```yaml title="/config/frigate.yaml"
```yaml title="config.yml"
go2rtc:
  streams:
    test_cam: ...

@ -121,9 +131,9 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req

:::tip

This extra configuration may not be required if Frigate has been installed as a Home Assistant add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate.
This extra configuration may not be required if Frigate has been installed as a Home Assistant Add-on, as Frigate uses the Supervisor's API to generate a WebRTC candidate.

However, it is recommended if issues occur to define the candidates manually. You should do this if the Frigate add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the add-on logs page during the initialization:
However, if issues occur, it is recommended to define the candidates manually. You should do this if the Frigate Add-on fails to generate a valid candidate. If an error occurs you will see some warnings like the below in the Add-on logs page during the initialization:

```log
[WARN] Failed to get IP address from supervisor

@ -203,9 +213,11 @@ Note that disabling a camera through the config file (`enabled: False`) removes

Frigate intelligently selects the live streaming technology based on a number of factors (user-selected modes like two-way talk, camera settings, browser capabilities, available bandwidth) and prioritizes showing an actual up-to-date live view of your camera's stream as quickly as possible.

When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. You can also try using the _Reset_ button to force a reload of your stream.
When you have go2rtc configured, Live view initially attempts to load and play back your stream with a clearer, fluent stream technology (MSE). An initial timeout, a low bandwidth condition that would cause buffering of the stream, or decoding errors in the stream will cause Frigate to switch to the stream defined by the `detect` role, using the jsmpeg format. This is what the UI labels as "low bandwidth mode". On Live dashboards, the mode will automatically reset when smart streaming is configured and activity stops. Continuous streaming mode does not have an automatic reset mechanism, but you can use the _Reset_ option to force a reload of your stream.

If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the recommendations above or ensure you have enough bandwidth available.
If you are using continuous streaming or you are loading more than a few high resolution streams at once on the dashboard, your browser may struggle to begin playback of your streams before the timeout. Frigate always prioritizes showing a live stream as quickly as possible, even if it is a lower quality jsmpeg stream. You can use the "Reset" link/button to try loading your high resolution stream again.

If you are still experiencing Frigate falling back to low bandwidth mode, you may need to adjust your camera's settings per the [recommendations above](#camera_settings_recommendations).

3. **It doesn't seem like my cameras are streaming on the Live dashboard. Why?**

@ -221,6 +233,8 @@ Note that disabling a camera through the config file (`enabled: False`) removes

This static image is pulled from the stream defined in your config with the `detect` role. When activity is detected, images from the `detect` stream immediately begin updating at ~5 frames per second so you can see the activity until the live player is loaded and begins playing. This usually only takes a second or two. If the live player times out, buffers, or has streaming errors, the jsmpeg player is loaded and plays a video-only stream from the `detect` role. When activity ends, the players are destroyed and a static image is displayed until activity is detected again, and the process repeats.

Smart streaming depends on having your camera's motion `threshold` and `contour_area` config values dialed in. Use the Motion Tuner in Settings in the UI to tune these values in real-time.

This is Frigate's default and recommended setting because it results in a significant bandwidth savings, especially for high resolution cameras.

6. **I have unmuted some cameras on my dashboard, but I do not hear sound. Why?**

@ -77,7 +77,7 @@ At this point if motion is working as desired there is no reason to continue wit

Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone.

However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection.
However, if the preferred day settings do not work well at night it is recommended to use Home Assistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection.

## Tuning For Large Changes In Motion

@ -28,7 +28,7 @@ Frigate supports multiple different detectors that work on different types of ha
**Nvidia**

- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp6` Frigate images when a supported ONNX model is configured.

**Rockchip**

@ -130,8 +130,8 @@ detectors:
    type: edgetpu
    device: pci
```
---

---

## Hailo-8

@ -141,18 +141,21 @@ See the [installation docs](../frigate/installation.md#hailo-8l) for information

### Configuration

When configuring the Hailo detector, you have two options to specify the model: a local **path** or a **URL**.
If both are provided, the detector will first check for the model at the given local path. If the file is not found, it will download the model from the specified URL. The model file is cached under `/config/model_cache/hailo`.

#### YOLO

Use this configuration for YOLO-based models. When no custom model path or URL is provided, the detector automatically downloads the default model based on the detected hardware:

- **Hailo-8 hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)
- **Hailo-8L hardware:** Uses **YOLOv6n** (default: `yolov6n.hef`)

```yaml
detectors:
  hailo8l:
  hailo:
    type: hailo8l
    device: PCIe

@ -163,6 +166,7 @@ model:
  input_pixel_format: rgb
  input_dtype: int
  model_type: yolo-generic
  labelmap_path: /labelmap/coco-80.txt

# The detector automatically selects the default model based on your hardware:
# - For Hailo-8 hardware: YOLOv6n (default: yolov6n.hef)
@ -184,7 +188,7 @@ For SSD-based models, provide either a model path or URL to your compiled SSD mo

```yaml
detectors:
  hailo8l:
  hailo:
    type: hailo8l
    device: PCIe

@ -208,7 +212,7 @@ The Hailo detector supports all YOLO models compiled for Hailo hardware that inc

```yaml
detectors:
  hailo8l:
  hailo:
    type: hailo8l
    device: PCIe

@ -219,12 +223,14 @@ model:
  input_pixel_format: rgb
  input_dtype: int
  model_type: yolo-generic
  labelmap_path: /labelmap/coco-80.txt
  # Optional: Specify a local model path.
  # path: /config/model_cache/hailo/custom_model.hef
  #
  # Alternatively, or as a fallback, provide a custom URL:
  # path: https://custom-model-url.com/path/to/model.hef
```

For additional ready-to-use models, please visit: https://github.com/hailo-ai/hailo_model_zoo

Hailo8 supports all models in the Hailo Model Zoo that include HailoRT post-processing. You're welcome to choose any of these pre-configured models for your implementation.

@ -370,13 +376,13 @@ model:

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

#### YOLOv9
#### YOLO (v3, v4, v7, v9)

[YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.

:::tip

The YOLOv9 detector has been designed to support YOLOv9 models, but may support other YOLO model architectures as well.
The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well.

:::
|
||||
|
||||
@ -389,12 +395,69 @@ detectors:
|
||||
device: GPU
|
||||
|
||||
model:
|
||||
model_type: yolov9
|
||||
width: 640 # <--- should match the imgsize set during model export
|
||||
height: 640 # <--- should match the imgsize set during model export
|
||||
model_type: yolo-generic
|
||||
width: 320 # <--- should match the imgsize set during model export
|
||||
height: 320 # <--- should match the imgsize set during model export
|
||||
input_tensor: nchw
|
||||
input_dtype: float
|
||||
path: /config/model_cache/yolov9-t.onnx
|
||||
path: /config/model_cache/yolo.onnx
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
|
||||
|
||||
#### RF-DETR
|
||||
|
||||
[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more informatoin on downloading the RF-DETR model for use in Frigate.
|
||||
|
||||
:::warning
|
||||
|
||||
Due to the size and complexity of the RF-DETR model, it is only recommended to be run with discrete Arc Graphics Cards.
|
||||
|
||||
:::
|
||||
|
||||
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
ov:
|
||||
type: openvino
|
||||
device: GPU
|
||||
|
||||
model:
|
||||
model_type: rfdetr
|
||||
width: 560
|
||||
height: 560
|
||||
input_tensor: nchw
|
||||
input_dtype: float
|
||||
path: /config/model_cache/rfdetr.onnx
|
||||
```
|
||||
|
||||
#### D-FINE
|
||||
|
||||
[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
|
||||
|
||||
:::warning
|
||||
|
||||
Currently, D-FINE models only run on OpenVINO in CPU mode; GPUs fail to compile the model.
|
||||
|
||||
:::
|
||||
|
||||
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
ov:
|
||||
type: openvino
|
||||
device: CPU
|
||||
|
||||
model:
|
||||
model_type: dfine
|
||||
width: 640
|
||||
height: 640
|
||||
input_tensor: nchw
|
||||
input_dtype: float
|
||||
path: /config/model_cache/dfine_s_obj2coco.onnx
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
@ -485,7 +548,7 @@ frigate:
|
||||
|
||||
### Configuration Parameters
|
||||
|
||||
The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
|
||||
The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration_video.md#nvidia-gpus) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container.
|
||||
|
||||
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated.
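As a minimal sketch, assuming a generated `yolov7-320.trt` at 320x320 (both the file name and dimensions are assumptions that depend on the model you generated):

```yaml
detectors:
  tensorrt:
    type: tensorrt
    device: 0 # GPU index as shown by nvidia-smi within the container

model:
  # assumed file name; use the .trt model you generated
  path: /config/model_cache/tensorrt/yolov7-320.trt
  input_tensor: nchw
  input_pixel_format: rgb
  width: 320
  height: 320
```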
|
||||
|
||||
@ -522,7 +585,7 @@ $ docker run --device=/dev/kfd --device=/dev/dri \
|
||||
...
|
||||
```
|
||||
|
||||
When using docker compose:
|
||||
When using Docker Compose:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
@ -554,7 +617,7 @@ $ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
|
||||
...
|
||||
```
|
||||
|
||||
When using docker compose:
|
||||
When using Docker Compose:
|
||||
|
||||
```yaml
|
||||
services:
|
||||
@ -589,6 +652,7 @@ $ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/
|
||||
### Supported Models
|
||||
|
||||
See [ONNX supported models](#supported-models) for supported models; there are some caveats (a minimal detector config is sketched after this list):
|
||||
|
||||
- D-FINE models are not supported
|
||||
- YOLO-NAS models are known to not run well on integrated GPUs
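With those caveats in mind, selecting the ROCm detector is sketched below; the model settings are assumptions and must match your exported ONNX model:

```yaml
detectors:
  rocm:
    type: rocm

model:
  model_type: yolo-generic
  width: 320 # should match the imgsize set during model export
  height: 320 # should match the imgsize set during model export
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/yolo.onnx
  labelmap_path: /labelmap/coco-80.txt
```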
|
||||
|
||||
@ -610,7 +674,7 @@ If the correct build is used for your GPU then the GPU will be detected and used
|
||||
|
||||
- **Nvidia**
|
||||
- Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image.
|
||||
- Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image.
|
||||
- Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp6` Frigate image.
|
||||
|
||||
:::
|
||||
|
||||
@ -653,13 +717,13 @@ model:
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
#### YOLOv9
|
||||
#### YOLO (v3, v4, v7, v9)
|
||||
|
||||
[YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
|
||||
YOLOv3, YOLOv4, YOLOv7, and [YOLOv9](https://github.com/WongKinYiu/yolov9) models are supported, but not included by default.
|
||||
|
||||
:::tip
|
||||
|
||||
The YOLOv9 detector has been designed to support YOLOv9 models, but may support other YOLO model architectures as well.
|
||||
The YOLO detector has been designed to support YOLOv3, YOLOv4, YOLOv7, and YOLOv9 models, but may support other YOLO model architectures as well. See [the models section](#downloading-yolo-models) for more information on downloading YOLO models for use in Frigate.
|
||||
|
||||
:::
|
||||
|
||||
@ -671,28 +735,65 @@ detectors:
|
||||
type: onnx
|
||||
|
||||
model:
|
||||
model_type: yolov9
|
||||
width: 640 # <--- should match the imgsize set during model export
|
||||
height: 640 # <--- should match the imgsize set during model export
|
||||
model_type: yolo-generic
|
||||
width: 320 # <--- should match the imgsize set during model export
|
||||
height: 320 # <--- should match the imgsize set during model export
|
||||
input_tensor: nchw
|
||||
input_dtype: float
|
||||
path: /config/model_cache/yolov9-t.onnx
|
||||
path: /config/model_cache/yolo.onnx
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
|
||||
|
||||
#### YOLOx
|
||||
|
||||
[YOLOx](https://github.com/Megvii-BaseDetection/YOLOX) models are supported, but not included by default. See [the models section](#downloading-yolo-models) for more information on downloading the YOLOx model for use in Frigate.
|
||||
|
||||
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
onnx:
|
||||
type: onnx
|
||||
|
||||
model:
|
||||
model_type: yolox
|
||||
width: 416 # <--- should match the imgsize set during model export
|
||||
height: 416 # <--- should match the imgsize set during model export
|
||||
input_tensor: nchw
|
||||
input_dtype: float_denorm
|
||||
path: /config/model_cache/yolox_tiny.onnx
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
|
||||
|
||||
#### RF-DETR
|
||||
|
||||
[RF-DETR](https://github.com/roboflow/rf-detr) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-rf-detr-model) for more information on downloading the RF-DETR model for use in Frigate.
|
||||
|
||||
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
onnx:
|
||||
type: onnx
|
||||
|
||||
model:
|
||||
model_type: rfdetr
|
||||
width: 560
|
||||
height: 560
|
||||
input_tensor: nchw
|
||||
input_dtype: float
|
||||
path: /config/model_cache/rfdetr.onnx
|
||||
```
|
||||
|
||||
#### D-FINE
|
||||
|
||||
[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
|
||||
[D-FINE](https://github.com/Peterande/D-FINE) is a DETR based model. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.
|
||||
|
||||
:::warning
|
||||
|
||||
Currently, D-FINE models only run on OpenVINO in CPU mode; GPUs fail to compile the model.
|
||||
|
||||
:::
|
||||
|
||||
After placing the downloaded onnx model in your config/model_cache folder, you can use the following configuration:
|
||||
After placing the downloaded onnx model in your `config/model_cache` folder, you can use the following configuration:
|
||||
|
||||
```yaml
|
||||
detectors:
|
||||
@ -774,66 +875,27 @@ Hardware accelerated object detection is supported on the following SoCs:
|
||||
- RK3576
|
||||
- RK3588
|
||||
|
||||
This implementation uses the [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.0. Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as object detection model.
|
||||
This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/), version v2.3.2.
|
||||
|
||||
### Prerequisites
|
||||
:::tip
|
||||
|
||||
Make sure to follow the [Rockchip specific installation instrucitions](/frigate/installation#rockchip-platform).
|
||||
|
||||
### Configuration
|
||||
|
||||
This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at least to use the detector are labeled as required, all other lines are optional.
|
||||
When using many cameras, one detector may not be enough to keep up. Multiple detectors can be defined, assuming NPU resources are available. An example configuration would be:
|
||||
|
||||
```yaml
|
||||
detectors: # required
|
||||
rknn: # required
|
||||
type: rknn # required
|
||||
# number of NPU cores to use
|
||||
# 0 means choose automatically
|
||||
# increase for better performance if you have a multicore NPU e.g. set to 3 on rk3588
|
||||
detectors:
|
||||
rknn_0:
|
||||
type: rknn
|
||||
num_cores: 0
|
||||
rknn_1:
|
||||
type: rknn
|
||||
num_cores: 0
|
||||
|
||||
model: # required
|
||||
# name of model (will be automatically downloaded) or path to your own .rknn model file
|
||||
# possible values are:
|
||||
# - deci-fp16-yolonas_s
|
||||
# - deci-fp16-yolonas_m
|
||||
# - deci-fp16-yolonas_l
|
||||
# - /config/model_cache/your_custom_model.rknn
|
||||
path: deci-fp16-yolonas_s
|
||||
# width and height of detection frames
|
||||
width: 320
|
||||
height: 320
|
||||
# pixel format of detection frame
|
||||
# default value is rgb but yolo models usually use bgr format
|
||||
input_pixel_format: bgr # required
|
||||
# shape of detection frame
|
||||
input_tensor: nhwc
|
||||
# needs to be adjusted to model, see below
|
||||
labelmap_path: /labelmap.txt # required
|
||||
```
|
||||
|
||||
The correct labelmap must be loaded for each model. If you use a custom model (see notes below), you must make sure to provide the correct labelmap. The table below lists the correct paths for the bundled models:
|
||||
|
||||
| `path` | `labelmap_path` |
|
||||
| --------------------- | --------------------- |
|
||||
| deci-fp16-yolonas\_\* | /labelmap/coco-80.txt |
|
||||
|
||||
### Choosing a model
|
||||
|
||||
:::warning
|
||||
|
||||
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
|
||||
|
||||
:::
|
||||
|
||||
The inference time was determined on a rk3588 with 3 NPU cores.
|
||||
### Prerequisites
|
||||
|
||||
| Model | Size in mb | Inference time in ms |
|
||||
| ------------------- | ---------- | -------------------- |
|
||||
| deci-fp16-yolonas_s | 24 | 25 |
|
||||
| deci-fp16-yolonas_m | 62 | 35 |
|
||||
| deci-fp16-yolonas_l | 81 | 45 |
|
||||
Make sure to follow the [Rockchip specific installation instructions](/frigate/installation#rockchip-platform).
|
||||
|
||||
:::tip
|
||||
|
||||
@ -846,9 +908,99 @@ $ cat /sys/kernel/debug/rknpu/load
|
||||
|
||||
:::
|
||||
|
||||
### Supported Models
|
||||
|
||||
This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required at a minimum to use the detector are labeled as required; all other lines are optional.
|
||||
|
||||
```yaml
|
||||
detectors: # required
|
||||
rknn: # required
|
||||
type: rknn # required
|
||||
# number of NPU cores to use
|
||||
# 0 means choose automatically
|
||||
# increase for better performance if you have a multicore NPU e.g. set to 3 on rk3588
|
||||
num_cores: 0
|
||||
```
|
||||
|
||||
The inference time was determined on a rk3588 with 3 NPU cores.
|
||||
|
||||
| Model | Size in mb | Inference time in ms |
|
||||
| --------------------- | ---------- | -------------------- |
|
||||
| deci-fp16-yolonas_s | 24 | 25 |
|
||||
| deci-fp16-yolonas_m | 62 | 35 |
|
||||
| deci-fp16-yolonas_l | 81 | 45 |
|
||||
| frigate-fp16-yolov9-t | 6 | 35 |
|
||||
| rock-i8-yolox_nano | 3 | 14 |
|
||||
| rock-i8-yolox_tiny | 6 | 18 |
|
||||
|
||||
- All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
|
||||
- You can also provide your own `.rknn` model. Do not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format, see the `rknn-toolkit2` (requires an x86 machine). Note that post-processing is only available for the supported models.
|
||||
|
||||
#### YOLO-NAS
|
||||
|
||||
```yaml
|
||||
model: # required
|
||||
# name of model (will be automatically downloaded) or path to your own .rknn model file
|
||||
# possible values are:
|
||||
# - deci-fp16-yolonas_s
|
||||
# - deci-fp16-yolonas_m
|
||||
# - deci-fp16-yolonas_l
|
||||
# - /config/model_cache/your_yolonas_model.rknn
|
||||
path: deci-fp16-yolonas_s
|
||||
model_type: yolonas
|
||||
width: 320
|
||||
height: 320
|
||||
input_pixel_format: bgr
|
||||
input_tensor: nhwc
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
:::warning
|
||||
|
||||
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
|
||||
|
||||
:::
|
||||
|
||||
#### YOLO (v9)
|
||||
|
||||
```yaml
|
||||
model: # required
|
||||
# name of model (will be automatically downloaded) or path to your own .rknn model file
|
||||
# possible values are:
|
||||
# - frigate-fp16-yolov9-t
|
||||
# - frigate-fp16-yolov9-s
|
||||
# - frigate-fp16-yolov9-m
|
||||
# - frigate-fp16-yolov9-c
|
||||
# - frigate-fp16-yolov9-e
|
||||
# - /config/model_cache/your_yolo_model.rknn
|
||||
path: frigate-fp16-yolov9-t
|
||||
model_type: yolo-generic
|
||||
width: 320
|
||||
height: 320
|
||||
input_tensor: nhwc
|
||||
input_dtype: float
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
#### YOLOx
|
||||
|
||||
```yaml
|
||||
model: # required
|
||||
# name of model (will be automatically downloaded) or path to your own .rknn model file
|
||||
# possible values are:
|
||||
# - rock-i8-yolox_nano
|
||||
# - rock-i8-yolox_tiny
|
||||
# - rock-fp16-yolox_nano
|
||||
# - rock-fp16-yolox_tiny
|
||||
# - /config/model_cache/your_yolox_model.rknn
|
||||
path: rock-i8-yolox_nano
|
||||
model_type: yolox
|
||||
width: 416
|
||||
height: 416
|
||||
input_tensor: nhwc
|
||||
labelmap_path: /labelmap/coco-80.txt
|
||||
```
|
||||
|
||||
### Converting your own onnx model to rknn format
|
||||
|
||||
To convert an onnx model to the rknn format using the [rknn-toolkit2](https://github.com/airockchip/rknn-toolkit2/) you have to:
|
||||
@ -868,7 +1020,7 @@ output_name: "{input_basename}"
|
||||
config:
|
||||
mean_values: [[0, 0, 0]]
|
||||
std_values: [[255, 255, 255]]
|
||||
quant_img_rgb2bgr: true
|
||||
quant_img_RGB2BGR: true
|
||||
```
|
||||
|
||||
Explanation of the parameters:
|
||||
@ -881,7 +1033,7 @@ Explanation of the parameters:
|
||||
- `soc`: the SoC this model was built for (e.g. "rk3588")
|
||||
- `tk_version`: Version of `rknn-toolkit2` (e.g. "2.3.0")
|
||||
- **example**: Specifying `output_name = "frigate-{quant}-{input_basename}-{soc}-v{tk_version}"` could result in a model called `frigate-i8-my_model-rk3588-v2.3.0.rknn`.
|
||||
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.0_EN.pdf).
|
||||
- `config`: Configuration passed to `rknn-toolkit2` for model conversion. For an explanation of all available parameters have a look at section "2.2. Model configuration" of [this manual](https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.2/03_Rockchip_RKNPU_API_Reference_RKNN_Toolkit2_V2.3.2_EN.pdf). A hand-rolled equivalent of the conversion is sketched after this list.
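Under the hood this drives a standard `rknn-toolkit2` conversion. The sketch below is not Frigate's own conversion script, and the file names and target platform are placeholders:

```python
from rknn.api import RKNN

rknn = RKNN()
# mirrors the mean/std/RGB2BGR settings from the yaml config above
rknn.config(
    mean_values=[[0, 0, 0]],
    std_values=[[255, 255, 255]],
    quant_img_RGB2BGR=True,
    target_platform="rk3588",  # placeholder SoC
)
rknn.load_onnx(model="my_model.onnx")  # placeholder input model
rknn.build(do_quantization=False)  # fp16; i8 additionally needs a calibration dataset
rknn.export_rknn("my_model-rk3588.rknn")
rknn.release()
```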
|
||||
|
||||
# Models
|
||||
|
||||
@ -914,6 +1066,26 @@ Make sure you change the batch size to 1 before exporting.
|
||||
|
||||
:::
|
||||
|
||||
### Downloading RF-DETR Model
|
||||
|
||||
To export as ONNX:
|
||||
|
||||
1. `pip3 install rfdetr`
|
||||
2. `python3`
|
||||
3. `from rfdetr import RFDETRBase`
|
||||
4. `x = RFDETRBase()`
|
||||
5. `x.export()`
|
||||
|
||||
#### Additional Configuration
|
||||
|
||||
The input tensor resolution can be customized:
|
||||
|
||||
```python
|
||||
from rfdetr import RFDETRBase
|
||||
x = RFDETRBase(resolution=560) # resolution must be a multiple of 56
|
||||
x.export()
|
||||
```
|
||||
|
||||
### Downloading YOLO-NAS Model
|
||||
|
||||
You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).
|
||||
@ -925,3 +1097,41 @@ The pre-trained YOLO-NAS weights from DeciAI are subject to their license and ca
|
||||
:::
|
||||
|
||||
The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
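After exporting, the matching Frigate model config might look like this sketch (the ONNX file name is an assumption; adjust it to whatever the notebook produced):

```yaml
model:
  model_type: yolonas
  width: 320 # should match the imgsize set during export
  height: 320 # should match the imgsize set during export
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/yolo_nas_s.onnx # assumed file name
  labelmap_path: /labelmap/coco-80.txt
```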
|
||||
|
||||
### Downloading YOLO Models
|
||||
|
||||
#### YOLOx
|
||||
|
||||
YOLOx models can be downloaded [from the YOLOx repo](https://github.com/Megvii-BaseDetection/YOLOX/tree/main/demo/ONNXRuntime).
|
||||
|
||||
#### YOLOv3, YOLOv4, and YOLOv7
|
||||
|
||||
To export as ONNX:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/NateMeyer/tensorrt_demos
|
||||
cd tensorrt_demos/yolo
|
||||
./download_yolo.sh
|
||||
python3 yolo_to_onnx.py -m yolov7-320
|
||||
```
|
||||
|
||||
#### YOLOv9
|
||||
|
||||
YOLOv9 models can be exported using the code below, or they [can be downloaded from Hugging Face](https://huggingface.co/Xenova/yolov9-onnx/tree/main).
|
||||
|
||||
```sh
|
||||
git clone https://github.com/WongKinYiu/yolov9
|
||||
cd yolov9
|
||||
|
||||
# setup the virtual environment so installation doesn't affect main system
|
||||
python3 -m venv ./
|
||||
bin/pip install -r requirements.txt
|
||||
bin/pip install onnx onnxruntime "onnx-simplifier>=0.4.1"
|
||||
|
||||
# download the weights
|
||||
wget -O yolov9-t.pt "https://github.com/WongKinYiu/yolov9/releases/download/v0.1/yolov9-t-converted.pt"
|
||||
|
||||
# prepare and run export script
|
||||
sed -i "s/ckpt = torch.load(attempt_download(w), map_location='cpu')/ckpt = torch.load(attempt_download(w), map_location='cpu', weights_only=False)/g" ./models/experimental.py
|
||||
bin/python3 export.py --weights ./yolov9-t.pt --imgsz 320 --simplify --include onnx
|
||||
```
|
||||
|
@ -146,7 +146,7 @@ The above configuration example can be added globally or on a per camera basis.
|
||||
|
||||
## Can I have "continuous" recordings, but only at certain times?
|
||||
|
||||
Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
|
||||
Using Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
|
||||
|
||||
## How do I export recordings?
|
||||
|
||||
@ -174,6 +174,10 @@ To reduce the output file size the ffmpeg parameter `-qp n` can be utilized (whe
|
||||
|
||||
:::
|
||||
|
||||
## Apple Compatibility with H.265 Streams
|
||||
|
||||
Apple devices running the Safari browser may fail to play back H.265 recordings. The [apple compatibility option](../configuration/camera_specific.md#h265-cameras-via-safari) should be used to ensure seamless playback on Apple devices.
|
||||
|
||||
## Syncing Recordings With Disk
|
||||
|
||||
In some cases the recording files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled, which will tell Frigate to check the file system and delete any db entries for files which don't exist.
|
||||
|
@ -78,16 +78,19 @@ proxy:
|
||||
# Optional: Mapping for headers from upstream proxies. Only used if Frigate's auth
|
||||
# is disabled.
|
||||
# NOTE: Many authentication proxies pass a header downstream with the authenticated
|
||||
# user name. Not all values are supported. It must be a whitelisted header.
|
||||
# user name and role. Not all values are supported. It must be a whitelisted header.
|
||||
# See the docs for more info.
|
||||
header_map:
|
||||
user: x-forwarded-user
|
||||
role: x-forwarded-role
|
||||
# Optional: Url for logging out a user. This sets the location of the logout url in
|
||||
# the UI.
|
||||
logout_url: /api/logout
|
||||
# Optional: Auth secret that is checked against the X-Proxy-Secret header sent from
|
||||
# the proxy. If not set, all requests are trusted regardless of origin.
|
||||
auth_secret: None
|
||||
# Optional: The default role to use for proxy auth. Must be "admin" or "viewer"
|
||||
default_role: viewer
|
||||
|
||||
# Optional: Authentication configuration
|
||||
auth:
|
||||
@ -125,7 +128,7 @@ auth:
|
||||
# NOTE: The default values are for the EdgeTPU detector.
|
||||
# Other detectors will require the model config to be set.
|
||||
model:
|
||||
# Required: path to the model (default: automatic based on detector)
|
||||
# Required: path to the model. Frigate+ models use plus://<model_id> (default: automatic based on detector)
|
||||
path: /edgetpu_model.tflite
|
||||
# Required: path to the labelmap (default: shown below)
|
||||
labelmap_path: /labelmap.txt
|
||||
@ -543,17 +546,35 @@ semantic_search:
|
||||
model_size: "small"
|
||||
|
||||
# Optional: Configuration for face recognition capability
|
||||
# NOTE: enabled, min_area can be overridden at the camera level
|
||||
face_recognition:
|
||||
# Optional: Enable semantic search (default: shown below)
|
||||
# Optional: Enable face recognition (default: shown below)
|
||||
enabled: False
|
||||
# Optional: Set the model size used for embeddings. (default: shown below)
|
||||
# NOTE: small model runs on CPU and large model runs on GPU
|
||||
model_size: "small"
|
||||
# Optional: Minimum face distance score required to mark as a potential match (default: shown below)
|
||||
unknown_score: 0.8
|
||||
# Optional: Minimum face detection score required to detect a face (default: shown below)
|
||||
# NOTE: This only applies when not running a Frigate+ model
|
||||
detection_threshold: 0.7
|
||||
# Optional: Minimum face distance score required to be considered a match (default: shown below)
|
||||
recognition_threshold: 0.9
|
||||
# Optional: Min area of detected face box to consider running face recognition (default: shown below)
|
||||
min_area: 500
|
||||
# Optional: Number of images of recognized faces to save for training (default: shown below)
|
||||
save_attempts: 100
|
||||
# Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
|
||||
blur_confidence_filter: True
|
||||
# Optional: Set the model size used for face recognition. (default: shown below)
|
||||
model_size: small
|
||||
|
||||
# Optional: Configuration for license plate recognition capability
|
||||
# NOTE: enabled, min_area, and enhancement can be overridden at the camera level
|
||||
lpr:
|
||||
# Optional: Enable license plate recognition (default: shown below)
|
||||
enabled: False
|
||||
# Optional: The device to run the models on (default: shown below)
|
||||
device: CPU
|
||||
# Optional: Set the model size used for text detection. (default: shown below)
|
||||
model_size: small
|
||||
# Optional: License plate object confidence score required to begin running recognition (default: shown below)
|
||||
detection_threshold: 0.7
|
||||
# Optional: Minimum area of license plate to begin running recognition (default: shown below)
|
||||
@ -568,6 +589,11 @@ lpr:
|
||||
match_distance: 1
|
||||
# Optional: Known plates to track (strings or regular expressions) (default: shown below)
|
||||
known_plates: {}
|
||||
# Optional: Enhance the detected plate image with contrast adjustment and denoising (default: shown below)
|
||||
# A value between 0 and 10. Higher values are not always better and may perform worse than lower values.
|
||||
enhancement: 0
|
||||
# Optional: Save plate images to /media/frigate/clips/lpr for debugging purposes (default: shown below)
|
||||
debug_save_plates: False
|
||||
|
||||
# Optional: Configuration for AI generated tracked object descriptions
|
||||
# WARNING: Depending on the provider, this will send thumbnails over the internet
|
||||
@ -645,6 +671,9 @@ cameras:
|
||||
# If disabled: config is used but no live stream and no capture etc.
|
||||
# Events/Recordings are still viewable.
|
||||
enabled: True
|
||||
# Optional: camera type used for some Frigate features (default: shown below)
|
||||
# Options are "generic" and "lpr"
|
||||
type: "generic"
|
||||
# Required: ffmpeg settings for the camera
|
||||
ffmpeg:
|
||||
# Required: A list of input streams for the camera. See documentation for more information.
|
||||
@ -875,7 +904,7 @@ telemetry:
|
||||
# NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled.
|
||||
network_bandwidth: False
|
||||
# Optional: Enable the latest version outbound check (default: shown below)
|
||||
# NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions
|
||||
# NOTE: If you use the Home Assistant integration, disabling this will prevent it from reporting new versions
|
||||
version_check: True
|
||||
|
||||
# Optional: Camera groups (default: no groups are setup)
|
||||
|
@ -152,7 +152,7 @@ go2rtc:
|
||||
my_camera: rtsp://username:$%40foo%25@192.168.1.100
|
||||
```
|
||||
|
||||
See [this comment(https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information.
|
||||
See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information.
|
||||
|
||||
## Advanced Restream Configurations
|
||||
|
||||
|
@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.
|
||||
|
||||
## Configuration
|
||||
|
||||
Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Settings page before it can be used. Semantic Search is a global configuration setting.
|
||||
Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Classification Settings page before it can be used. Semantic Search is a global configuration setting.
|
||||
|
||||
```yaml
|
||||
semantic_search:
|
||||
@ -29,9 +29,9 @@ semantic_search:
|
||||
|
||||
:::tip
|
||||
|
||||
The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration or by toggling the switch on the Search Settings page in the UI and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to turn the UI's switch off or set the config back to `False` before restarting Frigate again.
|
||||
The embeddings database can be re-indexed from the existing tracked objects in your database by pressing the "Reindex" button in the Classification Settings in the UI or by adding `reindex: True` to your `semantic_search` configuration and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing.
|
||||
|
||||
If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.
|
||||
If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to reindex as described above.
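A minimal sketch of the config-based approach; set `reindex` back to `False` (or remove it) once indexing completes so it does not run again on every restart:

```yaml
semantic_search:
  enabled: True
  reindex: True
```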
|
||||
|
||||
:::
|
||||
|
||||
@ -72,7 +72,7 @@ For most users, especially native English speakers, the V1 model remains the rec
|
||||
|
||||
:::note
|
||||
|
||||
Switching between V1 and V2 requires reindexing your embeddings. To do this, set `reindex: True` in your Semantic Search configuration and restart Frigate. The embeddings from V1 and V2 are incompatible, and failing to reindex will result in incorrect search results.
|
||||
Switching between V1 and V2 requires reindexing your embeddings. The embeddings from V1 and V2 are incompatible, and failing to reindex will result in incorrect search results.
|
||||
|
||||
:::
|
||||
|
||||
@ -90,19 +90,7 @@ semantic_search:
|
||||
|
||||
If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.
|
||||
|
||||
**NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU.
|
||||
|
||||
- **AMD**
|
||||
|
||||
- ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image.
|
||||
|
||||
- **Intel**
|
||||
|
||||
- OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image.
|
||||
|
||||
- **Nvidia**
|
||||
- Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image.
|
||||
- Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image.
|
||||
See the [Hardware Accelerated Enrichments](/configuration/hardware_acceleration_enrichments.md) documentation.
|
||||
|
||||
:::
|
||||
|
||||
|
@ -5,7 +5,7 @@ title: Snapshots
|
||||
|
||||
Frigate can save a snapshot image to `/media/frigate/clips` for each detected object, named `<camera>-<id>.jpg`. They are also accessible [via the API](../integrations/api/event-snapshot-events-event-id-snapshot-jpg-get.api.mdx).
|
||||
|
||||
For users with Frigate+ enabled, snapshots are accessible in the UI in the Frigate+ pane to allow for quick submission to the Frigate+ service.
|
||||
Snapshots are accessible in the UI in the Explore pane. This allows for quick submission to the Frigate+ service.
|
||||
|
||||
To only save snapshots for objects that enter a specific zone, [see the zone docs](./zones.md#restricting-snapshots-to-specific-zones)
|
||||
|
||||
|
@ -84,7 +84,13 @@ Only car objects can trigger the `front_yard_street` zone and only person can tr
|
||||
|
||||
### Zone Loitering
|
||||
|
||||
Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time before the object will be considered in the zone.
|
||||
Sometimes objects are expected to be passing through a zone, but an object loitering in an area is unexpected. Zones can be configured to have a minimum loitering time after which the object will be considered in the zone.
|
||||
|
||||
:::note
|
||||
|
||||
When using loitering zones, a review item will remain active until the object leaves. Loitering zones are only meant to be used in areas where loitering is not expected behavior.
|
||||
|
||||
:::
|
||||
|
||||
```yaml
|
||||
cameras:
|
||||
|
@ -72,17 +72,17 @@ COPY --from=rootfs / /
|
||||
The images for each board will be built for each Frigate release; this is done in the `.github/workflows/ci.yml` file. The board build workflow will need to be added here.
|
||||
|
||||
```yml
|
||||
- name: Build and push board build
|
||||
uses: docker/bake-action@v3
|
||||
with:
|
||||
push: true
|
||||
targets: board # this is the target in the board.hcl file
|
||||
files: docker/board/board.hcl # this should be updated with the actual board type
|
||||
# the tags should be updated with the actual board types as well
|
||||
# the community board builds should never push to cache, but it can pull from cache
|
||||
set: |
|
||||
board.tags=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-board
|
||||
*.cache-from=type=gha
|
||||
- name: Build and push board build
|
||||
uses: docker/bake-action@v3
|
||||
with:
|
||||
push: true
|
||||
targets: board # this is the target in the board.hcl file
|
||||
files: docker/board/board.hcl # this should be updated with the actual board type
|
||||
# the tags should be updated with the actual board types as well
|
||||
# the community board builds should never push to cache, but it can pull from cache
|
||||
set: |
|
||||
board.tags=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-board
|
||||
*.cache-from=type=gha
|
||||
```
|
||||
|
||||
### Code Owner File
|
||||
@ -91,4 +91,4 @@ The `CODEOWNERS` file should be updated to include the `docker/board` along with
|
||||
|
||||
# Docs
|
||||
|
||||
At a minimum the `installation`, `object_detectors`, `hardware_acceleration`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board.
|
||||
At a minimum the `installation`, `object_detectors`, `hardware_acceleration_video`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board.
|
||||
|
@ -17,15 +17,15 @@ From here, follow the guides for:
|
||||
- [Web Interface](#web-interface)
|
||||
- [Documentation](#documentation)
|
||||
|
||||
### Frigate Home Assistant Addon
|
||||
### Frigate Home Assistant Add-on
|
||||
|
||||
This repository holds the Home Assistant Addon, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab.
|
||||
This repository holds the Home Assistant Add-on, for use with Home Assistant OS and compatible installations. It is the piece that allows you to run Frigate from your Home Assistant Supervisor tab.
|
||||
|
||||
Fork [blakeblackshear/frigate-hass-addons](https://github.com/blakeblackshear/frigate-hass-addons) to your own Github profile, then clone the forked repo to your local machine.
|
||||
|
||||
### Frigate Home Assistant Integration
|
||||
|
||||
This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you run that with the [addon](#frigate-home-assistant-addon) or in a separate Docker instance.
|
||||
This repository holds the custom integration that allows your Home Assistant installation to automatically create entities for your Frigate instance, whether you are running Frigate as a standalone Docker container or as a [Home Assistant Add-on](#frigate-home-assistant-add-on).
|
||||
|
||||
Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshear/frigate-hass-integration) to your own GitHub profile, then clone the forked repo to your local machine.
|
||||
|
||||
@ -77,14 +77,15 @@ Create and place these files in a `debug` folder in the root of the repo. This i
|
||||
|
||||
#### 4. Run Frigate from the command line
|
||||
|
||||
VSCode will start the docker compose file for you and open a terminal window connected to `frigate-dev`.
|
||||
VS Code will start the Docker Compose file for you and open a terminal window connected to `frigate-dev`.
|
||||
|
||||
- Depending on what hardware you're developing on, you may need to amend `docker-compose.yml` in the project root to pass through a USB Coral or GPU for hardware acceleration.
|
||||
- Run `python3 -m frigate` to start the backend.
|
||||
- In a separate terminal window inside VS Code, change into the `web` directory and run `npm install && npm run dev` to start the frontend.
|
||||
|
||||
#### 5. Teardown
|
||||
|
||||
After closing VSCode, you may still have containers running. To close everything down, just run `docker-compose down -v` to cleanup all containers.
|
||||
After closing VS Code, you may still have containers running. To close everything down, just run `docker-compose down -v` to cleanup all containers.
|
||||
|
||||
### Testing
|
||||
|
||||
@ -235,3 +236,11 @@ When testing nginx config changes from within the dev container, the following c
|
||||
```console
|
||||
sudo cp docker/main/rootfs/usr/local/nginx/conf/* /usr/local/nginx/conf/ && sudo /usr/local/nginx/sbin/nginx -s reload
|
||||
```
|
||||
|
||||
## Contributing translations of the Web UI
|
||||
|
||||
Frigate uses [Weblate](https://weblate.org) to manage translations of the Web UI. To contribute translations, sign up for an account at Weblate and navigate to the Frigate NVR project:
|
||||
|
||||
https://hosted.weblate.org/projects/frigate-nvr/
|
||||
|
||||
When translating, maintain the existing key structure while translating only the values. Ensure your translations maintain proper formatting, including any placeholder variables (like `{{example}}`).
|
||||
|
@ -28,7 +28,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings:
|
||||
- Encode Mode: H.264
|
||||
- Resolution: 2688\*1520
|
||||
- Frame Rate(FPS): 15
|
||||
- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](../configuration/live) for more info)
|
||||
- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](/configuration/live#camera_settings_recommendations) for more info)
|
||||
|
||||
**Sub Stream (Detection)**
|
||||
|
||||
|
@ -38,9 +38,10 @@ Frigate supports multiple different detectors that work on different types of ha
|
||||
**Most Hardware**
|
||||
|
||||
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices offering a wide range of compatibility with devices.
|
||||
|
||||
- [Supports many model architectures](../../configuration/object_detectors#configuration)
|
||||
- Runs best with tiny or small size models
|
||||
|
||||
|
||||
- [Google Coral EdgeTPU](#google-coral-tpu): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
|
||||
- [Supports primarily ssdlite and mobilenet model architectures](../../configuration/object_detectors#edge-tpu-detector)
|
||||
|
||||
@ -73,10 +74,10 @@ Frigate supports multiple different detectors that work on different types of ha
|
||||
|
||||
### Hailo-8
|
||||
|
||||
|
||||
Frigate supports both the Hailo-8 and Hailo-8L AI Acceleration Modules on compatible hardware platforms, including the Raspberry Pi 5 with the PCIe hat from the AI kit. The Hailo detector integration in Frigate automatically identifies your hardware type and selects the appropriate default model when a custom model isn't provided. A minimal detector config is sketched after the list below.
|
||||
|
||||
**Default Model Configuration:**
|
||||
|
||||
- **Hailo-8L:** Default model is **YOLOv6n**.
|
||||
- **Hailo-8:** Default model is **YOLOv6n**.
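A minimal detector config, mirroring the object detector docs, is sketched below; when no custom `path` is set, the default model above is chosen automatically:

```yaml
detectors:
  hailo:
    type: hailo8l
    device: PCIe
```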
|
||||
|
||||
@ -89,7 +90,8 @@ In real-world deployments, even with multiple cameras running concurrently, Frig
|
||||
|
||||
### Google Coral TPU
|
||||
|
||||
Frigate supports both the USB and M.2 versions of the Google Coral.
|
||||
Frigate supports both the USB and M.2 versions of the Google Coral.
|
||||
|
||||
- The USB version is compatible with the widest variety of hardware and does not require a driver on the host machine. However, it does lack the automatic throttling features of the other versions.
|
||||
- The PCIe and M.2 versions require installation of a driver on the host. Follow the instructions for your version from https://coral.ai
|
||||
|
||||
@ -107,23 +109,17 @@ More information is available [in the detector docs](/configuration/object_detec
|
||||
|
||||
Inference speeds vary greatly depending on the CPU or GPU used; some known examples of GPU inference times are below:
|
||||
|
||||
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | Notes |
|
||||
| -------------------- | -------------------------- | ------------------------- | -------------------------------------- |
|
||||
| Intel Celeron J4105 | ~ 25 ms | | Can only run one detector instance |
|
||||
| Intel Celeron N3060 | 130 - 150 ms | | Can only run one detector instance |
|
||||
| Intel Celeron N3205U | ~ 120 ms | | Can only run one detector instance |
|
||||
| Intel Celeron N4020 | 50 - 200 ms | | Inference speed depends on other loads |
|
||||
| Intel i3 6100T | 15 - 35 ms | | Can only run one detector instance |
|
||||
| Intel i3 8100 | ~ 15 ms | | |
|
||||
| Intel i5 4590 | ~ 20 ms | | |
|
||||
| Intel i5 6500 | ~ 15 ms | | |
|
||||
| Intel i5 7200u | 15 - 25 ms | | |
|
||||
| Intel i5 7500 | ~ 15 ms | | |
|
||||
| Intel i5 1135G7 | 10 - 15 ms | | |
|
||||
| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | |
|
||||
| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | |
|
||||
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | |
|
||||
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | |
|
||||
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time | Notes |
|
||||
| -------------- | -------------------------- | ------------------------- | ---------------------- | ---------------------------------- |
|
||||
| Intel HD 530 | 15 - 35 ms | | | Can only run one detector instance |
|
||||
| Intel HD 620 | 15 - 25 ms | 320: ~ 35 ms | | |
|
||||
| Intel HD 630 | ~ 15 ms | 320: ~ 30 ms | | |
|
||||
| Intel UHD 730 | ~ 10 ms | 320: ~ 19 ms 640: ~ 54 ms | | |
|
||||
| Intel UHD 770 | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | | |
|
||||
| Intel N100 | ~ 15 ms | 320: ~ 20 ms | | |
|
||||
| Intel Iris XE | ~ 10 ms | 320: ~ 18 ms 640: ~ 50 ms | | |
|
||||
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms 640: ~ 22 ms | 336: 20 ms 448: 27 ms | |
|
||||
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | | |
|
||||
|
||||
### TensorRT - Nvidia GPU
|
||||
|
||||
@ -132,29 +128,31 @@ The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which
|
||||
Inference speeds will vary greatly depending on the GPU and the model used.
|
||||
`tiny` variants are faster than the equivalent non-tiny model; some known examples are below:
|
||||
|
||||
| Name | YoloV7 Inference Time | YOLO-NAS Inference Time |
|
||||
| --------------- | --------------------- | ------------------------- |
|
||||
| GTX 1060 6GB | ~ 7 ms | |
|
||||
| GTX 1070 | ~ 6 ms | |
|
||||
| GTX 1660 SUPER | ~ 4 ms | |
|
||||
| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms |
|
||||
| RTX 3070 Mobile | ~ 5 ms | |
|
||||
| Quadro P400 2GB | 20 - 25 ms | |
|
||||
| Quadro P2000 | ~ 12 ms | |
|
||||
| Name | YOLOv7 Inference Time | YOLO-NAS Inference Time | RF-DETR Inference Time |
|
||||
| --------------- | --------------------- | ------------------------- | ------------------------- |
|
||||
| GTX 1060 6GB | ~ 7 ms | | |
|
||||
| GTX 1070 | ~ 6 ms | | |
|
||||
| GTX 1660 SUPER | ~ 4 ms | | |
|
||||
| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms | 336: ~ 16 ms 560: ~ 40 ms |
|
||||
| RTX 3070 Mobile | ~ 5 ms | | |
|
||||
| RTX 3070 | 4 - 6 ms | 320: ~ 6 ms 640: ~ 12 ms | 336: ~ 14 ms 560: ~ 36 ms |
|
||||
| Quadro P400 2GB | 20 - 25 ms | | |
|
||||
| Quadro P2000 | ~ 12 ms | | |
|
||||
|
||||
### AMD GPUs
|
||||
|
||||
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
|
||||
|
||||
| Name | YoloV9 Inference Time | YOLO-NAS Inference Time |
|
||||
| --------------- | --------------------- | ------------------------- |
|
||||
| AMD 780M | ~ 14 ms | ~ 60 ms |
|
||||
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time |
|
||||
| --------- | --------------------- | ------------------------- |
|
||||
| AMD 780M | ~ 14 ms | 320: ~ 30 ms 640: ~ 60 ms |
|
||||
| AMD 8700G | | 320: ~ 20 ms 640: ~ 40 ms |
|
||||
|
||||
## Community Supported Detectors
|
||||
|
||||
### Nvidia Jetson
|
||||
|
||||
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
|
||||
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration_video#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
|
||||
|
||||
Inference speed will vary depending on the YOLO model, Jetson platform and Jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
|
||||
|
||||
@ -168,6 +166,11 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard
|
||||
- RK3576
|
||||
- RK3588
|
||||
|
||||
| Name | YOLOv9 Inference Time | YOLO-NAS Inference Time | YOLOx Inference Time |
|
||||
| -------------- | --------------------- | --------------------------- | ----------------------- |
|
||||
| rk3588 3 cores | tiny: ~ 35 ms | small: ~ 20 ms med: ~ 30 ms | nano: 14 ms tiny: 18 ms |
|
||||
| rk3566 1 core | | small: ~ 96 ms | |
|
||||
|
||||
The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s.
|
||||
|
||||
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
|
||||
|
@ -3,11 +3,11 @@ id: installation
|
||||
title: Installation
|
||||
---
|
||||
|
||||
Frigate is a Docker container that can be run on any Docker host including as a [HassOS Addon](https://www.home-assistant.io/addons/). Note that a Home Assistant Addon is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant.
|
||||
Frigate is a Docker container that can be run on any Docker host including as a [Home Assistant Add-on](https://www.home-assistant.io/addons/). Note that the Home Assistant Add-on is **not** the same thing as the integration. The [integration](/integrations/home-assistant) is required to integrate Frigate into Home Assistant, whether you are running Frigate as a standalone Docker container or as a Home Assistant Add-on.
|
||||
|
||||
:::tip
|
||||
|
||||
If you already have Frigate installed as a Home Assistant addon, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate.
|
||||
If you already have Frigate installed as a Home Assistant Add-on, check out the [getting started guide](../guides/getting_started#configuring-frigate) to configure Frigate.
|
||||
|
||||
:::
|
||||
|
||||
@ -45,7 +45,7 @@ The following ports are used by Frigate and can be mapped via docker as required
|
||||
| `8554` | RTSP restreaming. By default, these streams are unauthenticated. Authentication can be configured in the go2rtc section of the config. |
|
||||
| `8555` | WebRTC connections for low latency live views. |
|
||||
|
||||
#### Common docker compose storage configurations
|
||||
#### Common Docker Compose storage configurations
|
||||
|
||||
Writing to a local disk or external USB drive:
|
||||
|
||||
@ -73,7 +73,7 @@ Users of the Snapcraft build of Docker cannot use storage locations outside your
|
||||
|
||||
Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**.
|
||||
|
||||
The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose).
|
||||
The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in Docker Compose).
|
||||
|
||||
The Frigate container also stores logs in shm, which can take up to **40MB**, so make sure to take this into account in your math as well.
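As a hedged back-of-the-envelope sketch (the 20-frame buffer and fixed per-camera overhead constants are assumptions taken from the shm-size guidance; verify them against your Frigate version):

```python
# Rough shm-size estimate for N cameras detecting at a given resolution.
width, height, cameras, log_mb = 1280, 720, 2, 40

# assumed: ~20 buffered YUV420 frames (1.5 bytes per pixel) plus fixed overhead per camera
per_camera_mb = (width * height * 1.5 * 20 + 270480) / 1048576

total_mb = per_camera_mb * cameras + log_mb
print(f"--shm-size={round(total_mb)}m")  # ~93m for this example
```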
|
||||
|
||||
@ -165,6 +165,8 @@ devices:
|
||||
- /dev/dma_heap
|
||||
- /dev/rga
|
||||
- /dev/mpp_service
|
||||
volumes:
|
||||
- /sys/:/sys/:ro
|
||||
```
|
||||
|
||||
or add these options to your `docker run` command:
|
||||
@ -175,19 +177,19 @@ or add these options to your `docker run` command:
|
||||
--device /dev/dri \
|
||||
--device /dev/dma_heap \
|
||||
--device /dev/rga \
|
||||
--device /dev/mpp_service
|
||||
--device /dev/mpp_service \
|
||||
--volume /sys/:/sys/:ro
|
||||
```
|
||||
|
||||
#### Configuration
|
||||
|
||||
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration#rockchip-platform).
|
||||
Next, you should configure [hardware object detection](/configuration/object_detectors#rockchip-platform) and [hardware video processing](/configuration/hardware_acceleration_video#rockchip-platform).
|
||||
|
||||
## Docker
|
||||
|
||||
Running in Docker with compose is the recommended install method.
|
||||
Running through Docker with Docker Compose is the recommended install method.
|
||||
|
||||
```yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
frigate:
|
||||
container_name: frigate
|
||||
@ -219,7 +221,7 @@ services:
|
||||
FRIGATE_RTSP_PASSWORD: "password"
|
||||
```
|
||||
|
||||
If you can't use docker compose, you can run the container with something similar to this:
|
||||
If you can't use Docker Compose, you can run the container with something similar to this:
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
@ -243,25 +245,23 @@ docker run -d \
|
||||
|
||||
The official docker image tags for the current stable version are:
|
||||
|
||||
- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64
|
||||
- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64. This build includes support for Hailo devices as well.
|
||||
- `stable-standard-arm64` - Standard Frigate build for arm64
|
||||
- `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU
|
||||
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
|
||||
|
||||
The community supported docker image tags for the current stable version are:
|
||||
|
||||
- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
|
||||
- `stable-tensorrt-jp6` - Frigate build optimized for nvidia Jetson devices running Jetpack 6
|
||||
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
|
||||
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
|
||||
- `stable-h8l` - Frigate build for the Hailo-8L M.2 PICe Raspberry Pi 5 hat
|
||||
|
||||
## Home Assistant Addon
|
||||
## Home Assistant Add-on
|
||||
|
||||
:::warning
|
||||
|
||||
As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported.
|
||||
As of Home Assistant Operating System 10.2 and Home Assistant 2023.6, defining separate network storage for media is supported.
|
||||
|
||||
There are important limitations in Home Assistant Operating System to be aware of:
|
||||
There are important limitations in HA OS to be aware of:
|
||||
|
||||
- Separate local storage for media is not yet supported by Home Assistant
|
||||
- AMD GPUs are not supported because HA OS does not include the mesa driver.
|
||||
@ -275,24 +275,27 @@ See [the network storage guide](/guides/ha_network_storage.md) for instructions
|
||||
|
||||
:::
|
||||
|
||||
HassOS users can install via the addon repository.
|
||||
Home Assistant OS users can install via the Add-on repository.
|
||||
|
||||
1. Navigate to Supervisor > Add-on Store > Repositories
|
||||
2. Add https://github.com/blakeblackshear/frigate-hass-addons
|
||||
3. Install your desired Frigate NVR Addon and navigate to it's page
|
||||
1. In Home Assistant, navigate to _Settings_ > _Add-ons_ > _Add-on Store_ > _Repositories_
|
||||
2. Add `https://github.com/blakeblackshear/frigate-hass-addons`
|
||||
3. Install the desired variant of the Frigate Add-on (see below)
|
||||
4. Setup your network configuration in the `Configuration` tab
|
||||
5. (not for proxy addon) Create the file `frigate.yaml` in your `config` directory with your detailed Frigate configuration
|
||||
6. Start the addon container
|
||||
7. (not for proxy addon) If you are using hardware acceleration for ffmpeg, you may need to disable "Protection mode"
|
||||
5. Start the Add-on
|
||||
6. Use the _Open Web UI_ button to access the Frigate UI, then click in the _cog icon_ > _Configuration editor_ and configure Frigate to your liking
|
||||
|
||||
There are several versions of the addon available:
|
||||
There are several variants of the Add-on available:
|
||||
|
||||
| Addon Version | Description |
|
||||
| ------------------------------ | ---------------------------------------------------------- |
|
||||
| Frigate NVR | Current release with protection mode on |
|
||||
| Frigate NVR (Full Access) | Current release with the option to disable protection mode |
|
||||
| Frigate NVR Beta | Beta release with protection mode on |
|
||||
| Frigate NVR Beta (Full Access) | Beta release with the option to disable protection mode |
|
||||
| Add-on Variant | Description |
|
||||
| -------------------------- | ---------------------------------------------------------- |
|
||||
| Frigate | Current release with protection mode on |
|
||||
| Frigate (Full Access) | Current release with the option to disable protection mode |
|
||||
| Frigate Beta | Beta release with protection mode on |
|
||||
| Frigate Beta (Full Access) | Beta release with the option to disable protection mode |
|
||||
|
||||
If you are using hardware acceleration for ffmpeg, you **may** need to use the _Full Access_ variant of the Add-on. This is because the Frigate Add-on runs in a container with limited access to the host system. The _Full Access_ variant allows you to disable _Protection mode_ and give Frigate full access to the host system.
|
||||
|
||||
You can also edit the Frigate configuration file through the [VS Code Add-on](https://github.com/hassio-addons/addon-vscode) or similar. In that case, the configuration file will be at `/addon_configs/<addon_directory>/config.yml`, where `<addon_directory>` is specific to the variant of the Frigate Add-on you are running. See the list of directories [here](../configuration/index.md#accessing-add-on-config-dir).
|
||||
|
||||
## Kubernetes
|
||||
|
||||
@ -313,7 +316,8 @@ If you choose to run Frigate via LXC in Proxmox the setup can be complex so be p
|
||||
|
||||
:::
|
||||
|
||||
Suggestions include:
|
||||
|
||||
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
|
||||
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
|
||||
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`
|
||||
@ -404,7 +408,7 @@ mkdir -p /share/share_vol2/frigate/media
|
||||
# Also replace the time zone value for 'TZ' in the sample command.
|
||||
# Example command will create a docker container that uses at most 2 CPUs and 4G RAM.
|
||||
# You may need to add "--env=LIBVA_DRIVER_NAME=i965 \" to the following docker run command if you
|
||||
# have certain CPU (e.g., J4125). See https://docs.frigate.video/configuration/hardware_acceleration.
|
||||
# have a certain CPU (e.g., J4125). See https://docs.frigate.video/configuration/hardware_acceleration_video.
|
||||
docker run \
|
||||
--name=frigate \
|
||||
--shm-size=256m \
|
||||
|
@ -115,3 +115,7 @@ section.
|
||||
|
||||
1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera) and sketched after this list.
|
||||
2. You may also prefer to [set up WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats and may require opening ports on your router.
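For illustration, a minimal sketch of that migration, assuming a camera named `front_door` with placeholder credentials (go2rtc serves RTSP on port 8554 by default):

```yaml
go2rtc:
  streams:
    front_door:
      - rtsp://user:password@192.168.1.10:554/stream

cameras:
  front_door:
    ffmpeg:
      inputs:
        # pull from the local go2rtc restream instead of opening a
        # second connection to the camera itself
        - path: rtsp://127.0.0.1:8554/front_door
          input_args: preset-rtsp-restream
          roles:
            - detect
            - record
```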
|
||||
|
||||
## Important considerations
|
||||
|
||||
If you are configuring go2rtc to publish HomeKit camera streams, the configuration is written to the `/dev/shm/go2rtc.yaml` file inside the container upon pairing. These changes must be manually copied into the `go2rtc` section of your Frigate configuration in order to persist through restarts.
|
||||
|
@ -9,7 +9,7 @@ title: Getting started
|
||||
|
||||
If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below.
|
||||
|
||||
If you already have Frigate installed in Docker or as a Home Assistant addon, you can continue to [Configuring Frigate](#configuring-frigate) below.
|
||||
If you already have Frigate installed through Docker or through a Home Assistant Add-on, you can continue to [Configuring Frigate](#configuring-frigate) below.
|
||||
|
||||
:::
|
||||
|
||||
@ -81,7 +81,7 @@ Now you have a minimal Debian server that requires very little maintenance.
|
||||
|
||||
## Installing Frigate
|
||||
|
||||
This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant addon or another way, you can continue to [Configuring Frigate](#configuring-frigate).
|
||||
This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant Add-on or another way, you can continue to [Configuring Frigate](#configuring-frigate).
|
||||
|
||||
### Setup directories
|
||||
|
||||
@ -110,7 +110,6 @@ This `docker-compose.yml` file is just a starter for amd64 devices. You will nee
|
||||
`docker-compose.yml`
|
||||
|
||||
```yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
frigate:
|
||||
container_name: frigate
|
||||
@ -163,14 +162,13 @@ FFmpeg arguments for other types of cameras can be found [here](../configuration
|
||||
|
||||
### Step 3: Configure hardware acceleration (recommended)
|
||||
|
||||
Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware.
|
||||
Now that you have a working camera configuration, you want to set up hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration_video.md) config reference for examples applicable to your hardware.
|
||||
|
||||
Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md):
|
||||
|
||||
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
|
||||
|
||||
```yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
frigate:
|
||||
...
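    # Sketch of the typical additions for an Intel iGPU (verify your
    # render node with `ls /dev/dri`; the preset itself is set in your
    # Frigate config under `ffmpeg: hwaccel_args: preset-vaapi`):
    devices:
      - /dev/dri/renderD128:/dev/dri/renderD128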
|
||||
@ -199,7 +197,6 @@ By default, Frigate will use a single CPU detector. If you have a USB Coral, you
|
||||
`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
|
||||
|
||||
```yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
frigate:
|
||||
...
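    # Sketch of the typical USB Coral pass-through (bus path may vary):
    devices:
      - /dev/bus/usb:/dev/bus/usb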
|
||||
@ -306,6 +303,7 @@ By default, Frigate will retain video of all tracked objects for 10 days. The fu
|
||||
### Step 7: Complete config
|
||||
|
||||
At this point you have a complete config with basic functionality.
|
||||
|
||||
- View [common configuration examples](../configuration/index.md#common-configuration-examples) for a list of typical starting configurations.
|
||||
- View [full config reference](../configuration/reference.md) for a complete list of configuration options.
|
||||
|
||||
|
@ -3,24 +3,18 @@ id: ha_network_storage
|
||||
title: Home Assistant network storage
|
||||
---
|
||||
|
||||
As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons.
|
||||
As of Home Assistant 2023.6, Network Mounted Storage is supported for Add-ons.
|
||||
|
||||
## Setting Up Remote Storage For Frigate
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- HA Core 2023.6 or newer is installed
|
||||
- Running HA OS 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install)
|
||||
- Home Assistant 2023.6 or newer is installed
|
||||
- Running Home Assistant Operating System 10.2 or newer OR Running Supervised with latest os-agent installed (this is required for supervised install)
|
||||
|
||||
### Initial Setup
|
||||
|
||||
1. Stop the Frigate addon
|
||||
2. Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding:
|
||||
|
||||
```yaml
|
||||
database:
|
||||
path: /config/frigate.db
|
||||
```
|
||||
1. Stop the Frigate Add-on
|
||||
|
||||
### Move current data
|
||||
|
||||
@ -43,4 +37,4 @@ Keeping the current data is optional, but the data will need to be moved regardl
|
||||
4. Fill out the additional required info for your particular NAS
|
||||
5. Connect
|
||||
6. Move files from `/media/frigate_tmp` to `/media/frigate` if they were kept in previous step
|
||||
7. Start the Frigate addon
|
||||
7. Start the Frigate Add-on
|
||||
|
@ -51,7 +51,7 @@ When configuring the integration, you will be asked for the `URL` of your Frigat
|
||||
|
||||
### Docker Compose Examples
|
||||
|
||||
If you are running Home Assistant Core and Frigate with Docker Compose on the same device, here are some examples.
|
||||
If you are running Home Assistant and Frigate with Docker Compose on the same device, here are some examples.
|
||||
|
||||
#### Home Assistant running with host networking
|
||||
|
||||
@ -60,7 +60,6 @@ It is not recommended to run Frigate in host networking mode. In this example, y
|
||||
```yaml
|
||||
services:
|
||||
homeassistant:
|
||||
container_name: hass
|
||||
image: ghcr.io/home-assistant/home-assistant:stable
|
||||
network_mode: host
|
||||
...
|
||||
@ -80,7 +79,6 @@ In this example, it is recommended to connect to the authenticated port, for exa
|
||||
```yaml
|
||||
services:
|
||||
homeassistant:
|
||||
container_name: hass
|
||||
image: ghcr.io/home-assistant/home-assistant:stable
|
||||
# network_mode: host
|
||||
...
|
||||
@ -93,17 +91,16 @@ services:
|
||||
...
|
||||
```
|
||||
|
||||
### HassOS Addon
|
||||
### Home Assistant Add-on
|
||||
|
||||
If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network.
|
||||
If you are using the Home Assistant Add-on, the URL should be one of the following, depending on which Add-on variant you are using. Note that if you are using the Proxy Add-on, you should NOT point the integration at the proxy URL. Just enter the same URL used to access Frigate directly from your network.
|
||||
|
||||
| Addon Version | URL |
|
||||
| ------------------------------ | ----------------------------------------- |
|
||||
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
|
||||
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
|
||||
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
|
||||
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
|
||||
| Frigate NVR HailoRT Beta | `http://ccab4aaf-frigate-hailo-beta:5000` |
|
||||
| Add-on Variant | URL |
|
||||
| -------------------------- | ----------------------------------------- |
|
||||
| Frigate | `http://ccab4aaf-frigate:5000` |
|
||||
| Frigate (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
|
||||
| Frigate Beta | `http://ccab4aaf-frigate-beta:5000` |
|
||||
| Frigate Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
|
||||
|
||||
### Frigate running on a separate machine
|
||||
|
||||
|
@ -104,7 +104,9 @@ Message published for each changed tracked object. The first message is publishe
|
||||
|
||||
### `frigate/tracked_object_update`
|
||||
|
||||
Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description.
|
||||
Message published for updates to tracked object metadata, for example:
|
||||
|
||||
#### Generative AI Description Update
|
||||
|
||||
```json
|
||||
{
|
||||
@ -114,6 +116,33 @@ Message published for updates to tracked object metadata, for example when GenAI
|
||||
}
|
||||
```
|
||||
|
||||
#### Face Recognition Update
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "face",
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"name": "John",
|
||||
"score": 0.95,
|
||||
"camera": "front_door_cam",
|
||||
"timestamp": 1607123958.748393,
|
||||
}
|
||||
```
|
||||
|
||||
#### License Plate Recognition Update
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "lpr",
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"name": "John's Car",
|
||||
"plate": "123ABC",
|
||||
"score": 0.95,
|
||||
"camera": "driveway_cam",
|
||||
"timestamp": 1607123958.748393,
|
||||
}
|
||||
```
|
||||
|
||||
### `frigate/reviews`
|
||||
|
||||
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish an `update` message with the same id. When the review activity has ended, a final `end` message is published.
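As a sketch of consuming this topic with the `paho-mqtt` library (the broker address is a placeholder, and the `after.id` field access is an assumption based on the payload shape):

```python
import json

import paho.mqtt.client as mqtt


def on_connect(client, userdata, flags, reason_code, properties=None):
    # subscribe once the connection is established
    client.subscribe("frigate/reviews")


def on_message(client, userdata, msg):
    review = json.loads(msg.payload)
    # "type" distinguishes the initial, `update`, and final `end` messages
    print(review.get("type"), review.get("after", {}).get("id"))


client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.on_connect = on_connect
client.on_message = on_message
client.connect("mqtt.local", 1883)  # placeholder broker address
client.loop_forever()
```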
|
||||
@ -305,6 +334,10 @@ Topic to adjust motion contour area for a camera. Expected value is an integer.
|
||||
|
||||
Topic with current motion contour area for a camera. Published value is an integer.
|
||||
|
||||
### `frigate/<camera_name>/review_status`
|
||||
|
||||
Topic with current activity status of the camera. Possible values are `NONE`, `DETECTION`, or `ALERT`.
|
||||
|
||||
### `frigate/<camera_name>/ptz`
|
||||
|
||||
Topic to send PTZ commands to camera.
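A sketch of sending a command with `paho-mqtt` (the broker address, camera name, and `preset_home` payload are assumptions; consult the PTZ docs for the values your camera accepts):

```python
import paho.mqtt.publish as publish

# ask the camera to move to a named preset
publish.single("frigate/front_door/ptz", "preset_home", hostname="mqtt.local")
```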
|
||||
|
@ -19,11 +19,11 @@ Once logged in, you can generate an API key for Frigate in Settings.
|
||||
|
||||
### Set your API key
|
||||
|
||||
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
|
||||
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Add-on users can set it under Settings > Add-ons > Frigate > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
|
||||
|
||||
:::warning
|
||||
|
||||
You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or HA addon config.
|
||||
You cannot use the `environment_vars` section of your Frigate configuration file to set this environment variable. It must be defined as an environment variable in the docker config or Home Assistant Add-on config.
|
||||
|
||||
:::
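For Docker Compose users, a minimal sketch of passing the key (the value shown is a placeholder):

```yaml
services:
  frigate:
    environment:
      - PLUS_API_KEY=your-api-key-here
```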
|
||||
|
||||
@ -51,6 +51,8 @@ You can view all of your submitted images at [https://plus.frigate.video](https:
|
||||
|
||||
Once you have [requested your first model](../plus/first_model.md) and gotten your own model ID, it can be used with a special model path. No other information needs to be configured for Frigate+ models because it fetches the remaining config from Frigate+ automatically.
|
||||
|
||||
You can either choose the new model from the Frigate+ pane in the Settings page of the Frigate UI, or manually set the model at the root level in your config:
|
||||
|
||||
```yaml
|
||||
model:
|
||||
path: plus://<your_model_id>
|
||||
|
@ -21,7 +21,7 @@ This is a fork (with fixed errors and new features) of [original Double Take](ht
|
||||
|
||||
## [Frigate Notify](https://github.com/0x2142/frigate-notify)
|
||||
|
||||
[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate NVR to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended.
|
||||
[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended.
|
||||
|
||||
## [Frigate telegram](https://github.com/OldTyT/frigate-telegram)
|
||||
|
||||
|
@ -22,3 +22,13 @@ Yes. Models and metadata are stored in the `model_cache` directory within the co
|
||||
### Can I keep using my Frigate+ models even if I do not renew my subscription?
|
||||
|
||||
Yes. Subscriptions to Frigate+ provide access to the infrastructure used to train the models. Models trained with your subscription are yours to keep and use forever. However, do note that the terms and conditions prohibit you from sharing, reselling, or creating derivative products from the models.
|
||||
|
||||
### Why can't I submit images to Frigate+?
|
||||
|
||||
If you've configured your API key and the Frigate+ Settings page in the UI shows that the key is active, you need to ensure that you've enabled both snapshots and `clean_copy` snapshots for the cameras you'd like to submit images for. Note that `clean_copy` is enabled by default when snapshots are enabled.
|
||||
|
||||
```yaml
|
||||
snapshots:
|
||||
enabled: true
|
||||
clean_copy: true
|
||||
```
|
||||
|
@ -3,7 +3,7 @@ id: index
|
||||
title: Models
|
||||
---
|
||||
|
||||
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
|
||||
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
|
||||
|
||||
:::info
|
||||
|
||||
|
@ -32,7 +32,7 @@ The USB coral can draw up to 900mA and this can be too much for some on-device U
|
||||
The USB coral has different IDs when it is uninitialized and initialized.
|
||||
|
||||
- When running Frigate in a VM, Proxmox LXC, etc., you must ensure both device IDs are mapped.
|
||||
- When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed.
|
||||
- When running Home Assistant OS, you may need to run the Full Access variant of the Frigate Add-on with the _Protection mode_ switch disabled so that the Coral can be accessed.
|
||||
|
||||
### Synology 716+II running DSM 7.2.1-69057 Update 5
|
||||
|
||||
|
@ -34,7 +34,7 @@ Frigate generally [recommends cameras with configurable sub streams](/frigate/ha
|
||||
To do this efficiently, the following setup is required:
|
||||
|
||||
1. A GPU or iGPU must be available to do the scaling.
|
||||
2. [ffmpeg presets for hwaccel](/configuration/hardware_acceleration.md) must be used
|
||||
2. [ffmpeg presets for hwaccel](/configuration/hardware_acceleration_video.md) must be used
|
||||
3. Set the desired detection resolution for `detect -> width` and `detect -> height`.
|
||||
|
||||
When this is done correctly, the GPU will do the decoding and scaling, which will result in a small increase in CPU usage but with better results.
|
||||
|
@ -47,10 +47,9 @@ On linux, some helpful tools/commands in diagnosing would be:
|
||||
|
||||
On modern Linux kernels, the system will use some swap if it is enabled. Setting `vm.swappiness=1` no longer means that the kernel will swap only in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to the same value and disable swapping by setting the following docker/podman run parameters:
|
||||
|
||||
**Compose example**
|
||||
**Docker Compose example**
|
||||
|
||||
```yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
frigate:
|
||||
...
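    # Sketch: pin memory and memory+swap to the same value so the kernel
    # cannot swap this container (the sizes shown are illustrative):
    mem_limit: 4g
    memswap_limit: 4g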
|
||||
|
@ -17,6 +17,15 @@ const config: Config = {
|
||||
markdown: {
|
||||
mermaid: true,
|
||||
},
|
||||
i18n: {
|
||||
defaultLocale: 'en',
|
||||
locales: ['en'],
|
||||
localeConfigs: {
|
||||
en: {
|
||||
label: 'English',
|
||||
}
|
||||
},
|
||||
},
|
||||
themeConfig: {
|
||||
algolia: {
|
||||
appId: 'WIURGBNBPY',
|
||||
@ -82,6 +91,16 @@ const config: Config = {
|
||||
label: 'Demo',
|
||||
position: 'right',
|
||||
},
|
||||
{
|
||||
type: 'localeDropdown',
|
||||
position: 'right',
|
||||
dropdownItemsAfter: [
|
||||
{
|
||||
label: '简体中文(社区翻译)',
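// the label reads "Simplified Chinese (community translation)"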
|
||||
href: 'https://docs.frigate-cn.video',
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
href: 'https://github.com/blakeblackshear/frigate',
|
||||
label: 'GitHub',
|
||||
|
docs/package-lock.json (generated, 7818 lines): file diff suppressed because it is too large
@ -17,10 +17,10 @@
|
||||
"write-heading-ids": "docusaurus write-heading-ids"
|
||||
},
|
||||
"dependencies": {
|
||||
"@docusaurus/core": "^3.6.3",
|
||||
"@docusaurus/preset-classic": "^3.6.3",
|
||||
"@docusaurus/theme-mermaid": "^3.6.3",
|
||||
"@docusaurus/core": "^3.7.0",
|
||||
"@docusaurus/plugin-content-docs": "^3.6.3",
|
||||
"@docusaurus/preset-classic": "^3.7.0",
|
||||
"@docusaurus/theme-mermaid": "^3.6.3",
|
||||
"@mdx-js/react": "^3.1.0",
|
||||
"clsx": "^2.1.1",
|
||||
"docusaurus-plugin-openapi-docs": "^4.3.1",
|
||||
|
@ -33,11 +33,12 @@ const sidebars: SidebarsConfig = {
|
||||
"configuration/object_detectors",
|
||||
"configuration/audio_detectors",
|
||||
],
|
||||
Classifiers: [
|
||||
Enrichments: [
|
||||
"configuration/semantic_search",
|
||||
"configuration/genai",
|
||||
"configuration/face_recognition",
|
||||
"configuration/license_plate_recognition",
|
||||
"configuration/bird_classification",
|
||||
],
|
||||
Cameras: [
|
||||
"configuration/cameras",
|
||||
@ -58,10 +59,13 @@ const sidebars: SidebarsConfig = {
|
||||
"configuration/objects",
|
||||
"configuration/stationary_objects",
|
||||
],
|
||||
"Hardware Acceleration": [
|
||||
"configuration/hardware_acceleration_video",
|
||||
"configuration/hardware_acceleration_enrichments",
|
||||
],
|
||||
"Extra Configuration": [
|
||||
"configuration/authentication",
|
||||
"configuration/notifications",
|
||||
"configuration/hardware_acceleration",
|
||||
"configuration/ffmpeg_presets",
|
||||
"configuration/pwa",
|
||||
"configuration/tls",
|
||||
|
docs/src/components/LanguageAlert/index.jsx (new file, 25 lines)
@ -0,0 +1,25 @@
|
||||
import React, { useEffect, useState } from 'react';
|
||||
import { useLocation } from '@docusaurus/router';
|
||||
import styles from './styles.module.css';
|
||||
|
||||
export default function LanguageAlert() {
|
||||
const [showAlert, setShowAlert] = useState(false);
|
||||
const { pathname } = useLocation();
|
||||
|
||||
useEffect(() => {
|
||||
const userLanguage = navigator?.language || 'en';
|
||||
const isChineseUser = userLanguage.includes('zh');
|
||||
setShowAlert(isChineseUser);
|
||||
|
||||
}, [pathname]);
|
||||
|
||||
if (!showAlert) return null;
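  // The strings rendered below are intentionally in Chinese; they read
  // roughly: "We detected that your primary language is Chinese; for a
  // better experience you can visit the community-translated Chinese docs."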
|
||||
|
||||
return (
|
||||
<div className={styles.alert}>
|
||||
<span>检测到您的主要语言为中文,您可以访问由中文社区翻译的</span>
|
||||
<a href={'https://docs.frigate-cn.video'+pathname}>中文文档</a>
|
||||
<span> 以获得更好的体验</span>
|
||||
</div>
|
||||
);
|
||||
}
|
docs/src/components/LanguageAlert/styles.module.css (new file, 13 lines)
@ -0,0 +1,13 @@
|
||||
.alert {
|
||||
padding: 12px;
|
||||
background: #fff8e6;
|
||||
border-bottom: 1px solid #ffd166;
|
||||
text-align: center;
|
||||
font-size: 15px;
|
||||
}
|
||||
|
||||
.alert a {
|
||||
color: #1890ff;
|
||||
font-weight: 500;
|
||||
margin-left: 6px;
|
||||
}
|
docs/src/theme/Navbar/index.js (new file, 15 lines)
@ -0,0 +1,15 @@
|
||||
import React from 'react';
|
||||
import NavbarLayout from '@theme/Navbar/Layout';
|
||||
import NavbarContent from '@theme/Navbar/Content';
|
||||
import LanguageAlert from '../../components/LanguageAlert';
|
||||
|
||||
export default function Navbar() {
|
||||
return (
|
||||
<>
|
||||
<NavbarLayout>
|
||||
<NavbarContent />
|
||||
</NavbarLayout>
|
||||
<LanguageAlert />
|
||||
</>
|
||||
);
|
||||
}
|
docs/static/frigate-api.yaml (vendored, 554 lines changed)
@ -161,6 +161,253 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/users/{username}/role":
|
||||
put:
|
||||
tags:
|
||||
- Auth
|
||||
summary: Update Role
|
||||
operationId: update_role_users__username__role_put
|
||||
parameters:
|
||||
- name: username
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Username
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/AppPutRoleBody"
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/faces:
|
||||
get:
|
||||
tags:
|
||||
- Events
|
||||
summary: Get Faces
|
||||
operationId: get_faces_faces_get
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
/faces/reprocess:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
summary: Reclassify Face
|
||||
operationId: reclassify_face_faces_reprocess_post
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
title: Body
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/train/{name}/classify":
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
summary: Train Face
|
||||
operationId: train_face_faces_train__name__classify_post
|
||||
parameters:
|
||||
- name: name
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Name
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
title: Body
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/{name}/create":
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
summary: Create Face
|
||||
operationId: create_face_faces__name__create_post
|
||||
parameters:
|
||||
- name: name
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Name
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/{name}/register":
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
summary: Register Face
|
||||
operationId: register_face_faces__name__register_post
|
||||
parameters:
|
||||
- name: name
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Name
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
multipart/form-data:
|
||||
schema:
|
||||
$ref: >-
|
||||
#/components/schemas/Body_register_face_faces__name__register_post
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/faces/recognize:
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
summary: Recognize Face
|
||||
operationId: recognize_face_faces_recognize_post
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
multipart/form-data:
|
||||
schema:
|
||||
$ref: "#/components/schemas/Body_recognize_face_faces_recognize_post"
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/faces/{name}/delete":
|
||||
post:
|
||||
tags:
|
||||
- Events
|
||||
summary: Deregister Faces
|
||||
operationId: deregister_faces_faces__name__delete_post
|
||||
parameters:
|
||||
- name: name
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Name
|
||||
requestBody:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: object
|
||||
title: Body
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/lpr/reprocess:
|
||||
put:
|
||||
tags:
|
||||
- Events
|
||||
summary: Reprocess License Plate
|
||||
operationId: reprocess_license_plate_lpr_reprocess_put
|
||||
parameters:
|
||||
- name: event_id
|
||||
in: query
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Event Id
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/reindex:
|
||||
put:
|
||||
tags:
|
||||
- Events
|
||||
summary: Reindex Embeddings
|
||||
operationId: reindex_embeddings_reindex_put
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
/review:
|
||||
get:
|
||||
tags:
|
||||
@ -206,9 +453,7 @@ paths:
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/SeverityEnum"
|
||||
title: Severity
|
||||
$ref: "#/components/schemas/SeverityEnum"
|
||||
- name: before
|
||||
in: query
|
||||
required: false
|
||||
@ -237,6 +482,35 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/review_ids:
|
||||
get:
|
||||
tags:
|
||||
- Review
|
||||
summary: Review Ids
|
||||
operationId: review_ids_review_ids_get
|
||||
parameters:
|
||||
- name: ids
|
||||
in: query
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Ids
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
type: array
|
||||
items:
|
||||
$ref: "#/components/schemas/ReviewSegmentResponse"
|
||||
title: Response Review Ids Review Ids Get
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/review/summary:
|
||||
get:
|
||||
tags:
|
||||
@ -575,6 +849,19 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/metrics:
|
||||
get:
|
||||
tags:
|
||||
- App
|
||||
summary: Metrics
|
||||
description: Expose Prometheus metrics endpoint and update metrics with latest stats
|
||||
operationId: metrics_metrics_get
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
/config:
|
||||
get:
|
||||
tags:
|
||||
@ -731,6 +1018,15 @@ paths:
|
||||
- type: string
|
||||
- type: "null"
|
||||
title: Download
|
||||
- name: stream
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: boolean
|
||||
- type: "null"
|
||||
default: false
|
||||
title: Stream
|
||||
- name: start
|
||||
in: query
|
||||
required: false
|
||||
@ -825,6 +1121,59 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/plus/models:
|
||||
get:
|
||||
tags:
|
||||
- App
|
||||
summary: Plusmodels
|
||||
operationId: plusModels_plus_models_get
|
||||
parameters:
|
||||
- name: filterByCurrentModelDetector
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
type: boolean
|
||||
default: false
|
||||
title: Filterbycurrentmodeldetector
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/recognized_license_plates:
|
||||
get:
|
||||
tags:
|
||||
- App
|
||||
summary: Get Recognized License Plates
|
||||
operationId: get_recognized_license_plates_recognized_license_plates_get
|
||||
parameters:
|
||||
- name: split_joined
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: "null"
|
||||
title: Split Joined
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
/timeline:
|
||||
get:
|
||||
tags:
|
||||
@ -1158,12 +1507,12 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/export/{event_id}/{new_name}":
|
||||
"/export/{event_id}/rename":
|
||||
patch:
|
||||
tags:
|
||||
- Export
|
||||
summary: Export Rename
|
||||
operationId: export_rename_export__event_id___new_name__patch
|
||||
operationId: export_rename_export__event_id__rename_patch
|
||||
parameters:
|
||||
- name: event_id
|
||||
in: path
|
||||
@ -1171,12 +1520,12 @@ paths:
|
||||
schema:
|
||||
type: string
|
||||
title: Event Id
|
||||
- name: new_name
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: New Name
|
||||
requestBody:
|
||||
required: true
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/ExportRenameBody"
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
@ -1409,6 +1758,31 @@ paths:
|
||||
- type: number
|
||||
- type: "null"
|
||||
title: Max Score
|
||||
- name: min_speed
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: number
|
||||
- type: "null"
|
||||
title: Min Speed
|
||||
- name: max_speed
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: number
|
||||
- type: "null"
|
||||
title: Max Speed
|
||||
- name: recognized_license_plate
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
default: all
|
||||
title: Recognized License Plate
|
||||
- name: is_submitted
|
||||
in: query
|
||||
required: false
|
||||
@ -1684,6 +2058,31 @@ paths:
|
||||
- type: number
|
||||
- type: "null"
|
||||
title: Max Score
|
||||
- name: min_speed
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: number
|
||||
- type: "null"
|
||||
title: Min Speed
|
||||
- name: max_speed
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: number
|
||||
- type: "null"
|
||||
title: Max Speed
|
||||
- name: recognized_license_plate
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
default: all
|
||||
title: Recognized License Plate
|
||||
- name: sort
|
||||
in: query
|
||||
required: false
|
||||
@ -1867,9 +2266,7 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/SubmitPlusBody"
|
||||
title: Body
|
||||
$ref: "#/components/schemas/SubmitPlusBody"
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
@ -2056,15 +2453,13 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/EventsCreateBody"
|
||||
$ref: "#/components/schemas/EventsCreateBody"
|
||||
default:
|
||||
source_type: api
|
||||
score: 0
|
||||
duration: 30
|
||||
include_recording: true
|
||||
draw: {}
|
||||
title: Body
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
@ -2305,6 +2700,14 @@ paths:
|
||||
- type: integer
|
||||
- type: "null"
|
||||
title: Height
|
||||
- name: store
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: integer
|
||||
- type: "null"
|
||||
title: Store
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
@ -2407,6 +2810,42 @@ paths:
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
/recordings/summary:
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: All Recordings Summary
|
||||
description: Returns true/false by day indicating if recordings exist
|
||||
operationId: all_recordings_summary_recordings_summary_get
|
||||
parameters:
|
||||
- name: timezone
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
type: string
|
||||
default: utc
|
||||
title: Timezone
|
||||
- name: cameras
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
default: all
|
||||
title: Cameras
|
||||
responses:
|
||||
"200":
|
||||
description: Successful Response
|
||||
content:
|
||||
application/json:
|
||||
schema: {}
|
||||
"422":
|
||||
description: Validation Error
|
||||
content:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/{camera_name}/recordings/summary":
|
||||
get:
|
||||
tags:
|
||||
@ -2461,14 +2900,14 @@ paths:
|
||||
required: false
|
||||
schema:
|
||||
type: number
|
||||
default: 1733228876.15567
|
||||
default: 1744227965.180043
|
||||
title: After
|
||||
- name: before
|
||||
in: query
|
||||
required: false
|
||||
schema:
|
||||
type: number
|
||||
default: 1733232476.15567
|
||||
default: 1744231565.180048
|
||||
title: Before
|
||||
responses:
|
||||
"200":
|
||||
@ -2487,6 +2926,8 @@ paths:
|
||||
tags:
|
||||
- Media
|
||||
summary: Recording Clip
|
||||
description: >-
|
||||
For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.
|
||||
operationId: recording_clip__camera_name__start__start_ts__end__end_ts__clip_mp4_get
|
||||
parameters:
|
||||
- name: camera_name
|
||||
@ -2749,12 +3190,12 @@ paths:
|
||||
application/json:
|
||||
schema:
|
||||
$ref: "#/components/schemas/HTTPValidationError"
|
||||
"/events/{event_id}/thumbnail.jpg":
|
||||
"/events/{event_id}/thumbnail.{extension}":
|
||||
get:
|
||||
tags:
|
||||
- Media
|
||||
summary: Event Thumbnail
|
||||
operationId: event_thumbnail_events__event_id__thumbnail_jpg_get
|
||||
operationId: event_thumbnail_events__event_id__thumbnail__extension__get
|
||||
parameters:
|
||||
- name: event_id
|
||||
in: path
|
||||
@ -2762,6 +3203,12 @@ paths:
|
||||
schema:
|
||||
type: string
|
||||
title: Event Id
|
||||
- name: extension
|
||||
in: path
|
||||
required: true
|
||||
schema:
|
||||
type: string
|
||||
title: Extension
|
||||
- name: max_cache_age
|
||||
in: query
|
||||
required: false
|
||||
@ -3251,6 +3698,12 @@ components:
|
||||
password:
|
||||
type: string
|
||||
title: Password
|
||||
role:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
title: Role
|
||||
default: viewer
|
||||
type: object
|
||||
required:
|
||||
- username
|
||||
@ -3265,6 +3718,35 @@ components:
|
||||
required:
|
||||
- password
|
||||
title: AppPutPasswordBody
|
||||
AppPutRoleBody:
|
||||
properties:
|
||||
role:
|
||||
type: string
|
||||
title: Role
|
||||
type: object
|
||||
required:
|
||||
- role
|
||||
title: AppPutRoleBody
|
||||
Body_recognize_face_faces_recognize_post:
|
||||
properties:
|
||||
file:
|
||||
type: string
|
||||
format: binary
|
||||
title: File
|
||||
type: object
|
||||
required:
|
||||
- file
|
||||
title: Body_recognize_face_faces_recognize_post
|
||||
Body_register_face_faces__name__register_post:
|
||||
properties:
|
||||
file:
|
||||
type: string
|
||||
format: binary
|
||||
title: File
|
||||
type: object
|
||||
required:
|
||||
- file
|
||||
title: Body_register_face_faces__name__register_post
|
||||
DayReview:
|
||||
properties:
|
||||
day:
|
||||
@ -3354,7 +3836,9 @@ components:
|
||||
- type: "null"
|
||||
title: End Time
|
||||
false_positive:
|
||||
type: boolean
|
||||
anyOf:
|
||||
- type: boolean
|
||||
- type: "null"
|
||||
title: False Positive
|
||||
zones:
|
||||
items:
|
||||
@ -3362,7 +3846,9 @@ components:
|
||||
type: array
|
||||
title: Zones
|
||||
thumbnail:
|
||||
type: string
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
title: Thumbnail
|
||||
has_clip:
|
||||
type: boolean
|
||||
@ -3394,6 +3880,7 @@ components:
|
||||
- type: "null"
|
||||
title: Model Type
|
||||
data:
|
||||
type: object
|
||||
title: Data
|
||||
type: object
|
||||
required:
|
||||
@ -3511,6 +3998,11 @@ components:
|
||||
exclusiveMinimum: 0
|
||||
- type: "null"
|
||||
title: Score for sub label
|
||||
camera:
|
||||
anyOf:
|
||||
- type: string
|
||||
- type: "null"
|
||||
title: Camera this object is detected on.
|
||||
type: object
|
||||
required:
|
||||
- subLabel
|
||||
@ -3518,13 +4010,11 @@ components:
|
||||
ExportRecordingsBody:
|
||||
properties:
|
||||
playback:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/PlaybackFactorEnum"
|
||||
$ref: "#/components/schemas/PlaybackFactorEnum"
|
||||
title: Playback factor
|
||||
default: realtime
|
||||
source:
|
||||
allOf:
|
||||
- $ref: "#/components/schemas/PlaybackSourceEnum"
|
||||
$ref: "#/components/schemas/PlaybackSourceEnum"
|
||||
title: Playback source
|
||||
default: recordings
|
||||
name:
|
||||
@ -3536,6 +4026,16 @@ components:
|
||||
title: Image Path
|
||||
type: object
|
||||
title: ExportRecordingsBody
|
||||
ExportRenameBody:
|
||||
properties:
|
||||
name:
|
||||
type: string
|
||||
maxLength: 256
|
||||
title: Friendly name
|
||||
type: object
|
||||
required:
|
||||
- name
|
||||
title: ExportRenameBody
|
||||
Extension:
|
||||
type: string
|
||||
enum:
|
||||
|
@ -9,6 +9,7 @@ import traceback
|
||||
from datetime import datetime, timedelta
|
||||
from functools import reduce
|
||||
from io import StringIO
|
||||
from pathlib import Path as FilePath
|
||||
from typing import Any, Optional
|
||||
|
||||
import aiofiles
|
||||
@ -73,18 +74,22 @@ def go2rtc_streams():
|
||||
)
|
||||
stream_data = r.json()
|
||||
for data in stream_data.values():
|
||||
for producer in data.get("producers", []):
|
||||
for producer in data.get("producers") or []:
|
||||
producer["url"] = clean_camera_user_pass(producer.get("url", ""))
|
||||
return JSONResponse(content=stream_data)
|
||||
|
||||
|
||||
@router.get("/go2rtc/streams/{camera_name}")
|
||||
def go2rtc_camera_stream(camera_name: str):
|
||||
def go2rtc_camera_stream(request: Request, camera_name: str):
|
||||
r = requests.get(
|
||||
f"http://127.0.0.1:1984/api/streams?src={camera_name}&video=all&audio=allµphone"
|
||||
)
|
||||
if not r.ok:
|
||||
logger.error("Failed to fetch streams from go2rtc")
|
||||
camera_config = request.app.frigate_config.cameras.get(camera_name)
|
||||
|
||||
if camera_config and camera_config.enabled:
|
||||
logger.error("Failed to fetch streams from go2rtc")
|
||||
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": "Error fetching stream data"}),
|
||||
status_code=500,
|
||||
@ -174,6 +179,22 @@ def config(request: Request):
|
||||
config["model"]["all_attributes"] = config_obj.model.all_attributes
|
||||
config["model"]["non_logo_attributes"] = config_obj.model.non_logo_attributes
|
||||
|
||||
# Add model plus data if plus is enabled
|
||||
if config["plus"]["enabled"]:
|
||||
model_path = config.get("model", {}).get("path")
|
||||
if model_path:
|
||||
model_json_path = FilePath(model_path).with_suffix(".json")
|
||||
try:
|
||||
with open(model_json_path, "r") as f:
|
||||
model_plus_data = json.load(f)
|
||||
config["model"]["plus"] = model_plus_data
|
||||
except FileNotFoundError:
|
||||
config["model"]["plus"] = None
|
||||
except json.JSONDecodeError:
|
||||
config["model"]["plus"] = None
|
||||
else:
|
||||
config["model"]["plus"] = None
|
||||
|
||||
# use merged labelmap
|
||||
for detector_config in config["detectors"].values():
|
||||
detector_config["model"]["labelmap"] = (
|
||||
@ -619,6 +640,48 @@ def get_sub_labels(split_joined: Optional[int] = None):
|
||||
return JSONResponse(content=sub_labels)
|
||||
|
||||
|
||||
@router.get("/plus/models")
|
||||
def plusModels(request: Request, filterByCurrentModelDetector: bool = False):
|
||||
if not request.app.frigate_config.plus_api.is_active():
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": "Frigate+ is not enabled"}),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
models: dict[any, any] = request.app.frigate_config.plus_api.get_models()
|
||||
|
||||
if not models["list"]:
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": "No models found"}),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
modelList = models["list"]
|
||||
|
||||
# current model type
|
||||
modelType = request.app.frigate_config.model.model_type
|
||||
|
||||
# current detectorType for comparing to supportedDetectors
|
||||
detectorType = list(request.app.frigate_config.detectors.values())[0].type
|
||||
|
||||
validModels = []
|
||||
|
||||
for model in sorted(
|
||||
filter(
|
||||
lambda m: (
|
||||
not filterByCurrentModelDetector
|
||||
or (detectorType in m["supportedDetectors"] and modelType in m["type"])
|
||||
),
|
||||
modelList,
|
||||
),
|
||||
key=(lambda m: m["trainDate"]),
|
||||
reverse=True,
|
||||
):
|
||||
validModels.append(model)
|
||||
|
||||
return JSONResponse(content=validModels)
|
||||
|
||||
|
||||
@router.get("/recognized_license_plates")
|
||||
def get_recognized_license_plates(split_joined: Optional[int] = None):
|
||||
try:
|
||||
|
@ -109,11 +109,11 @@ def get_jwt_secret() -> str:
|
||||
jwt_secret = (
|
||||
Path(os.path.join("/run/secrets", JWT_SECRET_ENV_VAR)).read_text().strip()
|
||||
)
|
||||
# check for the addon options file
|
||||
# check for the add-on options file
|
||||
elif os.path.isfile("/data/options.json"):
|
||||
with open("/data/options.json") as f:
|
||||
raw_options = f.read()
|
||||
logger.debug("Using jwt secret from Home Assistant addon options file.")
|
||||
logger.debug("Using jwt secret from Home Assistant Add-on options file.")
|
||||
options = json.loads(raw_options)
|
||||
jwt_secret = options.get("jwt_secret")
|
||||
|
||||
@ -253,22 +253,24 @@ def auth(request: Request):
|
||||
# pass the user header value from the upstream proxy if a mapping is specified
|
||||
# or use anonymous if none are specified
|
||||
user_header = proxy_config.header_map.user
|
||||
role_header = proxy_config.header_map.role
|
||||
success_response.headers["remote-user"] = (
|
||||
request.headers.get(user_header, default="anonymous")
|
||||
if user_header
|
||||
else "anonymous"
|
||||
)
|
||||
|
||||
role_header = proxy_config.header_map.role
|
||||
role = (
|
||||
request.headers.get(role_header, default="viewer")
|
||||
request.headers.get(role_header, default=proxy_config.default_role)
|
||||
if role_header
|
||||
else "viewer"
|
||||
else proxy_config.default_role
|
||||
)
|
||||
|
||||
# if comma-separated with "admin", use "admin", else "viewer"
|
||||
# if comma-separated with "admin", use "admin", else use default role
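# e.g. a role header of "viewer, admin" resolves to "admin",
# while "viewer" alone falls back to proxy_config.default_role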
|
||||
success_response.headers["remote-role"] = (
|
||||
"admin" if role and "admin" in role else "viewer"
|
||||
"admin"
|
||||
if role and "admin" in [r.strip() for r in role.split(",")]
|
||||
else proxy_config.default_role
|
||||
)
|
||||
|
||||
return success_response
|
||||
|
@ -1,11 +1,11 @@
|
||||
"""Object classification APIs."""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import shutil
|
||||
import string
|
||||
|
||||
import cv2
|
||||
from fastapi import APIRouter, Depends, Request, UploadFile
|
||||
from fastapi.responses import JSONResponse
|
||||
from pathvalidate import sanitize_filename
|
||||
@ -13,10 +13,13 @@ from peewee import DoesNotExist
|
||||
from playhouse.shortcuts import model_to_dict
|
||||
|
||||
from frigate.api.auth import require_role
|
||||
from frigate.api.defs.request.classification_body import RenameFaceBody
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.config.camera import DetectConfig
|
||||
from frigate.const import FACE_DIR
|
||||
from frigate.embeddings import EmbeddingsContext
|
||||
from frigate.models import Event
|
||||
from frigate.util.path import get_event_snapshot
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@ -27,6 +30,9 @@ router = APIRouter(tags=[Tags.events])
|
||||
def get_faces():
|
||||
face_dict: dict[str, list[str]] = {}
|
||||
|
||||
if not os.path.exists(FACE_DIR):
|
||||
return JSONResponse(status_code=200, content={})
|
||||
|
||||
for name in os.listdir(FACE_DIR):
|
||||
face_dir = os.path.join(FACE_DIR, name)
|
||||
|
||||
@ -35,10 +41,9 @@ def get_faces():
|
||||
|
||||
face_dict[name] = []
|
||||
|
||||
for file in sorted(
|
||||
for file in filter(
|
||||
lambda f: (f.lower().endswith((".webp", ".png", ".jpg", ".jpeg"))),
|
||||
os.listdir(face_dir),
|
||||
key=lambda f: os.path.getctime(os.path.join(face_dir, f)),
|
||||
reverse=True,
|
||||
):
|
||||
face_dict[name].append(file)
|
||||
|
||||
@ -87,26 +92,68 @@ def train_face(request: Request, name: str, body: dict = None):
|
||||
)
|
||||
|
||||
json: dict[str, any] = body or {}
|
||||
training_file = os.path.join(
|
||||
FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
|
||||
)
|
||||
training_file_name = sanitize_filename(json.get("training_file", ""))
|
||||
training_file = os.path.join(FACE_DIR, f"train/{training_file_name}")
|
||||
event_id = json.get("event_id")
|
||||
|
||||
if not training_file or not os.path.isfile(training_file):
|
||||
if not training_file_name and not event_id:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": False,
|
||||
"message": f"Invalid filename or no file exists: {training_file}",
|
||||
"message": "A training file or event_id must be passed.",
|
||||
}
|
||||
),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
if training_file_name and not os.path.isfile(training_file):
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": False,
|
||||
"message": f"Invalid filename or no file exists: {training_file_name}",
|
||||
}
|
||||
),
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
sanitized_name = sanitize_filename(name)
|
||||
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
|
||||
new_name = f"{sanitized_name}-{rand_id}.webp"
|
||||
new_file = os.path.join(FACE_DIR, f"{sanitized_name}/{new_name}")
|
||||
shutil.move(training_file, new_file)
|
||||
new_name = f"{sanitized_name}-{datetime.datetime.now().timestamp()}.webp"
|
||||
new_file_folder = os.path.join(FACE_DIR, f"{sanitized_name}")
|
||||
|
||||
if not os.path.exists(new_file_folder):
|
||||
os.mkdir(new_file_folder)
|
||||
|
||||
if training_file_name:
|
||||
shutil.move(training_file, os.path.join(new_file_folder, new_name))
|
||||
else:
|
||||
try:
|
||||
event: Event = Event.get(Event.id == event_id)
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": False,
|
||||
"message": f"Invalid event_id or no event exists: {event_id}",
|
||||
}
|
||||
),
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
snapshot = get_event_snapshot(event)
|
||||
face_box = event.data["attributes"][0]["box"]
|
||||
detect_config: DetectConfig = request.app.frigate_config.cameras[
|
||||
event.camera
|
||||
].detect
|
||||
|
||||
# crop to the face box, inset by 2 px on each side to exclude the drawn bounding box
|
||||
x1 = int(face_box[0] * detect_config.width) + 2
|
||||
y1 = int(face_box[1] * detect_config.height) + 2
|
||||
x2 = x1 + int(face_box[2] * detect_config.width) - 4
|
||||
y2 = y1 + int(face_box[3] * detect_config.height) - 4
|
||||
face = snapshot[y1:y2, x1:x2]
|
||||
cv2.imwrite(os.path.join(new_file_folder, new_name), face)
|
||||
|
||||
context: EmbeddingsContext = request.app.embeddings
|
||||
context.clear_face_classifier()
|
||||
@ -115,7 +162,7 @@ def train_face(request: Request, name: str, body: dict = None):
|
||||
content=(
|
||||
{
|
||||
"success": True,
|
||||
"message": f"Successfully saved {training_file} as {new_name}.",
|
||||
"message": f"Successfully saved {training_file_name} as {new_name}.",
|
||||
}
|
||||
),
|
||||
status_code=200,
|
||||
@ -149,6 +196,42 @@ async def register_face(request: Request, name: str, file: UploadFile):
|
||||
|
||||
context: EmbeddingsContext = request.app.embeddings
|
||||
result = context.register_face(name, await file.read())
|
||||
|
||||
if not isinstance(result, dict):
|
||||
return JSONResponse(
|
||||
status_code=500,
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Could not process request. Try restarting Frigate.",
|
||||
},
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
status_code=200 if result.get("success", True) else 400,
|
||||
content=result,
|
||||
)
|
||||
|
||||
|
||||
@router.post("/faces/recognize")
|
||||
async def recognize_face(request: Request, file: UploadFile):
|
||||
if not request.app.frigate_config.face_recognition.enabled:
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={"message": "Face recognition is not enabled.", "success": False},
|
||||
)
|
||||
|
||||
context: EmbeddingsContext = request.app.embeddings
|
||||
result = context.recognize_face(await file.read())
|
||||
|
||||
if not isinstance(result, dict):
|
||||
return JSONResponse(
|
||||
status_code=500,
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Could not process request. Try restarting Frigate.",
|
||||
},
|
||||
)
|
||||
|
||||
return JSONResponse(
|
||||
status_code=200 if result.get("success", True) else 400,
|
||||
content=result,
|
||||
@ -166,12 +249,6 @@ def deregister_faces(request: Request, name: str, body: dict = None):
|
||||
json: dict[str, any] = body or {}
|
||||
list_of_ids = json.get("ids", "")
|
||||
|
||||
if not list_of_ids or len(list_of_ids) == 0:
|
||||
return JSONResponse(
|
||||
content=({"success": False, "message": "Not a valid list of ids"}),
|
||||
status_code=404,
|
||||
)
|
||||
|
||||
context: EmbeddingsContext = request.app.embeddings
|
||||
context.delete_face_ids(
|
||||
name, map(lambda file: sanitize_filename(file), list_of_ids)
|
||||
@ -182,6 +259,35 @@ def deregister_faces(request: Request, name: str, body: dict = None):
|
||||
)
|
||||
|
||||
|
||||
@router.put("/faces/{old_name}/rename", dependencies=[Depends(require_role(["admin"]))])
|
||||
def rename_face(request: Request, old_name: str, body: RenameFaceBody):
|
||||
if not request.app.frigate_config.face_recognition.enabled:
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={"message": "Face recognition is not enabled.", "success": False},
|
||||
)
|
||||
|
||||
context: EmbeddingsContext = request.app.embeddings
|
||||
try:
|
||||
context.rename_face(old_name, body.new_name)
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": True,
|
||||
"message": f"Successfully renamed face to {body.new_name}.",
|
||||
},
|
||||
status_code=200,
|
||||
)
|
||||
except ValueError as e:
|
||||
logger.error(e)
|
||||
return JSONResponse(
|
||||
status_code=400,
|
||||
content={
|
||||
"message": "Error renaming face. Check Frigate logs.",
|
||||
"success": False,
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@router.put("/lpr/reprocess")
|
||||
def reprocess_license_plate(request: Request, event_id: str):
|
||||
if not request.app.frigate_config.lpr.enabled:
|
||||
@ -213,3 +319,49 @@ def reprocess_license_plate(request: Request, event_id: str):
|
||||
content=response,
|
||||
status_code=200,
|
||||
)
|
||||
|
||||
|
||||
@router.put("/reindex", dependencies=[Depends(require_role(["admin"]))])
|
||||
def reindex_embeddings(request: Request):
|
||||
if not request.app.frigate_config.semantic_search.enabled:
|
||||
message = (
|
||||
"Cannot reindex tracked object embeddings, Semantic Search is not enabled."
|
||||
)
|
||||
logger.error(message)
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{
|
||||
"success": False,
|
||||
"message": message,
|
||||
}
|
||||
),
|
||||
status_code=400,
|
||||
)
|
||||
|
||||
context: EmbeddingsContext = request.app.embeddings
|
||||
response = context.reindex_embeddings()
|
||||
|
||||
if response == "started":
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": True,
|
||||
"message": "Embeddings reindexing has started.",
|
||||
},
|
||||
status_code=202, # 202 Accepted
|
||||
)
|
||||
elif response == "in_progress":
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Embeddings reindexing is already in progress.",
|
||||
},
|
||||
status_code=409, # 409 Conflict
|
||||
)
|
||||
else:
|
||||
return JSONResponse(
|
||||
content={
|
||||
"success": False,
|
||||
"message": "Failed to start reindexing.",
|
||||
},
|
||||
status_code=500,
|
||||
)
|
||||
|
frigate/api/defs/request/classification_body.py (new file, 5 lines)
@ -0,0 +1,5 @@
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class RenameFaceBody(BaseModel):
|
||||
new_name: str
|
@ -13,6 +13,15 @@ class EventsSubLabelBody(BaseModel):
    )


class EventsLPRBody(BaseModel):
    recognizedLicensePlate: str = Field(
        title="Recognized License Plate", max_length=100
    )
    recognizedLicensePlateScore: Optional[float] = Field(
        title="Score for recognized license plate", default=None, gt=0.0, le=1.0
    )


class EventsDescriptionBody(BaseModel):
    description: Union[str, None] = Field(title="The description of the event")

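To illustrate the validation the new EventsLPRBody model above enforces, a small sketch (assuming the model is importable; the plate values are made up): the plate string is capped at 100 characters and the optional score must fall in (0.0, 1.0].

from pydantic import ValidationError

body = EventsLPRBody(
    recognizedLicensePlate="ABC1234",
    recognizedLicensePlateScore=0.87,
)  # valid

try:
    EventsLPRBody(recognizedLicensePlate="ABC1234", recognizedLicensePlateScore=1.5)
except ValidationError:
    pass  # rejected: 1.5 is outside (0.0, 1.0]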
@ -31,6 +31,7 @@ from frigate.api.defs.request.events_body import (
    EventsDeleteBody,
    EventsDescriptionBody,
    EventsEndBody,
    EventsLPRBody,
    EventsSubLabelBody,
    SubmitPlusBody,
)
@ -701,6 +702,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
                    for k, v in event["data"].items()
                    if k
                    in [
                        "attributes",
                        "type",
                        "score",
                        "top_score",
@ -723,13 +725,15 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
    if (sort is None or sort == "relevance") and search_results:
        processed_events.sort(key=lambda x: x.get("search_distance", float("inf")))
    elif min_score is not None and max_score is not None and sort == "score_asc":
        processed_events.sort(key=lambda x: x["score"])
        processed_events.sort(key=lambda x: x["data"]["score"])
    elif min_score is not None and max_score is not None and sort == "score_desc":
        processed_events.sort(key=lambda x: x["score"], reverse=True)
        processed_events.sort(key=lambda x: x["data"]["score"], reverse=True)
    elif min_speed is not None and max_speed is not None and sort == "speed_asc":
        processed_events.sort(key=lambda x: x["average_estimated_speed"])
        processed_events.sort(key=lambda x: x["data"]["average_estimated_speed"])
    elif min_speed is not None and max_speed is not None and sort == "speed_desc":
        processed_events.sort(key=lambda x: x["average_estimated_speed"], reverse=True)
        processed_events.sort(
            key=lambda x: x["data"]["average_estimated_speed"], reverse=True
        )
    elif sort == "date_asc":
        processed_events.sort(key=lambda x: x["start_time"])
    else:
@ -1098,6 +1102,60 @@ def set_sub_label(
    )


@router.post(
    "/events/{event_id}/recognized_license_plate",
    response_model=GenericResponse,
    dependencies=[Depends(require_role(["admin"]))],
)
def set_plate(
    request: Request,
    event_id: str,
    body: EventsLPRBody,
):
    try:
        event: Event = Event.get(Event.id == event_id)
    except DoesNotExist:
        event = None

    if request.app.detected_frames_processor:
        tracked_obj: TrackedObject = None

        for state in request.app.detected_frames_processor.camera_states.values():
            tracked_obj = state.tracked_objects.get(event_id)

            if tracked_obj is not None:
                break
    else:
        tracked_obj = None

    if not event and not tracked_obj:
        return JSONResponse(
            content=(
                {"success": False, "message": "Event " + event_id + " not found."}
            ),
            status_code=404,
        )

    new_plate = body.recognizedLicensePlate
    new_score = body.recognizedLicensePlateScore

    if new_plate == "":
        new_plate = None
        new_score = None

    request.app.event_metadata_updater.publish(
        EventMetadataTypeEnum.recognized_license_plate, (event_id, new_plate, new_score)
    )

    return JSONResponse(
        content={
            "success": True,
            "message": f"Event {event_id} license plate set to {new_plate if new_plate is not None else 'None'}",
        },
        status_code=200,
    )

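A hedged usage sketch for the new endpoint (the base URL and event id are placeholders; the admin role is required). Note the behavior above: an empty plate string clears both the plate and the score.

import requests

requests.post(
    "http://localhost:5000/api/events/1718000000.123456-abc123/recognized_license_plate",
    json={
        "recognizedLicensePlate": "ABC1234",  # "" clears plate and score
        "recognizedLicensePlateScore": 0.87,  # optional, in (0.0, 1.0]
    },
)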
@router.post(
    "/events/{event_id}/description",
    response_model=GenericResponse,

@ -1,7 +1,9 @@
"""Image and video apis."""

import asyncio
import glob
import logging
import math
import os
import subprocess as sp
import time
@ -109,9 +111,12 @@ def imagestream(
@router.get("/{camera_name}/ptz/info")
async def camera_ptz_info(request: Request, camera_name: str):
    if camera_name in request.app.frigate_config.cameras:
        return JSONResponse(
            content=await request.app.onvif.get_camera_info(camera_name),
        # Schedule get_camera_info in the OnvifController's event loop
        future = asyncio.run_coroutine_threadsafe(
            request.app.onvif.get_camera_info(camera_name), request.app.onvif.loop
        )
        result = future.result()
        return JSONResponse(content=result)
    else:
        return JSONResponse(
            content={"success": False, "message": "Camera not found"},
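The change above stops awaiting get_camera_info on the request's event loop and instead submits it to the OnvifController's own loop, blocking on the result. A self-contained sketch of that pattern (all names here are illustrative, not Frigate's):

import asyncio
import threading

# A dedicated loop running in its own thread, like the controller's loop.
loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

async def get_info() -> dict:
    return {"status": "ok"}

# Schedule the coroutine on the other loop and block until it completes.
future = asyncio.run_coroutine_threadsafe(get_info(), loop)
print(future.result(timeout=5))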
@ -240,25 +245,50 @@ def get_snapshot_from_recording(
            content={"success": False, "message": "Camera not found"},
            status_code=404,
        )

    recording_query = (
        Recordings.select(
            Recordings.path,
            Recordings.start_time,
        )
        .where(
            (
                (frame_time >= Recordings.start_time)
                & (frame_time <= Recordings.end_time)
            )
        )
        .where(Recordings.camera == camera_name)
        .order_by(Recordings.start_time.desc())
        .limit(1)
    )
    recording: Recordings | None = None

    try:
        recording: Recordings = recording_query.get()
        recording = (
            Recordings.select(
                Recordings.path,
                Recordings.start_time,
            )
            .where(
                (
                    (frame_time >= Recordings.start_time)
                    & (frame_time <= Recordings.end_time)
                )
            )
            .where(Recordings.camera == camera_name)
            .order_by(Recordings.start_time.desc())
            .limit(1)
            .get()
        )
    except DoesNotExist:
        # try again with a rounded frame time as it may be between
        # the rounded segment start time
        frame_time = math.ceil(frame_time)
        try:
            recording = (
                Recordings.select(
                    Recordings.path,
                    Recordings.start_time,
                )
                .where(
                    (
                        (frame_time >= Recordings.start_time)
                        & (frame_time <= Recordings.end_time)
                    )
                )
                .where(Recordings.camera == camera_name)
                .order_by(Recordings.start_time.desc())
                .limit(1)
                .get()
            )
        except DoesNotExist:
            pass

    if recording is not None:
        time_in_segment = frame_time - recording.start_time
        codec = "png" if format == "png" else "mjpeg"
        mime_type = "png" if format == "png" else "jpeg"
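The retry exists because segment start times can be rounded when written, so an exact frame_time may miss the stored window by a fraction of a second; rounding the lookup time up with math.ceil lands it back inside. In miniature (the helper and data below are hypothetical):

import math

def find_segment(frame_time: float, segments: list[tuple[float, float]]):
    # Mirror the two-pass lookup: exact time first, then the rounded-up time.
    for t in (frame_time, math.ceil(frame_time)):
        for start, end in segments:
            if start <= t <= end:
                return (start, end)
    return None

# 12.4 misses the (13.0, 20.0) segment, but ceil(12.4) == 13 falls inside it.
print(find_segment(12.4, [(13.0, 20.0)]))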
@ -279,7 +309,7 @@ def get_snapshot_from_recording(
                status_code=404,
            )
        return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
    except DoesNotExist:
    else:
        return JSONResponse(
            content={
                "success": False,
@ -511,7 +541,10 @@ def recordings(
    return JSONResponse(content=list(recordings))


@router.get("/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4")
@router.get(
    "/{camera_name}/start/{start_ts}/end/{end_ts}/clip.mp4",
    description="For iOS devices, use the master.m3u8 HLS link instead of clip.mp4. Safari does not reliably process progressive mp4 files.",
)
def recording_clip(
    request: Request,
    camera_name: str,
@ -876,7 +909,7 @@ def event_thumbnail(
    elif extension == "webp":
        quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60]

    _, img = cv2.imencode(f".{img}", thumbnail, quality_params)
    _, img = cv2.imencode(f".{extension}", thumbnail, quality_params)
    thumbnail_bytes = img.tobytes()

    return Response(
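The one-character fix above matters because cv2.imencode selects the encoder from its extension argument; interpolating the image array itself produced a nonsense extension and a failed encode. A minimal sketch of the corrected call (synthetic image, arbitrary quality):

import cv2
import numpy as np

thumbnail = np.zeros((175, 311, 3), dtype=np.uint8)  # stand-in frame
extension = "webp"
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60]

ok, buf = cv2.imencode(f".{extension}", thumbnail, quality_params)
assert ok
thumbnail_bytes = buf.tobytes()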
@ -58,13 +58,9 @@ async def review(
    )

    clauses = [
        (
            (ReviewSegment.start_time > after)
            & (
                (ReviewSegment.end_time.is_null(True))
                | (ReviewSegment.end_time < before)
            )
        )
        (ReviewSegment.start_time > after)
        & (ReviewSegment.start_time < before)
        & ((ReviewSegment.end_time.is_null(True)) | (ReviewSegment.end_time < before))
    ]

    if cameras != "all":
@ -176,7 +172,6 @@ async def review_summary(
    hour_modifier, minute_modifier, seconds_offset = get_tz_modifiers(params.timezone)
    day_ago = (datetime.datetime.now() - datetime.timedelta(hours=24)).timestamp()
    month_ago = (datetime.datetime.now() - datetime.timedelta(days=30)).timestamp()

    cameras = params.cameras
    labels = params.labels
@ -277,7 +272,7 @@ async def review_summary(
        .get()
    )

    clauses = [(ReviewSegment.start_time > month_ago)]
    clauses = []

    if cameras != "all":
        camera_list = cameras.split(",")
@ -365,7 +360,7 @@ async def review_summary(
                & (UserReviewStatus.user_id == user_id)
            ),
        )
        .where(reduce(operator.and_, clauses))
        .where(reduce(operator.and_, clauses) if clauses else True)
        .group_by(
            (ReviewSegment.start_time + seconds_offset).cast("int") / day_in_seconds
        )
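For context, a small sketch of the clause-combining idiom touched here: filter expressions are ANDed together with functools.reduce, and the new "if clauses else True" guard keeps the WHERE valid when no filters apply. The model below is illustrative, not Frigate's schema:

import operator
from functools import reduce

from peewee import BooleanField, FloatField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")

class Segment(Model):
    start_time = FloatField()
    reviewed = BooleanField(default=False)

    class Meta:
        database = db

db.create_tables([Segment])

clauses = [(Segment.start_time > 100.0), (Segment.reviewed == False)]
query = Segment.select().where(reduce(operator.and_, clauses) if clauses else True)
print(query.count())  # well-formed with or without filter clauses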
@ -55,7 +55,7 @@ from frigate.models import (
    Timeline,
    User,
)
from frigate.object_detection import ObjectDetectProcess
from frigate.object_detection.base import ObjectDetectProcess
from frigate.output.output import output_frames
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.ptz.onvif import OnvifController
@ -699,6 +699,10 @@ class FrigateApp:
            self.audio_process.terminate()
            self.audio_process.join()

        # stop the onvif controller
        if self.onvif_controller:
            self.onvif_controller.close()

        # ensure the capture processes are done
        for camera, metrics in self.camera_metrics.items():
            capture_process = metrics.capture_process
@ -5,7 +5,7 @@ import logging
import os
import threading
from collections import defaultdict
from typing import Callable
from typing import Any, Callable

import cv2
import numpy as np
@ -54,7 +54,7 @@ class CameraState:
        self.ptz_autotracker_thread = ptz_autotracker_thread
        self.prev_enabled = self.camera_config.enabled

    def get_current_frame(self, draw_options={}):
    def get_current_frame(self, draw_options: dict[str, Any] = {}):
        with self.current_frame_lock:
            frame_copy = np.copy(self._current_frame)
            frame_time = self.current_frame_time
@ -77,7 +77,9 @@ class CameraState:
                    thickness = 1
                else:
                    thickness = 2
                    color = self.config.model.colormap[obj["label"]]
                    color = self.config.model.colormap.get(
                        obj["label"], (255, 255, 255)
                    )
            else:
                thickness = 1
                color = (255, 0, 0)
@ -99,7 +101,9 @@ class CameraState:
                    and obj["frame_time"] == frame_time
                ):
                    thickness = 5
                    color = self.config.model.colormap[obj["label"]]
                    color = self.config.model.colormap.get(
                        obj["label"], (255, 255, 255)
                    )

            # debug autotracking zooming - show the zoom factor box
            if (
@ -259,14 +263,37 @@ class CameraState:
                current_detections[id],
            )

            # add initial frame to frame cache
            self.frame_cache[frame_time] = np.copy(current_frame)

            # save initial thumbnail data and best object
            thumbnail_data = {
                "frame_time": frame_time,
                "box": new_obj.obj_data["box"],
                "area": new_obj.obj_data["area"],
                "region": new_obj.obj_data["region"],
                "score": new_obj.obj_data["score"],
                "attributes": new_obj.obj_data["attributes"],
                "current_estimated_speed": 0,
                "velocity_angle": 0,
                "path_data": [],
                "recognized_license_plate": None,
                "recognized_license_plate_score": None,
            }
            new_obj.thumbnail_data = thumbnail_data
            tracked_objects[id].thumbnail_data = thumbnail_data
            self.best_objects[new_obj.obj_data["label"]] = new_obj

            # call event handlers
            for c in self.callbacks["start"]:
                c(self.name, new_obj, frame_name)

        for id in updated_ids:
            updated_obj = tracked_objects[id]
            thumb_update, significant_update, autotracker_update = updated_obj.update(
                frame_time, current_detections[id], current_frame is not None
            thumb_update, significant_update, path_update, autotracker_update = (
                updated_obj.update(
                    frame_time, current_detections[id], current_frame is not None
                )
            )

            if autotracker_update or significant_update:
@ -285,11 +312,16 @@ class CameraState:

            # if it has been more than 5 seconds since the last thumb update
            # and the last update is greater than the last publish or
            # the object has changed significantly
            # the object has changed significantly or
            # the object moved enough to update the path
            if (
                frame_time - updated_obj.last_published > 5
                and updated_obj.last_updated > updated_obj.last_published
            ) or significant_update:
                (
                    frame_time - updated_obj.last_published > 5
                    and updated_obj.last_updated > updated_obj.last_published
                )
                or significant_update
                or path_update
            ):
                # call event handlers
                for c in self.callbacks["update"]:
                    c(self.name, updated_obj, frame_name)
@ -306,7 +338,6 @@ class CameraState:
        # TODO: can i switch to looking this up and only changing when an event ends?
        # maintain best objects
        camera_activity: dict[str, list[any]] = {
            "enabled": True,
            "motion": len(motion_boxes) > 0,
            "objects": [],
        }
@ -410,9 +441,13 @@ class CameraState:
        self.previous_frame_id = frame_name

    def save_manual_event_image(
        self, event_id: str, label: str, draw: dict[str, list[dict]]
        self,
        frame: np.ndarray | None,
        event_id: str,
        label: str,
        draw: dict[str, list[dict]],
    ) -> None:
        img_frame = self.get_current_frame()
        img_frame = frame if frame is not None else self.get_current_frame()

        # write clean snapshot if enabled
        if self.camera_config.snapshots.clean_copy:
@ -458,9 +493,9 @@ class CameraState:
        # create thumbnail with max height of 175 and save
        width = int(175 * img_frame.shape[1] / img_frame.shape[0])
        thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA)
        cv2.imwrite(
            os.path.join(THUMB_DIR, self.camera_config.name, f"{event_id}.webp"), thumb
        )
        thumb_path = os.path.join(THUMB_DIR, self.camera_config.name)
        os.makedirs(thumb_path, exist_ok=True)
        cv2.imwrite(os.path.join(thumb_path, f"{event_id}.webp"), thumb)

    def shutdown(self) -> None:
        for obj in self.tracked_objects.values():
@ -11,6 +11,7 @@ class DetectionTypeEnum(str, Enum):
    api = "api"
    video = "video"
    audio = "audio"
    lpr = "lpr"


class DetectionPublisher(Publisher):
@ -135,6 +135,7 @@ class Dispatcher:
                        "type": TrackedObjectUpdateTypesEnum.description,
                        "id": event.id,
                        "description": event.data["description"],
                        "camera": event.camera,
                    }
                ),
            )
@ -164,8 +165,12 @@ class Dispatcher:

        def handle_on_connect():
            camera_status = self.camera_activity.last_camera_activity.copy()
            cameras_with_status = camera_status.keys()

            for camera in self.config.cameras.keys():
                if camera not in cameras_with_status:
                    camera_status[camera] = {}

            for camera in camera_status.keys():
                camera_status[camera]["config"] = {
                    "detect": self.config.cameras[camera].detect.enabled,
                    "enabled": self.config.cameras[camera].enabled,
|
@ -13,9 +13,11 @@ class EmbeddingsRequestEnum(Enum):
|
||||
embed_description = "embed_description"
|
||||
embed_thumbnail = "embed_thumbnail"
|
||||
generate_search = "generate_search"
|
||||
recognize_face = "recognize_face"
|
||||
register_face = "register_face"
|
||||
reprocess_face = "reprocess_face"
|
||||
reprocess_plate = "reprocess_plate"
|
||||
reindex = "reindex"
|
||||
|
||||
|
||||
class EmbeddingsResponder:
|
||||
|
@ -15,6 +15,8 @@ class EventMetadataTypeEnum(str, Enum):
    regenerate_description = "regenerate_description"
    sub_label = "sub_label"
    recognized_license_plate = "recognized_license_plate"
    lpr_event_create = "lpr_event_create"
    save_lpr_snapshot = "save_lpr_snapshot"


class EventMetadataPublisher(Publisher):
@ -37,9 +39,6 @@ class EventMetadataSubscriber(Subscriber):
    def __init__(self, topic: EventMetadataTypeEnum) -> None:
        super().__init__(topic.value)

    def check_for_update(self, timeout: float = 1) -> tuple | None:
        return super().check_for_update(timeout)

    def _return_object(self, topic: str, payload: tuple) -> tuple:
        if payload is None:
            return (None, None)
@ -213,6 +213,8 @@ class MqttClient(Communicator):  # type: ignore[misc]
            "motion_contour_area",
            "birdseye",
            "birdseye_mode",
            "review_alerts",
            "review_detections",
        ]

        for name in self.config.cameras.keys():
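Assuming Frigate's existing per-camera command-topic convention (topic_prefix/camera_name/setting/set, which this diff does not show), the two new entries should surface toggles along these lines; the camera name is a placeholder:

# Hypothetical topic construction mirroring the existing camera toggles.
for name in ("review_alerts", "review_detections"):
    print(f"frigate/front_door/{name}/set")    # command topic
    print(f"frigate/front_door/{name}/state")  # state topic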
Some files were not shown because too many files have changed in this diff.