Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-07-26 13:47:03 +02:00)

Commit 8b5a100530: Merge remote-tracking branch 'upstream/dev' into dev
@@ -44,6 +44,7 @@ codeproject
 colormap
 colorspace
 comms
 cooldown
 coro
 ctypeslib
+CUDA
@@ -8,9 +8,25 @@
   "overrideCommand": false,
   "remoteUser": "vscode",
   "features": {
-    "ghcr.io/devcontainers/features/common-utils:1": {}
+    "ghcr.io/devcontainers/features/common-utils:2": {}
+    // Uncomment the following lines to use ONNX Runtime with CUDA support
+    // "ghcr.io/devcontainers/features/nvidia-cuda:1": {
+    //   "installCudnn": true,
+    //   "installNvtx": true,
+    //   "installToolkit": true,
+    //   "cudaVersion": "12.5",
+    //   "cudnnVersion": "9.4.0.58"
+    // },
+    // "./features/onnxruntime-gpu": {}
   },
-  "forwardPorts": [8971, 5000, 5001, 5173, 8554, 8555],
+  "forwardPorts": [
+    8971,
+    5000,
+    5001,
+    5173,
+    8554,
+    8555
+  ],
   "portsAttributes": {
     "8971": {
       "label": "External NGINX",
@@ -64,10 +80,18 @@
       "editor.formatOnType": true,
       "python.testing.pytestEnabled": false,
       "python.testing.unittestEnabled": true,
-      "python.testing.unittestArgs": ["-v", "-s", "./frigate/test"],
+      "python.testing.unittestArgs": [
+        "-v",
+        "-s",
+        "./frigate/test"
+      ],
       "files.trimTrailingWhitespace": true,
-      "eslint.workingDirectories": ["./web"],
-      "isort.args": ["--settings-path=./pyproject.toml"],
+      "eslint.workingDirectories": [
+        "./web"
+      ],
+      "isort.args": [
+        "--settings-path=./pyproject.toml"
+      ],
       "[python]": {
         "editor.defaultFormatter": "charliermarsh.ruff",
         "editor.formatOnSave": true,
@@ -86,9 +110,16 @@
       ],
       "editor.tabSize": 2
     },
-    "cSpell.ignoreWords": ["rtmp"],
-    "cSpell.words": ["preact", "astype", "hwaccel", "mqtt"]
+    "cSpell.ignoreWords": [
+      "rtmp"
+    ],
+    "cSpell.words": [
+      "preact",
+      "astype",
+      "hwaccel",
+      "mqtt"
+    ]
        }
      }
    }
  }
}
@@ -0,0 +1,22 @@
+{
+  "id": "onnxruntime-gpu",
+  "version": "0.0.1",
+  "name": "ONNX Runtime GPU (Nvidia)",
+  "description": "Installs ONNX Runtime for Nvidia GPUs.",
+  "documentationURL": "",
+  "options": {
+    "version": {
+      "type": "string",
+      "proposals": [
+        "latest",
+        "1.20.1",
+        "1.20.0"
+      ],
+      "default": "latest",
+      "description": "Version of ONNX Runtime to install"
+    }
+  },
+  "installsAfter": [
+    "ghcr.io/devcontainers/features/nvidia-cuda"
+  ]
+}

.devcontainer/features/onnxruntime-gpu/install.sh (new file, 15 lines)
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+set -e
+
+VERSION=${VERSION}
+
+python3 -m pip config set global.break-system-packages true
+# if VERSION == "latest" or VERSION is empty, install the latest version
+if [ "$VERSION" == "latest" ] || [ -z "$VERSION" ]; then
+    python3 -m pip install onnxruntime-gpu
+else
+    python3 -m pip install onnxruntime-gpu==$VERSION
+fi
+
+echo "Done!"
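To consume this local feature, the commented entry in `.devcontainer/devcontainer.json` above can be uncommented; pinning a specific ONNX Runtime release is then a matter of passing the `version` option declared in the feature metadata. A sketch (not part of this diff):

```jsonc
"features": {
  "./features/onnxruntime-gpu": { "version": "1.20.1" }
}
```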
@@ -19,7 +19,7 @@ sudo chown -R "$(id -u):$(id -g)" /media/frigate
 # When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
 # s6 service file. For dev, where frigate is started from an interactive
 # shell, we define it in .bashrc instead.
-echo 'export LIBAVFORMAT_VERSION_MAJOR=$(/usr/lib/ffmpeg/7.0/bin/ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
+echo 'export LIBAVFORMAT_VERSION_MAJOR=$("$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)" -version | grep -Po "libavformat\W+\K\d+")' >> "$HOME/.bashrc"

 make version
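For context, `grep -Po "libavformat\W+\K\d+"` in both variants pulls the major version out of ffmpeg's version banner (a line like `libavformat 61. 1.100 / 61. 1.100`); `\W+\K` skips the separator and keeps only the leading number. A quick illustration, with output shown for an ffmpeg 7.0 build:

```bash
$ ffmpeg -version | grep -Po "libavformat\W+\K\d+"
61
```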
.github/pull_request_template.md (vendored, 6 lines changed)

@@ -1,5 +1,11 @@
 ## Proposed change
 <!--
+Thank you!
+
+If you're introducing a new feature or significantly refactoring existing functionality,
+we encourage you to start a discussion first. This helps ensure your idea aligns with
+Frigate's development goals.
+
 Describe what this pull request does and how it will benefit users of Frigate.
 Please describe in detail any considerations, breaking changes, etc. that are
 made in this pull request.
.github/workflows/ci.yml (vendored, 63 lines changed)

@@ -42,7 +42,7 @@ jobs:
          tags: ${{ steps.setup.outputs.image-name }}-amd64
          cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
  arm64_build:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-22.04-arm
    name: ARM Build
    steps:
      - name: Check out code
@@ -76,36 +76,6 @@ jobs:
            rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
-  jetson_jp4_build:
-    if: false
-    runs-on: ubuntu-22.04
-    name: Jetson Jetpack 4
-    steps:
-      - name: Check out code
-        uses: actions/checkout@v4
-        with:
-          persist-credentials: false
-      - name: Set up QEMU and Buildx
-        id: setup
-        uses: ./.github/actions/setup
-        with:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-      - name: Build and push TensorRT (Jetson, Jetpack 4)
-        env:
-          ARCH: arm64
-          BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
-          SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
-          TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
-        uses: docker/bake-action@v6
-        with:
-          source: .
-          push: true
-          targets: tensorrt
-          files: docker/tensorrt/trt.hcl
-          set: |
-            tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp4
-            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4
-            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max
  jetson_jp5_build:
    if: false
    runs-on: ubuntu-22.04
@@ -136,6 +106,35 @@ jobs:
            tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
+  jetson_jp6_build:
+    runs-on: ubuntu-22.04-arm
+    name: Jetson Jetpack 6
+    steps:
+      - name: Check out code
+        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
+      - name: Set up QEMU and Buildx
+        id: setup
+        uses: ./.github/actions/setup
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Build and push TensorRT (Jetson, Jetpack 6)
+        env:
+          ARCH: arm64
+          BASE_IMAGE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
+          SLIM_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
+          TRT_BASE: nvcr.io/nvidia/tensorrt:23.12-py3-igpu
+        uses: docker/bake-action@v6
+        with:
+          source: .
+          push: true
+          targets: tensorrt
+          files: docker/tensorrt/trt.hcl
+          set: |
+            tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp6
+            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp6
+            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp6,mode=max
  amd64_extra_builds:
    runs-on: ubuntu-22.04
    name: AMD64 Extra Build
@@ -178,7 +177,7 @@ jobs:
            rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
            *.cache-from=type=gha
  arm64_extra_builds:
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-22.04-arm
    name: ARM Extra Build
    needs:
      - arm64_build

.github/workflows/pull_request.yml (vendored, 1 line changed)
@@ -4,6 +4,7 @@ on:
  pull_request:
    paths-ignore:
      - "docs/**"
+      - ".github/**"

env:
  DEFAULT_PYTHON: 3.11

.github/workflows/release.yml (vendored, 4 lines changed)
@@ -39,14 +39,14 @@ jobs:
          STABLE_TAG=${BASE}:stable
          PULL_TAG=${BASE}:${BUILD_TAG}
          docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
-          for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
+          for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
            docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
          done

          # stable tag
          if [[ "${BUILD_TYPE}" == "stable" ]]; then
            docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
-            for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
+            for variant in standard-arm64 tensorrt tensorrt-jp5 tensorrt-jp6 rk h8l rocm; do
              docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
            done
          fi
@@ -38,4 +38,4 @@ services:
    container_name: mqtt
    image: eclipse-mosquitto:1.6
    ports:
-      - "1883:1883"
+      - "1883:1883"
@@ -3,14 +3,29 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive

+# Globally set pip break-system-packages option to avoid having to specify it every time
+ARG PIP_BREAK_SYSTEM_PACKAGES=1
+
 ARG BASE_IMAGE=debian:12
 ARG SLIM_BASE=debian:12-slim

+# A hook that allows us to inject commands right after the base images
+ARG BASE_HOOK=
+
 FROM ${BASE_IMAGE} AS base
+ARG PIP_BREAK_SYSTEM_PACKAGES
+ARG BASE_HOOK
+
+RUN sh -c "$BASE_HOOK"

 FROM --platform=${BUILDPLATFORM} debian:12 AS base_host
+ARG PIP_BREAK_SYSTEM_PACKAGES

 FROM ${SLIM_BASE} AS slim-base
+ARG PIP_BREAK_SYSTEM_PACKAGES
+ARG BASE_HOOK
+
+RUN sh -c "$BASE_HOOK"

 FROM slim-base AS wget
 ARG DEBIAN_FRONTEND
@@ -66,8 +81,8 @@ COPY docker/main/requirements-ov.txt /requirements-ov.txt
 RUN apt-get -qq update \
     && apt-get -qq install -y wget python3 python3-dev python3-distutils gcc pkg-config libhdf5-dev \
     && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
-    && python3 get-pip.py "pip" --break-system-packages \
-    && pip install --break-system-packages -r /requirements-ov.txt
+    && python3 get-pip.py "pip" \
+    && pip install -r /requirements-ov.txt

 # Get OpenVino Model
 RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
@@ -142,8 +157,8 @@ RUN apt-get -qq update \
         apt-transport-https wget \
     && apt-get -qq update \
     && apt-get -qq install -y \
-        python3 \
-        python3-dev \
+        python3.11 \
+        python3.11-dev \
         # opencv dependencies
         build-essential cmake git pkg-config libgtk-3-dev \
         libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
@@ -157,11 +172,13 @@ RUN apt-get -qq update \
         gcc gfortran libopenblas-dev liblapack-dev && \
     rm -rf /var/lib/apt/lists/*

+RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
+
 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
-    && python3 get-pip.py "pip" --break-system-packages
+    && python3 get-pip.py "pip"

 COPY docker/main/requirements.txt /requirements.txt
-RUN pip3 install -r /requirements.txt --break-system-packages
+RUN pip3 install -r /requirements.txt

 # Build pysqlite3 from source
 COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
@@ -214,9 +231,14 @@ ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PA
 RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
     /deps/install_deps.sh

+ENV DEFAULT_FFMPEG_VERSION="7.0"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:5.0"
+
+RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
+    && python3 get-pip.py "pip"
+
 RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \
-    python3 -m pip install --upgrade pip --break-system-packages && \
-    pip3 install -U /deps/wheels/*.whl --break-system-packages
+    pip3 install -U /deps/wheels/*.whl

 COPY --from=deps-rootfs / /
@@ -263,7 +285,7 @@ RUN apt-get update \
     && rm -rf /var/lib/apt/lists/*

 RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \
-    pip3 install -r requirements-dev.txt --break-system-packages
+    pip3 install -r requirements-dev.txt

 HEALTHCHECK NONE
@@ -6,13 +6,13 @@ apt-get -qq update
 apt-get -qq install --no-install-recommends -y \
     apt-transport-https \
     ca-certificates \
     gnupg \
     wget \
     lbzip2 \
     procps vainfo \
     unzip locales tzdata libxml2 xz-utils \
-    python3 \
     python3-pip \
+    python3.11 \
     curl \
     lsof \
     jq \
@@ -21,47 +21,38 @@ apt-get -qq install --no-install-recommends -y \
     libglib2.0-0 \
     libusb-1.0.0

+update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1
+
 mkdir -p -m 600 /root/.gnupg

 # install coral runtime
-wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.0-1/libedgetpu1-max_16.0tf2.17.0-1.bookworm_${TARGETARCH}.deb"
+wget -q -O /tmp/libedgetpu1-max.deb "https://github.com/feranick/libedgetpu/releases/download/16.0TF2.17.1-1/libedgetpu1-max_16.0tf2.17.1-1.bookworm_${TARGETARCH}.deb"
 unset DEBIAN_FRONTEND
 yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
 rm /tmp/libedgetpu1-max.deb

-# install python3 & tflite runtime
-if [[ "${TARGETARCH}" == "amd64" ]]; then
-    pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_x86_64.whl
-    pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_x86_64.whl
-fi
-
-if [[ "${TARGETARCH}" == "arm64" ]]; then
-    pip3 install --break-system-packages https://github.com/feranick/TFlite-builds/releases/download/v2.17.0/tflite_runtime-2.17.0-cp311-cp311-linux_aarch64.whl
-    pip3 install --break-system-packages https://github.com/feranick/pycoral/releases/download/2.0.2TF2.17.0/pycoral-2.0.2-cp311-cp311-linux_aarch64.whl
-fi
-
-# btbn-ffmpeg -> amd64
+# ffmpeg -> amd64
 if [[ "${TARGETARCH}" == "amd64" ]]; then
     mkdir -p /usr/lib/ffmpeg/5.0
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
+    rm -rf ffmpeg.tar.xz
     mkdir -p /usr/lib/ffmpeg/7.0
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 amd64/bin/ffmpeg amd64/bin/ffprobe
+    rm -rf ffmpeg.tar.xz
 fi

 # ffmpeg -> arm64
 if [[ "${TARGETARCH}" == "arm64" ]]; then
     mkdir -p /usr/lib/ffmpeg/5.0
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
+    rm -f ffmpeg.tar.xz
     mkdir -p /usr/lib/ffmpeg/7.0
-    wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
-    tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
-    rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
+    wget -qO ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
+    tar -xf ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1 arm64/bin/ffmpeg arm64/bin/ffprobe
+    rm -f ffmpeg.tar.xz
 fi

 # arch specific packages
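The net effect of the two blocks above is a per-version directory layout that the new `DEFAULT_FFMPEG_VERSION`/`get_ffmpeg_path.py` machinery selects from (derived from the mkdir/tar lines; the `rpi` and `jetson` variants added elsewhere in this commit follow the same pattern):

```
/usr/lib/ffmpeg/5.0/bin/ffmpeg
/usr/lib/ffmpeg/5.0/bin/ffprobe
/usr/lib/ffmpeg/7.0/bin/ffmpeg
/usr/lib/ffmpeg/7.0/bin/ffprobe
```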
@@ -54,7 +54,6 @@ pywebpush == 2.0.*
 pyclipper == 1.3.*
 shapely == 2.0.*
 Levenshtein==0.26.*
-prometheus-client == 0.21.*
 # HailoRT Wheels
 appdirs==1.4.*
 argcomplete==2.0.*
@@ -68,3 +67,7 @@ netaddr==0.8.*
 netifaces==0.10.*
 verboselogs==1.7.*
 virtualenv==20.17.*
+prometheus-client == 0.21.*
+# TFLite
+tflite_runtime @ https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64'
+tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64'
@@ -43,8 +43,10 @@ function migrate_db_path() {
 }

 function set_libva_version() {
-    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
-    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+    local ffmpeg_path
+    ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
+    export LIBAVFORMAT_VERSION_MAJOR
 }

 echo "[INFO] Preparing Frigate..."
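The split of `local` and the assignment follows the usual ShellCheck SC2155 reasoning: `local x=$(cmd)` always returns the status of `local`, masking a failure of `cmd`, while assigning on a separate line preserves it. A minimal illustration (not from this commit):

```bash
f() {
    local a=$(false)   # $? is 0 here: the failure of `false` is masked by `local`
    echo "masked: $?"
    local b
    b=$(false)         # $? is 1 here: the assignment's exit status survives
    echo "visible: $?"
}
```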
@@ -44,10 +44,14 @@ function get_ip_and_port_from_supervisor() {
 }

 function set_libva_version() {
-    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
-    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+    local ffmpeg_path
+    ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    LIBAVFORMAT_VERSION_MAJOR=$("$ffmpeg_path" -version | grep -Po "libavformat\W+\K\d+")
+    export LIBAVFORMAT_VERSION_MAJOR
 }

+set_libva_version
+
 if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
     echo "[INFO] Removing stale config from last run..."
     rm /dev/shm/go2rtc.yaml
@@ -66,8 +70,6 @@ else
     echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
 fi

-set_libva_version
-
 readonly config_path="/config"

 if [[ -x "${config_path}/go2rtc" ]]; then
@@ -1,6 +1,5 @@
 import json
 import os
-import shutil
 import sys

 from ruamel.yaml import YAML
@@ -35,10 +34,7 @@ except FileNotFoundError:

 path = config.get("ffmpeg", {}).get("path", "default")
 if path == "default":
-    if shutil.which("ffmpeg") is None:
-        print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
-    else:
-        print("ffmpeg")
+    print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
 elif path in INCLUDED_FFMPEG_VERSIONS:
     print(f"/usr/lib/ffmpeg/{path}/bin/ffmpeg")
 else:
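In other words, after this change `default` always resolves to the bundled build instead of preferring whatever `ffmpeg` happens to be on `PATH`. A usage sketch against the code above (the config value is illustrative):

```yaml
ffmpeg:
  path: "5.0"   # a version in INCLUDED_FFMPEG_VERSIONS; the script prints /usr/lib/ffmpeg/5.0/bin/ffmpeg
```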
@@ -2,7 +2,6 @@

 import json
 import os
-import shutil
 import sys
 from pathlib import Path

@@ -13,6 +12,7 @@ from frigate.const import (
     BIRDSEYE_PIPE,
     DEFAULT_FFMPEG_VERSION,
     INCLUDED_FFMPEG_VERSIONS,
+    LIBAVFORMAT_VERSION_MAJOR,
 )
 from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode

@@ -115,10 +115,7 @@ else:
 # ensure ffmpeg path is set correctly
 path = config.get("ffmpeg", {}).get("path", "default")
 if path == "default":
-    if shutil.which("ffmpeg") is None:
-        ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
-    else:
-        ffmpeg_path = "ffmpeg"
+    ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
 elif path in INCLUDED_FFMPEG_VERSIONS:
     ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg"
 else:
@@ -130,14 +127,12 @@ elif go2rtc_config["ffmpeg"].get("bin") is None:
     go2rtc_config["ffmpeg"]["bin"] = ffmpeg_path

 # need to replace ffmpeg command when using ffmpeg4
-if int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") < 59:
-    if go2rtc_config["ffmpeg"].get("rtsp") is None:
-        go2rtc_config["ffmpeg"]["rtsp"] = (
-            "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
-        )
-else:
+if LIBAVFORMAT_VERSION_MAJOR < 59:
+    rtsp_args = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
     if go2rtc_config.get("ffmpeg") is None:
-        go2rtc_config["ffmpeg"] = {"path": ""}
+        go2rtc_config["ffmpeg"] = {"rtsp": rtsp_args}
+    elif go2rtc_config["ffmpeg"].get("rtsp") is None:
+        go2rtc_config["ffmpeg"]["rtsp"] = rtsp_args

 for name in go2rtc_config.get("streams", {}):
     stream = go2rtc_config["streams"][name]
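For reference, when an ffmpeg 4.x build is detected (`LIBAVFORMAT_VERSION_MAJOR < 59`) and the user supplied no `ffmpeg` section, the code above leaves this in the generated go2rtc config:

```yaml
ffmpeg:
  rtsp: "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
```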
@@ -3,20 +3,23 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive

+# Globally set pip break-system-packages option to avoid having to specify it every time
+ARG PIP_BREAK_SYSTEM_PACKAGES=1
+
 FROM wheels as rk-wheels
 COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
 COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt
 RUN sed -i "/https:\/\//d" /requirements-wheels.txt
 RUN sed -i "/onnxruntime/d" /requirements-wheels.txt
+RUN python3 -m pip config set global.break-system-packages true
 RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt
 RUN rm -rf /rk-wheels/opencv_python-*

 FROM deps AS rk-frigate
 ARG TARGETARCH
+ARG PIP_BREAK_SYSTEM_PACKAGES

 RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
-    pip3 install --no-deps -U /deps/rk-wheels/*.whl --break-system-packages
+    pip3 install --no-deps -U /deps/rk-wheels/*.whl

 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
@@ -25,8 +28,7 @@ COPY docker/rockchip/conv2rknn.py /opt/conv2rknn.py

 ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.3.0/librknnrt.so /usr/lib/

-RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
-RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
 ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
 ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/
-ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"
+ENV DEFAULT_FFMPEG_VERSION="6.0"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}"
@@ -6,11 +6,12 @@ ARG DEBIAN_FRONTEND=noninteractive
 FROM deps AS rpi-deps
 ARG TARGETARCH

-RUN rm -rf /usr/lib/btbn-ffmpeg/
-
 # Install dependencies
 RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \
     /deps/install_deps.sh

+ENV DEFAULT_FFMPEG_VERSION="rpi"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}"
+
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
@@ -28,4 +28,7 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then
     echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bookworm main" | tee /etc/apt/sources.list.d/raspi.list
     apt-get -qq update
     apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg
+    mkdir -p /usr/lib/ffmpeg/rpi/bin
+    ln -svf /usr/bin/ffmpeg /usr/lib/ffmpeg/rpi/bin/ffmpeg
+    ln -svf /usr/bin/ffprobe /usr/lib/ffmpeg/rpi/bin/ffprobe
 fi
@@ -3,22 +3,16 @@
 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable
 ARG DEBIAN_FRONTEND=noninteractive

-# Make this a separate target so it can be built/cached optionally
-FROM wheels as trt-wheels
-ARG DEBIAN_FRONTEND
-ARG TARGETARCH
-RUN python3 -m pip config set global.break-system-packages true
-
-# Add TensorRT wheels to another folder
-COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
-RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
+# Globally set pip break-system-packages option to avoid having to specify it every time
+ARG PIP_BREAK_SYSTEM_PACKAGES=1

 FROM tensorrt-base AS frigate-tensorrt
+ARG PIP_BREAK_SYSTEM_PACKAGES
 ENV TRT_VER=8.6.1
-RUN python3 -m pip config set global.break-system-packages true
-RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
-    pip3 install -U /deps/trt-wheels/*.whl --break-system-packages && \
-    ldconfig
+
+# Install TensorRT wheels
+COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
+RUN pip3 install -U -r /requirements-tensorrt.txt && ldconfig

 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
@@ -32,4 +26,4 @@ COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
-    pip3 install -U /deps/trt-wheels/*.whl --break-system-packages
+    pip3 install -U /deps/trt-wheels/*.whl
@@ -7,20 +7,25 @@ ARG BASE_IMAGE
 FROM ${BASE_IMAGE} AS build-wheels
 ARG DEBIAN_FRONTEND

+# Add deadsnakes PPA for python3.11
+RUN apt-get -qq update && \
+    apt-get -qq install -y --no-install-recommends \
+    software-properties-common \
+    && add-apt-repository ppa:deadsnakes/ppa
+
 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \
     && apt-get -qq install -y --no-install-recommends \
-    python3.9 python3.9-dev \
+    python3.11 python3.11-dev \
     wget build-essential cmake git \
     && rm -rf /var/lib/apt/lists/*

-# Ensure python3 defaults to python3.9
-RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
+# Ensure python3 defaults to python3.11
+RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1

 RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
     && python3 get-pip.py "pip"

 FROM build-wheels AS trt-wheels
 ARG DEBIAN_FRONTEND
 ARG TARGETARCH
@@ -41,11 +46,12 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
     && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh

 COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
-ADD https://nvidia.box.com/shared/static/psl23iw3bh7hlgku0mjo1xekxpego3e3.whl /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
+# See https://elinux.org/Jetson_Zoo#ONNX_Runtime
+ADD https://nvidia.box.com/shared/static/9yvw05k6u343qfnkhdv2x6xhygze0aq1.whl /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl

 RUN pip3 uninstall -y onnxruntime-openvino \
     && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
-    && pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp311-cp311-linux_aarch64.whl
+    && pip3 install --no-deps /tmp/onnxruntime_gpu-1.19.0-cp311-cp311-linux_aarch64.whl

 FROM build-wheels AS trt-model-wheels
 ARG DEBIAN_FRONTEND
@@ -67,11 +73,18 @@ RUN --mount=type=bind,source=docker/tensorrt/build_jetson_ffmpeg.sh,target=/deps
 # Frigate w/ TensorRT for NVIDIA Jetson platforms
 FROM tensorrt-base AS frigate-tensorrt
 RUN apt-get update \
-    && apt-get install -y python-is-python3 libprotobuf17 \
+    && apt-get install -y python-is-python3 libprotobuf23 \
     && rm -rf /var/lib/apt/lists/*

-RUN rm -rf /usr/lib/btbn-ffmpeg/
 COPY --from=jetson-ffmpeg /rootfs /
+ENV DEFAULT_FFMPEG_VERSION="jetson"
+ENV INCLUDED_FFMPEG_VERSIONS="${DEFAULT_FFMPEG_VERSION}:${INCLUDED_FFMPEG_VERSIONS}"
+
+# ffmpeg runtime dependencies
+RUN apt-get -qq update \
+    && apt-get -qq install -y --no-install-recommends \
+    libx264-163 libx265-199 libegl1 \
+    && rm -rf /var/lib/apt/lists/*

 COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
@@ -81,3 +94,6 @@ RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
+
+# Fixes "Error importing detector runtime: /usr/lib/aarch64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block"
+ENV LD_PRELOAD /usr/lib/aarch64-linux-gnu/libstdc++.so.6
@@ -8,6 +8,7 @@ ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.12-py3
 # Build TensorRT-specific library
 FROM ${TRT_BASE} AS trt-deps

+ARG TARGETARCH
 ARG COMPUTE_LEVEL

 RUN apt-get update \
@@ -16,15 +17,26 @@ RUN apt-get update \
 RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \
     /tensorrt_libyolo.sh

+# COPY required individual CUDA deps
+RUN mkdir -p /usr/local/cuda-deps
+RUN if [ "$TARGETARCH" = "amd64" ]; then \
+        cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libcurand.so.* /usr/local/cuda-deps/ && \
+        cp /usr/local/cuda-12.3/targets/x86_64-linux/lib/libnvrtc.so.* /usr/local/cuda-deps/ ; \
+    fi
+
 # Frigate w/ TensorRT Support as separate image
 FROM deps AS tensorrt-base

 #Disable S6 Global timeout
 ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

 # COPY TensorRT Model Generation Deps
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
-COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
+
+# COPY Individual CUDA deps folder
+COPY --from=trt-deps /usr/local/cuda-deps /usr/local/cuda

 COPY docker/tensorrt/detector/rootfs/ /
 ENV YOLO_MODELS=""
@@ -5,7 +5,7 @@

 set -euxo pipefail

-INSTALL_PREFIX=/rootfs/usr/local
+INSTALL_PREFIX=/rootfs/usr/lib/ffmpeg/jetson

 apt-get -qq update
 apt-get -qq install -y --no-install-recommends build-essential ccache clang cmake pkg-config
@@ -14,14 +14,27 @@ apt-get -qq install -y --no-install-recommends libx264-dev libx265-dev
 pushd /tmp

 # Install libnvmpi to enable nvmpi decoders (h264_nvmpi, hevc_nvmpi)
-if [ -e /usr/local/cuda-10.2 ]; then
+if [ -e /usr/local/cuda-12 ]; then
+    # assume Jetpack 6.2
+    apt-key adv --fetch-key https://repo.download.nvidia.com/jetson/jetson-ota-public.asc
+    echo "deb https://repo.download.nvidia.com/jetson/common r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
+    echo "deb https://repo.download.nvidia.com/jetson/t234 r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
+    echo "deb https://repo.download.nvidia.com/jetson/ffmpeg r36.4 main" >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
+
+    mkdir -p /opt/nvidia/l4t-packages/
+    touch /opt/nvidia/l4t-packages/.nv-l4t-disable-boot-fw-update-in-preinstall
+
+    apt-get update
+    apt-get -qq install -y --no-install-recommends -o Dpkg::Options::="--force-confold" nvidia-l4t-jetson-multimedia-api
+elif [ -e /usr/local/cuda-10.2 ]; then
     # assume Jetpack 4.X
     wget -q https://developer.nvidia.com/embedded/L4T/r32_Release_v5.0/T186/Jetson_Multimedia_API_R32.5.0_aarch64.tbz2 -O jetson_multimedia_api.tbz2
+    tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2
 else
     # assume Jetpack 5.X
     wget -q https://developer.nvidia.com/downloads/embedded/l4t/r35_release_v3.1/release/jetson_multimedia_api_r35.3.1_aarch64.tbz2 -O jetson_multimedia_api.tbz2
+    tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2
 fi
-tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2

 wget -q https://github.com/AndBobsYourUncle/jetson-ffmpeg/archive/9c17b09.zip -O jetson-ffmpeg.zip
 unzip jetson-ffmpeg.zip && rm jetson-ffmpeg.zip && mv jetson-ffmpeg-* jetson-ffmpeg && cd jetson-ffmpeg
@@ -6,23 +6,23 @@ mkdir -p /trt-wheels

 if [[ "${TARGETARCH}" == "arm64" ]]; then

-    # NVIDIA supplies python-tensorrt for python3.8, but frigate uses python3.9,
+    # NVIDIA supplies python-tensorrt for python3.10, but frigate uses python3.11,
     # so we must build python-tensorrt ourselves.

     # Get python-tensorrt source
-    mkdir /workspace
+    mkdir -p /workspace
     cd /workspace
-    git clone -b ${TENSORRT_VER} https://github.com/NVIDIA/TensorRT.git --depth=1
+    git clone -b release/8.6 https://github.com/NVIDIA/TensorRT.git --depth=1

     # Collect dependencies
     EXT_PATH=/workspace/external && mkdir -p $EXT_PATH
-    pip3 install pybind11 && ln -s /usr/local/lib/python3.9/dist-packages/pybind11 $EXT_PATH/pybind11
-    ln -s /usr/include/python3.9 $EXT_PATH/python3.9
+    pip3 install pybind11 && ln -s /usr/local/lib/python3.11/dist-packages/pybind11 $EXT_PATH/pybind11
+    ln -s /usr/include/python3.11 $EXT_PATH/python3.11
     ln -s /usr/include/aarch64-linux-gnu/NvOnnxParser.h /workspace/TensorRT/parsers/onnx/

     # Build wheel
     cd /workspace/TensorRT/python
-    EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=9 TARGET_ARCHITECTURE=aarch64 /bin/bash ./build.sh
-    mv build/dist/*.whl /trt-wheels/
+    EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=11 TARGET_ARCHITECTURE=aarch64 TENSORRT_MODULE=tensorrt /bin/bash ./build.sh
+    mv build/bindings_wheel/dist/*.whl /trt-wheels/

 fi
@@ -1,5 +1,5 @@
 /usr/local/lib
-/usr/local/cuda/lib64
+/usr/local/cuda
 /usr/local/lib/python3.11/dist-packages/nvidia/cudnn/lib
 /usr/local/lib/python3.11/dist-packages/nvidia/cuda_runtime/lib
 /usr/local/lib/python3.11/dist-packages/nvidia/cublas/lib
@@ -20,7 +20,7 @@ FIRST_MODEL=true
 MODEL_DOWNLOAD=""
 MODEL_CONVERT=""

-if [ -z "$YOLO_MODELS"]; then
+if [ -z "$YOLO_MODELS" ]; then
     echo "tensorrt model preparation disabled"
     exit 0
 fi
@@ -64,7 +64,7 @@ fi
 # order to run libyolo here.
 # On Jetpack 5.0, these libraries are not mounted by the runtime and are supplied by the image.
 if [[ "$(arch)" == "aarch64" ]]; then
-    if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra ]]; then
+    if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra && ! -e /usr/lib/aarch64-linux-gnu/tegra-egl ]]; then
         echo "ERROR: Container must be launched with nvidia runtime"
         exit 1
     elif [[ ! -e /usr/lib/aarch64-linux-gnu/libnvinfer.so.8 ||
@@ -1,14 +1,17 @@
 # NVidia TensorRT Support (amd64 only)
 --extra-index-url 'https://pypi.nvidia.com'
 numpy < 1.24; platform_machine == 'x86_64'
-tensorrt == 8.6.1.*; platform_machine == 'x86_64'
+tensorrt == 8.6.1; platform_machine == 'x86_64'
+tensorrt_bindings == 8.6.1; platform_machine == 'x86_64'
 cuda-python == 11.8.*; platform_machine == 'x86_64'
 cython == 3.0.*; platform_machine == 'x86_64'
 nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
 nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
 nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
 nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
+nvidia-cudnn-cu12 == 9.5.0.*; platform_machine == 'x86_64'
 nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
+nvidia-cufft-cu12==11.*; platform_machine == 'x86_64'
 onnx==1.16.*; platform_machine == 'x86_64'
 onnxruntime-gpu==1.20.*; platform_machine == 'x86_64'
 protobuf==3.20.3; platform_machine == 'x86_64'
@@ -1 +1 @@
-cuda-python == 11.7; platform_machine == 'aarch64'
+cuda-python == 12.6.*; platform_machine == 'aarch64'
@@ -13,13 +13,29 @@ variable "TRT_BASE" {
 variable "COMPUTE_LEVEL" {
   default = ""
 }
+variable "BASE_HOOK" {
+  # Ensure an up-to-date python 3.11 is available in jetson images
+  default = <<EOT
+  if grep -iq \"ubuntu\" /etc/os-release; then
+    . /etc/os-release
+
+    # Add the deadsnakes PPA repository
+    echo "deb https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
+    echo "deb-src https://ppa.launchpadcontent.net/deadsnakes/ppa/ubuntu $VERSION_CODENAME main" >> /etc/apt/sources.list.d/deadsnakes.list
+
+    # Add deadsnakes signing key
+    apt-key adv --keyserver keyserver.ubuntu.com --recv-keys F23C5A6CF475977595C89F51BA6932366A755776
+  fi
+  EOT
+}

 target "_build_args" {
   args = {
     BASE_IMAGE = BASE_IMAGE,
     SLIM_BASE = SLIM_BASE,
     TRT_BASE = TRT_BASE,
-    COMPUTE_LEVEL = COMPUTE_LEVEL
+    COMPUTE_LEVEL = COMPUTE_LEVEL,
+    BASE_HOOK = BASE_HOOK
   }
   platforms = ["linux/${ARCH}"]
 }
@@ -79,7 +95,6 @@ target "tensorrt" {
     wget = "target:wget",
     tensorrt-base = "target:tensorrt-base",
     rootfs = "target:rootfs"
-    wheels = "target:wheels"
   }
   target = "frigate-tensorrt"
   inherits = ["_build_args"]
@@ -1,41 +1,41 @@
 BOARDS += trt

-JETPACK4_BASE ?= timongentzsch/l4t-ubuntu20-opencv:latest # L4T 32.7.1 JetPack 4.6.1
 JETPACK5_BASE ?= nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime # L4T 35.3.1 JetPack 5.1.1
+JETPACK6_BASE ?= nvcr.io/nvidia/tensorrt:23.12-py3-igpu
 X86_DGPU_ARGS := ARCH=amd64 COMPUTE_LEVEL="50 60 70 80 90"
-JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BASE) TRT_BASE=$(JETPACK4_BASE)
 JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE)
+JETPACK6_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK6_BASE) SLIM_BASE=$(JETPACK6_BASE) TRT_BASE=$(JETPACK6_BASE)

 local-trt: version
    $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
        --set tensorrt.tags=frigate:latest-tensorrt \
        --load

-local-trt-jp4: version
-   $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
-       --set tensorrt.tags=frigate:latest-tensorrt-jp4 \
-       --load
-
 local-trt-jp5: version
    $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
        --set tensorrt.tags=frigate:latest-tensorrt-jp5 \
        --load

+local-trt-jp6: version
+   $(JETPACK6_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+       --set tensorrt.tags=frigate:latest-tensorrt-jp6 \
+       --load
+
 build-trt:
    $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
        --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt
-   $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
-       --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4
    $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
        --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5
+   $(JETPACK6_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+       --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp6

 push-trt: build-trt
    $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
        --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt \
        --push
-   $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
-       --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 \
-       --push
    $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
        --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 \
        --push
+   $(JETPACK6_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
+       --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp6 \
+       --push
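With these targets in place, building and loading a local Jetpack 6 image comes down to the following (per the Makefile above; the tag it produces is `frigate:latest-tensorrt-jp6`):

```bash
make local-trt-jp6
```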
@@ -37,7 +37,7 @@ See [the go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#modul
 ```yaml
 go2rtc:
   streams:
-    ...
+    # ...
   log:
     exec: trace
 ```
@@ -176,15 +176,13 @@ listen [::]:5000 ipv6only=off;

 ### Custom ffmpeg build

-Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, statically built ffmpeg binary can be downloaded to /config and used.
+Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, statically built `ffmpeg` and `ffprobe` binaries can be placed in `/config/custom-ffmpeg/bin` for Frigate to use.

 To do this:

-1. Download your ffmpeg build and uncompress to the Frigate config folder.
-2. Update your docker-compose or docker CLI to include `'/home/appdata/frigate/custom-ffmpeg':'/usr/lib/btbn-ffmpeg':'ro'` in the volume mappings.
-3. Restart Frigate and the custom version will be used if the mapping was done correctly.
-
-NOTE: The folder that is set for the config needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then the `ffmpeg -> path` field should be `/config/custom-ffmpeg/bin`.
+1. Download your ffmpeg build and uncompress it to the `/config/custom-ffmpeg` folder. Verify that both the `ffmpeg` and `ffprobe` binaries are located in `/config/custom-ffmpeg/bin`.
+2. Update the `ffmpeg.path` in your Frigate config to `/config/custom-ffmpeg`.
+3. Restart Frigate and the custom version will be used if the steps above were done correctly.
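Step 2 then amounts to a single config entry (a minimal sketch, using the path from the steps above):

```yaml
ffmpeg:
  path: /config/custom-ffmpeg
```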
 ### Custom go2rtc version

@@ -192,7 +190,7 @@ Frigate currently includes go2rtc v1.9.2, there may be certain cases where you w

 To do this:

-1. Download the go2rtc build to the /config folder.
+1. Download the go2rtc build to the `/config` folder.
 2. Rename the build to `go2rtc`.
 3. Give `go2rtc` execute permission.
 4. Restart Frigate and the custom version will be used, you can verify by checking go2rtc logs.
@@ -5,15 +5,11 @@ title: Face Recognition

 Face recognition allows people to be assigned names and when their face is recognized Frigate will assign the person's name as a sub label. This information is included in the UI, filters, as well as in notifications.

-Frigate has support for FaceNet to create face embeddings, which runs locally. Embeddings are then saved to Frigate's database.
-
-## Minimum System Requirements
-
-Face recognition works by running a large AI model locally on your system. Systems without a GPU will not run Face Recognition reliably or at all.
+Frigate has support for CV2 Local Binary Pattern Face Recognizer to recognize faces, which runs locally. A lightweight face landmark detection model is also used to align faces before running them through the face recognizer.

 ## Configuration

-Face recognition is disabled by default and requires semantic search to be enabled, face recognition must be enabled in your config file before it can be used. Semantic Search and face recognition are global configuration settings.
+Face recognition is disabled by default, face recognition must be enabled in your config file before it can be used. Face recognition is a global configuration setting.

 ```yaml
 face_recognition:
@@ -40,6 +36,7 @@ The accuracy of face recognition is heavily dependent on the quality of data giv
 :::tip

+When choosing images to include in the face training set it is recommended to always follow these recommendations:
 - If it is difficult to make out details in a persons face it will not be helpful in training.
 - Avoid images with under/over-exposure.
 - Avoid blurry / pixelated images.
@@ -56,4 +53,4 @@ Then it is recommended to use the `Face Library` tab in Frigate to select and tr

 ### Step 2 - Expanding The Dataset

-Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
+Once straight-on images are performing well, start choosing slightly off-angle images to include for training. It is important to still choose images where enough face detail is visible to recognize someone.
@@ -7,12 +7,6 @@ Generative AI can be used to automatically generate descriptive text based on th

 Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.

-:::info
-
-Semantic Search must be enabled to use Generative AI.
-
-:::
-
 ## Configuration

 Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
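As a sketch of what enabling one of the native providers looks like globally (the provider and model values here are illustrative assumptions, not part of this diff):

```yaml
genai:
  enabled: True
  provider: gemini
  api_key: "{FRIGATE_GEMINI_API_KEY}"
  model: gemini-1.5-flash
```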
@@ -295,10 +295,8 @@ These instructions were originally based on the [Jellyfin documentation](https:/

 ## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano)

 A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build
-with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 4.6, use the
-`stable-tensorrt-jp4` tagged image, or if your Jetson host is running Jetpack 5.0+, use the `stable-tensorrt-jp5`
-tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform,
-but the image will still allow hardware decoding and tensorrt object detection.
+with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 5.0+ use the `stable-tensorrt-jp5`
+tagged image, or if your Jetson host is running Jetpack 6.0+ use the `stable-tensorrt-jp6` tagged image. Note that the Orin Nano has no video encoder, so frigate will use software encoding on this platform, but the image will still allow hardware decoding and tensorrt object detection.

 You will need to use the image with the nvidia container runtime:
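A minimal docker-compose sketch of that requirement (the service name and image tag are assumptions; adapt to your Jetpack version):

```yaml
services:
  frigate:
    image: ghcr.io/blakeblackshear/frigate:stable-tensorrt-jp6
    runtime: nvidia
```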
@ -3,13 +3,28 @@ id: license_plate_recognition
|
||||
title: License Plate Recognition (LPR)
|
||||
---
|
||||
|
||||
Frigate can recognize license plates on vehicles and automatically add the detected characters as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street with a dedicated LPR camera.
|
||||
Frigate can recognize license plates on vehicles and automatically add the detected characters or recognized name as a `sub_label` to objects that are of type `car`. A common use case may be to read the license plates of cars pulling into a driveway or cars passing by on a street.
|
||||
|
||||
LPR works best when the license plate is clearly visible to the camera. For moving vehicles, Frigate continuously refines the recognition process, keeping the most confident result. However, LPR does not run on stationary vehicles.
|
||||
|
||||
When a plate is recognized, the detected characters or recognized name is:
|
||||
|
||||
- Added as a `sub_label` to the `car` tracked object.
|
||||
- Viewable in the Review Item Details pane in Review and the Tracked Object Details pane in Explore.
|
||||
- Filterable through the More Filters menu in Explore.
|
||||
- Published via the `frigate/events` MQTT topic as a `sub_label` for the tracked object.
|
||||
|
||||
## Model Requirements
|
||||
|
||||
Users running a Frigate+ model (or any custom model that natively detects license plates) should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.
|
||||
|
||||
Users without a model that detects license plates can still run LPR. A small, CPU inference, YOLOv9 license plate detection model will be used instead. You should _not_ define `license_plate` in your list of objects to track.
|
||||
Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that runs on your CPU. In this case, you should _not_ define `license_plate` in your list of objects to track.
|
||||
|
||||
LPR is most effective when the vehicle’s license plate is fully visible to the camera. For moving vehicles, Frigate will attempt to read the plate continuously, refining recognition and keeping the most confident result. LPR will not run on stationary vehicles.
|
||||
:::note
|
||||
|
||||
Frigate needs to first detect a `car` before it can recognize a license plate. If you're using a dedicated LPR camera or have a zoomed-in view, make sure the camera captures enough of the `car` for Frigate to detect it reliably.
|
||||
|
||||
:::
|
||||
|
||||
## Minimum System Requirements
|
||||
|
||||
@ -24,6 +39,10 @@ lpr:
|
||||
enabled: True
|
||||
```
|
||||
|
||||
Ensure that your camera is configured to detect objects of type `car`, and that a car is actually being detected by Frigate. Otherwise, LPR will not run.
|
||||
|
||||
Like the other real-time processors in Frigate, license plate recognition runs on the camera stream defined by the `detect` role in your config. To ensure optimal performance, select a suitable resolution for this stream in your camera's firmware that fits your specific scene and requirements.
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
Fine-tune the LPR feature using these optional parameters:
|
||||
@ -35,17 +54,18 @@ Fine-tune the LPR feature using these optional parameters:
|
||||
- Note: If you are using a Frigate+ model and you set the `threshold` in your objects config for `license_plate` higher than this value, recognition will never run. It's best to ensure these values match, or this `detection_threshold` is lower than your object config `threshold`.
|
||||
- **`min_area`**: Defines the minimum size (in pixels) a license plate must be before recognition runs.
|
||||
- Default: `1000` pixels.
|
||||
- Depending on the resolution of your cameras, you can increase this value to ignore small or distant plates.
|
||||
- Depending on the resolution of your camera's `detect` stream, you can increase this value to ignore small or distant plates.
|
||||
|
||||
### Recognition
|
||||
|
||||
- **`recognition_threshold`**: Recognition confidence score required to add the plate to the object as a sub label.
|
||||
- Default: `0.9`.
|
||||
- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub-label to an object.
|
||||
- **`min_plate_length`**: Specifies the minimum number of characters a detected license plate must have to be added as a sub label to an object.
|
||||
- Use this to filter out short, incomplete, or incorrect detections.
|
||||
- **`format`**: A regular expression defining the expected format of detected plates. Plates that do not match this format will be discarded.
|
||||
- `"^[A-Z]{1,3} [A-Z]{1,2} [0-9]{1,4}$"` matches plates like "B AB 1234" or "M X 7"
|
||||
- `"^[A-Z]{2}[0-9]{2} [A-Z]{3}$"` matches plates like "AB12 XYZ" or "XY68 ABC"
|
||||
- Websites like https://regex101.com/ can help test regular expressions for your plates.
|
||||
|

### Matching

@ -53,9 +73,9 @@ Fine-tune the LPR feature using these optional parameters:

- These labels appear in the UI, filters, and notifications.
- **`match_distance`**: Allows for minor variations (missing/incorrect characters) when matching a detected plate to a known plate.
  - For example, setting `match_distance: 1` allows a plate `ABCDE` to match `ABCBE` or `ABCD`.
  - This parameter will not operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.
  - This parameter will _not_ operate on known plates that are defined as regular expressions. You should define the full string of your plate in `known_plates` in order to use `match_distance`.

### Examples
## Configuration Examples

```yaml
lpr:
@ -69,7 +89,9 @@ lpr:
    Johnny:
      - "J*N-*234" # Matches JHN-1234 and JMN-I234, but also note that "*" matches any number of characters
    Sally:
      - "[S5]LL-1234" # Matches both SLL-1234 and 5LL-1234
      - "[S5]LL 1234" # Matches both SLL 1234 and 5LL 1234
    Work Trucks:
      - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z
```

```yaml
@ -77,12 +99,54 @@ lpr:
  enabled: True
  min_area: 4000 # Run recognition on larger plates only
  recognition_threshold: 0.85
  format: "^[A-Z]{3}-[0-9]{4}$" # Only recognize plates that are three letters, followed by a dash, followed by 4 numbers
  format: "^[A-Z]{2} [A-Z][0-9]{4}$" # Only recognize plates that are two letters, followed by a space, followed by a single letter and 4 numbers
  match_distance: 1 # Allow one character variation in plate matching
  known_plates:
    Delivery Van:
      - "RJK-5678"
      - "UPS-1234"
    Employee Parking:
      - "EMP-[0-9]{3}[A-Z]" # Matches plates like EMP-123A, EMP-456Z
      - "RJ K5678"
      - "UP A1234"
    Supervisor:
      - "MN D3163"
```

## FAQ

### Why isn't my license plate being detected and recognized?

Ensure that:

- Your camera has a clear, well-lit view of the plate.
- The plate is large enough in the image; try adjusting `min_area` or increasing the resolution of your camera's stream.
- A `car` is detected first, as LPR only runs on recognized vehicles.

If you are using a Frigate+ model or a custom model that detects license plates, ensure that `license_plate` is added to your list of objects to track.
If you are using the free model that ships with Frigate, you should _not_ add `license_plate` to the list of objects to track.

### Can I run LPR without detecting `car` objects?

No, Frigate requires a `car` to be detected first before recognizing a license plate.

### How can I improve detection accuracy?

- Use high-quality cameras with good resolution.
- Adjust `detection_threshold` and `recognition_threshold` values.
- Define a `format` regex to filter out invalid detections.

### Does LPR work at night?

Yes, but performance depends on camera quality, lighting, and infrared capabilities. Make sure your camera can capture clear images of plates at night.

### How can I match known plates with minor variations?

Use `match_distance` to allow small character mismatches. Alternatively, define multiple variations in `known_plates`.
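
A short sketch of both approaches (the plate strings are made up):

```yaml
lpr:
  match_distance: 1 # tolerate one character mismatch
  known_plates:
    My Car:
      - "ABC-1234"
      - "A8C-1234" # a common OCR confusion, listed as an explicit variation
```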

### How do I debug LPR issues?

- View MQTT messages for `frigate/events` to verify detected plates.
- Adjust `detection_threshold` and `recognition_threshold` settings.
- If you are using a Frigate+ model or a model that detects license plates, watch the debug view (Settings --> Debug) to ensure that `license_plate` is being detected with a `car`.
- Enable debug logs for LPR by adding `frigate.data_processing.common.license_plate: debug` to your `logger` configuration (see the sketch after this list). These logs are _very_ verbose, so only enable this when necessary.
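
A minimal sketch of that logger change, assuming an otherwise default `logger` section:

```yaml
logger:
  default: info
  logs:
    frigate.data_processing.common.license_plate: debug
```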

### Will LPR slow down my system?

LPR runs on the CPU, so performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU for optimal results.

@ -11,14 +11,38 @@ Frigate offers native notifications using the [WebPush Protocol](https://web.dev

In order to use notifications, the following requirements must be met:

- Frigate must be accessed via a secure https connection
- Frigate must be accessed via a secure `https` connection ([see the authorization docs](/configuration/authentication)).
- A supported browser must be used. Currently Chrome, Firefox, and Safari are known to be supported.
- In order for notifications to be usable externally, Frigate must be accessible externally
- In order for notifications to be usable externally, Frigate must be accessible externally.
- For iOS devices, some users have also indicated that the Notifications switch needs to be enabled in iOS Settings --> Apps --> Safari --> Advanced --> Features.

### Configuration

To configure notifications, go to the Frigate WebUI -> Settings -> Notifications, enable notifications, then fill out the fields and save.

Optionally, you can change the default cooldown period for notifications through the `cooldown` parameter in your config file. This parameter can also be overridden at the camera level.

Notifications will be prevented if either:

- The global cooldown period hasn't elapsed since any camera's last notification
- The camera-specific cooldown period hasn't elapsed for the specific camera

```yaml
notifications:
  enabled: True
  email: "johndoe@gmail.com"
  cooldown: 10 # wait 10 seconds before sending another notification from any camera
```

```yaml
cameras:
  doorbell:
    ...
    notifications:
      enabled: True
      cooldown: 30 # wait 30 seconds before sending another notification from the doorbell camera
```

### Registration

Once notifications are enabled, press the `Register for Notifications` button on all devices that you would like to receive notifications on. This will register the background worker. After this, Frigate must be restarted, and then notifications will begin to be sent.

@ -39,4 +63,4 @@ Different platforms handle notifications differently, some settings changes may

### Android

Most Android phones have battery optimization settings. To get reliable notification delivery, the browser (Chrome, Firefox) should have battery optimizations disabled. If Frigate is running as a PWA, then the Frigate app should have battery optimizations disabled as well.

@ -10,25 +10,31 @@ title: Object Detectors

Frigate supports multiple different detectors that work on different types of hardware:

**Most Hardware**

- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8): The Hailo8 and Hailo8L AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.

**AMD**

- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.

**Intel**

- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.

**Nvidia**

- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.

**Rockchip**

- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.

**For Testing**

- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use the CPU to run a TFLite model. This is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.

:::

@ -169,7 +175,6 @@ model:
  path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```

### Custom Models

The Hailo-8l detector supports all YOLO models that have been compiled for the Hailo hardware and include post-processing. The detector automatically detects your hardware type (Hailo-8 or Hailo-8L) and uses the appropriate model.
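
As a hedged sketch, pointing Frigate at a custom compiled model follows the same pattern as the config above (the `.hef` filename here is a placeholder, not a model that ships with Frigate):

```yaml
model:
  path: /config/model_cache/h8l_cache/yolov8s.hef
```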

@ -461,7 +466,7 @@ When using docker compose:

```yaml
services:
  frigate:
    ...

    environment:
      HSA_OVERRIDE_GFX_VERSION: "9.0.0"
```

@ -604,6 +609,35 @@ model:

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

#### D-FINE

[D-FINE](https://github.com/Peterande/D-FINE) is the [current state of the art](https://paperswithcode.com/sota/real-time-object-detection-on-coco?p=d-fine-redefine-regression-task-in-detrs-as) at the time of writing. The ONNX exported models are supported, but not included by default. See [the models section](#downloading-d-fine-model) for more information on downloading the D-FINE model for use in Frigate.

:::warning

D-FINE is currently not supported on OpenVINO.

:::

After placing the downloaded ONNX model in your `config/model_cache` folder, you can use the following configuration:

```yaml
detectors:
  onnx:
    type: onnx

model:
  model_type: dfine
  width: 640
  height: 640
  input_tensor: nchw
  input_dtype: float
  path: /config/model_cache/dfine_m_obj2coco.onnx
  labelmap_path: /labelmap/coco-80.txt
```

Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.

## CPU Detector (not recommended)

The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`.
|
||||
This is an example configuration file that you need to adjust to your specific onnx model:
|
||||
|
||||
```yaml
|
||||
soc: ["rk3562","rk3566", "rk3568", "rk3576", "rk3588"]
|
||||
soc: ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
|
||||
quantization: false
|
||||
|
||||
output_name: "{input_basename}"
|
||||

@ -784,6 +818,29 @@ Some model types are not included in Frigate by default.

Here are some tips for getting different model types:

### Downloading D-FINE Model

To export as ONNX:

1. Clone https://github.com/Peterande/D-FINE and install all dependencies.
2. Select and download a checkpoint from the [readme](https://github.com/Peterande/D-FINE).
3. Modify line 58 of `tools/deployment/export_onnx.py` and change the batch size to 1: `data = torch.rand(1, 3, 640, 640)`
4. Run the export, making sure you select the right config for your checkpoint.

Example:

```
python3 tools/deployment/export_onnx.py -c configs/dfine/objects365/dfine_hgnetv2_m_obj2coco.yml -r output/dfine_m_obj2coco.pth
```

:::tip

Model export has only been tested on Linux (or WSL2). Not all dependencies are in `requirements.txt`. Some live in the deployment folder, and some are still missing entirely and must be installed manually.

Make sure you change the batch size to 1 before exporting.

:::

### Downloading YOLO-NAS Model

You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).

@ -420,6 +420,8 @@ notifications:
  # Optional: Email for push service to reach out to
  # NOTE: This is required to use notifications
  email: "admin@example.com"
  # Optional: Cooldown time for notifications in seconds (default: shown below)
  cooldown: 0

# Optional: Record configuration
# NOTE: Can be overridden at the camera level

@ -534,6 +536,8 @@ semantic_search:
  enabled: False
  # Optional: Re-index embeddings database from historical tracked objects (default: shown below)
  reindex: False
  # Optional: Set the model used for embeddings. (default: shown below)
  model: "jinav1"
  # Optional: Set the model size used for embeddings. (default: shown below)
  # NOTE: small model runs on CPU and large model runs on GPU
  model_size: "small"

@ -566,7 +570,6 @@ lpr:
  known_plates: {}

# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
# WARNING: Depending on the provider, this will send thumbnails over the internet
# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at
# the camera level (enabled: False) to enhance privacy for indoor cameras.

@ -5,7 +5,7 @@ title: Semantic Search

Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.

Frigate uses [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create and save embeddings to Frigate's database. All of this runs locally.
Frigate uses models from [Jina AI](https://huggingface.co/jinaai) to create and save embeddings to Frigate's database. All of this runs locally.

Semantic Search is accessed via the _Explore_ view in the Frigate UI.

@ -35,23 +35,47 @@ If you are enabling Semantic Search for the first time, be advised that Frigate

:::

### Jina AI CLIP
### Jina AI CLIP (version 1)

The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
The [V1 model from Jina](https://huggingface.co/jinaai/jina-clip-v1) has a vision model which is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.

The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
The V1 text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.

Differently weighted versions of the Jina model are available and can be selected by setting the `model_size` config option as `small` or `large`:
Differently weighted versions of the Jina models are available and can be selected by setting the `model_size` config option as `small` or `large`:

```yaml
semantic_search:
  enabled: True
  model: "jinav1"
  model_size: small
```

- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.

### Jina AI CLIP (version 2)

Frigate also supports the [V2 model from Jina](https://huggingface.co/jinaai/jina-clip-v2), which introduces multilingual support (89 languages). In contrast, the V1 model only supports English.

V2 offers only a 3% performance improvement over V1 in both text-image and text-text retrieval tasks, an upgrade that is unlikely to yield noticeable real-world benefits. Additionally, V2 has _significantly_ higher RAM and GPU requirements, leading to increased inference time and memory usage. If you plan to use V2, ensure your system has ample RAM and a discrete GPU. CPU inference (with the `small` model) using V2 is not recommended.

To use the V2 model, update the `model` parameter in your config:

```yaml
semantic_search:
  enabled: True
  model: "jinav2"
  model_size: large
```

For most users, especially native English speakers, the V1 model remains the recommended choice.

:::note

Switching between V1 and V2 requires reindexing your embeddings. To do this, set `reindex: True` in your Semantic Search configuration and restart Frigate. The embeddings from V1 and V2 are incompatible, and failing to reindex will result in incorrect search results.

:::
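
When switching, a one-time reindex sketch looks like this (set `reindex` back to `False` after it completes, since reindexing runs on every startup while it is `True`):

```yaml
semantic_search:
  enabled: True
  model: "jinav2"
  reindex: True
```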

### GPU Acceleration

The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used.

@ -140,12 +140,12 @@ cameras:
    zones:
      street:
        coordinates: 0.033,0.306,0.324,0.138,0.439,0.185,0.042,0.428
        distances: 10,12,11,13.5
        distances: 10,12,11,13.5 # in meters or feet
```

Each number in the `distance` field represents the real-world distance between the points in the `coordinates` list. So in the example above, the distance between the first two points ([0.033,0.306] and [0.324,0.138]) is 10. The distance between the second and third set of points ([0.324,0.138] and [0.439,0.185]) is 12, and so on. The fastest and most accurate way to configure this is through the Zone Editor in the Frigate UI.

The `distance` values are measured in meters or feet, depending on how `unit_system` is configured in your `ui` config:
The `distance` values are measured in meters (metric) or feet (imperial), depending on how `unit_system` is configured in your `ui` config:

```yaml
ui:
@ -153,7 +153,9 @@ ui:
  unit_system: metric
```

The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents). These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph), depending on how `unit_system` is configured in your `ui` config.
The average speed of your object as it moved through your zone is saved in Frigate's database and can be seen in the UI in the Tracked Object Details pane in Explore. Current estimated speed can also be seen on the debug view as the third value in the object label (see the caveats below). Current estimated speed, average estimated speed, and velocity angle (the angle of the direction the object is moving relative to the frame) of tracked objects is also sent through the `events` MQTT topic. See the [MQTT docs](../integrations/mqtt.md#frigateevents).

These speed values are output as a number in miles per hour (mph) or kilometers per hour (kph). For miles per hour, set `unit_system` to `imperial`. For kilometers per hour, set `unit_system` to `metric`.
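
For example, to report speeds in miles per hour instead (a minimal sketch):

```yaml
ui:
  unit_system: imperial
```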

#### Best practices and caveats

@ -34,7 +34,7 @@ Fork [blakeblackshear/frigate-hass-integration](https://github.com/blakeblackshe

### Prerequisites

- GNU make
- Docker
- Docker (including buildx plugin)
- An extra detector (Coral, OpenVINO, etc.) is optional but recommended to simulate real world performance.

:::note

@ -80,12 +80,12 @@ The Frigate container also stores logs in shm, which can take up to **40MB**, so

You can calculate the **minimum** shm size for each camera with the following formula using the resolution specified for detect:

```console
# Replace <width> and <height>
# Template for one camera without logs, replace <width> and <height>
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 20 + 270480) / 1048576))'

# Example for 1280x720, including logs
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 20 + 270480) / 1048576)) + 40'
46.63MB
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 20 + 270480) / 1048576 + 40))'
66.63MB

# Example for eight cameras detecting at 1280x720, including logs
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576) * 8 + 40))'
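
Once calculated, the value is typically applied through `shm_size` in docker compose (a sketch; "256mb" rounds up the eight-camera example above):

```yaml
services:
  frigate:
    shm_size: "256mb" # use your calculated value
```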

@ -250,7 +250,7 @@ The official docker image tags for the current stable version are:

The community supported docker image tags for the current stable version are:

- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
- `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6
- `stable-tensorrt-jp6` - Frigate build optimized for nvidia Jetson devices running Jetpack 6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
- `stable-h8l` - Frigate build for the Hailo-8L M.2 PCIe Raspberry Pi 5 hat

@ -177,7 +177,7 @@ services:
  frigate:
    ...
    devices:
      - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
      - /dev/dri/renderD128:/dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
    ...
```

@ -10,6 +10,12 @@ There are many possible causes for a USB coral not being detected and some are O

1. When the device is first plugged in and has not initialized, it will appear as `1a6e:089a Global Unichip Corp.` when running `lsusb` or checking the hardware page in HA OS.
2. Once initialized, the device will appear as `18d1:9302 Google Inc.` when running `lsusb` or checking the hardware page in HA OS.

:::tip

Using `lsusb` or checking the hardware page in HA OS will show the device as `1a6e:089a Global Unichip Corp.` until Frigate runs an inference using the Coral, so don't worry about the identification until after Frigate has attempted to detect the Coral.

:::

If the Coral does not initialize, then Frigate cannot interface with it. Some common reasons for the USB based Coral not initializing are:

### Not Enough Power

@ -20,7 +20,6 @@ from fastapi.params import Depends
from fastapi.responses import JSONResponse, PlainTextResponse, StreamingResponse
from markupsafe import escape
from peewee import operator
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
from pydantic import ValidationError

from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryParameters
@ -28,6 +27,7 @@ from frigate.api.defs.request.app_body import AppConfigSetBody
from frigate.api.defs.tags import Tags
from frigate.config import FrigateConfig
from frigate.models import Event, Timeline
from frigate.stats.prometheus import get_metrics, update_metrics
from frigate.util.builtin import (
    clean_camera_user_pass,
    get_tz_modifiers,
@ -113,9 +113,13 @@ def stats_history(request: Request, keys: str = None):


@router.get("/metrics")
def metrics():
    """Expose Prometheus metrics endpoint"""
    return Response(content=generate_latest(), media_type=CONTENT_TYPE_LATEST)
def metrics(request: Request):
    """Expose Prometheus metrics endpoint and update metrics with latest stats"""
    # Retrieve the latest statistics and update the Prometheus metrics
    stats = request.app.stats_emitter.get_latest_stats()
    update_metrics(stats)
    content, content_type = get_metrics()
    return Response(content=content, media_type=content_type)


@router.get("/config")

@ -9,10 +9,13 @@ import string
from fastapi import APIRouter, Request, UploadFile
from fastapi.responses import JSONResponse
from pathvalidate import sanitize_filename
from peewee import DoesNotExist
from playhouse.shortcuts import model_to_dict

from frigate.api.defs.tags import Tags
from frigate.const import FACE_DIR
from frigate.embeddings import EmbeddingsContext
from frigate.models import Event

logger = logging.getLogger(__name__)

@ -176,3 +179,36 @@ def deregister_faces(request: Request, name: str, body: dict = None):
        content=({"success": True, "message": "Successfully deleted faces."}),
        status_code=200,
    )


@router.put("/lpr/reprocess")
def reprocess_license_plate(request: Request, event_id: str):
    if not request.app.frigate_config.lpr.enabled:
        message = "License plate recognition is not enabled."
        logger.error(message)
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": message,
                }
            ),
            status_code=400,
        )

    try:
        event = Event.get(Event.id == event_id)
    except DoesNotExist:
        message = f"Event {event_id} not found"
        logger.error(message)
        return JSONResponse(
            content=({"success": False, "message": message}), status_code=404
        )

    context: EmbeddingsContext = request.app.embeddings
    response = context.reprocess_plate(model_to_dict(event))

    return JSONResponse(
        content=response,
        status_code=200,
    )

@ -12,7 +12,7 @@ class EventResponse(BaseModel):
    end_time: Optional[float]
    false_positive: Optional[bool]
    zones: list[str]
    thumbnail: str
    thumbnail: Optional[str]
    has_clip: bool
    has_snapshot: bool
    retain_indefinitely: bool

@ -336,6 +336,7 @@ def events_explore(limit: int = 10):
                    "sub_label_score",
                    "average_estimated_speed",
                    "velocity_angle",
                    "path_data",
                ]
            },
            "event_count": label_counts[event.label],
@ -622,6 +623,7 @@ def events_search(request: Request, params: EventsSearchQueryParams = Depends())
                    "sub_label_score",
                    "average_estimated_speed",
                    "velocity_angle",
                    "path_data",
                ]
            }

@ -989,6 +991,10 @@ def set_sub_label(
    new_sub_label = body.subLabel
    new_score = body.subLabelScore

    if new_sub_label == "":
        new_sub_label = None
        new_score = None

    if tracked_obj:
        tracked_obj.obj_data["sub_label"] = (new_sub_label, new_score)

@ -999,21 +1005,19 @@ def set_sub_label(

    if event:
        event.sub_label = new_sub_label

        if new_score:
            data = event.data
        data = event.data
        if new_sub_label is None:
            data["sub_label_score"] = None
        elif new_score is not None:
            data["sub_label_score"] = new_score
            event.data = data

        event.data = data
        event.save()

    return JSONResponse(
        content=(
            {
                "success": True,
                "message": "Event " + event_id + " sub label set to " + new_sub_label,
            }
        ),
        content={
            "success": True,
            "message": f"Event {event_id} sub label set to {new_sub_label if new_sub_label is not None else 'None'}",
        },
        status_code=200,
    )

@ -1079,10 +1083,7 @@ def regenerate_description(

    camera_config = request.app.frigate_config.cameras[event.camera]

    if (
        request.app.frigate_config.semantic_search.enabled
        and camera_config.genai.enabled
    ):
    if camera_config.genai.enabled:
        request.app.event_metadata_updater.publish((event.id, params.source))

    return JSONResponse(

@ -1,6 +1,5 @@
"""Image and video apis."""

import base64
import glob
import logging
import os
@ -32,6 +31,7 @@ from frigate.config import FrigateConfig
from frigate.const import (
    CACHE_DIR,
    CLIPS_DIR,
    INSTALL_DIR,
    MAX_SEGMENT_DURATION,
    PREVIEW_FRAME_TYPE,
    RECORD_DIR,
@ -40,6 +40,7 @@ from frigate.models import Event, Previews, Recordings, Regions, ReviewSegment
from frigate.object_processing import TrackedObjectProcessor
from frigate.util.builtin import get_tz_modifiers
from frigate.util.image import get_image_from_recording
from frigate.util.path import get_event_thumbnail_bytes

logger = logging.getLogger(__name__)

@ -155,7 +156,9 @@ def latest_frame(
        frame_processor.get_current_frame_time(camera_name) + retry_interval
    ):
        if request.app.camera_error_image is None:
            error_image = glob.glob("/opt/frigate/frigate/images/camera-error.jpg")
            error_image = glob.glob(
                os.path.join(INSTALL_DIR, "frigate/images/camera-error.jpg")
            )

            if len(error_image) > 0:
                request.app.camera_error_image = cv2.imread(
@ -550,7 +553,7 @@ def recording_clip(
    )

    file_name = sanitize_filename(f"playlist_{camera_name}_{start_ts}-{end_ts}.txt")
    file_path = f"/tmp/cache/{file_name}"
    file_path = os.path.join(CACHE_DIR, file_name)
    with open(file_path, "w") as file:
        clip: Recordings
        for clip in recordings:

@ -804,10 +807,11 @@ def event_snapshot(
    )


@router.get("/events/{event_id}/thumbnail.jpg")
@router.get("/events/{event_id}/thumbnail.{extension}")
def event_thumbnail(
    request: Request,
    event_id: str,
    extension: str,
    max_cache_age: int = Query(
        2592000, description="Max cache age in seconds. Default 30 days in seconds."
    ),
@ -816,11 +820,15 @@ def event_thumbnail(
    thumbnail_bytes = None
    event_complete = False
    try:
        event = Event.get(Event.id == event_id)
        event: Event = Event.get(Event.id == event_id)
        if event.end_time is not None:
            event_complete = True
        thumbnail_bytes = base64.b64decode(event.thumbnail)

        thumbnail_bytes = get_event_thumbnail_bytes(event)
    except DoesNotExist:
        thumbnail_bytes = None

    if thumbnail_bytes is None:
        # see if the object is currently being tracked
        try:
            camera_states = request.app.detected_frames_processor.camera_states.values()
@ -828,7 +836,7 @@ def event_thumbnail(
                if event_id in camera_state.tracked_objects:
                    tracked_obj = camera_state.tracked_objects.get(event_id)
                    if tracked_obj is not None:
                        thumbnail_bytes = tracked_obj.get_thumbnail()
                        thumbnail_bytes = tracked_obj.get_thumbnail(extension)
        except Exception:
            return JSONResponse(
                content={"success": False, "message": "Event not found"},
@ -843,8 +851,8 @@ def event_thumbnail(

    # android notifications prefer a 2:1 ratio
    if format == "android":
        jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
        img = cv2.imdecode(jpg_as_np, flags=1)
        img_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
        img = cv2.imdecode(img_as_np, flags=1)
        thumbnail = cv2.copyMakeBorder(
            img,
            0,
@ -854,17 +862,25 @@ def event_thumbnail(
            cv2.BORDER_CONSTANT,
            (0, 0, 0),
        )
        ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
        thumbnail_bytes = jpg.tobytes()

        quality_params = None

        if extension == "jpg" or extension == "jpeg":
            quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), 70]
        elif extension == "webp":
            quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60]

        # encode using the requested extension (the original diff mistakenly used the image object here)
        _, img = cv2.imencode(f".{extension}", thumbnail, quality_params)
        thumbnail_bytes = img.tobytes()

    return Response(
        thumbnail_bytes,
        media_type="image/jpeg",
        media_type=f"image/{extension}",
        headers={
            "Cache-Control": f"private, max-age={max_cache_age}"
            if event_complete
            else "no-store",
            "Content-Type": "image/jpeg",
            "Content-Type": f"image/{extension}",
        },
    )

@ -9,7 +9,7 @@ from fastapi import APIRouter
from fastapi.responses import JSONResponse

from frigate.api.defs.tags import Tags
from frigate.const import CACHE_DIR, PREVIEW_FRAME_TYPE
from frigate.const import BASE_DIR, CACHE_DIR, PREVIEW_FRAME_TYPE
from frigate.models import Previews

logger = logging.getLogger(__name__)

@ -52,7 +52,7 @@ def preview_ts(camera_name: str, start_ts: float, end_ts: float):
        clips.append(
            {
                "camera": preview["camera"],
                "src": preview["path"].replace("/media/frigate", ""),
                "src": preview["path"].replace(BASE_DIR, ""),
                "type": "video/mp4",
                "start": preview["start_time"],
                "end": preview["end_time"],

@ -39,6 +39,7 @@ from frigate.const import (
    MODEL_CACHE_DIR,
    RECORD_DIR,
    SHM_FRAMES_VAR,
    THUMB_DIR,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
@ -92,7 +93,13 @@ class FrigateApp:
        self.log_queue: Queue = mp.Queue()
        self.camera_metrics: dict[str, CameraMetrics] = {}
        self.embeddings_metrics: DataProcessorMetrics | None = (
            DataProcessorMetrics() if config.semantic_search.enabled else None
            DataProcessorMetrics()
            if (
                config.semantic_search.enabled
                or config.lpr.enabled
                or config.face_recognition.enabled
            )
            else None
        )
        self.ptz_metrics: dict[str, PTZMetrics] = {}
        self.processes: dict[str, int] = {}
@ -105,6 +112,7 @@ class FrigateApp:
        dirs = [
            CONFIG_DIR,
            RECORD_DIR,
            THUMB_DIR,
            f"{CLIPS_DIR}/cache",
            CACHE_DIR,
            MODEL_CACHE_DIR,
@ -234,7 +242,16 @@ class FrigateApp:
        logger.info(f"Review process started: {review_segment_process.pid}")

    def init_embeddings_manager(self) -> None:
        if not self.config.semantic_search.enabled:
        genai_cameras = [
            c for c in self.config.cameras.values() if c.enabled and c.genai.enabled
        ]

        if (
            not self.config.semantic_search.enabled
            and not genai_cameras
            and not self.config.lpr.enabled
            and not self.config.face_recognition.enabled
        ):
            return

        embedding_process = util.Process(
@ -291,7 +308,16 @@ class FrigateApp:
        migrate_exports(self.config.ffmpeg, list(self.config.cameras.keys()))

    def init_embeddings_client(self) -> None:
        if self.config.semantic_search.enabled:
        genai_cameras = [
            c for c in self.config.cameras.values() if c.enabled and c.genai.enabled
        ]

        if (
            self.config.semantic_search.enabled
            or self.config.lpr.enabled
            or genai_cameras
            or self.config.face_recognition.enabled
        ):
            # Create a client for other processes to use
            self.embeddings = EmbeddingsContext(self.db)

@ -33,7 +33,11 @@ class CameraActivityManager:
                self.zone_active_object_counts[zone] = Counter()
                self.all_zone_labels[zone] = set()

            self.all_zone_labels[zone].update(zone_config.objects)
            self.all_zone_labels[zone].update(
                zone_config.objects
                if zone_config.objects
                else camera_config.objects.track
            )

    def update_activity(self, new_activity: dict[str, dict[str, any]]) -> None:
        all_objects: list[dict[str, any]] = []

@ -32,7 +32,9 @@ class ConfigPublisher:
class ConfigSubscriber:
    """Simplifies receiving an updated config."""

    def __init__(self, topic: str) -> None:
    def __init__(self, topic: str, exact=False) -> None:
        self.topic = topic
        self.exact = exact
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        self.socket.setsockopt_string(zmq.SUBSCRIBE, topic)
@ -42,7 +44,12 @@ class ConfigSubscriber:
        """Returns updated config or None if no update."""
        try:
            topic = self.socket.recv_string(flags=zmq.NOBLOCK)
            return (topic, self.socket.recv_pyobj())
            obj = self.socket.recv_pyobj()

            if not self.exact or self.topic == topic:
                return (topic, obj)
            else:
                return (None, None)
        except zmq.ZMQError:
            return (None, None)

@ -15,6 +15,7 @@ class EmbeddingsRequestEnum(Enum):
    generate_search = "generate_search"
    register_face = "register_face"
    reprocess_face = "reprocess_face"
    reprocess_plate = "reprocess_plate"


class EmbeddingsResponder:

36
frigate/comms/recordings_updater.py
Normal file
@ -0,0 +1,36 @@
"""Facilitates communication between processes."""

import logging
from enum import Enum

from .zmq_proxy import Publisher, Subscriber

logger = logging.getLogger(__name__)


class RecordingsDataTypeEnum(str, Enum):
    all = ""
    recordings_available_through = "recordings_available_through"


class RecordingsDataPublisher(Publisher):
    """Publishes latest recording data."""

    topic_base = "recordings/"

    def __init__(self, topic: RecordingsDataTypeEnum) -> None:
        topic = topic.value
        super().__init__(topic)

    def publish(self, payload: tuple[str, float]) -> None:
        super().publish(payload)


class RecordingsDataSubscriber(Subscriber):
    """Receives latest recording data."""

    topic_base = "recordings/"

    def __init__(self, topic: RecordingsDataTypeEnum) -> None:
        topic = topic.value
        super().__init__(topic)

@ -47,6 +47,10 @@ class WebPushClient(Communicator):  # type: ignore[misc]
        self.suspended_cameras: dict[str, int] = {
            c.name: 0 for c in self.config.cameras.values()
        }
        self.last_camera_notification_time: dict[str, float] = {
            c.name: 0 for c in self.config.cameras.values()
        }
        self.last_notification_time: float = 0
        self.notification_queue: queue.Queue[PushNotification] = queue.Queue()
        self.notification_thread = threading.Thread(
            target=self._process_notifications, daemon=True
@ -264,6 +268,29 @@ class WebPushClient(Communicator):  # type: ignore[misc]
        ):
            return

        camera: str = payload["after"]["camera"]
        current_time = datetime.datetime.now().timestamp()

        # Check global cooldown period
        if (
            current_time - self.last_notification_time
            < self.config.notifications.cooldown
        ):
            logger.debug(
                f"Skipping notification for {camera} - in global cooldown period"
            )
            return

        # Check camera-specific cooldown period
        if (
            current_time - self.last_camera_notification_time[camera]
            < self.config.cameras[camera].notifications.cooldown
        ):
            logger.debug(
                f"Skipping notification for {camera} - in camera-specific cooldown period"
            )
            return

        self.check_registrations()

        state = payload["type"]
@ -278,6 +305,9 @@ class WebPushClient(Communicator):  # type: ignore[misc]
        ):
            return

        self.last_camera_notification_time[camera] = current_time
        self.last_notification_time = current_time

        reviewId = payload["after"]["id"]
        sorted_objects: set[str] = set()

@ -287,7 +317,6 @@ class WebPushClient(Communicator):  # type: ignore[misc]

        sorted_objects.update(payload["after"]["data"]["sub_labels"])

        camera: str = payload["after"]["camera"]
        title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
        message = f"Detected on {camera.replace('_', ' ').title()}"
        image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"

@ -1,4 +1,3 @@
import shutil
from enum import Enum
from typing import Union

@ -71,10 +70,7 @@ class FfmpegConfig(FrigateBaseModel):
    @property
    def ffmpeg_path(self) -> str:
        if self.path == "default":
            if shutil.which("ffmpeg") is None:
                return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
            else:
                return "ffmpeg"
            return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
        elif self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/ffmpeg"
        else:
@ -83,10 +79,7 @@ class FfmpegConfig(FrigateBaseModel):
    @property
    def ffprobe_path(self) -> str:
        if self.path == "default":
            if shutil.which("ffprobe") is None:
                return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
            else:
                return "ffprobe"
            return f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffprobe"
        elif self.path in INCLUDED_FFMPEG_VERSIONS:
            return f"/usr/lib/ffmpeg/{self.path}/bin/ffprobe"
        else:

@ -10,6 +10,9 @@ __all__ = ["NotificationConfig"]
class NotificationConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable notifications")
    email: Optional[str] = Field(default=None, title="Email required for push.")
    cooldown: Optional[int] = Field(
        default=0, ge=0, title="Cooldown period for notifications (time in seconds)."
    )
    enabled_in_config: Optional[bool] = Field(
        default=None, title="Keep track of original state of notifications."
    )

@ -1,3 +1,4 @@
from enum import Enum
from typing import Dict, List, Optional

from pydantic import Field
@ -11,6 +12,11 @@ __all__ = [
]


class SemanticSearchModelEnum(str, Enum):
    jinav1 = "jinav1"
    jinav2 = "jinav2"


class BirdClassificationConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable bird classification.")
    threshold: float = Field(
@ -30,7 +36,11 @@ class ClassificationConfig(FrigateBaseModel):
class SemanticSearchConfig(FrigateBaseModel):
    enabled: bool = Field(default=False, title="Enable semantic search.")
    reindex: Optional[bool] = Field(
        default=False, title="Reindex all detections on startup."
        default=False, title="Reindex all tracked objects on startup."
    )
    model: Optional[SemanticSearchModelEnum] = Field(
        default=SemanticSearchModelEnum.jinav1,
        title="The CLIP model to use for semantic search.",
    )
    model_size: str = Field(
        default="small", title="The size of the embeddings model used."

@ -172,16 +172,6 @@ class RestreamConfig(BaseModel):
    model_config = ConfigDict(extra="allow")


def verify_semantic_search_dependent_configs(config: FrigateConfig) -> None:
    """Verify that semantic search is enabled if required features are enabled."""
    if not config.semantic_search.enabled:
        if config.genai.enabled:
            raise ValueError("Genai requires semantic search to be enabled.")

        if config.face_recognition.enabled:
            raise ValueError("Face recognition requires semantic to be enabled.")


def verify_config_roles(camera_config: CameraConfig) -> None:
    """Verify that roles are setup in the config correctly."""
    assigned_roles = list(
@ -647,7 +637,6 @@ class FrigateConfig(FrigateBaseModel):
            detector_config.model = model
        self.detectors[key] = detector_config

        verify_semantic_search_dependent_configs(self)
        return self

    @field_validator("cameras")

@ -1,5 +1,7 @@
import os
import re

INSTALL_DIR = "/opt/frigate"
CONFIG_DIR = "/config"
DEFAULT_DB_PATH = f"{CONFIG_DIR}/frigate.db"
MODEL_CACHE_DIR = f"{CONFIG_DIR}/model_cache"
@ -7,6 +9,7 @@ BASE_DIR = "/media/frigate"
CLIPS_DIR = f"{BASE_DIR}/clips"
EXPORT_DIR = f"{BASE_DIR}/exports"
FACE_DIR = f"{CLIPS_DIR}/faces"
THUMB_DIR = f"{CLIPS_DIR}/thumbs"
RECORD_DIR = f"{BASE_DIR}/recordings"
BIRDSEYE_PIPE = "/tmp/cache/birdseye"
CACHE_DIR = "/tmp/cache"
@ -60,8 +63,9 @@ MAX_WAL_SIZE = 10  # MB

# Ffmpeg constants

DEFAULT_FFMPEG_VERSION = "7.0"
INCLUDED_FFMPEG_VERSIONS = ["7.0", "5.0"]
DEFAULT_FFMPEG_VERSION = os.environ.get("DEFAULT_FFMPEG_VERSION", "")
INCLUDED_FFMPEG_VERSIONS = os.environ.get("INCLUDED_FFMPEG_VERSIONS", "").split(":")
LIBAVFORMAT_VERSION_MAJOR = int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59"))
FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
@ -13,29 +13,21 @@ from Levenshtein import distance
|
||||
from pyclipper import ET_CLOSEDPOLYGON, JT_ROUND, PyclipperOffset
|
||||
from shapely.geometry import Polygon
|
||||
|
||||
from frigate.comms.inter_process import InterProcessRequestor
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.const import FRIGATE_LOCALHOST
|
||||
from frigate.embeddings.functions.onnx import GenericONNXEmbedding, ModelTypeEnum
|
||||
from frigate.util.image import area
|
||||
|
||||
from ..types import DataProcessorMetrics
|
||||
from .api import RealTimeProcessorApi
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
WRITE_DEBUG_IMAGES = False
|
||||
|
||||
|
||||
class LicensePlateProcessor(RealTimeProcessorApi):
|
||||
def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
|
||||
super().__init__(config, metrics)
|
||||
self.requestor = InterProcessRequestor()
|
||||
self.lpr_config = config.lpr
|
||||
class LicensePlateProcessingMixin:
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
self.requires_license_plate_detection = (
|
||||
"license_plate" not in self.config.objects.all_objects
|
||||
)
|
||||
self.detected_license_plates: dict[str, dict[str, any]] = {}
|
||||
|
||||
self.ctc_decoder = CTCDecoder()
|
||||
|
||||
@ -47,65 +39,6 @@ class LicensePlateProcessor(RealTimeProcessorApi):
|
||||
self.box_thresh = 0.8
|
||||
self.mask_thresh = 0.8
|
||||
|
||||
self.lpr_detection_model = None
|
||||
self.lpr_classification_model = None
|
||||
self.lpr_recognition_model = None
|
||||
|
||||
if self.config.lpr.enabled:
|
||||
self.detection_model = GenericONNXEmbedding(
|
||||
model_name="paddleocr-onnx",
|
||||
model_file="detection.onnx",
|
||||
download_urls={
|
||||
"detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx"
|
||||
},
|
||||
model_size="large",
|
||||
model_type=ModelTypeEnum.lpr_detect,
|
||||
requestor=self.requestor,
|
||||
device="CPU",
|
||||
)
|
||||
|
||||
self.classification_model = GenericONNXEmbedding(
|
||||
model_name="paddleocr-onnx",
|
||||
model_file="classification.onnx",
|
||||
download_urls={
|
||||
"classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
|
||||
},
|
||||
model_size="large",
|
||||
model_type=ModelTypeEnum.lpr_classify,
|
||||
requestor=self.requestor,
|
||||
device="CPU",
|
||||
)
|
||||
|
||||
self.recognition_model = GenericONNXEmbedding(
|
||||
model_name="paddleocr-onnx",
|
||||
model_file="recognition.onnx",
|
||||
download_urls={
|
||||
"recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx"
|
||||
},
|
||||
model_size="large",
|
||||
model_type=ModelTypeEnum.lpr_recognize,
|
||||
requestor=self.requestor,
|
||||
device="CPU",
|
||||
)
|
||||
self.yolov9_detection_model = GenericONNXEmbedding(
|
||||
model_name="yolov9_license_plate",
|
||||
model_file="yolov9-256-license-plates.onnx",
|
||||
download_urls={
|
||||
"yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
|
||||
},
|
||||
model_size="large",
|
||||
model_type=ModelTypeEnum.yolov9_lpr_detect,
|
||||
requestor=self.requestor,
|
||||
device="CPU",
|
||||
)
|
||||
|
||||
if self.lpr_config.enabled:
|
||||
# all models need to be loaded to run LPR
|
||||
self.detection_model._load_model_and_utils()
|
||||
self.classification_model._load_model_and_utils()
|
||||
self.recognition_model._load_model_and_utils()
|
||||
self.yolov9_detection_model._load_model_and_utils()
|
||||
|
||||
def _detect(self, image: np.ndarray) -> List[np.ndarray]:
|
||||
"""
|
||||
Detect possible license plates in the input image by first resizing and normalizing it,
|
||||
@ -132,7 +65,7 @@ class LicensePlateProcessor(RealTimeProcessorApi):
|
||||
resized_image,
|
||||
)
|
||||
|
||||
outputs = self.detection_model([normalized_image])[0]
|
||||
outputs = self.model_runner.detection_model([normalized_image])[0]
|
||||
outputs = outputs[0, :, :]
|
||||
|
||||
boxes, _ = self._boxes_from_bitmap(outputs, outputs > self.mask_thresh, w, h)
|
||||
@ -161,7 +94,7 @@ class LicensePlateProcessor(RealTimeProcessorApi):
|
||||
norm_img = norm_img[np.newaxis, :]
|
||||
norm_images.append(norm_img)
|
||||
|
||||
outputs = self.classification_model(norm_images)
|
||||
outputs = self.model_runner.classification_model(norm_images)
|
||||
|
||||
return self._process_classification_output(images, outputs)
|
||||
|
||||
@ -201,7 +134,7 @@ class LicensePlateProcessor(RealTimeProcessorApi):
|
||||
norm_image = norm_image[np.newaxis, :]
|
||||
norm_images.append(norm_image)
|
||||
|
||||
outputs = self.recognition_model(norm_images)
|
||||
outputs = self.model_runner.recognition_model(norm_images)
|
||||
return self.ctc_decoder(outputs)
|
||||
|
||||
def _process_license_plate(
|
||||
@ -217,9 +150,9 @@ class LicensePlateProcessor(RealTimeProcessorApi):
|
||||
            Tuple[List[str], List[float], List[int]]: Detected license plate texts, confidence scores, and areas of the plates.
        """
        if (
            self.detection_model.runner is None
            or self.classification_model.runner is None
            or self.recognition_model.runner is None
            self.model_runner.detection_model.runner is None
            or self.model_runner.classification_model.runner is None
            or self.model_runner.recognition_model.runner is None
        ):
            # we might still be downloading the models
            logger.debug("Model runners not loaded")
@ -683,7 +616,9 @@ class LicensePlateProcessor(RealTimeProcessorApi):
        input_w = int(input_h * max_wh_ratio)

        # check for model-specific input width
        model_input_w = self.recognition_model.runner.ort.get_inputs()[0].shape[3]
        model_input_w = self.model_runner.recognition_model.runner.ort.get_inputs()[
            0
        ].shape[3]
        if isinstance(model_input_w, int) and model_input_w > 0:
            input_w = model_input_w

@ -750,19 +685,13 @@ class LicensePlateProcessor(RealTimeProcessorApi):
            image = np.rot90(image, k=3)
        return image

    def __update_metrics(self, duration: float) -> None:
        """
        Update inference metrics.
        """
        self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10

    def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
        """
        Use a lightweight YOLOv9 model to detect license plates for users without Frigate+

        Return the dimensions of the detected plate as [x1, y1, x2, y2].
        """
        predictions = self.yolov9_detection_model(input)
        predictions = self.model_runner.yolov9_detection_model(input)

        confidence_threshold = self.lpr_config.detection_threshold

@ -788,8 +717,8 @@ class LicensePlateProcessor(RealTimeProcessorApi):

        # Return the top scoring bounding box if found
        if top_box is not None:
            # expand box by 15% to help with OCR
            expansion = (top_box[2:] - top_box[:2]) * 0.1
            # expand box by 30% to help with OCR
            expansion = (top_box[2:] - top_box[:2]) * 0.30

            # Expand box
            expanded_box = np.array(
@ -887,9 +816,22 @@ class LicensePlateProcessor(RealTimeProcessorApi):
        # 5. Return True if we should keep the previous plate (i.e., if it scores higher)
        return prev_score > curr_score

    def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
    def __update_yolov9_metrics(self, duration: float) -> None:
        """
        Update inference metrics.
        """
        self.metrics.yolov9_lpr_fps.value = (
            self.metrics.yolov9_lpr_fps.value * 9 + duration
        ) / 10

    def __update_lpr_metrics(self, duration: float) -> None:
        """
        Update inference metrics.
        """
        self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10

    def lpr_process(self, obj_data: dict[str, any], frame: np.ndarray):
        """Look for license plates in image."""
        start = datetime.datetime.now().timestamp()

        id = obj_data["id"]

@ -915,6 +857,7 @@ class LicensePlateProcessor(RealTimeProcessorApi):

        if self.requires_license_plate_detection:
            logger.debug("Running manual license_plate detection.")

            car_box = obj_data.get("box")

            if not car_box:
@ -939,6 +882,9 @@ class LicensePlateProcessor(RealTimeProcessorApi):
            logger.debug(
                f"YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
            )
            self.__update_yolov9_metrics(
                datetime.datetime.now().timestamp() - yolov9_start
            )

            if not license_plate:
                logger.debug("Detected no license plates for car object.")
@ -952,7 +898,7 @@ class LicensePlateProcessor(RealTimeProcessorApi):

            # check that license plate is valid
            # double the value because we've doubled the size of the car
            if license_plate_area < self.config.lpr.min_area * 2:
            if license_plate_area < self.lpr_config.min_area * 2:
                logger.debug("License plate is less than min_area")
                return

@ -990,7 +936,7 @@ class LicensePlateProcessor(RealTimeProcessorApi):
            # check that license plate is valid
            if (
                not license_plate_box
                or area(license_plate_box) < self.config.lpr.min_area
                or area(license_plate_box) < self.lpr_config.min_area
            ):
                logger.debug(f"Invalid license plate box {license_plate}")
                return
@ -1017,11 +963,15 @@ class LicensePlateProcessor(RealTimeProcessorApi):
            license_plate_frame,
        )

        start = datetime.datetime.now().timestamp()

        # run detection, returns results sorted by confidence, best first
        license_plates, confidences, areas = self._process_license_plate(
            license_plate_frame
        )

        self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start)

        logger.debug(f"Text boxes: {license_plates}")
        logger.debug(f"Confidences: {confidences}")
        logger.debug(f"Areas: {areas}")
@ -1096,10 +1046,9 @@ class LicensePlateProcessor(RealTimeProcessorApi):
            "plate": top_plate,
            "char_confidences": top_char_confidences,
            "area": top_area,
            "obj_data": obj_data,
        }

        self.__update_metrics(datetime.datetime.now().timestamp() - start)

    def handle_request(self, topic, request_data) -> dict[str, any] | None:
        return
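Aside: the __update_*_metrics methods above smooth their readings with a simple exponential moving average. A minimal sketch of the same arithmetic (update_ema is an illustrative helper, not part of the diff):

def update_ema(previous: float, sample: float, alpha: float = 0.1) -> float:
    # keep 90% of the running value and blend in 10% of the newest sample;
    # (previous * 9 + sample) / 10 in the diff is exactly the alpha = 0.1 case
    return previous * (1 - alpha) + sample * alpha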
31  frigate/data_processing/common/license_plate/model.py  Normal file
@ -0,0 +1,31 @@
from frigate.embeddings.onnx.lpr_embedding import (
    LicensePlateDetector,
    PaddleOCRClassification,
    PaddleOCRDetection,
    PaddleOCRRecognition,
)

from ...types import DataProcessorModelRunner


class LicensePlateModelRunner(DataProcessorModelRunner):
    def __init__(self, requestor, device: str = "CPU", model_size: str = "large"):
        super().__init__(requestor, device, model_size)
        self.detection_model = PaddleOCRDetection(
            model_size=model_size, requestor=requestor, device=device
        )
        self.classification_model = PaddleOCRClassification(
            model_size=model_size, requestor=requestor, device=device
        )
        self.recognition_model = PaddleOCRRecognition(
            model_size=model_size, requestor=requestor, device=device
        )
        self.yolov9_detection_model = LicensePlateDetector(
            model_size=model_size, requestor=requestor, device=device
        )

        # Load all models once
        self.detection_model._load_model_and_utils()
        self.classification_model._load_model_and_utils()
        self.recognition_model._load_model_and_utils()
        self.yolov9_detection_model._load_model_and_utils()
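Aside: a hedged sketch of how this runner is meant to be shared, so the PaddleOCR and YOLOv9 models load only once per process. Here requestor, config, metrics, and detected_plates stand in for the objects the embeddings maintainer already holds; only the class names come from the diff:

model_runner = LicensePlateModelRunner(requestor, device="CPU", model_size="large")
realtime = LicensePlateRealTimeProcessor(config, metrics, model_runner, detected_plates)
post = LicensePlatePostProcessor(config, metrics, model_runner, detected_plates)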
@ -5,16 +5,22 @@ from abc import ABC, abstractmethod

from frigate.config import FrigateConfig

from ..types import DataProcessorMetrics, PostProcessDataEnum
from ..types import DataProcessorMetrics, DataProcessorModelRunner, PostProcessDataEnum

logger = logging.getLogger(__name__)


class PostProcessorApi(ABC):
    @abstractmethod
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
    def __init__(
        self,
        config: FrigateConfig,
        metrics: DataProcessorMetrics,
        model_runner: DataProcessorModelRunner,
    ) -> None:
        self.config = config
        self.metrics = metrics
        self.model_runner = model_runner
        pass

    @abstractmethod
224  frigate/data_processing/post/license_plate.py  Normal file
@ -0,0 +1,224 @@
"""Handle post processing for license plate recognition."""

import datetime
import logging

import cv2
import numpy as np
from peewee import DoesNotExist

from frigate.comms.embeddings_updater import EmbeddingsRequestEnum
from frigate.config import FrigateConfig
from frigate.data_processing.common.license_plate.mixin import (
    WRITE_DEBUG_IMAGES,
    LicensePlateProcessingMixin,
)
from frigate.data_processing.common.license_plate.model import (
    LicensePlateModelRunner,
)
from frigate.data_processing.types import PostProcessDataEnum
from frigate.models import Recordings
from frigate.util.image import get_image_from_recording

from ..types import DataProcessorMetrics
from .api import PostProcessorApi

logger = logging.getLogger(__name__)


class LicensePlatePostProcessor(LicensePlateProcessingMixin, PostProcessorApi):
    def __init__(
        self,
        config: FrigateConfig,
        metrics: DataProcessorMetrics,
        model_runner: LicensePlateModelRunner,
        detected_license_plates: dict[str, dict[str, any]],
    ):
        self.detected_license_plates = detected_license_plates
        self.model_runner = model_runner
        self.lpr_config = config.lpr
        self.config = config
        super().__init__(config, metrics, model_runner)

    def process_data(
        self, data: dict[str, any], data_type: PostProcessDataEnum
    ) -> None:
        """Look for license plates in recording stream image
        Args:
            data (dict): containing data about the input.
            data_type (enum): Describing the data that is being processed.

        Returns:
            None.
        """
        event_id = data["event_id"]
        camera_name = data["camera"]

        if data_type == PostProcessDataEnum.recording:
            obj_data = data["obj_data"]
            frame_time = obj_data["frame_time"]
            recordings_available_through = data["recordings_available"]

            if frame_time > recordings_available_through:
                logger.debug(
                    f"LPR post processing: No recordings available for this frame time {frame_time}, available through {recordings_available_through}"
                )

        elif data_type == PostProcessDataEnum.tracked_object:
            # non-functional, need to think about snapshot time
            obj_data = data["event"]["data"]
            obj_data["id"] = data["event"]["id"]
            obj_data["camera"] = data["event"]["camera"]
            # TODO: snapshot time?
            frame_time = data["event"]["start_time"]

        else:
            logger.error("No data type passed to LPR postprocessing")
            return

        recording_query = (
            Recordings.select(
                Recordings.path,
                Recordings.start_time,
            )
            .where(
                (
                    (frame_time >= Recordings.start_time)
                    & (frame_time <= Recordings.end_time)
                )
            )
            .where(Recordings.camera == camera_name)
            .order_by(Recordings.start_time.desc())
            .limit(1)
        )

        try:
            recording: Recordings = recording_query.get()
            time_in_segment = frame_time - recording.start_time
            codec = "mjpeg"

            image_data = get_image_from_recording(
                self.config.ffmpeg, recording.path, time_in_segment, codec, None
            )

            if not image_data:
                logger.debug(
                    "LPR post processing: Unable to fetch license plate from recording"
                )

            # Convert bytes to numpy array
            image_array = np.frombuffer(image_data, dtype=np.uint8)

            if len(image_array) == 0:
                logger.debug("LPR post processing: No image")
                return

            image = cv2.imdecode(image_array, cv2.IMREAD_COLOR)

        except DoesNotExist:
            logger.debug("Error fetching license plate for postprocessing")
            return

        if WRITE_DEBUG_IMAGES:
            cv2.imwrite(
                f"debug/frames/lpr_post_{datetime.datetime.now().timestamp()}.jpg",
                image,
            )

        # convert to yuv for processing
        frame = cv2.cvtColor(image, cv2.COLOR_BGR2YUV_I420)

        detect_width = self.config.cameras[camera_name].detect.width
        detect_height = self.config.cameras[camera_name].detect.height

        # Scale the boxes based on detect dimensions
        scale_x = image.shape[1] / detect_width
        scale_y = image.shape[0] / detect_height

        # Determine which box to enlarge based on detection mode
        if self.requires_license_plate_detection:
            # Scale and enlarge the car box
            box = obj_data.get("box")
            if not box:
                return

            # Scale original car box to detection dimensions
            left = int(box[0] * scale_x)
            top = int(box[1] * scale_y)
            right = int(box[2] * scale_x)
            bottom = int(box[3] * scale_y)
            box = [left, top, right, bottom]
        else:
            # Get the license plate box from attributes
            if not obj_data.get("current_attributes"):
                return

            license_plate = None
            for attr in obj_data["current_attributes"]:
                if attr.get("label") != "license_plate":
                    continue
                if license_plate is None or attr.get("score", 0.0) > license_plate.get(
                    "score", 0.0
                ):
                    license_plate = attr

            if not license_plate or not license_plate.get("box"):
                return

            # Scale license plate box to detection dimensions
            orig_box = license_plate["box"]
            left = int(orig_box[0] * scale_x)
            top = int(orig_box[1] * scale_y)
            right = int(orig_box[2] * scale_x)
            bottom = int(orig_box[3] * scale_y)
            box = [left, top, right, bottom]

        width_box = right - left
        height_box = bottom - top

        # Enlarge box slightly to account for drift in detect vs recording stream
        enlarge_factor = 0.3
        new_left = max(0, int(left - (width_box * enlarge_factor / 2)))
        new_top = max(0, int(top - (height_box * enlarge_factor / 2)))
        new_right = min(image.shape[1], int(right + (width_box * enlarge_factor / 2)))
        new_bottom = min(
            image.shape[0], int(bottom + (height_box * enlarge_factor / 2))
        )

        keyframe_obj_data = obj_data.copy()
        if self.requires_license_plate_detection:
            # car box
            keyframe_obj_data["box"] = [new_left, new_top, new_right, new_bottom]
        else:
            # Update the license plate box in the attributes
            new_attributes = []
            for attr in obj_data["current_attributes"]:
                if attr.get("label") == "license_plate":
                    new_attr = attr.copy()
                    new_attr["box"] = [new_left, new_top, new_right, new_bottom]
                    new_attributes.append(new_attr)
                else:
                    new_attributes.append(attr)
            keyframe_obj_data["current_attributes"] = new_attributes

        # run the frame through lpr processing
        logger.debug(f"Post processing plate: {event_id}, {frame_time}")
        self.lpr_process(keyframe_obj_data, frame)

    def handle_request(self, topic, request_data) -> dict[str, any] | None:
        if topic == EmbeddingsRequestEnum.reprocess_plate.value:
            event = request_data["event"]

            self.process_data(
                {
                    "event_id": event["id"],
                    "camera": event["camera"],
                    "event": event,
                },
                PostProcessDataEnum.tracked_object,
            )

            return {
                "message": "Successfully requested reprocessing of license plate.",
                "success": True,
            }
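Aside: a worked example of the detect-to-recording scaling above; all numbers are illustrative, not from the diff:

# A 1280x720 detect stream against a 2560x1440 recording keyframe gives
# scale_x = 2560 / 1280 = 2.0 and scale_y = 1440 / 720 = 2.0, so a detect-space
# plate box [100, 200, 180, 240] lands at [200, 400, 360, 480]; the 0.3
# enlarge_factor then grows it to [176, 388, 384, 492] before cropping.
scale_x, scale_y = 2560 / 1280, 1440 / 720
scaled_box = [
    int(v * s)
    for v, s in zip((100, 200, 180, 240), (scale_x, scale_y, scale_x, scale_y))
]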
@ -14,7 +14,11 @@ logger = logging.getLogger(__name__)

class RealTimeProcessorApi(ABC):
    @abstractmethod
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
    def __init__(
        self,
        config: FrigateConfig,
        metrics: DataProcessorMetrics,
    ) -> None:
        self.config = config
        self.metrics = metrics
        pass

@ -22,7 +22,7 @@ except ModuleNotFoundError:
logger = logging.getLogger(__name__)


class BirdProcessor(RealTimeProcessorApi):
class BirdRealTimeProcessor(RealTimeProcessorApi):
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
        super().__init__(config, metrics)
        self.interpreter: Interpreter = None
@ -27,7 +27,7 @@ logger = logging.getLogger(__name__)
MIN_MATCHING_FACES = 2


class FaceProcessor(RealTimeProcessorApi):
class FaceRealTimeProcessor(RealTimeProcessorApi):
    def __init__(self, config: FrigateConfig, metrics: DataProcessorMetrics):
        super().__init__(config, metrics)
        self.face_config = config.face_recognition
@ -76,14 +76,16 @@ class FaceProcessor(RealTimeProcessorApi):

    def __build_detector(self) -> None:
        self.face_detector = cv2.FaceDetectorYN.create(
            "/config/model_cache/facedet/facedet.onnx",
            os.path.join(MODEL_CACHE_DIR, "facedet/facedet.onnx"),
            config="",
            input_size=(320, 320),
            score_threshold=0.8,
            nms_threshold=0.3,
        )
        self.landmark_detector = cv2.face.createFacemarkLBF()
        self.landmark_detector.loadModel("/config/model_cache/facedet/landmarkdet.yaml")
        self.landmark_detector.loadModel(
            os.path.join(MODEL_CACHE_DIR, "facedet/landmarkdet.yaml")
        )

    def __build_classifier(self) -> None:
        if not self.landmark_detector:
44  frigate/data_processing/real_time/license_plate.py  Normal file
@ -0,0 +1,44 @@
"""Handle processing images for license plate detection and recognition."""

import logging

import numpy as np

from frigate.config import FrigateConfig
from frigate.data_processing.common.license_plate.mixin import (
    LicensePlateProcessingMixin,
)
from frigate.data_processing.common.license_plate.model import (
    LicensePlateModelRunner,
)

from ..types import DataProcessorMetrics
from .api import RealTimeProcessorApi

logger = logging.getLogger(__name__)


class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcessorApi):
    def __init__(
        self,
        config: FrigateConfig,
        metrics: DataProcessorMetrics,
        model_runner: LicensePlateModelRunner,
        detected_license_plates: dict[str, dict[str, any]],
    ):
        self.detected_license_plates = detected_license_plates
        self.model_runner = model_runner
        self.lpr_config = config.lpr
        self.config = config
        super().__init__(config, metrics)

    def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
        """Look for license plates in image."""
        self.lpr_process(obj_data, frame)

    def handle_request(self, topic, request_data) -> dict[str, any] | None:
        return

    def expire_object(self, object_id: str):
        if object_id in self.detected_license_plates:
            self.detected_license_plates.pop(object_id)
@ -10,12 +10,21 @@ class DataProcessorMetrics:
    text_embeddings_sps: Synchronized
    face_rec_fps: Synchronized
    alpr_pps: Synchronized
    yolov9_lpr_fps: Synchronized

    def __init__(self):
        self.image_embeddings_fps = mp.Value("d", 0.01)
        self.text_embeddings_sps = mp.Value("d", 0.01)
        self.face_rec_fps = mp.Value("d", 0.01)
        self.alpr_pps = mp.Value("d", 0.01)
        self.yolov9_lpr_fps = mp.Value("d", 0.01)


class DataProcessorModelRunner:
    def __init__(self, requestor, device: str = "CPU", model_size: str = "large"):
        self.requestor = requestor
        self.device = device
        self.model_size = model_size


class PostProcessDataEnum(str, Enum):
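Aside: a minimal sketch of why these fields are typed Synchronized. mp.Value("d", ...) allocates a process-shared double, so updates made in the embeddings process are visible wherever the metrics object was passed; the 0.05 sample below is a placeholder duration:

import multiprocessing as mp

alpr_pps = mp.Value("d", 0.01)
with alpr_pps.get_lock():  # guard the read-modify-write across processes
    alpr_pps.value = (alpr_pps.value * 9 + 0.05) / 10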
@ -9,7 +9,7 @@ import requests
from pydantic import BaseModel, ConfigDict, Field
from pydantic.fields import PrivateAttr

from frigate.const import DEFAULT_ATTRIBUTE_LABEL_MAP
from frigate.const import DEFAULT_ATTRIBUTE_LABEL_MAP, MODEL_CACHE_DIR
from frigate.plus import PlusApi
from frigate.util.builtin import generate_color_palette, load_labels

@ -37,6 +37,7 @@ class ModelTypeEnum(str, Enum):
    yolox = "yolox"
    yolov9 = "yolov9"
    yolonas = "yolonas"
    dfine = "dfine"
    hailoyolo = "hailo-yolo"


@ -123,7 +124,7 @@ class ModelConfig(BaseModel):
            return

        model_id = self.path[7:]
        self.path = f"/config/model_cache/{model_id}"
        self.path = os.path.join(MODEL_CACHE_DIR, model_id)
        model_info_path = f"{self.path}.json"

        # download the model if it doesn't exist
@ -25,6 +25,8 @@ except ModuleNotFoundError:

from pydantic import BaseModel, Field
from typing_extensions import Literal

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum, InputTensorEnum, PixelFormatEnum, InputDTypeEnum
from PIL import Image, ImageDraw, ImageFont
@ -9,7 +9,11 @@ from frigate.detectors.detector_config import (
    BaseDetectorConfig,
    ModelTypeEnum,
)
from frigate.util.model import get_ort_providers, post_process_yolov9
from frigate.util.model import (
    get_ort_providers,
    post_process_dfine,
    post_process_yolov9,
)

logger = logging.getLogger(__name__)

@ -41,6 +45,7 @@ class ONNXDetector(DetectionApi):
        providers, options = get_ort_providers(
            detector_config.device == "CPU", detector_config.device
        )

        self.model = ort.InferenceSession(
            path, providers=providers, provider_options=options
        )
@ -55,6 +60,16 @@ class ONNXDetector(DetectionApi):
        logger.info(f"ONNX: {path} loaded")

    def detect_raw(self, tensor_input: np.ndarray):
        if self.onnx_model_type == ModelTypeEnum.dfine:
            tensor_output = self.model.run(
                None,
                {
                    "images": tensor_input,
                    "orig_target_sizes": np.array([[self.h, self.w]], dtype=np.int64),
                },
            )
            return post_process_dfine(tensor_output, self.w, self.h)

        model_input_name = self.model.get_inputs()[0].name
        tensor_output = self.model.run(None, {model_input_name: tensor_input})

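Aside: a standalone sketch of the two-input D-FINE call added above. Unlike the single-input YOLO path, the exported model takes both the image tensor and the original target size; "model.onnx" and the 320x320 shape are placeholder values, the real ones come from the detector config:

import numpy as np
import onnxruntime as ort

session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
tensor_input = np.zeros((1, 3, 320, 320), dtype=np.float32)
outputs = session.run(
    None,
    {
        "images": tensor_input,
        "orig_target_sizes": np.array([[320, 320]], dtype=np.int64),
    },
)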
@ -7,6 +7,7 @@ import openvino.properties as props
from pydantic import Field
from typing_extensions import Literal

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
from frigate.util.model import post_process_yolov9
@ -41,8 +42,10 @@ class OvDetector(DetectionApi):
            logger.error(f"OpenVino model file {detector_config.model.path} not found.")
            raise FileNotFoundError

        os.makedirs("/config/model_cache/openvino", exist_ok=True)
        self.ov_core.set_property({props.cache_dir: "/config/model_cache/openvino"})
        os.makedirs(os.path.join(MODEL_CACHE_DIR, "openvino"), exist_ok=True)
        self.ov_core.set_property(
            {props.cache_dir: os.path.join(MODEL_CACHE_DIR, "openvino")}
        )
        self.interpreter = self.ov_core.compile_model(
            model=detector_config.model.path, device_name=detector_config.device
        )
@ -6,6 +6,7 @@ from typing import Literal

from pydantic import Field

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum

@ -17,7 +18,7 @@ supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]

supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}

model_cache_dir = "/config/model_cache/rknn_cache/"
model_cache_dir = os.path.join(MODEL_CACHE_DIR, "rknn_cache/")


class RknnDetectorConfig(BaseDetectorConfig):
@ -9,6 +9,7 @@ import numpy as np
from pydantic import Field
from typing_extensions import Literal

from frigate.const import MODEL_CACHE_DIR
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import (
    BaseDetectorConfig,
@ -116,7 +117,7 @@ class ROCmDetector(DetectionApi):

            logger.info(f"AMD/ROCm: saving parsed model into {mxr_path}")

            os.makedirs("/config/model_cache/rocm", exist_ok=True)
            os.makedirs(os.path.join(MODEL_CACHE_DIR, "rocm"), exist_ok=True)
            migraphx.save(self.model, mxr_path)

        logger.info("AMD/ROCm: model loaded")
@ -17,7 +17,7 @@ from frigate.config import FrigateConfig
from frigate.const import CONFIG_DIR, FACE_DIR
from frigate.data_processing.types import DataProcessorMetrics
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.models import Event, Recordings
from frigate.util.builtin import serialize
from frigate.util.services import listen

@ -28,10 +28,6 @@ logger = logging.getLogger(__name__)


def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> None:
    # Only initialize embeddings if semantic search is enabled
    if not config.semantic_search.enabled:
        return

    stop_event = mp.Event()

    def receiveSignal(signalNumber: int, frame: Optional[FrameType]) -> None:
@ -55,7 +51,7 @@ def manage_embeddings(config: FrigateConfig, metrics: DataProcessorMetrics) -> N
        timeout=max(60, 10 * len([c for c in config.cameras.values() if c.enabled])),
        load_vec_extension=True,
    )
    models = [Event]
    models = [Event, Recordings]
    db.bind(models)

    maintainer = EmbeddingMaintainer(
@ -234,3 +230,8 @@ class EmbeddingsContext:
            EmbeddingsRequestEnum.embed_description.value,
            {"id": event_id, "description": description},
        )

    def reprocess_plate(self, event: dict[str, any]) -> dict[str, any]:
        return self.requestor.send_data(
            EmbeddingsRequestEnum.reprocess_plate.value, {"event": event}
        )
@ -1,6 +1,5 @@
"""SQLite-vec embeddings database."""

import base64
import datetime
import logging
import os
@ -11,6 +10,7 @@ from playhouse.shortcuts import model_to_dict

from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import FrigateConfig
from frigate.config.classification import SemanticSearchModelEnum
from frigate.const import (
    CONFIG_DIR,
    UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
@ -21,8 +21,10 @@ from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event
from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import serialize
from frigate.util.path import get_event_thumbnail_bytes

from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
from .onnx.jina_v2_embedding import JinaV2Embedding

logger = logging.getLogger(__name__)

@ -75,18 +77,7 @@ class Embeddings:
        # Create tables if they don't exist
        self.db.create_embeddings_tables()

        models = [
            "jinaai/jina-clip-v1-text_model_fp16.onnx",
            "jinaai/jina-clip-v1-tokenizer",
            "jinaai/jina-clip-v1-vision_model_fp16.onnx"
            if config.semantic_search.model_size == "large"
            else "jinaai/jina-clip-v1-vision_model_quantized.onnx",
            "jinaai/jina-clip-v1-preprocessor_config.json",
            "facenet-facenet.onnx",
            "paddleocr-onnx-detection.onnx",
            "paddleocr-onnx-classification.onnx",
            "paddleocr-onnx-recognition.onnx",
        ]
        models = self.get_model_definitions()

        for model in models:
            self.requestor.send_data(
@ -97,39 +88,64 @@ class Embeddings:
                },
            )

        self.text_embedding = GenericONNXEmbedding(
            model_name="jinaai/jina-clip-v1",
            model_file="text_model_fp16.onnx",
            tokenizer_file="tokenizer",
            download_urls={
                "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
            },
            model_size=config.semantic_search.model_size,
            model_type=ModelTypeEnum.text,
            requestor=self.requestor,
            device="CPU",
        if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
            # Single JinaV2Embedding instance for both text and vision
            self.embedding = JinaV2Embedding(
                model_size=self.config.semantic_search.model_size,
                requestor=self.requestor,
                device="GPU"
                if self.config.semantic_search.model_size == "large"
                else "CPU",
            )
            self.text_embedding = lambda input_data: self.embedding(
                input_data, embedding_type="text"
            )
            self.vision_embedding = lambda input_data: self.embedding(
                input_data, embedding_type="vision"
            )
        else:  # Default to jinav1
            self.text_embedding = JinaV1TextEmbedding(
                model_size=config.semantic_search.model_size,
                requestor=self.requestor,
                device="CPU",
            )
            self.vision_embedding = JinaV1ImageEmbedding(
                model_size=config.semantic_search.model_size,
                requestor=self.requestor,
                device="GPU" if config.semantic_search.model_size == "large" else "CPU",
            )

    def get_model_definitions(self):
        # Version-specific models
        if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
            models = [
                "jinaai/jina-clip-v2-tokenizer",
                "jinaai/jina-clip-v2-model_fp16.onnx"
                if self.config.semantic_search.model_size == "large"
                else "jinaai/jina-clip-v2-model_quantized.onnx",
                "jinaai/jina-clip-v2-preprocessor_config.json",
            ]
        else:  # Default to jinav1
            models = [
                "jinaai/jina-clip-v1-text_model_fp16.onnx",
                "jinaai/jina-clip-v1-tokenizer",
                "jinaai/jina-clip-v1-vision_model_fp16.onnx"
                if self.config.semantic_search.model_size == "large"
                else "jinaai/jina-clip-v1-vision_model_quantized.onnx",
                "jinaai/jina-clip-v1-preprocessor_config.json",
            ]

        # Add common models
        models.extend(
            [
                "facenet-facenet.onnx",
                "paddleocr-onnx-detection.onnx",
                "paddleocr-onnx-classification.onnx",
                "paddleocr-onnx-recognition.onnx",
            ]
        )

        model_file = (
            "vision_model_fp16.onnx"
            if self.config.semantic_search.model_size == "large"
            else "vision_model_quantized.onnx"
        )

        download_urls = {
            model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
            "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
        }

        self.vision_embedding = GenericONNXEmbedding(
            model_name="jinaai/jina-clip-v1",
            model_file=model_file,
            download_urls=download_urls,
            model_size=config.semantic_search.model_size,
            model_type=ModelTypeEnum.vision,
            requestor=self.requestor,
            device="GPU" if config.semantic_search.model_size == "large" else "CPU",
        )
        return models

    def embed_thumbnail(
        self, event_id: str, thumbnail: bytes, upsert: bool = True
@ -264,16 +280,13 @@ class Embeddings:
        st = time.time()

        # Get total count of events to process
        total_events = (
            Event.select()
            .where(
                (Event.has_clip == True | Event.has_snapshot == True)
                & Event.thumbnail.is_null(False)
            )
            .count()
        )
        total_events = Event.select().count()

        batch_size = 32
        batch_size = (
            4
            if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2
            else 32
        )
        current_page = 1

        totals = {
@ -289,10 +302,6 @@ class Embeddings:

        events = (
            Event.select()
            .where(
                (Event.has_clip == True | Event.has_snapshot == True)
                & Event.thumbnail.is_null(False)
            )
            .order_by(Event.start_time.desc())
            .paginate(current_page, batch_size)
        )
@ -302,7 +311,12 @@ class Embeddings:
            batch_thumbs = {}
            batch_descs = {}
            for event in events:
                batch_thumbs[event.id] = base64.b64decode(event.thumbnail)
                thumbnail = get_event_thumbnail_bytes(event)

                if thumbnail is None:
                    continue

                batch_thumbs[event.id] = thumbnail
                totals["thumbnails"] += 1

                if description := event.data.get("description", "").strip():
@ -341,10 +355,6 @@ class Embeddings:
            current_page += 1
            events = (
                Event.select()
                .where(
                    (Event.has_clip == True | Event.has_snapshot == True)
                    & Event.thumbnail.is_null(False)
                )
                .order_by(Event.start_time.desc())
                .paginate(current_page, batch_size)
            )
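Aside: a sketch of the pagination pattern the reindex loop above relies on, assuming peewee's paginate(page, rows) and the Event model from the diff; the jina-v2 batch is smaller because that model is heavier:

def iter_event_pages(jina_v2: bool):
    batch_size = 4 if jina_v2 else 32  # smaller batches for the heavier model
    page = 1
    while True:
        events = list(
            Event.select().order_by(Event.start_time.desc()).paginate(page, batch_size)
        )
        if not events:
            return
        yield events
        page += 1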
@ -1,325 +0,0 @@
import logging
import os
import warnings
from enum import Enum
from io import BytesIO
from typing import Dict, List, Optional, Union

import cv2
import numpy as np
import requests
from PIL import Image

# importing this without pytorch or others causes a warning
# https://github.com/huggingface/transformers/issues/27214
# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1
from transformers import AutoFeatureExtractor, AutoTokenizer
from transformers.utils.logging import disable_progress_bar

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader
from frigate.util.model import ONNXModelRunner

warnings.filterwarnings(
    "ignore",
    category=FutureWarning,
    message="The class CLIPFeatureExtractor is deprecated",
)

# disables the progress bar for downloading tokenizers and feature extractors
disable_progress_bar()
logger = logging.getLogger(__name__)

FACE_EMBEDDING_SIZE = 160
LPR_EMBEDDING_SIZE = 256


class ModelTypeEnum(str, Enum):
    face = "face"
    vision = "vision"
    text = "text"
    lpr_detect = "lpr_detect"
    lpr_classify = "lpr_classify"
    lpr_recognize = "lpr_recognize"
    yolov9_lpr_detect = "yolov9_lpr_detect"


class GenericONNXEmbedding:
    """Generic embedding function for ONNX models (text and vision)."""

    def __init__(
        self,
        model_name: str,
        model_file: str,
        download_urls: Dict[str, str],
        model_size: str,
        model_type: ModelTypeEnum,
        requestor: InterProcessRequestor,
        tokenizer_file: Optional[str] = None,
        device: str = "AUTO",
    ):
        self.model_name = model_name
        self.model_file = model_file
        self.tokenizer_file = tokenizer_file
        self.requestor = requestor
        self.download_urls = download_urls
        self.model_type = model_type
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.tokenizer = None
        self.feature_extractor = None
        self.runner = None
        files_names = list(self.download_urls.keys()) + (
            [self.tokenizer_file] if self.tokenizer_file else []
        )

        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _download_model(self, path: str):
        try:
            file_name = os.path.basename(path)

            if file_name in self.download_urls:
                ModelDownloader.download_from_url(self.download_urls[file_name], path)
            elif (
                file_name == self.tokenizer_file
                and self.model_type == ModelTypeEnum.text
            ):
                if not os.path.exists(path + "/" + self.model_name):
                    logger.info(f"Downloading {self.model_name} tokenizer")

                tokenizer = AutoTokenizer.from_pretrained(
                    self.model_name,
                    trust_remote_code=True,
                    cache_dir=f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer",
                    clean_up_tokenization_spaces=True,
                )
                tokenizer.save_pretrained(path)

            self.downloader.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.downloaded,
                },
            )
        except Exception:
            self.downloader.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.error,
                },
            )

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()
            if self.model_type == ModelTypeEnum.text:
                self.tokenizer = self._load_tokenizer()
            elif self.model_type == ModelTypeEnum.vision:
                self.feature_extractor = self._load_feature_extractor()
            elif self.model_type == ModelTypeEnum.face:
                self.feature_extractor = []
            elif self.model_type == ModelTypeEnum.lpr_detect:
                self.feature_extractor = []
            elif self.model_type == ModelTypeEnum.lpr_classify:
                self.feature_extractor = []
            elif self.model_type == ModelTypeEnum.lpr_recognize:
                self.feature_extractor = []
            elif self.model_type == ModelTypeEnum.yolov9_lpr_detect:
                self.feature_extractor = []

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _load_tokenizer(self):
        tokenizer_path = os.path.join(f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer")
        return AutoTokenizer.from_pretrained(
            self.model_name,
            cache_dir=tokenizer_path,
            trust_remote_code=True,
            clean_up_tokenization_spaces=True,
        )

    def _load_feature_extractor(self):
        return AutoFeatureExtractor.from_pretrained(
            f"{MODEL_CACHE_DIR}/{self.model_name}",
        )

    def _preprocess_inputs(self, raw_inputs: any) -> any:
        if self.model_type == ModelTypeEnum.text:
            max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
            return [
                self.tokenizer(
                    text,
                    padding="max_length",
                    truncation=True,
                    max_length=max_length,
                    return_tensors="np",
                )
                for text in raw_inputs
            ]
        elif self.model_type == ModelTypeEnum.vision:
            processed_images = [self._process_image(img) for img in raw_inputs]
            return [
                self.feature_extractor(images=image, return_tensors="np")
                for image in processed_images
            ]
        elif self.model_type == ModelTypeEnum.face:
            if isinstance(raw_inputs, list):
                raise ValueError("Face embedding does not support batch inputs.")

            pil = self._process_image(raw_inputs)

            # handle images larger than input size
            width, height = pil.size
            if width != FACE_EMBEDDING_SIZE or height != FACE_EMBEDDING_SIZE:
                if width > height:
                    new_height = int(((height / width) * FACE_EMBEDDING_SIZE) // 4 * 4)
                    pil = pil.resize((FACE_EMBEDDING_SIZE, new_height))
                else:
                    new_width = int(((width / height) * FACE_EMBEDDING_SIZE) // 4 * 4)
                    pil = pil.resize((new_width, FACE_EMBEDDING_SIZE))

            og = np.array(pil).astype(np.float32)

            # Image must be FACE_EMBEDDING_SIZExFACE_EMBEDDING_SIZE
            og_h, og_w, channels = og.shape
            frame = np.full(
                (FACE_EMBEDDING_SIZE, FACE_EMBEDDING_SIZE, channels),
                (0, 0, 0),
                dtype=np.float32,
            )

            # compute center offset
            x_center = (FACE_EMBEDDING_SIZE - og_w) // 2
            y_center = (FACE_EMBEDDING_SIZE - og_h) // 2

            # copy img image into center of result image
            frame[y_center : y_center + og_h, x_center : x_center + og_w] = og
            frame = np.expand_dims(frame, axis=0)
            return [{"input_2": frame}]
        elif self.model_type == ModelTypeEnum.lpr_detect:
            preprocessed = []
            for x in raw_inputs:
                preprocessed.append(x)
            return [{"x": preprocessed[0]}]
        elif self.model_type == ModelTypeEnum.lpr_classify:
            processed = []
            for img in raw_inputs:
                processed.append({"x": img})
            return processed
        elif self.model_type == ModelTypeEnum.lpr_recognize:
            processed = []
            for img in raw_inputs:
                processed.append({"x": img})
            return processed
        elif self.model_type == ModelTypeEnum.yolov9_lpr_detect:
            if isinstance(raw_inputs, list):
                raise ValueError(
                    "License plate embedding does not support batch inputs."
                )
            # Get image as numpy array
            img = self._process_image(raw_inputs)
            height, width, channels = img.shape

            # Resize maintaining aspect ratio
            if width > height:
                new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4)
                img = cv2.resize(img, (LPR_EMBEDDING_SIZE, new_height))
            else:
                new_width = int(((width / height) * LPR_EMBEDDING_SIZE) // 4 * 4)
                img = cv2.resize(img, (new_width, LPR_EMBEDDING_SIZE))

            # Get new dimensions after resize
            og_h, og_w, channels = img.shape

            # Create black square frame
            frame = np.full(
                (LPR_EMBEDDING_SIZE, LPR_EMBEDDING_SIZE, channels),
                (0, 0, 0),
                dtype=np.float32,
            )

            # Center the resized image in the square frame
            x_center = (LPR_EMBEDDING_SIZE - og_w) // 2
            y_center = (LPR_EMBEDDING_SIZE - og_h) // 2
            frame[y_center : y_center + og_h, x_center : x_center + og_w] = img

            # Normalize to 0-1
            frame = frame / 255.0

            # Convert from HWC to CHW format and add batch dimension
            frame = np.transpose(frame, (2, 0, 1))
            frame = np.expand_dims(frame, axis=0)
            return [{"images": frame}]
        else:
            raise ValueError(f"Unable to preprocess inputs for {self.model_type}")

    def _process_image(self, image, output: str = "RGB") -> Image.Image:
        if isinstance(image, str):
            if image.startswith("http"):
                response = requests.get(image)
                image = Image.open(BytesIO(response.content)).convert(output)
        elif isinstance(image, bytes):
            image = Image.open(BytesIO(image)).convert(output)

        return image

    def __call__(
        self, inputs: Union[List[str], List[Image.Image], List[str]]
    ) -> List[np.ndarray]:
        self._load_model_and_utils()
        if self.runner is None or (
            self.tokenizer is None and self.feature_extractor is None
        ):
            logger.error(
                f"{self.model_name} model or tokenizer/feature extractor is not loaded."
            )
            return []

        processed_inputs = self._preprocess_inputs(inputs)
        input_names = self.runner.get_input_names()
        onnx_inputs = {name: [] for name in input_names}
        input: dict[str, any]
        for input in processed_inputs:
            for key, value in input.items():
                if key in input_names:
                    onnx_inputs[key].append(value[0])

        for key in input_names:
            if onnx_inputs.get(key):
                onnx_inputs[key] = np.stack(onnx_inputs[key])
            else:
                logger.warning(f"Expected input '{key}' not found in onnx_inputs")

        embeddings = self.runner.run(onnx_inputs)[0]
        return [embedding for embedding in embeddings]
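Aside: a compact sketch of the letterbox preprocessing the deleted yolov9_lpr_detect branch performed (resize preserving aspect ratio, paste onto a black square canvas, scale to 0-1, reorder HWC to NCHW); the helper name and 256 default are illustrative, the arithmetic mirrors the removed code:

import cv2
import numpy as np

def letterbox_chw(img: np.ndarray, size: int = 256) -> np.ndarray:
    h, w = img.shape[:2]
    if w > h:
        img = cv2.resize(img, (size, int(((h / w) * size) // 4 * 4)))
    else:
        img = cv2.resize(img, (int(((w / h) * size) // 4 * 4), size))
    canvas = np.zeros((size, size, img.shape[2]), dtype=np.float32)
    oh, ow = img.shape[:2]
    y, x = (size - oh) // 2, (size - ow) // 2
    canvas[y : y + oh, x : x + ow] = img  # center the resized image
    return np.expand_dims(np.transpose(canvas / 255.0, (2, 0, 1)), axis=0)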
@ -20,24 +20,36 @@ from frigate.comms.event_metadata_updater import (
)
from frigate.comms.events_updater import EventEndSubscriber, EventUpdateSubscriber
from frigate.comms.inter_process import InterProcessRequestor
from frigate.comms.recordings_updater import (
    RecordingsDataSubscriber,
    RecordingsDataTypeEnum,
)
from frigate.config import FrigateConfig
from frigate.const import (
    CLIPS_DIR,
    UPDATE_EVENT_DESCRIPTION,
)
from frigate.data_processing.real_time.api import RealTimeProcessorApi
from frigate.data_processing.real_time.bird_processor import BirdProcessor
from frigate.data_processing.real_time.face_processor import FaceProcessor
from frigate.data_processing.real_time.license_plate_processor import (
    LicensePlateProcessor,
from frigate.data_processing.common.license_plate.model import (
    LicensePlateModelRunner,
)
from frigate.data_processing.types import DataProcessorMetrics
from frigate.data_processing.post.api import PostProcessorApi
from frigate.data_processing.post.license_plate import (
    LicensePlatePostProcessor,
)
from frigate.data_processing.real_time.api import RealTimeProcessorApi
from frigate.data_processing.real_time.bird import BirdRealTimeProcessor
from frigate.data_processing.real_time.face import FaceRealTimeProcessor
from frigate.data_processing.real_time.license_plate import (
    LicensePlateRealTimeProcessor,
)
from frigate.data_processing.types import DataProcessorMetrics, PostProcessDataEnum
from frigate.events.types import EventTypeEnum
from frigate.genai import get_genai_client
from frigate.models import Event
from frigate.types import TrackedObjectUpdateTypesEnum
from frigate.util.builtin import serialize
from frigate.util.image import SharedMemoryFrameManager, calculate_region
from frigate.util.path import get_event_thumbnail_bytes

from .embeddings import Embeddings

@ -59,46 +71,80 @@ class EmbeddingMaintainer(threading.Thread):
        super().__init__(name="embeddings_maintainer")
        self.config = config
        self.metrics = metrics
        self.embeddings = Embeddings(config, db, metrics)
        self.embeddings = None

        # Check if we need to re-index events
        if config.semantic_search.reindex:
            self.embeddings.reindex()
        if config.semantic_search.enabled:
            self.embeddings = Embeddings(config, db, metrics)

            # Check if we need to re-index events
            if config.semantic_search.reindex:
                self.embeddings.reindex()

        # create communication for updating event descriptions
        self.requestor = InterProcessRequestor()

        self.event_subscriber = EventUpdateSubscriber()
        self.event_end_subscriber = EventEndSubscriber()
        self.event_metadata_subscriber = EventMetadataSubscriber(
            EventMetadataTypeEnum.regenerate_description
        )
        self.recordings_subscriber = RecordingsDataSubscriber(
            RecordingsDataTypeEnum.recordings_available_through
        )
        self.embeddings_responder = EmbeddingsResponder()
        self.frame_manager = SharedMemoryFrameManager()
        self.processors: list[RealTimeProcessorApi] = []

        self.detected_license_plates: dict[str, dict[str, any]] = {}

        # model runners to share between realtime and post processors
        if self.config.lpr.enabled:
            lpr_model_runner = LicensePlateModelRunner(self.requestor)

        # realtime processors
        self.realtime_processors: list[RealTimeProcessorApi] = []

        if self.config.face_recognition.enabled:
            self.processors.append(FaceProcessor(self.config, metrics))
            self.realtime_processors.append(FaceRealTimeProcessor(self.config, metrics))

        if self.config.classification.bird.enabled:
            self.processors.append(BirdProcessor(self.config, metrics))
            self.realtime_processors.append(BirdRealTimeProcessor(self.config, metrics))

        if self.config.lpr.enabled:
            self.processors.append(LicensePlateProcessor(self.config, metrics))
            self.realtime_processors.append(
                LicensePlateRealTimeProcessor(
                    self.config, metrics, lpr_model_runner, self.detected_license_plates
                )
            )

        # post processors
        self.post_processors: list[PostProcessorApi] = []

        if self.config.lpr.enabled:
            self.post_processors.append(
                LicensePlatePostProcessor(
                    self.config, metrics, lpr_model_runner, self.detected_license_plates
                )
            )

        # create communication for updating event descriptions
        self.requestor = InterProcessRequestor()
        self.stop_event = stop_event
        self.tracked_events: dict[str, list[any]] = {}
        self.genai_client = get_genai_client(config)

        # recordings data
        self.recordings_available_through: dict[str, float] = {}

    def run(self) -> None:
        """Maintain a SQLite-vec database for semantic search."""
        while not self.stop_event.is_set():
            self._process_requests()
            self._process_updates()
            self._process_recordings_updates()
            self._process_finalized()
            self._process_event_metadata()

        self.event_subscriber.stop()
        self.event_end_subscriber.stop()
        self.recordings_subscriber.stop()
        self.event_metadata_subscriber.stop()
        self.embeddings_responder.stop()
        self.requestor.stop()
@ -109,32 +155,34 @@ class EmbeddingMaintainer(threading.Thread):

        def _handle_request(topic: str, data: dict[str, any]) -> str:
            try:
                if topic == EmbeddingsRequestEnum.embed_description.value:
                    return serialize(
                        self.embeddings.embed_description(
                            data["id"], data["description"]
                        ),
                        pack=False,
                    )
                elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
                    thumbnail = base64.b64decode(data["thumbnail"])
                    return serialize(
                        self.embeddings.embed_thumbnail(data["id"], thumbnail),
                        pack=False,
                    )
                elif topic == EmbeddingsRequestEnum.generate_search.value:
                    return serialize(
                        self.embeddings.embed_description("", data, upsert=False),
                        pack=False,
                    )
                else:
                    for processor in self.processors:
                # First handle the embedding-specific topics when semantic search is enabled
                if self.config.semantic_search.enabled:
                    if topic == EmbeddingsRequestEnum.embed_description.value:
                        return serialize(
                            self.embeddings.embed_description(
                                data["id"], data["description"]
                            ),
                            pack=False,
                        )
                    elif topic == EmbeddingsRequestEnum.embed_thumbnail.value:
                        thumbnail = base64.b64decode(data["thumbnail"])
                        return serialize(
                            self.embeddings.embed_thumbnail(data["id"], thumbnail),
                            pack=False,
                        )
                    elif topic == EmbeddingsRequestEnum.generate_search.value:
                        return serialize(
                            self.embeddings.embed_description("", data, upsert=False),
                            pack=False,
                        )
                processors = [self.realtime_processors, self.post_processors]
                for processor_list in processors:
                    for processor in processor_list:
                        resp = processor.handle_request(topic, data)

                        if resp is not None:
                            return resp
            except Exception as e:
                logger.error(f"Unable to handle embeddings request {e}")
                logger.error(f"Unable to handle embeddings request {e}", exc_info=True)

        self.embeddings_responder.check_for_request(_handle_request)

@ -153,7 +201,7 @@ class EmbeddingMaintainer(threading.Thread):
        camera_config = self.config.cameras[camera]

        # no need to process updated objects if face recognition, lpr, genai are disabled
        if not camera_config.genai.enabled and len(self.processors) == 0:
        if not camera_config.genai.enabled and len(self.realtime_processors) == 0:
            return

        # Create our own thumbnail based on the bounding box and the frame time
@ -170,7 +218,7 @@ class EmbeddingMaintainer(threading.Thread):
            )
            return

        for processor in self.processors:
        for processor in self.realtime_processors:
            processor.process_frame(data, yuv_frame)

        # no need to save our own thumbnails if genai is not enabled
@ -201,7 +249,32 @@ class EmbeddingMaintainer(threading.Thread):
            event_id, camera, updated_db = ended
            camera_config = self.config.cameras[camera]

            for processor in self.processors:
            # call any defined post processors
            for processor in self.post_processors:
                if isinstance(processor, LicensePlatePostProcessor):
                    recordings_available = self.recordings_available_through.get(camera)
                    if (
                        recordings_available is not None
                        and event_id in self.detected_license_plates
                    ):
                        processor.process_data(
                            {
                                "event_id": event_id,
                                "camera": camera,
                                "recordings_available": self.recordings_available_through[
                                    camera
                                ],
                                "obj_data": self.detected_license_plates[event_id][
                                    "obj_data"
                                ],
                            },
                            PostProcessDataEnum.recording,
                        )
                else:
                    processor.process_data(event_id, PostProcessDataEnum.event_id)

            # expire in realtime processors
            for processor in self.realtime_processors:
                processor.expire_object(event_id)

            if updated_db:
@ -215,7 +288,7 @@ class EmbeddingMaintainer(threading.Thread):
                    continue

                # Extract valid thumbnail
                thumbnail = base64.b64decode(event.thumbnail)
                thumbnail = get_event_thumbnail_bytes(event)

                # Embed the thumbnail
                self._embed_thumbnail(event_id, thumbnail)
@ -314,6 +387,24 @@ class EmbeddingMaintainer(threading.Thread):
        if event_id in self.tracked_events:
            del self.tracked_events[event_id]

    def _process_recordings_updates(self) -> None:
        """Process recordings updates."""
        while True:
            recordings_data = self.recordings_subscriber.check_for_update(timeout=0.01)

            if recordings_data is None:
                break

            camera, recordings_available_through_timestamp = recordings_data

            self.recordings_available_through[camera] = (
                recordings_available_through_timestamp
            )

            logger.debug(
                f"{camera} now has recordings available through {recordings_available_through_timestamp}"
            )

    def _process_event_metadata(self):
        # Check for regenerate description requests
        (topic, event_id, source) = self.event_metadata_subscriber.check_for_update(
@ -344,6 +435,9 @@ class EmbeddingMaintainer(threading.Thread):

    def _embed_thumbnail(self, event_id: str, thumbnail: bytes) -> None:
        """Embed the thumbnail for an event."""
        if not self.config.semantic_search.enabled:
            return

        self.embeddings.embed_thumbnail(event_id, thumbnail)

    def _embed_description(self, event: Event, thumbnails: list[bytes]) -> None:
@ -369,7 +463,8 @@ class EmbeddingMaintainer(threading.Thread):
        )

        # Embed the description
        self.embeddings.embed_description(event.id, description)
        if self.config.semantic_search.enabled:
            self.embeddings.embed_description(event.id, description)

        logger.debug(
            "Generated description for %s (%d images): %s",
@ -390,7 +485,7 @@ class EmbeddingMaintainer(threading.Thread):
            logger.error(f"GenAI not enabled for camera {event.camera}")
            return

        thumbnail = base64.b64decode(event.thumbnail)
        thumbnail = get_event_thumbnail_bytes(event)

        logger.debug(
            f"Trying {source} regeneration for {event}, has_snapshot: {event.has_snapshot}"
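Aside: an illustrative restatement of the gate in _process_finalized above — LPR post processing only runs when a recording segment already covers the event and realtime processing produced a plate candidate for that event id. The helper name is hypothetical:

def should_post_process_lpr(
    camera: str,
    event_id: str,
    recordings_available_through: dict[str, float],
    detected_license_plates: dict[str, dict],
) -> bool:
    available = recordings_available_through.get(camera)
    return available is not None and event_id in detected_license_plates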
100
frigate/embeddings/onnx/base_embedding.py
Normal file
100
frigate/embeddings/onnx/base_embedding.py
Normal file
@ -0,0 +1,100 @@
"""Base class for onnx embedding implementations."""

import logging
import os
from abc import ABC, abstractmethod
from enum import Enum
from io import BytesIO

import numpy as np
import requests
from PIL import Image

from frigate.const import UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader

logger = logging.getLogger(__name__)


class EmbeddingTypeEnum(str, Enum):
    thumbnail = "thumbnail"
    description = "description"


class BaseEmbedding(ABC):
    """Base embedding class."""

    def __init__(self, model_name: str, model_file: str, download_urls: dict[str, str]):
        self.model_name = model_name
        self.model_file = model_file
        self.download_urls = download_urls
        self.downloader: ModelDownloader = None

    def _download_model(self, path: str):
        try:
            file_name = os.path.basename(path)

            if file_name in self.download_urls:
                ModelDownloader.download_from_url(self.download_urls[file_name], path)

            self.downloader.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.downloaded,
                },
            )
        except Exception:
            self.downloader.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.error,
                },
            )

    @abstractmethod
    def _load_model_and_utils(self):
        pass

    @abstractmethod
    def _preprocess_inputs(self, raw_inputs: any) -> any:
        pass

    def _process_image(self, image, output: str = "RGB") -> Image.Image:
        if isinstance(image, str):
            if image.startswith("http"):
                response = requests.get(image)
                image = Image.open(BytesIO(response.content)).convert(output)
        elif isinstance(image, bytes):
            image = Image.open(BytesIO(image)).convert(output)

        return image

    def _postprocess_outputs(self, outputs: any) -> any:
        return outputs

    def __call__(
        self, inputs: list[str] | list[Image.Image] | list[str]
    ) -> list[np.ndarray]:
        self._load_model_and_utils()
        processed = self._preprocess_inputs(inputs)
        input_names = self.runner.get_input_names()
        onnx_inputs = {name: [] for name in input_names}
        input: dict[str, any]
        for input in processed:
            for key, value in input.items():
                if key in input_names:
                    onnx_inputs[key].append(value[0])

        for key in input_names:
            if onnx_inputs.get(key):
                onnx_inputs[key] = np.stack(onnx_inputs[key])
            else:
                logger.warning(f"Expected input '{key}' not found in onnx_inputs")

        outputs = self.runner.run(onnx_inputs)[0]
        embeddings = self._postprocess_outputs(outputs)

        return [embedding for embedding in embeddings]
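Note: every embedding class added in this commit follows the contract above: a subclass supplies _load_model_and_utils and _preprocess_inputs, and BaseEmbedding.__call__ collects the per-input tensor dicts, stacks them per ONNX input name, and feeds them to self.runner. A minimal sketch of that contract, assuming a runner with the same get_input_names()/run() surface as the ONNXModelRunner added below; DummyRunner and EchoEmbedding are hypothetical, for illustration only:

import numpy as np


class DummyRunner:
    # Hypothetical stand-in for ONNXModelRunner (illustration only).
    def get_input_names(self) -> list[str]:
        return ["input_ids"]

    def run(self, inputs: dict) -> list[np.ndarray]:
        # echo the stacked batch back as the "embeddings" output
        return [inputs["input_ids"].astype(np.float32)]


class EchoEmbedding(BaseEmbedding):
    # Hypothetical subclass showing the minimum a subclass must provide.
    def __init__(self):
        super().__init__("echo", "echo.onnx", download_urls={})
        self.runner = DummyRunner()

    def _load_model_and_utils(self):
        pass  # model is already "loaded"

    def _preprocess_inputs(self, raw_inputs):
        # one dict per input; each value carries a leading batch dim of 1
        return [{"input_ids": np.array([[len(s)]])} for s in raw_inputs]


embeddings = EchoEmbedding()(["a", "bb"])  # -> two 1-element arrays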
216
frigate/embeddings/onnx/jina_v1_embedding.py
Normal file
@ -0,0 +1,216 @@
"""JinaV1 Embeddings."""

import logging
import os
import warnings

# importing this without pytorch or others causes a warning
# https://github.com/huggingface/transformers/issues/27214
# suppressed by setting env TRANSFORMERS_NO_ADVISORY_WARNINGS=1
from transformers import AutoFeatureExtractor, AutoTokenizer
from transformers.utils.logging import disable_progress_bar

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader

from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner

warnings.filterwarnings(
    "ignore",
    category=FutureWarning,
    message="The class CLIPFeatureExtractor is deprecated",
)

# disables the progress bar for downloading tokenizers and feature extractors
disable_progress_bar()
logger = logging.getLogger(__name__)


class JinaV1TextEmbedding(BaseEmbedding):
    def __init__(
        self,
        model_size: str,
        requestor: InterProcessRequestor,
        device: str = "AUTO",
    ):
        super().__init__(
            model_name="jinaai/jina-clip-v1",
            model_file="text_model_fp16.onnx",
            download_urls={
                "text_model_fp16.onnx": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/text_model_fp16.onnx",
            },
        )
        self.tokenizer_file = "tokenizer"
        self.requestor = requestor
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.tokenizer = None
        self.feature_extractor = None
        self.runner = None
        files_names = list(self.download_urls.keys()) + [self.tokenizer_file]

        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _download_model(self, path: str):
        try:
            file_name = os.path.basename(path)

            if file_name in self.download_urls:
                ModelDownloader.download_from_url(self.download_urls[file_name], path)
            elif file_name == self.tokenizer_file:
                if not os.path.exists(path + "/" + self.model_name):
                    logger.info(f"Downloading {self.model_name} tokenizer")

                    tokenizer = AutoTokenizer.from_pretrained(
                        self.model_name,
                        trust_remote_code=True,
                        cache_dir=f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer",
                        clean_up_tokenization_spaces=True,
                    )
                    tokenizer.save_pretrained(path)

            self.downloader.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.downloaded,
                },
            )
        except Exception:
            self.downloader.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.error,
                },
            )

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            tokenizer_path = os.path.join(
                f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer"
            )
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_name,
                cache_dir=tokenizer_path,
                trust_remote_code=True,
                clean_up_tokenization_spaces=True,
            )

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _preprocess_inputs(self, raw_inputs):
        max_length = max(len(self.tokenizer.encode(text)) for text in raw_inputs)
        return [
            self.tokenizer(
                text,
                padding="max_length",
                truncation=True,
                max_length=max_length,
                return_tensors="np",
            )
            for text in raw_inputs
        ]


class JinaV1ImageEmbedding(BaseEmbedding):
    def __init__(
        self,
        model_size: str,
        requestor: InterProcessRequestor,
        device: str = "AUTO",
    ):
        model_file = (
            "vision_model_fp16.onnx"
            if model_size == "large"
            else "vision_model_quantized.onnx"
        )
        super().__init__(
            model_name="jinaai/jina-clip-v1",
            model_file=model_file,
            download_urls={
                model_file: f"https://huggingface.co/jinaai/jina-clip-v1/resolve/main/onnx/{model_file}",
                "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v1/resolve/main/preprocessor_config.json",
            },
        )
        self.requestor = requestor
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.feature_extractor = None
        self.runner: ONNXModelRunner | None = None
        files_names = list(self.download_urls.keys())
        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            self.feature_extractor = AutoFeatureExtractor.from_pretrained(
                f"{MODEL_CACHE_DIR}/{self.model_name}",
            )

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _preprocess_inputs(self, raw_inputs):
        processed_images = [self._process_image(img) for img in raw_inputs]
        return [
            self.feature_extractor(images=image, return_tensors="np")
            for image in processed_images
        ]
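Note: both classes are invoked through BaseEmbedding.__call__, so callers only pass raw text strings or images. A hedged usage sketch; the requestor is assumed to be an existing InterProcessRequestor in the embeddings process, and the variable names are illustrative, not Frigate code:

# Illustrative only; in Frigate these are constructed by the embeddings maintainer.
text_model = JinaV1TextEmbedding(model_size="large", requestor=requestor)
image_model = JinaV1ImageEmbedding(model_size="large", requestor=requestor)

text_vecs = text_model(["person walking a dog"])  # -> list[np.ndarray]
image_vecs = image_model([thumbnail_bytes])       # bytes, PIL images, or URLs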
231
frigate/embeddings/onnx/jina_v2_embedding.py
Normal file
@ -0,0 +1,231 @@
"""JinaV2 Embeddings."""

import io
import logging
import os

import numpy as np
from PIL import Image
from transformers import AutoTokenizer
from transformers.utils.logging import disable_progress_bar, set_verbosity_error

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR, UPDATE_MODEL_STATE
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader

from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner

# disables the progress bar and download logging for downloading tokenizers and image processors
disable_progress_bar()
set_verbosity_error()
logger = logging.getLogger(__name__)


class JinaV2Embedding(BaseEmbedding):
    def __init__(
        self,
        model_size: str,
        requestor: InterProcessRequestor,
        device: str = "AUTO",
        embedding_type: str = None,
    ):
        model_file = (
            "model_fp16.onnx" if model_size == "large" else "model_quantized.onnx"
        )
        super().__init__(
            model_name="jinaai/jina-clip-v2",
            model_file=model_file,
            download_urls={
                model_file: f"https://huggingface.co/jinaai/jina-clip-v2/resolve/main/onnx/{model_file}",
                "preprocessor_config.json": "https://huggingface.co/jinaai/jina-clip-v2/resolve/main/preprocessor_config.json",
            },
        )
        self.tokenizer_file = "tokenizer"
        self.embedding_type = embedding_type
        self.requestor = requestor
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.tokenizer = None
        self.image_processor = None
        self.runner = None
        files_names = list(self.download_urls.keys()) + [self.tokenizer_file]
        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _download_model(self, path: str):
        try:
            file_name = os.path.basename(path)

            if file_name in self.download_urls:
                ModelDownloader.download_from_url(self.download_urls[file_name], path)
            elif file_name == self.tokenizer_file:
                if not os.path.exists(os.path.join(path, self.model_name)):
                    logger.info(f"Downloading {self.model_name} tokenizer")

                    tokenizer = AutoTokenizer.from_pretrained(
                        self.model_name,
                        trust_remote_code=True,
                        cache_dir=os.path.join(
                            MODEL_CACHE_DIR, self.model_name, "tokenizer"
                        ),
                        clean_up_tokenization_spaces=True,
                    )
                    tokenizer.save_pretrained(path)
            self.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.downloaded,
                },
            )
        except Exception:
            self.requestor.send_data(
                UPDATE_MODEL_STATE,
                {
                    "model": f"{self.model_name}-{file_name}",
                    "state": ModelStatusTypesEnum.error,
                },
            )

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            tokenizer_path = os.path.join(
                f"{MODEL_CACHE_DIR}/{self.model_name}/tokenizer"
            )
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.model_name,
                cache_dir=tokenizer_path,
                trust_remote_code=True,
                clean_up_tokenization_spaces=True,
            )

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _preprocess_image(self, image_data: bytes | Image.Image) -> np.ndarray:
        """
        Manually preprocess a single image from bytes or PIL.Image to (3, 512, 512).
        """
        if isinstance(image_data, bytes):
            image = Image.open(io.BytesIO(image_data))
        else:
            image = image_data

        if image.mode != "RGB":
            image = image.convert("RGB")

        image = image.resize((512, 512), Image.Resampling.LANCZOS)

        # Convert to numpy array, normalize to [0, 1], and transpose to (channels, height, width)
        image_array = np.array(image, dtype=np.float32) / 255.0
        image_array = np.transpose(image_array, (2, 0, 1))  # (H, W, C) -> (C, H, W)

        return image_array

    def _preprocess_inputs(self, raw_inputs):
        """
        Preprocess inputs into a list of real input tensors (no dummies).
        - For text: Returns list of input_ids.
        - For vision: Returns list of pixel_values.
        """
        if not isinstance(raw_inputs, list):
            raw_inputs = [raw_inputs]

        processed = []
        if self.embedding_type == "text":
            for text in raw_inputs:
                input_ids = self.tokenizer([text], return_tensors="np")["input_ids"]
                processed.append(input_ids)
        elif self.embedding_type == "vision":
            for img in raw_inputs:
                pixel_values = self._preprocess_image(img)
                processed.append(
                    pixel_values[np.newaxis, ...]
                )  # Add batch dim: (1, 3, 512, 512)
        else:
            raise ValueError(
                f"Invalid embedding_type: {self.embedding_type}. Must be 'text' or 'vision'."
            )
        return processed

    def _postprocess_outputs(self, outputs):
        """
        Process ONNX model outputs, truncating each embedding in the array to truncate_dim.
        - outputs: NumPy array of embeddings.
        - Returns: List of truncated embeddings.
        """
        # size of vector in database
        truncate_dim = 768

        # jina v2 defaults to 1024 and uses Matryoshka representation, so
        # truncating only causes an extremely minor decrease in retrieval accuracy
        if outputs.shape[-1] > truncate_dim:
            outputs = outputs[..., :truncate_dim]

        return outputs

    def __call__(
        self, inputs: list[str] | list[Image.Image] | list[str], embedding_type=None
    ) -> list[np.ndarray]:
        self.embedding_type = embedding_type
        if not self.embedding_type:
            raise ValueError(
                "embedding_type must be specified either in __init__ or __call__"
            )

        self._load_model_and_utils()
        processed = self._preprocess_inputs(inputs)
        batch_size = len(processed)

        # Prepare ONNX inputs with matching batch sizes
        onnx_inputs = {}
        if self.embedding_type == "text":
            onnx_inputs["input_ids"] = np.stack([x[0] for x in processed])
            onnx_inputs["pixel_values"] = np.zeros(
                (batch_size, 3, 512, 512), dtype=np.float32
            )
        elif self.embedding_type == "vision":
            onnx_inputs["input_ids"] = np.zeros((batch_size, 16), dtype=np.int64)
            onnx_inputs["pixel_values"] = np.stack([x[0] for x in processed])
        else:
            raise ValueError("Invalid embedding type")

        # Run inference
        outputs = self.runner.run(onnx_inputs)
        if self.embedding_type == "text":
            embeddings = outputs[2]  # text embeddings
        elif self.embedding_type == "vision":
            embeddings = outputs[3]  # image embeddings
        else:
            raise ValueError("Invalid embedding type")

        embeddings = self._postprocess_outputs(embeddings)
        return [embedding for embedding in embeddings]
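Note: the truncation in _postprocess_outputs leans on jina-clip-v2's Matryoshka training: the first 768 of the 1024 output dimensions remain a usable embedding on their own. A small numpy illustration of that step (not Frigate code; consumers would typically re-normalize before cosine similarity):

import numpy as np

outputs = np.random.rand(2, 1024).astype(np.float32)  # stand-in for model output
truncated = outputs[..., :768]                         # what _postprocess_outputs keeps
truncated /= np.linalg.norm(truncated, axis=-1, keepdims=True)  # unit-length vectors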
297
frigate/embeddings/onnx/lpr_embedding.py
Normal file
@ -0,0 +1,297 @@
import logging
import os
import warnings

import cv2
import numpy as np

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR
from frigate.types import ModelStatusTypesEnum
from frigate.util.downloader import ModelDownloader

from .base_embedding import BaseEmbedding
from .runner import ONNXModelRunner

warnings.filterwarnings(
    "ignore",
    category=FutureWarning,
    message="The class CLIPFeatureExtractor is deprecated",
)

logger = logging.getLogger(__name__)

LPR_EMBEDDING_SIZE = 256


class PaddleOCRDetection(BaseEmbedding):
    def __init__(
        self,
        model_size: str,
        requestor: InterProcessRequestor,
        device: str = "AUTO",
    ):
        super().__init__(
            model_name="paddleocr-onnx",
            model_file="detection.onnx",
            download_urls={
                "detection.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/detection.onnx"
            },
        )
        self.requestor = requestor
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.runner: ONNXModelRunner | None = None
        files_names = list(self.download_urls.keys())
        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _preprocess_inputs(self, raw_inputs):
        preprocessed = []
        for x in raw_inputs:
            preprocessed.append(x)
        return [{"x": preprocessed[0]}]


class PaddleOCRClassification(BaseEmbedding):
    def __init__(
        self,
        model_size: str,
        requestor: InterProcessRequestor,
        device: str = "AUTO",
    ):
        super().__init__(
            model_name="paddleocr-onnx",
            model_file="classification.onnx",
            download_urls={
                "classification.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/classification.onnx"
            },
        )
        self.requestor = requestor
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.runner: ONNXModelRunner | None = None
        files_names = list(self.download_urls.keys())
        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _preprocess_inputs(self, raw_inputs):
        processed = []
        for img in raw_inputs:
            processed.append({"x": img})
        return processed


class PaddleOCRRecognition(BaseEmbedding):
    def __init__(
        self,
        model_size: str,
        requestor: InterProcessRequestor,
        device: str = "AUTO",
    ):
        super().__init__(
            model_name="paddleocr-onnx",
            model_file="recognition.onnx",
            download_urls={
                "recognition.onnx": "https://github.com/hawkeye217/paddleocr-onnx/raw/refs/heads/master/models/recognition.onnx"
            },
        )
        self.requestor = requestor
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.runner: ONNXModelRunner | None = None
        files_names = list(self.download_urls.keys())
        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _preprocess_inputs(self, raw_inputs):
        processed = []
        for img in raw_inputs:
            processed.append({"x": img})
        return processed


class LicensePlateDetector(BaseEmbedding):
    def __init__(
        self,
        model_size: str,
        requestor: InterProcessRequestor,
        device: str = "AUTO",
    ):
        super().__init__(
            model_name="yolov9_license_plate",
            model_file="yolov9-256-license-plates.onnx",
            download_urls={
                "yolov9-256-license-plates.onnx": "https://github.com/hawkeye217/yolov9-license-plates/raw/refs/heads/master/models/yolov9-256-license-plates.onnx"
            },
        )

        self.requestor = requestor
        self.model_size = model_size
        self.device = device
        self.download_path = os.path.join(MODEL_CACHE_DIR, self.model_name)
        self.runner: ONNXModelRunner | None = None
        files_names = list(self.download_urls.keys())
        if not all(
            os.path.exists(os.path.join(self.download_path, n)) for n in files_names
        ):
            logger.debug(f"starting model download for {self.model_name}")
            self.downloader = ModelDownloader(
                model_name=self.model_name,
                download_path=self.download_path,
                file_names=files_names,
                download_func=self._download_model,
            )
            self.downloader.ensure_model_files()
        else:
            self.downloader = None
            ModelDownloader.mark_files_state(
                self.requestor,
                self.model_name,
                files_names,
                ModelStatusTypesEnum.downloaded,
            )
            self._load_model_and_utils()
            logger.debug(f"models are already downloaded for {self.model_name}")

    def _load_model_and_utils(self):
        if self.runner is None:
            if self.downloader:
                self.downloader.wait_for_download()

            self.runner = ONNXModelRunner(
                os.path.join(self.download_path, self.model_file),
                self.device,
                self.model_size,
            )

    def _preprocess_inputs(self, raw_inputs):
        if isinstance(raw_inputs, list):
            raise ValueError("License plate embedding does not support batch inputs.")
        # Get image as numpy array
        img = self._process_image(raw_inputs)
        height, width, channels = img.shape

        # Resize maintaining aspect ratio
        if width > height:
            new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4)
            img = cv2.resize(img, (LPR_EMBEDDING_SIZE, new_height))
        else:
            new_width = int(((width / height) * LPR_EMBEDDING_SIZE) // 4 * 4)
            img = cv2.resize(img, (new_width, LPR_EMBEDDING_SIZE))

        # Get new dimensions after resize
        og_h, og_w, channels = img.shape

        # Create black square frame
        frame = np.full(
            (LPR_EMBEDDING_SIZE, LPR_EMBEDDING_SIZE, channels),
            (0, 0, 0),
            dtype=np.float32,
        )

        # Center the resized image in the square frame
        x_center = (LPR_EMBEDDING_SIZE - og_w) // 2
        y_center = (LPR_EMBEDDING_SIZE - og_h) // 2
        frame[y_center : y_center + og_h, x_center : x_center + og_w] = img

        # Normalize to 0-1
        frame = frame / 255.0

        # Convert from HWC to CHW format and add batch dimension
        frame = np.transpose(frame, (2, 0, 1))
        frame = np.expand_dims(frame, axis=0)
        return [{"images": frame}]
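Note: LicensePlateDetector._preprocess_inputs letterboxes the crop: the long side is scaled to 256, the short side is rounded down to a multiple of 4, and the result is centered on a black 256x256 canvas. A worked example of that arithmetic (illustrative values, not Frigate code):

# Worked example of the letterbox math above, for a 640x480 crop (width > height).
LPR_EMBEDDING_SIZE = 256
width, height = 640, 480
new_height = int(((height / width) * LPR_EMBEDDING_SIZE) // 4 * 4)  # 0.75 * 256 -> 192
y_center = (LPR_EMBEDDING_SIZE - new_height) // 2                   # 32 rows of padding
assert (new_height, y_center) == (192, 32)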
76
frigate/embeddings/onnx/runner.py
Normal file
@ -0,0 +1,76 @@
"""Convenience runner for onnx models."""

import logging
import os.path
from typing import Any

import onnxruntime as ort

from frigate.const import MODEL_CACHE_DIR
from frigate.util.model import get_ort_providers

try:
    import openvino as ov
except ImportError:
    # openvino is not included
    pass

logger = logging.getLogger(__name__)


class ONNXModelRunner:
    """Run onnx models optimally based on available hardware."""

    def __init__(self, model_path: str, device: str, requires_fp16: bool = False):
        self.model_path = model_path
        self.ort: ort.InferenceSession = None
        self.ov: ov.Core = None
        providers, options = get_ort_providers(device == "CPU", device, requires_fp16)
        self.interpreter = None

        if "OpenVINOExecutionProvider" in providers:
            try:
                # use OpenVINO directly
                self.type = "ov"
                self.ov = ov.Core()
                self.ov.set_property(
                    {ov.properties.cache_dir: os.path.join(MODEL_CACHE_DIR, "openvino")}
                )
                self.interpreter = self.ov.compile_model(
                    model=model_path, device_name=device
                )
            except Exception as e:
                logger.warning(
                    f"OpenVINO failed to build model, using CPU instead: {e}"
                )
                self.interpreter = None

        # Use ONNXRuntime
        if self.interpreter is None:
            self.type = "ort"
            self.ort = ort.InferenceSession(
                model_path,
                providers=providers,
                provider_options=options,
            )

    def get_input_names(self) -> list[str]:
        if self.type == "ov":
            input_names = []

            for input in self.interpreter.inputs:
                input_names.extend(input.names)

            return input_names
        elif self.type == "ort":
            return [input.name for input in self.ort.get_inputs()]

    def run(self, input: dict[str, Any]) -> Any:
        if self.type == "ov":
            infer_request = self.interpreter.create_infer_request()

            outputs = infer_request.infer(input)

            return outputs
        elif self.type == "ort":
            return self.ort.run(None, input)
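Note: ONNXModelRunner hides the OpenVINO-vs-onnxruntime decision from callers, which only need the input names and a dict of arrays. A hedged usage sketch; the model path and input name are assumptions for illustration:

import numpy as np

# Hypothetical model; real paths and input names depend on the model file.
runner = ONNXModelRunner("/config/model_cache/example.onnx", device="AUTO")
print(runner.get_input_names())  # e.g. ["images"]
outputs = runner.run({"images": np.zeros((1, 3, 256, 256), dtype=np.float32)})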
@ -11,6 +11,7 @@ from frigate.config import FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.models import Event, Timeline
from frigate.util.path import delete_event_images

logger = logging.getLogger(__name__)

@ -64,7 +65,6 @@ class EventCleanup(threading.Thread):
    def expire_snapshots(self) -> list[str]:
        ## Expire events from unlisted cameras based on the global config
        retain_config = self.config.snapshots.retain
        file_extension = "jpg"
        update_params = {"has_snapshot": False}

        distinct_labels = self.get_removed_camera_labels()
@ -83,6 +83,7 @@ class EventCleanup(threading.Thread):
            Event.select(
                Event.id,
                Event.camera,
                Event.thumbnail,
            )
            .where(
                Event.camera.not_in(self.camera_keys),
@ -94,22 +95,15 @@ class EventCleanup(threading.Thread):
            .iterator()
        )
        logger.debug(f"{len(list(expired_events))} events can be expired")

        # delete the media from disk
        for expired in expired_events:
            media_name = f"{expired.camera}-{expired.id}"
            media_path = Path(
                f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
            )
            deleted = delete_event_images(expired)

            try:
                media_path.unlink(missing_ok=True)
                if file_extension == "jpg":
                    media_path = Path(
                        f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
                    )
                    media_path.unlink(missing_ok=True)
            except OSError as e:
                logger.warning(f"Unable to delete event images: {e}")
            if not deleted:
                logger.warning(
                    f"Unable to delete event images for {expired.camera}: {expired.id}"
                )

        # update the clips attribute for the db entry
        query = Event.select(Event.id).where(
@ -165,6 +159,7 @@ class EventCleanup(threading.Thread):
            Event.select(
                Event.id,
                Event.camera,
                Event.thumbnail,
            )
            .where(
                Event.camera == name,
@ -181,19 +176,12 @@ class EventCleanup(threading.Thread):
        # so no need to delete mp4 files
        for event in expired_events:
            events_to_update.append(event.id)
            deleted = delete_event_images(event)

            try:
                media_name = f"{event.camera}-{event.id}"
                media_path = Path(
                    f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
            if not deleted:
                logger.warning(
                    f"Unable to delete event images for {event.camera}: {event.id}"
                )
                media_path.unlink(missing_ok=True)
                media_path = Path(
                    f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
                )
                media_path.unlink(missing_ok=True)
            except OSError as e:
                logger.warning(f"Unable to delete event images: {e}")

        # update the clips attribute for the db entry
        for i in range(0, len(events_to_update), CHUNK_SIZE):
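Note: the cleanup path now delegates file removal to delete_event_images from frigate.util.path, which reports success instead of raising. Based only on its usage here, a hedged sketch of roughly what such a helper covers (not the actual implementation; the real helper also handles the event's webp thumbnail):

from pathlib import Path

def delete_event_images_sketch(clips_dir: str, camera: str, event_id: str) -> bool:
    # Remove the event's snapshot jpg and its clean png, reporting failure
    # as a bool rather than raising (mirrors the call sites above).
    try:
        base = Path(clips_dir) / f"{camera}-{event_id}"
        Path(f"{base}.jpg").unlink(missing_ok=True)
        Path(f"{base}-clean.png").unlink(missing_ok=True)
        return True
    except OSError:
        return False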
@ -1,6 +1,5 @@
"""Handle external events created by the user."""

import base64
import datetime
import logging
import os
@ -15,7 +14,7 @@ from numpy import ndarray
from frigate.comms.detections_updater import DetectionPublisher, DetectionTypeEnum
from frigate.comms.events_updater import EventUpdatePublisher
from frigate.config import CameraConfig, FrigateConfig
from frigate.const import CLIPS_DIR
from frigate.const import CLIPS_DIR, THUMB_DIR
from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.util.image import draw_box_with_label

@ -55,9 +54,7 @@ class ExternalEventProcessor:
        rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
        event_id = f"{now}-{rand_id}"

        thumbnail = self._write_images(
            camera_config, label, event_id, draw, snapshot_frame
        )
        self._write_images(camera_config, label, event_id, draw, snapshot_frame)
        end = now + duration if duration is not None else None

        self.event_sender.publish(
@ -74,7 +71,6 @@ class ExternalEventProcessor:
                "camera": camera,
                "start_time": now - camera_config.record.event_pre_capture,
                "end_time": end,
                "thumbnail": thumbnail,
                "has_clip": camera_config.record.enabled and include_recording,
                "has_snapshot": True,
                "type": source_type,
@ -134,9 +130,9 @@ class ExternalEventProcessor:
        event_id: str,
        draw: dict[str, any],
        img_frame: Optional[ndarray],
    ) -> Optional[str]:
    ) -> None:
        if img_frame is None:
            return None
            return

        # write clean snapshot if enabled
        if camera_config.snapshots.clean_copy:
@ -182,8 +178,9 @@ class ExternalEventProcessor:
        # create thumbnail with max height of 175 and save
        width = int(175 * img_frame.shape[1] / img_frame.shape[0])
        thumb = cv2.resize(img_frame, dsize=(width, 175), interpolation=cv2.INTER_AREA)
        ret, jpg = cv2.imencode(".jpg", thumb)
        return base64.b64encode(jpg.tobytes()).decode("utf-8")
        cv2.imwrite(
            os.path.join(THUMB_DIR, camera_config.name, f"{event_id}.webp"), thumb
        )

    def stop(self):
        self.event_sender.stop()
@ -23,11 +23,11 @@ def should_update_db(prev_event: Event, current_event: Event) -> bool:
    if (
        prev_event["top_score"] != current_event["top_score"]
        or prev_event["entered_zones"] != current_event["entered_zones"]
        or prev_event["thumbnail"] != current_event["thumbnail"]
        or prev_event["end_time"] != current_event["end_time"]
        or prev_event["average_estimated_speed"]
        != current_event["average_estimated_speed"]
        or prev_event["velocity_angle"] != current_event["velocity_angle"]
        or prev_event["path_data"] != current_event["path_data"]
    ):
        return True
    return False
@ -201,7 +201,7 @@ class EventProcessor(threading.Thread):
            Event.start_time: start_time,
            Event.end_time: end_time,
            Event.zones: list(event_data["entered_zones"]),
            Event.thumbnail: event_data["thumbnail"],
            Event.thumbnail: event_data.get("thumbnail"),
            Event.has_clip: event_data["has_clip"],
            Event.has_snapshot: event_data["has_snapshot"],
            Event.model_hash: first_detector.model.model_hash,
@ -217,6 +217,7 @@ class EventProcessor(threading.Thread):
                "velocity_angle": event_data["velocity_angle"],
                "type": "object",
                "max_severity": event_data.get("max_severity"),
                "path_data": event_data.get("path_data"),
            },
        }

@ -256,7 +257,7 @@ class EventProcessor(threading.Thread):
            Event.camera: event_data["camera"],
            Event.start_time: event_data["start_time"],
            Event.end_time: event_data["end_time"],
            Event.thumbnail: event_data["thumbnail"],
            Event.thumbnail: event_data.get("thumbnail"),
            Event.has_clip: event_data["has_clip"],
            Event.has_snapshot: event_data["has_snapshot"],
            Event.zones: [],
@ -10,6 +10,7 @@ from frigate.const import (
    FFMPEG_HWACCEL_NVIDIA,
    FFMPEG_HWACCEL_VAAPI,
    FFMPEG_HWACCEL_VULKAN,
    LIBAVFORMAT_VERSION_MAJOR,
)
from frigate.util.services import vainfo_hwaccel
from frigate.version import VERSION
@ -51,9 +52,8 @@ class LibvaGpuSelector:
        return ""


LIBAV_VERSION = int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59")
FPS_VFR_PARAM = "-fps_mode vfr" if LIBAV_VERSION >= 59 else "-vsync 2"
TIMEOUT_PARAM = "-timeout" if LIBAV_VERSION >= 59 else "-stimeout"
FPS_VFR_PARAM = "-fps_mode vfr" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-vsync 2"
TIMEOUT_PARAM = "-timeout" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-stimeout"

_gpu_selector = LibvaGpuSelector()
_user_agent_args = [
@ -65,8 +65,8 @@ PRESETS_HW_ACCEL_DECODE = {
    "preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
    "preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
    FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
    "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
    "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
    "preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv{' -bsf:v dump_extra' if LIBAVFORMAT_VERSION_MAJOR >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
    "preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv{' -bsf:v dump_extra' if LIBAVFORMAT_VERSION_MAJOR >= 61 else ''}",  # https://trac.ffmpeg.org/ticket/9766#comment:17
    FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
    "preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
    "preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",
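Note: the presets now consume the shared LIBAVFORMAT_VERSION_MAJOR constant from frigate.const instead of re-parsing the environment locally, so every module gates on the same value. A standalone rendering of the gates above (61 is just an example value, not Frigate code):

LIBAVFORMAT_VERSION_MAJOR = 61
FPS_VFR_PARAM = "-fps_mode vfr" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-vsync 2"
TIMEOUT_PARAM = "-timeout" if LIBAVFORMAT_VERSION_MAJOR >= 59 else "-stimeout"
QSV_EXTRA = " -bsf:v dump_extra" if LIBAVFORMAT_VERSION_MAJOR >= 61 else ""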
@ -49,7 +49,7 @@ class ImprovedMotionDetector(MotionDetector):
        self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
        self.contrast_values[:, 1:2] = 255
        self.contrast_values_index = 0
        self.config_subscriber = ConfigSubscriber(f"config/motion/{name}")
        self.config_subscriber = ConfigSubscriber(f"config/motion/{name}", True)
        self.ptz_metrics = ptz_metrics
        self.last_stop_time = None
@ -1,7 +1,6 @@
import datetime
import json
import logging
import os
import queue
import threading
from collections import defaultdict
@ -16,13 +15,13 @@ from frigate.comms.dispatcher import Dispatcher
from frigate.comms.events_updater import EventEndSubscriber, EventUpdatePublisher
from frigate.comms.inter_process import InterProcessRequestor
from frigate.config import (
    CameraMqttConfig,
    FrigateConfig,
    MqttConfig,
    RecordConfig,
    SnapshotsConfig,
    ZoomingModeEnum,
)
from frigate.const import CLIPS_DIR, UPDATE_CAMERA_ACTIVITY
from frigate.const import UPDATE_CAMERA_ACTIVITY
from frigate.events.types import EventStateEnum, EventTypeEnum
from frigate.ptz.autotrack import PtzAutoTrackerThread
from frigate.track.tracked_object import TrackedObject
@ -413,6 +412,11 @@ class CameraState:

        self.previous_frame_id = frame_name

    def shutdown(self) -> None:
        for obj in self.tracked_objects.values():
            if not obj.obj_data.get("end_time"):
                obj.write_thumbnail_to_disk()


class TrackedObjectProcessor(threading.Thread):
    def __init__(
@ -479,7 +483,7 @@ class TrackedObjectProcessor(threading.Thread):
                    EventStateEnum.update,
                    camera,
                    frame_name,
                    obj.to_dict(include_thumbnail=True),
                    obj.to_dict(),
                )
            )

@ -491,41 +495,13 @@ class TrackedObjectProcessor(threading.Thread):
            obj.has_snapshot = self.should_save_snapshot(camera, obj)
            obj.has_clip = self.should_retain_recording(camera, obj)

            # write thumbnail to disk if it will be saved as an event
            if obj.has_snapshot or obj.has_clip:
                obj.write_thumbnail_to_disk()

            # write the snapshot to disk
            if obj.has_snapshot:
                snapshot_config: SnapshotsConfig = self.config.cameras[camera].snapshots
                jpg_bytes = obj.get_jpg_bytes(
                    timestamp=snapshot_config.timestamp,
                    bounding_box=snapshot_config.bounding_box,
                    crop=snapshot_config.crop,
                    height=snapshot_config.height,
                    quality=snapshot_config.quality,
                )
                if jpg_bytes is None:
                    logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.")
                else:
                    with open(
                        os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"),
                        "wb",
                    ) as j:
                        j.write(jpg_bytes)

                # write clean snapshot if enabled
                if snapshot_config.clean_copy:
                    png_bytes = obj.get_clean_png()
                    if png_bytes is None:
                        logger.warning(
                            f"Unable to save clean snapshot for {obj.obj_data['id']}."
                        )
                    else:
                        with open(
                            os.path.join(
                                CLIPS_DIR,
                                f"{camera}-{obj.obj_data['id']}-clean.png",
                            ),
                            "wb",
                        ) as p:
                            p.write(png_bytes)
                obj.write_snapshot_to_disk()

            if not obj.false_positive:
                message = {
@ -542,14 +518,15 @@ class TrackedObjectProcessor(threading.Thread):
                    EventStateEnum.end,
                    camera,
                    frame_name,
                    obj.to_dict(include_thumbnail=True),
                    obj.to_dict(),
                )
            )

        def snapshot(camera, obj: TrackedObject, frame_name: str):
            mqtt_config: MqttConfig = self.config.cameras[camera].mqtt
            mqtt_config: CameraMqttConfig = self.config.cameras[camera].mqtt
            if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
                jpg_bytes = obj.get_jpg_bytes(
                jpg_bytes = obj.get_img_bytes(
                    ext="jpg",
                    timestamp=mqtt_config.timestamp,
                    bounding_box=mqtt_config.bounding_box,
                    crop=mqtt_config.crop,
@ -750,6 +727,10 @@ class TrackedObjectProcessor(threading.Thread):
            event_id, camera, _ = update
            self.camera_states[camera].finished(event_id)

        # shut down camera states
        for state in self.camera_states.values():
            state.shutdown()

        self.requestor.stop()
        self.detection_publisher.stop()
        self.event_sender.stop()
@ -16,7 +16,7 @@ import numpy as np

from frigate.comms.config_updater import ConfigSubscriber
from frigate.config import BirdseyeModeEnum, FfmpegConfig, FrigateConfig
from frigate.const import BASE_DIR, BIRDSEYE_PIPE
from frigate.const import BASE_DIR, BIRDSEYE_PIPE, INSTALL_DIR
from frigate.util.image import (
    SharedMemoryFrameManager,
    copy_yuv_to_position,
@ -297,7 +297,9 @@ class BirdsEyeFrameManager:
            birdseye_logo = cv2.imread(custom_logo_files[0], cv2.IMREAD_UNCHANGED)

        if birdseye_logo is None:
            logo_files = glob.glob("/opt/frigate/frigate/images/birdseye.png")
            logo_files = glob.glob(
                os.path.join(INSTALL_DIR, "frigate/images/birdseye.png")
            )

            if len(logo_files) > 0:
                birdseye_logo = cv2.imread(logo_files[0], cv2.IMREAD_UNCHANGED)
@ -172,7 +172,9 @@ class PreviewRecorder:

        # create communication for finished previews
        self.requestor = InterProcessRequestor()
        self.config_subscriber = ConfigSubscriber(f"config/record/{self.config.name}")
        self.config_subscriber = ConfigSubscriber(
            f"config/record/{self.config.name}", True
        )

        y, u1, u2, v1, v2 = get_yuv_crop(
            self.config.frame_shape_yuv,
@ -80,8 +80,8 @@ class RecordingExporter(threading.Thread):
        Path(os.path.join(CLIPS_DIR, "export")).mkdir(exist_ok=True)

    def get_datetime_from_timestamp(self, timestamp: int) -> str:
        """Convenience fun to get a simple date time from timestamp."""
        return datetime.datetime.fromtimestamp(timestamp).strftime("%Y/%m/%d %H:%M")
        # return in iso format
        return datetime.datetime.fromtimestamp(timestamp).strftime("%Y-%m-%d %H:%M:%S")

    def save_thumbnail(self, id: str) -> str:
        thumb_path = os.path.join(CLIPS_DIR, f"export/{id}.webp")
@ -236,6 +236,10 @@ class RecordingExporter(threading.Thread):
        if self.config.ffmpeg.apple_compatibility:
            ffmpeg_cmd += FFMPEG_HVC1_ARGS

        # add metadata
        title = f"Frigate Recording for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}"
        ffmpeg_cmd.extend(["-metadata", f"title={title}"])

        ffmpeg_cmd.append(video_path)

        return ffmpeg_cmd, playlist_lines
@ -323,6 +327,10 @@ class RecordingExporter(threading.Thread):
            )
        ).split(" ")

        # add metadata
        title = f"Frigate Preview for {self.camera}, {self.get_datetime_from_timestamp(self.start_time)} - {self.get_datetime_from_timestamp(self.end_time)}"
        ffmpeg_cmd.extend(["-metadata", f"title={title}"])

        return ffmpeg_cmd, playlist_lines

    def run(self) -> None:
@ -355,10 +363,13 @@ class RecordingExporter(threading.Thread):
            }
        ).execute()

        if self.playback_source == PlaybackSourceEnum.recordings:
            ffmpeg_cmd, playlist_lines = self.get_record_export_command(video_path)
        else:
            ffmpeg_cmd, playlist_lines = self.get_preview_export_command(video_path)
        try:
            if self.playback_source == PlaybackSourceEnum.recordings:
                ffmpeg_cmd, playlist_lines = self.get_record_export_command(video_path)
            else:
                ffmpeg_cmd, playlist_lines = self.get_preview_export_command(video_path)
        except DoesNotExist:
            return

        p = sp.run(
            ffmpeg_cmd,
@ -19,6 +19,10 @@ import psutil

from frigate.comms.config_updater import ConfigSubscriber
from frigate.comms.detections_updater import DetectionSubscriber, DetectionTypeEnum
from frigate.comms.inter_process import InterProcessRequestor
from frigate.comms.recordings_updater import (
    RecordingsDataPublisher,
    RecordingsDataTypeEnum,
)
from frigate.config import FrigateConfig, RetainModeEnum
from frigate.const import (
    CACHE_DIR,
@ -70,6 +74,9 @@ class RecordingMaintainer(threading.Thread):
        self.requestor = InterProcessRequestor()
        self.config_subscriber = ConfigSubscriber("config/record/")
        self.detection_subscriber = DetectionSubscriber(DetectionTypeEnum.all)
        self.recordings_publisher = RecordingsDataPublisher(
            RecordingsDataTypeEnum.recordings_available_through
        )

        self.stop_event = stop_event
        self.object_recordings_info: dict[str, list] = defaultdict(list)
@ -213,6 +220,16 @@ class RecordingMaintainer(threading.Thread):
            [self.validate_and_move_segment(camera, reviews, r) for r in recordings]
        )

        # publish most recently available recording time and None if disabled
        self.recordings_publisher.publish(
            (
                camera,
                recordings[0]["start_time"].timestamp()
                if self.config.cameras[camera].record.enabled
                else None,
            )
        )

        recordings_to_insert: list[Optional[Recordings]] = await asyncio.gather(*tasks)

        # fire and forget recordings entries
@ -456,7 +473,7 @@ class RecordingMaintainer(threading.Thread):
            # get the segment size of the cache file
            # file without faststart is same size
            segment_size = round(
                float(os.path.getsize(cache_path)) / pow(2, 20), 1
                float(os.path.getsize(cache_path)) / pow(2, 20), 2
            )
        except OSError:
            segment_size = 0
@ -582,4 +599,5 @@ class RecordingMaintainer(threading.Thread):
        self.requestor.stop()
        self.config_subscriber.stop()
        self.detection_subscriber.stop()
        self.recordings_publisher.stop()
        logger.info("Exiting recording maintenance...")
@ -1,207 +1,495 @@
|
||||
from typing import Dict
|
||||
import logging
|
||||
import re
|
||||
|
||||
from prometheus_client import (
|
||||
CONTENT_TYPE_LATEST,
|
||||
Counter,
|
||||
Gauge,
|
||||
Info,
|
||||
generate_latest,
|
||||
)
|
||||
|
||||
# System metrics
|
||||
SYSTEM_INFO = Info("frigate_system", "System information")
|
||||
CPU_USAGE = Gauge(
|
||||
"frigate_cpu_usage_percent",
|
||||
"Process CPU usage %",
|
||||
["pid", "name", "process", "type", "cmdline"],
|
||||
)
|
||||
MEMORY_USAGE = Gauge(
|
||||
"frigate_mem_usage_percent",
|
||||
"Process memory usage %",
|
||||
["pid", "name", "process", "type", "cmdline"],
|
||||
)
|
||||
|
||||
# Camera metrics
|
||||
CAMERA_FPS = Gauge(
|
||||
"frigate_camera_fps",
|
||||
"Frames per second being consumed from your camera",
|
||||
["camera_name"],
|
||||
)
|
||||
DETECTION_FPS = Gauge(
|
||||
"frigate_detection_fps",
|
||||
"Number of times detection is run per second",
|
||||
["camera_name"],
|
||||
)
|
||||
PROCESS_FPS = Gauge(
|
||||
"frigate_process_fps",
|
||||
"Frames per second being processed by frigate",
|
||||
["camera_name"],
|
||||
)
|
||||
SKIPPED_FPS = Gauge(
|
||||
"frigate_skipped_fps", "Frames per second skipped for processing", ["camera_name"]
|
||||
)
|
||||
DETECTION_ENABLED = Gauge(
|
||||
"frigate_detection_enabled", "Detection enabled for camera", ["camera_name"]
|
||||
)
|
||||
AUDIO_DBFS = Gauge("frigate_audio_dBFS", "Audio dBFS for camera", ["camera_name"])
|
||||
AUDIO_RMS = Gauge("frigate_audio_rms", "Audio RMS for camera", ["camera_name"])
|
||||
|
||||
# Detector metrics
|
||||
DETECTOR_INFERENCE = Gauge(
|
||||
"frigate_detector_inference_speed_seconds",
|
||||
"Time spent running object detection in seconds",
|
||||
["name"],
|
||||
)
|
||||
DETECTOR_START = Gauge(
|
||||
"frigate_detection_start", "Detector start time (unix timestamp)", ["name"]
|
||||
)
|
||||
|
||||
# GPU metrics
|
||||
GPU_USAGE = Gauge("frigate_gpu_usage_percent", "GPU utilisation %", ["gpu_name"])
|
||||
GPU_MEMORY = Gauge("frigate_gpu_mem_usage_percent", "GPU memory usage %", ["gpu_name"])
|
||||
|
||||
# Storage metrics
|
||||
STORAGE_FREE = Gauge("frigate_storage_free_bytes", "Storage free bytes", ["storage"])
|
||||
STORAGE_TOTAL = Gauge("frigate_storage_total_bytes", "Storage total bytes", ["storage"])
|
||||
STORAGE_USED = Gauge("frigate_storage_used_bytes", "Storage used bytes", ["storage"])
|
||||
STORAGE_MOUNT = Info(
|
||||
"frigate_storage_mount_type", "Storage mount type", ["mount_type", "storage"]
|
||||
)
|
||||
|
||||
# Service metrics
|
||||
UPTIME = Gauge("frigate_service_uptime_seconds", "Uptime seconds")
|
||||
LAST_UPDATE = Gauge(
|
||||
"frigate_service_last_updated_timestamp", "Stats recorded time (unix timestamp)"
|
||||
)
|
||||
TEMPERATURE = Gauge("frigate_device_temperature", "Device Temperature", ["device"])
|
||||
|
||||
# Event metrics
|
||||
CAMERA_EVENTS = Counter(
|
||||
"frigate_camera_events",
|
||||
"Count of camera events since exporter started",
|
||||
["camera", "label"],
|
||||
from prometheus_client import CONTENT_TYPE_LATEST, generate_latest
|
||||
from prometheus_client.core import (
|
||||
REGISTRY,
|
||||
CounterMetricFamily,
|
||||
GaugeMetricFamily,
|
||||
InfoMetricFamily,
|
||||
)
|
||||
|
||||
|
||||
def update_metrics(stats: Dict) -> None:
|
||||
"""Update Prometheus metrics based on Frigate stats"""
|
||||
try:
|
||||
# Update process metrics
|
||||
if "cpu_usages" in stats:
|
||||
for pid, proc_stats in stats["cpu_usages"].items():
|
||||
cmdline = proc_stats.get("cmdline", "")
|
||||
process_type = "Other"
|
||||
process_name = cmdline
|
||||
class CustomCollector(object):
|
||||
def __init__(self, _url):
|
||||
self.process_stats = {}
|
||||
self.previous_event_id = None
|
||||
self.previous_event_start_time = None
|
||||
self.all_events = {}
|
||||
|
||||
CPU_USAGE.labels(
|
||||
pid=pid,
|
||||
name=process_name,
|
||||
process=process_name,
|
||||
type=process_type,
|
||||
cmdline=cmdline,
|
||||
).set(float(proc_stats["cpu"]))
|
||||
def add_metric(self, metric, label, stats, key, multiplier=1.0): # Now a method
|
||||
try:
|
||||
string = str(stats[key])
|
||||
value = float(re.findall(r"-?\d*\.?\d*", string)[0])
|
||||
metric.add_metric(label, value * multiplier)
|
||||
except (KeyError, TypeError, IndexError, ValueError):
|
||||
pass
|
||||
|
||||
MEMORY_USAGE.labels(
|
||||
pid=pid,
|
||||
name=process_name,
|
||||
process=process_name,
|
||||
type=process_type,
|
||||
cmdline=cmdline,
|
||||
).set(float(proc_stats["mem"]))
|
||||
def add_metric_process(
|
||||
self,
|
||||
metric,
|
||||
camera_stats,
|
||||
camera_name,
|
||||
pid_name,
|
||||
process_name,
|
||||
cpu_or_memory,
|
||||
process_type,
|
||||
):
|
||||
try:
|
||||
pid = str(camera_stats[pid_name])
|
||||
label_values = [pid, camera_name, process_name, process_type]
|
||||
try:
|
||||
# new frigate:0.13.0-beta3 stat 'cmdline'
|
||||
label_values.append(self.process_stats[pid]["cmdline"])
|
||||
except KeyError:
|
||||
pass
|
||||
metric.add_metric(label_values, self.process_stats[pid][cpu_or_memory])
|
||||
del self.process_stats[pid][cpu_or_memory]
|
||||
except (KeyError, TypeError, IndexError):
|
||||
pass
|
||||
|
||||
# Update camera metrics
|
||||
if "cameras" in stats:
|
||||
for camera_name, camera_stats in stats["cameras"].items():
|
||||
if "camera_fps" in camera_stats:
|
||||
CAMERA_FPS.labels(camera_name=camera_name).set(
|
||||
camera_stats["camera_fps"]
|
||||
)
|
||||
if "detection_fps" in camera_stats:
|
||||
DETECTION_FPS.labels(camera_name=camera_name).set(
|
||||
camera_stats["detection_fps"]
|
||||
)
|
||||
if "process_fps" in camera_stats:
|
||||
PROCESS_FPS.labels(camera_name=camera_name).set(
|
||||
camera_stats["process_fps"]
|
||||
)
|
||||
if "skipped_fps" in camera_stats:
|
||||
SKIPPED_FPS.labels(camera_name=camera_name).set(
|
||||
camera_stats["skipped_fps"]
|
||||
)
|
||||
if "detection_enabled" in camera_stats:
|
||||
DETECTION_ENABLED.labels(camera_name=camera_name).set(
|
||||
camera_stats["detection_enabled"]
|
||||
)
|
||||
if "audio_dBFS" in camera_stats:
|
||||
AUDIO_DBFS.labels(camera_name=camera_name).set(
|
||||
camera_stats["audio_dBFS"]
|
||||
)
|
||||
if "audio_rms" in camera_stats:
|
||||
AUDIO_RMS.labels(camera_name=camera_name).set(
|
||||
camera_stats["audio_rms"]
|
||||
)
|
||||
def collect(self):
|
||||
stats = self.process_stats # Assign self.process_stats to local variable stats
|
||||
|
||||
# Update detector metrics
|
||||
if "detectors" in stats:
|
||||
for name, detector in stats["detectors"].items():
|
||||
if "inference_speed" in detector:
|
||||
DETECTOR_INFERENCE.labels(name=name).set(
|
||||
detector["inference_speed"] * 0.001
|
||||
) # ms to seconds
|
||||
if "detection_start" in detector:
|
||||
DETECTOR_START.labels(name=name).set(detector["detection_start"])
|
||||
try:
|
||||
self.process_stats = stats["cpu_usages"]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
# Update GPU metrics
|
||||
if "gpu_usages" in stats:
|
||||
for gpu_name, gpu_stats in stats["gpu_usages"].items():
|
||||
if "gpu" in gpu_stats:
|
||||
GPU_USAGE.labels(gpu_name=gpu_name).set(float(gpu_stats["gpu"]))
|
||||
if "mem" in gpu_stats:
|
||||
GPU_MEMORY.labels(gpu_name=gpu_name).set(float(gpu_stats["mem"]))
|
||||
# process stats for cameras, detectors and other
|
||||
cpu_usages = GaugeMetricFamily(
|
||||
"frigate_cpu_usage_percent",
|
||||
"Process CPU usage %",
|
||||
labels=["pid", "name", "process", "type", "cmdline"],
|
||||
)
|
||||
mem_usages = GaugeMetricFamily(
|
||||
"frigate_mem_usage_percent",
|
||||
"Process memory usage %",
|
||||
labels=["pid", "name", "process", "type", "cmdline"],
|
||||
)
|
||||
|
||||
        # Update service metrics
        if "service" in stats:
            service = stats["service"]
        # camera stats
        audio_dBFS = GaugeMetricFamily(
            "frigate_audio_dBFS", "Audio dBFS for camera", labels=["camera_name"]
        )
        audio_rms = GaugeMetricFamily(
            "frigate_audio_rms", "Audio RMS for camera", labels=["camera_name"]
        )
        camera_fps = GaugeMetricFamily(
            "frigate_camera_fps",
            "Frames per second being consumed from your camera.",
            labels=["camera_name"],
        )
        detection_enabled = GaugeMetricFamily(
            "frigate_detection_enabled",
            "Detection enabled for camera",
            labels=["camera_name"],
        )
        detection_fps = GaugeMetricFamily(
            "frigate_detection_fps",
            "Number of times detection is run per second.",
            labels=["camera_name"],
        )
        process_fps = GaugeMetricFamily(
            "frigate_process_fps",
            "Frames per second being processed by frigate.",
            labels=["camera_name"],
        )
        skipped_fps = GaugeMetricFamily(
            "frigate_skipped_fps",
            "Frames per second skipped for processing by frigate.",
            labels=["camera_name"],
        )

if "uptime" in service:
|
||||
UPTIME.set(service["uptime"])
|
||||
if "last_updated" in service:
|
||||
LAST_UPDATE.set(service["last_updated"])
|
||||
# read camera stats assuming version < frigate:0.13.0-beta3
|
||||
cameras = stats
|
||||
try:
|
||||
# try to read camera stats in case >= frigate:0.13.0-beta3
|
||||
cameras = stats["cameras"]
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
        # Storage metrics
        if "storage" in service:
            for path, storage in service["storage"].items():
                if "free" in storage:
                    STORAGE_FREE.labels(storage=path).set(
                        storage["free"] * 1e6
                    )  # MB to bytes
                if "total" in storage:
                    STORAGE_TOTAL.labels(storage=path).set(storage["total"] * 1e6)
                if "used" in storage:
                    STORAGE_USED.labels(storage=path).set(storage["used"] * 1e6)
                if "mount_type" in storage:
                    STORAGE_MOUNT.labels(storage=path).info(
                        {"mount_type": storage["mount_type"], "storage": path}
                    )
        for camera_name, camera_stats in cameras.items():
            self.add_metric(audio_dBFS, [camera_name], camera_stats, "audio_dBFS")
            self.add_metric(audio_rms, [camera_name], camera_stats, "audio_rms")
            self.add_metric(camera_fps, [camera_name], camera_stats, "camera_fps")
            self.add_metric(
                detection_enabled, [camera_name], camera_stats, "detection_enabled"
            )
            self.add_metric(detection_fps, [camera_name], camera_stats, "detection_fps")
            self.add_metric(process_fps, [camera_name], camera_stats, "process_fps")
            self.add_metric(skipped_fps, [camera_name], camera_stats, "skipped_fps")

        # Temperature metrics
        if "temperatures" in service:
            for device, temp in service["temperatures"].items():
                TEMPERATURE.labels(device=device).set(temp)
            self.add_metric_process(
                cpu_usages,
                camera_stats,
                camera_name,
                "ffmpeg_pid",
                "ffmpeg",
                "cpu",
                "Camera",
            )
            self.add_metric_process(
                cpu_usages,
                camera_stats,
                camera_name,
                "capture_pid",
                "capture",
                "cpu",
                "Camera",
            )
            self.add_metric_process(
                cpu_usages, camera_stats, camera_name, "pid", "detect", "cpu", "Camera"
            )

        # Version info
        if "version" in service and "latest_version" in service:
            SYSTEM_INFO.info(
                {
                    "version": service["version"],
                    "latest_version": service["latest_version"],
                }
            )
            self.add_metric_process(
                mem_usages,
                camera_stats,
                camera_name,
                "ffmpeg_pid",
                "ffmpeg",
                "mem",
                "Camera",
            )
            self.add_metric_process(
                mem_usages,
                camera_stats,
                camera_name,
                "capture_pid",
                "capture",
                "mem",
                "Camera",
            )
            self.add_metric_process(
                mem_usages, camera_stats, camera_name, "pid", "detect", "mem", "Camera"
            )

        yield audio_dBFS
        yield audio_rms
        yield camera_fps
        yield detection_enabled
        yield detection_fps
        yield process_fps
        yield skipped_fps

        # bandwidth stats
        bandwidth_usages = GaugeMetricFamily(
            "frigate_bandwidth_usages_kBps",
            "Bandwidth usage in kilobytes per second",
            labels=["pid", "name", "process", "cmdline"],
        )

        try:
            for b_pid, b_stats in stats["bandwidth_usages"].items():
                label = [b_pid]  # pid label
                try:
                    n = stats["cpu_usages"][b_pid]["cmdline"]
                    for p_name, p_stats in stats["processes"].items():
                        if str(p_stats["pid"]) == b_pid:
                            n = p_name
                            break

                    # new frigate:0.13.0-beta3 stat 'cmdline'
                    label.append(n)  # name label
                    label.append(stats["cpu_usages"][b_pid]["cmdline"])  # process label
                    label.append(stats["cpu_usages"][b_pid]["cmdline"])  # cmdline label
                    self.add_metric(bandwidth_usages, label, b_stats, "bandwidth")
                except KeyError:
                    pass
        except KeyError:
            pass

        yield bandwidth_usages

        # detector stats
        try:
            yield GaugeMetricFamily(
                "frigate_detection_total_fps",
                "Sum of detection_fps across all cameras and detectors.",
                value=stats["detection_fps"],
            )
        except KeyError:
            pass

        detector_inference_speed = GaugeMetricFamily(
            "frigate_detector_inference_speed_seconds",
            "Time spent running object detection in seconds.",
            labels=["name"],
        )

        detector_detection_start = GaugeMetricFamily(
            "frigate_detection_start",
            "Detector start time (unix timestamp)",
            labels=["name"],
        )

        try:
            for detector_name, detector_stats in stats["detectors"].items():
                self.add_metric(
                    detector_inference_speed,
                    [detector_name],
                    detector_stats,
                    "inference_speed",
                    0.001,
                )  # ms to seconds
                self.add_metric(
                    detector_detection_start,
                    [detector_name],
                    detector_stats,
                    "detection_start",
                )
                self.add_metric_process(
                    cpu_usages,
                    stats["detectors"],
                    detector_name,
                    "pid",
                    "detect",
                    "cpu",
                    "Detector",
                )
                self.add_metric_process(
                    mem_usages,
                    stats["detectors"],
                    detector_name,
                    "pid",
                    "detect",
                    "mem",
                    "Detector",
                )
        except KeyError:
            pass

        yield detector_inference_speed
        yield detector_detection_start

        # detector process stats
        try:
            for detector_name, detector_stats in stats["detectors"].items():
                p_pid = str(detector_stats["pid"])
                label = [p_pid]  # pid label
                try:
                    # new frigate:0.13.0-beta3 stat 'cmdline'
                    label.append(detector_name)  # name label
                    label.append(detector_name)  # process label
                    label.append("detectors")  # type label
                    label.append(self.process_stats[p_pid]["cmdline"])  # cmdline label
                    self.add_metric(cpu_usages, label, self.process_stats[p_pid], "cpu")
                    self.add_metric(mem_usages, label, self.process_stats[p_pid], "mem")
                    del self.process_stats[p_pid]
                except KeyError:
                    pass
        except KeyError:
            pass

        # other named process stats
        try:
            for process_name, process_stats in stats["processes"].items():
                p_pid = str(process_stats["pid"])
                label = [p_pid]  # pid label
                try:
                    # new frigate:0.13.0-beta3 stat 'cmdline'
                    label.append(process_name)  # name label
                    label.append(process_name)  # process label
                    label.append(process_name)  # type label
                    label.append(self.process_stats[p_pid]["cmdline"])  # cmdline label
                    self.add_metric(cpu_usages, label, self.process_stats[p_pid], "cpu")
                    self.add_metric(mem_usages, label, self.process_stats[p_pid], "mem")
                    del self.process_stats[p_pid]
                except KeyError:
                    pass
        except KeyError:
            pass

        # remaining process stats
        try:
            for process_id, pid_stats in self.process_stats.items():
                label = [process_id]  # pid label
                try:
                    # new frigate:0.13.0-beta3 stat 'cmdline'
                    label.append(pid_stats["cmdline"])  # name label
                    label.append(pid_stats["cmdline"])  # process label
                    label.append("Other")  # type label
                    label.append(pid_stats["cmdline"])  # cmdline label
                except KeyError:
                    pass
                self.add_metric(cpu_usages, label, pid_stats, "cpu")
                self.add_metric(mem_usages, label, pid_stats, "mem")
        except KeyError:
            pass

        yield cpu_usages
        yield mem_usages

        # gpu stats
        gpu_usages = GaugeMetricFamily(
            "frigate_gpu_usage_percent", "GPU utilisation %", labels=["gpu_name"]
        )
        gpu_mem_usages = GaugeMetricFamily(
            "frigate_gpu_mem_usage_percent", "GPU memory usage %", labels=["gpu_name"]
        )

        try:
            for gpu_name, gpu_stats in stats["gpu_usages"].items():
                self.add_metric(gpu_usages, [gpu_name], gpu_stats, "gpu")
                self.add_metric(gpu_mem_usages, [gpu_name], gpu_stats, "mem")
        except KeyError:
            pass

        yield gpu_usages
        yield gpu_mem_usages

        # service stats
        uptime_seconds = GaugeMetricFamily(
            "frigate_service_uptime_seconds", "Uptime seconds"
        )
        last_updated_timestamp = GaugeMetricFamily(
            "frigate_service_last_updated_timestamp",
            "Stats recorded time (unix timestamp)",
        )

        try:
            service_stats = stats["service"]
            self.add_metric(uptime_seconds, [""], service_stats, "uptime")
            self.add_metric(last_updated_timestamp, [""], service_stats, "last_updated")

            info = {
                "latest_version": stats["service"]["latest_version"],
                "version": stats["service"]["version"],
            }
            yield InfoMetricFamily(
                "frigate_service", "Frigate version info", value=info
            )
        except KeyError:
            pass

        yield uptime_seconds
        yield last_updated_timestamp

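
InfoMetricFamily renders its value dict as labels on a single sample whose name gains an "_info" suffix. A tiny hedged sketch (metric name and version strings are made up for illustration):

    from prometheus_client.core import InfoMetricFamily

    info = InfoMetricFamily("sketch_service", "Service version info")
    info.add_metric([], {"version": "0.15.0", "latest_version": "0.15.1"})
    # A scrape would then contain a line like:
    #   sketch_service_info{latest_version="0.15.1",version="0.15.0"} 1.0
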
        temperatures = GaugeMetricFamily(
            "frigate_device_temperature", "Device Temperature", labels=["device"]
        )
        try:
            for device_name in stats["service"]["temperatures"]:
                self.add_metric(
                    temperatures,
                    [device_name],
                    stats["service"]["temperatures"],
                    device_name,
                )
        except KeyError:
            pass

        yield temperatures

        storage_free = GaugeMetricFamily(
            "frigate_storage_free_bytes", "Storage free bytes", labels=["storage"]
        )
        storage_mount_type = InfoMetricFamily(
            "frigate_storage_mount_type",
            "Storage mount type",
            labels=["mount_type", "storage"],
        )
        storage_total = GaugeMetricFamily(
            "frigate_storage_total_bytes", "Storage total bytes", labels=["storage"]
        )
        storage_used = GaugeMetricFamily(
            "frigate_storage_used_bytes", "Storage used bytes", labels=["storage"]
        )

        try:
            for storage_path, storage_stats in stats["service"]["storage"].items():
                self.add_metric(
                    storage_free, [storage_path], storage_stats, "free", 1e6
                )  # MB to bytes
                self.add_metric(
                    storage_total, [storage_path], storage_stats, "total", 1e6
                )  # MB to bytes
                self.add_metric(
                    storage_used, [storage_path], storage_stats, "used", 1e6
                )  # MB to bytes
                storage_mount_type.add_metric(
                    [storage_path],
                    {
                        "mount_type": storage_stats["mount_type"],
                        "storage": storage_path,
                    },
                )
        except KeyError:
            pass

        yield storage_free
        yield storage_mount_type
        yield storage_total
        yield storage_used

        # count events
        events = []

        if len(events) > 0:
            # events[0] is newest event, last element is oldest, don't need to sort

            if not self.previous_event_id:
                # ignore all previous events on startup, prometheus might have already counted them
                self.previous_event_id = events[0]["id"]
                self.previous_event_start_time = int(events[0]["start_time"])

            for event in events:
                # break if event already counted
                if event["id"] == self.previous_event_id:
                    break

                # break if event starts before previous event
                if event["start_time"] < self.previous_event_start_time:
                    break

                # store counted events in a dict
                try:
                    cam = self.all_events[event["camera"]]
                    try:
                        cam[event["label"]] += 1
                    except KeyError:
                        # create label dict if not exists
                        cam.update({event["label"]: 1})
                except KeyError:
                    # create camera and label dict if not exists
                    self.all_events.update({event["camera"]: {event["label"]: 1}})

            # don't recount events next time
            self.previous_event_id = events[0]["id"]
            self.previous_event_start_time = int(events[0]["start_time"])

        camera_events = CounterMetricFamily(
            "frigate_camera_events",
            "Count of camera events since exporter started",
            labels=["camera", "label"],
        )

        for camera, cam_dict in self.all_events.items():
            for label, label_value in cam_dict.items():
                camera_events.add_metric([camera, label], label_value)

        yield camera_events
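
Because the accumulated counts live in self.all_events and are re-emitted on every scrape, the metric behaves like a counter that resets when the exporter restarts (hence the "since exporter started" wording). A reduced sketch of the same aggregation, with illustrative names:

    from prometheus_client.core import CounterMetricFamily

    def build_event_counter(all_events: dict) -> CounterMetricFamily:
        family = CounterMetricFamily(
            "sketch_camera_events",
            "Count of camera events since exporter started",
            labels=["camera", "label"],
        )
        for camera, labels in all_events.items():
            for label, count in labels.items():
                family.add_metric([camera, label], count)
        return family

    # e.g. build_event_counter({"front": {"person": 3, "car": 1}})
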
collector = CustomCollector(None)
REGISTRY.register(collector)
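
Once registered, every call to generate_latest(REGISTRY) invokes the collector's collect() and renders whatever it yields into the Prometheus text exposition format. A hedged usage sketch, assuming only prometheus_client:

    from prometheus_client import generate_latest
    from prometheus_client.core import REGISTRY

    # Walks every registered collector (including CustomCollector above)
    # and returns the exposition document as bytes.
    payload = generate_latest(REGISTRY)
    print(payload.decode("utf-8").splitlines()[:5])
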
def update_metrics(stats):
    """Updates the Prometheus metrics with the given stats data."""
    try:
        collector.process_stats = stats  # Directly assign the stats data
        # Important: since we are not fetching from a URL, we need to call collect() manually
        for _ in collector.collect():
            pass
    except Exception as e:
        print(f"Error updating Prometheus metrics: {str(e)}")
        logging.error(f"Error updating metrics: {e}")

def get_metrics() -> tuple[str, str]:
    """Get Prometheus metrics in text format"""
    return generate_latest(), CONTENT_TYPE_LATEST


def get_metrics():
    """Returns the Prometheus metrics in text format."""
    content = generate_latest(REGISTRY)  # Use generate_latest
    return content, CONTENT_TYPE_LATEST
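
A hedged sketch of serving get_metrics() over HTTP; FastAPI is shown because the test changes below import create_fastapi_app, but the app object and route path here are illustrative:

    from fastapi import FastAPI, Response

    app = FastAPI()

    @app.get("/metrics")
    def metrics_endpoint() -> Response:
        # get_metrics() returns (payload, content_type) as defined above.
        content, content_type = get_metrics()
        return Response(content=content, media_type=content_type)
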
@ -282,16 +282,24 @@ def stats_snapshot(
    }
    stats["detection_fps"] = round(total_detection_fps, 2)

    if config.semantic_search.enabled:
        embeddings_metrics = stats_tracking["embeddings_metrics"]
        stats["embeddings"] = {
            "image_embedding_speed": round(
                embeddings_metrics.image_embeddings_fps.value * 1000, 2
            ),
            "text_embedding_speed": round(
                embeddings_metrics.text_embeddings_sps.value * 1000, 2
            ),
        }
    stats["embeddings"] = {}

    # Get metrics if available
    embeddings_metrics = stats_tracking.get("embeddings_metrics")

    if embeddings_metrics:
        # Add metrics based on what's enabled
        if config.semantic_search.enabled:
            stats["embeddings"].update(
                {
                    "image_embedding_speed": round(
                        embeddings_metrics.image_embeddings_fps.value * 1000, 2
                    ),
                    "text_embedding_speed": round(
                        embeddings_metrics.text_embeddings_sps.value * 1000, 2
                    ),
                }
            )

        if config.face_recognition.enabled:
            stats["embeddings"]["face_recognition_speed"] = round(
@ -303,6 +311,11 @@ def stats_snapshot(
                embeddings_metrics.alpr_pps.value * 1000, 2
            )

        if "license_plate" not in config.objects.all_objects:
            stats["embeddings"]["yolov9_plate_detection_speed"] = round(
                embeddings_metrics.yolov9_lpr_fps.value * 1000, 2
            )

    get_processing_stats(config, stats, hwaccel_errors)

    stats["service"] = {
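
The reshaped block seeds stats["embeddings"] with an empty dict and merges in only what each feature flag enables, so the key is always present even when every feature is off. A reduced sketch of that guarded-update pattern (function and key names are placeholders, not Frigate's):

    def build_feature_stats(metrics, features: dict) -> dict:
        stats = {}  # key container always exists, even with no metrics
        if metrics:
            if features.get("semantic_search"):
                stats.update({"image_embedding_speed": metrics["image_ms"]})
            if features.get("face_recognition"):
                stats["face_recognition_speed"] = metrics["face_ms"]
        return stats

    # e.g. build_feature_stats({"image_ms": 12.3, "face_ms": 4.5},
    #                          {"semantic_search": True})
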
@ -10,6 +10,7 @@ from pydantic import Json

from frigate.api.fastapi_app import create_fastapi_app
from frigate.config import FrigateConfig
from frigate.const import BASE_DIR, CACHE_DIR
from frigate.models import Event, Recordings, ReviewSegment
from frigate.review.types import SeverityEnum
from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS
@ -73,19 +74,19 @@ class BaseTestHttp(unittest.TestCase):
                    "total": 67.1,
                    "used": 16.6,
                },
                "/media/frigate/clips": {
                os.path.join(BASE_DIR, "clips"): {
                    "free": 42429.9,
                    "mount_type": "ext4",
                    "total": 244529.7,
                    "used": 189607.0,
                },
                "/media/frigate/recordings": {
                os.path.join(BASE_DIR, "recordings"): {
                    "free": 0.2,
                    "mount_type": "ext4",
                    "total": 8.0,
                    "used": 7.8,
                },
                "/tmp/cache": {
                CACHE_DIR: {
                    "free": 976.8,
                    "mount_type": "tmpfs",
                    "total": 1000.0,
@ -854,9 +854,9 @@ class TestConfig(unittest.TestCase):
        assert frigate_config.model.merged_labelmap[0] == "person"

    def test_plus_labelmap(self):
        with open("/config/model_cache/test", "w") as f:
        with open(os.path.join(MODEL_CACHE_DIR, "test"), "w") as f:
            json.dump(self.plus_model_info, f)
        with open("/config/model_cache/test.json", "w") as f:
        with open(os.path.join(MODEL_CACHE_DIR, "test.json"), "w") as f:
            json.dump(self.plus_model_info, f)

        config = {
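
Both test hunks above replace hard-coded container paths with shared constants, so the expectations track a single source of truth. A hedged sketch of what those constants imply; the authoritative definitions live in frigate/const.py and may differ in detail:

    import os

    # Values inferred from the literals the hunks replace.
    BASE_DIR = "/media/frigate"
    CACHE_DIR = "/tmp/cache"
    MODEL_CACHE_DIR = os.path.join("/config", "model_cache")

    assert os.path.join(BASE_DIR, "clips") == "/media/frigate/clips"
    assert os.path.join(MODEL_CACHE_DIR, "test") == "/config/model_cache/test"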