Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-08-08 13:51:01 +02:00)

Compare commits: 126 commits, v0.15.0-be...master
Commits (SHA1):

334b6670e1  b5067c07f8  21e9b2f2ce  4a94b43e52  3bda638678  687e118b58
95daf0ba05  213dc97c17  f29cf43f52  aabd5b0077  460e291bf1  ee51326d35
948b087d3c  77589c18f4  6a62467998  6857cc2b97  37618b0f57  e7f6e069f6
ee4767b1ce  6cb5cfb0c9  7cfa818e63  0764fea159  e3ed1ab8ec  b01b1faa3f
efbc1f836b  7c33f9c579  a9255bddb5  6d80a19518  011a2dbfaf  9a54c8ca49
cc99330063  7e6a241e03  2d281855fc  22cc698b4e  5a5a54fc66  6536368467
dc79af2d98  cc955b1e66  da34ff964f  d6a2965cb2  4b429e440b  8759b4a0d3
df840b7cd5  0645dc70a5  b230b35c62  31da9351f0  93d39370b6  cea210d800
7b65bcf13c  335b7564d5  202e9ad9ce  9dc4e8f290  99d27c154e  5943fc1895
9efc20e58a  6d8234fa27  ad76c28a66  131d07e649  776bb79f0b  12e62488c6
aedfaa3641  83ac42cbdc  a5ce8d0d77  0ee2e404da  3947e79086  91ab1071d2
409e911752  9983bd8d92  32c71c4108  ef6952e3ea  173b7aa308  c4727f19e1
b8a74793ca  c1dede9369  0c4ea504d8  b265b6b190  d57a61b50f  4fc9106c17
38e098ca31  e7ad38d827  b5e5127d48  a1ce9aacf2  322b847356  98338e4c7f
171a89f37b  8114b541a8  c48396c5c6  00371546a3  87e7b62c85  15ffe5c254
a767dad3a1  9387246f83  bed20de302  70fc5393b1  9b80dbe014  78a013d63a
24f4aa79c8  dfc94b5ad6  ddfe8f3921  4af752028f  b149828c9f  3dc26e78ef
5acbe37e6f  d9ef8fa206  292499aebc  717493e668  d49f958d4d  33ee32865f
17f8939f97  1b7fe9523d  0763f56047  1ea282fba8  869fa2631e  f336a91fee
d302b6e198  ed2e1f3f72  b4d82084a9  53b96dfb89  0e3fb6cbdd  2461d01329
5cafca1be0  9c5a04f25f  1ffdd32013  99506845f7  ffd05f90f3  3a8c290f91
.github/actions/setup/action.yml (vendored, 4 changes)

@@ -33,9 +33,9 @@ runs:
     with:
       string: ${{ github.repository }}
   - name: Set up QEMU
-    uses: docker/setup-qemu-action@v2
+    uses: docker/setup-qemu-action@v3
   - name: Set up Docker Buildx
-    uses: docker/setup-buildx-action@v2
+    uses: docker/setup-buildx-action@v3
   - name: Log in to the Container registry
     uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
     with:
.github/workflows/ci.yml (vendored, 62 changes)

@@ -7,7 +7,7 @@ on:
       - dev
       - master
     paths-ignore:
-      - 'docs/**'
+      - "docs/**"

 # only run the latest commit to avoid cache overwrites
 concurrency:

@@ -19,11 +19,13 @@ env:

 jobs:
   amd64_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: AMD64 Build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup

@@ -40,11 +42,13 @@ jobs:
           tags: ${{ steps.setup.outputs.image-name }}-amd64
           cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
   arm64_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: ARM Build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup

@@ -62,8 +66,9 @@ jobs:
             ${{ steps.setup.outputs.image-name }}-standard-arm64
           cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
       - name: Build and push RPi build
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: rpi
           files: docker/rpi/rpi.hcl

@@ -71,21 +76,14 @@ jobs:
             rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
-      - name: Build and push Rockchip build
-        uses: docker/bake-action@v3
-        with:
-          push: true
-          targets: rk
-          files: docker/rockchip/rk.hcl
-          set: |
-            rk.tags=${{ steps.setup.outputs.image-name }}-rk
-            *.cache-from=type=gha
   jetson_jp4_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: Jetson Jetpack 4
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup

@@ -97,8 +95,9 @@ jobs:
           BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
           SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
           TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: tensorrt
           files: docker/tensorrt/trt.hcl

@@ -107,11 +106,13 @@ jobs:
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max
   jetson_jp5_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: Jetson Jetpack 5
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup

@@ -123,8 +124,9 @@ jobs:
           BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
           SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
           TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: tensorrt
           files: docker/tensorrt/trt.hcl

@@ -133,13 +135,15 @@ jobs:
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
   amd64_extra_builds:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: AMD64 Extra Build
     needs:
       - amd64_build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup

@@ -148,8 +152,9 @@ jobs:
       - name: Build and push TensorRT (x86 GPU)
         env:
           COMPUTE_LEVEL: "50 60 70 80 90"
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: tensorrt
           files: docker/tensorrt/trt.hcl

@@ -158,21 +163,24 @@ jobs:
            *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
            *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
   arm64_extra_builds:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: ARM Extra Build
     needs:
       - arm64_build
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push Rockchip build
-        uses: docker/bake-action@v3
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: rk
           files: docker/rockchip/rk.hcl

@@ -180,7 +188,7 @@ jobs:
             rk.tags=${{ steps.setup.outputs.image-name }}-rk
             *.cache-from=type=gha
   combined_extra_builds:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: Combined Extra Builds
     needs:
       - amd64_build

@@ -188,14 +196,17 @@ jobs:
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up QEMU and Buildx
         id: setup
         uses: ./.github/actions/setup
         with:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - name: Build and push Hailo-8l build
-        uses: docker/bake-action@v4
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: h8l
           files: docker/hailo8l/h8l.hcl

@@ -207,8 +218,9 @@ jobs:
         env:
           AMDGPU: gfx
           HSA_OVERRIDE: 0
-        uses: docker/bake-action@v3
+        uses: docker/bake-action@v6
         with:
+          source: .
           push: true
           targets: rocm
           files: docker/rocm/rocm.hcl

@@ -218,7 +230,7 @@ jobs:
   # The majority of users running arm64 are rpi users, so the rpi
   # build should be the primary arm64 image
   assemble_default_build:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     name: Assemble and push default build
     needs:
       - amd64_build
.github/workflows/dependabot-auto-merge.yaml (vendored, file deleted, 24 lines)

@@ -1,24 +0,0 @@
-name: dependabot-auto-merge
-on: pull_request
-
-permissions:
-  contents: write
-
-jobs:
-  dependabot-auto-merge:
-    runs-on: ubuntu-latest
-    if: github.actor == 'dependabot[bot]'
-    steps:
-      - name: Get Dependabot metadata
-        id: metadata
-        uses: dependabot/fetch-metadata@v2
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Enable auto-merge for Dependabot PRs
-        if: steps.metadata.outputs.dependency-type == 'direct:development' && (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch')
-        run: |
-          gh pr review --approve "$PR_URL"
-          gh pr merge --auto --squash "$PR_URL"
-        env:
-          PR_URL: ${{ github.event.pull_request.html_url }}
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/pull_request.yml (vendored, 14 changes)

@@ -3,7 +3,7 @@ name: On pull request
 on:
   pull_request:
     paths-ignore:
-      - 'docs/**'
+      - "docs/**"

 env:
   DEFAULT_PYTHON: 3.9

@@ -19,6 +19,8 @@ jobs:
       DOCKER_BUILDKIT: "1"
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 16.x

@@ -38,6 +40,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 16.x

@@ -52,6 +56,8 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 20.x

@@ -67,8 +73,10 @@ jobs:
     steps:
       - name: Check out the repository
        uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - name: Set up Python ${{ env.DEFAULT_PYTHON }}
-        uses: actions/setup-python@v5.1.0
+        uses: actions/setup-python@v5.3.0
        with:
          python-version: ${{ env.DEFAULT_PYTHON }}
      - name: Install requirements

@@ -88,6 +96,8 @@ jobs:
     steps:
       - name: Check out code
         uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - uses: actions/setup-node@master
         with:
           node-version: 16.x
.github/workflows/release.yml (vendored, 9 changes)

@@ -11,6 +11,8 @@ jobs:

     steps:
       - uses: actions/checkout@v4
+        with:
+          persist-credentials: false
       - id: lowercaseRepo
         uses: ASzc/change-string-case-action@v6
         with:

@@ -22,10 +24,13 @@ jobs:
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Create tag variables
+        env:
+          TAG: ${{ github.ref_name }}
+          LOWERCASE_REPO: ${{ steps.lowercaseRepo.outputs.lowercase }}
        run: |
-          BUILD_TYPE=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
+          BUILD_TYPE=$([[ "${TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
          echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_ENV
-          echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV
+          echo "BASE=ghcr.io/${LOWERCASE_REPO}" >> $GITHUB_ENV
          echo "BUILD_TAG=${GITHUB_SHA::7}" >> $GITHUB_ENV
          echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV
      - name: Tag and push the main image
.github/workflows/stale.yml (vendored, 5 changes)

@@ -23,7 +23,9 @@ jobs:
          exempt-pr-labels: "pinned,security,dependencies"
          operations-per-run: 120
      - name: Print outputs
-        run: echo ${{ join(steps.stale.outputs.*, ',') }}
+        env:
+          STALE_OUTPUT: ${{ join(steps.stale.outputs.*, ',') }}
+        run: echo "$STALE_OUTPUT"

  # clean_ghcr:
  #   name: Delete outdated dev container images

@@ -38,4 +40,3 @@ jobs:
  #     account-type: personal
  #     token: ${{ secrets.GITHUB_TOKEN }}
  #     token-type: github-token
-
Makefile (2 changes)

@@ -1,7 +1,7 @@
 default_target: local

 COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
-VERSION = 0.15.0
+VERSION = 0.15.2
 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
 GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
 BOARDS= #Initialized empty
@@ -61,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
     object_detector.cleanup()
     print(f"{id} - Processed for {duration:.2f} seconds.")
     print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
-    print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
+    print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")


 ######
@@ -1,12 +1,12 @@
-appdirs==1.4.4
-argcomplete==2.0.0
-contextlib2==0.6.0.post1
-distlib==0.3.6
-filelock==3.8.0
-future==0.18.2
-importlib-metadata==5.1.0
-importlib-resources==5.1.2
-netaddr==0.8.0
-netifaces==0.10.9
-verboselogs==1.7
-virtualenv==20.17.0
+appdirs==1.4.*
+argcomplete==2.0.*
+contextlib2==0.6.*
+distlib==0.3.*
+filelock==3.8.*
+future==0.18.*
+importlib-metadata==5.1.*
+importlib-resources==5.1.*
+netaddr==0.8.*
+netifaces==0.10.*
+verboselogs==1.7.*
+virtualenv==20.17.*
@@ -215,7 +215,6 @@ ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
 ENV OPENCV_FFMPEG_LOGLEVEL=8

 ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
-ENV LIBAVFORMAT_VERSION_MAJOR=60

 # Install dependencies
 RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
@@ -42,8 +42,14 @@ function migrate_db_path() {
     fi
 }

+function set_libva_version() {
+    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+}
+
 echo "[INFO] Preparing Frigate..."
 migrate_db_path
+set_libva_version
 echo "[INFO] Starting Frigate..."

 cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"
@@ -43,6 +43,11 @@ function get_ip_and_port_from_supervisor() {
     export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
 }

+function set_libva_version() {
+    local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
+    export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
+}
+
 if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
     echo "[INFO] Removing stale config from last run..."
     rm /dev/shm/go2rtc.yaml

@@ -61,6 +66,8 @@ else
     echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
 fi

+set_libva_version
+
 readonly config_path="/config"

 if [[ -x "${config_path}/go2rtc" ]]; then
docker/main/rootfs/usr/local/ffmpeg/get_ffmpeg_path.py (new file, 45 lines)

@@ -0,0 +1,45 @@
+import json
+import os
+import shutil
+import sys
+
+from ruamel.yaml import YAML
+
+sys.path.insert(0, "/opt/frigate")
+from frigate.const import (
+    DEFAULT_FFMPEG_VERSION,
+    INCLUDED_FFMPEG_VERSIONS,
+)
+
+sys.path.remove("/opt/frigate")
+
+yaml = YAML()
+
+config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
+
+# Check if we can use .yaml instead of .yml
+config_file_yaml = config_file.replace(".yml", ".yaml")
+if os.path.isfile(config_file_yaml):
+    config_file = config_file_yaml
+
+try:
+    with open(config_file) as f:
+        raw_config = f.read()
+
+    if config_file.endswith((".yaml", ".yml")):
+        config: dict[str, any] = yaml.load(raw_config)
+    elif config_file.endswith(".json"):
+        config: dict[str, any] = json.loads(raw_config)
+except FileNotFoundError:
+    config: dict[str, any] = {}
+
+path = config.get("ffmpeg", {}).get("path", "default")
+if path == "default":
+    if shutil.which("ffmpeg") is None:
+        print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
+    else:
+        print("ffmpeg")
+elif path in INCLUDED_FFMPEG_VERSIONS:
+    print(f"/usr/lib/ffmpeg/{path}/bin/ffmpeg")
+else:
+    print(f"{path}/bin/ffmpeg")
@@ -22,6 +22,6 @@ ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt

 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/ffmpeg/6.0/bin/
-ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
+ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/
 ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"
@@ -12,7 +12,5 @@ RUN rm -rf /usr/lib/btbn-ffmpeg/
 RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \
     /deps/install_deps.sh

-ENV LIBAVFORMAT_VERSION_MAJOR=58
-
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
@@ -12,26 +12,11 @@ ARG TARGETARCH
 COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt
 RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt

-# Build CuDNN
-FROM wget AS cudnn-deps
-
-ARG COMPUTE_LEVEL
-
-RUN apt-get update \
-    && apt-get install -y git build-essential
-
-RUN wget https://developer.download.nvidia.com/compute/cuda/repos/debian11/x86_64/cuda-keyring_1.1-1_all.deb \
-    && dpkg -i cuda-keyring_1.1-1_all.deb \
-    && apt-get update \
-    && apt-get -y install cuda-toolkit \
-    && rm -rf /var/lib/apt/lists/*
-
 FROM tensorrt-base AS frigate-tensorrt
 ENV TRT_VER=8.5.3
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
     pip3 install -U /deps/trt-wheels/*.whl && \
     ldconfig
-COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda

 ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
 WORKDIR /opt/frigate/

@@ -42,7 +27,7 @@ FROM devcontainer AS devcontainer-trt

 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
-COPY --from=cudnn-deps /usr/local/cuda-12.6 /usr/local/cuda
+COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \
@@ -24,6 +24,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0

 COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
 COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
+COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
 COPY docker/tensorrt/detector/rootfs/ /
 ENV YOLO_MODELS=""
@@ -20,7 +20,7 @@ FIRST_MODEL=true
 MODEL_DOWNLOAD=""
 MODEL_CONVERT=""

-if [ -z "$YOLO_MODELS"]; then
+if [ -z "$YOLO_MODELS" ]; then
     echo "tensorrt model preparation disabled"
     exit 0
 fi
@@ -4,7 +4,9 @@ title: Advanced Options
 sidebar_label: Advanced Options
 ---

-### `logger`
+### Logging
+
+#### Frigate `logger`

 Change the default log level for troubleshooting purposes.

@@ -28,6 +30,18 @@ Examples of available modules are:
 - `watchdog.<camera_name>`
 - `ffmpeg.<camera_name>.<sorted_roles>` NOTE: All FFmpeg logs are sent as `error` level.

+#### Go2RTC Logging
+
+See the go2rtc docs for logging configuration.
+
+```yaml
+go2rtc:
+  streams:
+    ...
+  log:
+    exec: trace
+```
+
 ### `environment_vars`

 This section can be used to set environment variables for those unable to modify the environment of the container (ie. within HassOS)
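For context on the `environment_vars` hunk above: the section simply takes key/value pairs that Frigate exports into its container environment. A minimal sketch; the variable name and value below are illustrative and not part of this diff:

```yaml
environment_vars:
  # any KEY: value pairs you need exported for Frigate's process
  EXAMPLE_VAR: example_value
```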
@@ -174,7 +188,7 @@ NOTE: The folder that is set for the config needs to be the folder that contains

 ### Custom go2rtc version

-Frigate currently includes go2rtc v1.9.4, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.9.2, there may be certain cases where you want to run a different version of go2rtc.

 To do this:

@@ -189,16 +203,16 @@ When frigate starts up, it checks whether your config file is valid, and if it i

 ### Via API

-Frigate can accept a new configuration file as JSON at the `/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid.
+Frigate can accept a new configuration file as JSON at the `/api/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid.

 ```bash
-curl -X POST http://frigate_host:5000/config/save -d @config.json
+curl -X POST http://frigate_host:5000/api/config/save -d @config.json
 ```

 if you'd like you can use your yaml config directly by using [`yq`](https://github.com/mikefarah/yq) to convert it to json:

 ```bash
-yq r -j config.yml | curl -X POST http://frigate_host:5000/config/save -d @-
+yq r -j config.yml | curl -X POST http://frigate_host:5000/api/config/save -d @-
 ```

 ### Via Command Line
@@ -24,6 +24,11 @@ On startup, an admin user and password are generated and printed in the logs. It

 In the event that you are locked out of your instance, you can tell Frigate to reset the admin password and print it in the logs on next startup using the `reset_admin_password` setting in your config file.

+```yaml
+auth:
+  reset_admin_password: true
+```
+
 ## Login failure rate limiting

 In order to limit the risk of brute force attacks, rate limiting is available for login failures. This is implemented with SlowApi, and the string notation for valid values is available in [the documentation](https://limits.readthedocs.io/en/stable/quickstart.html#examples).
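As a companion to the rate-limiting paragraph above, a minimal sketch of what such a config could look like. The option name `failed_login_rate_limit` is an assumption for illustration (it is not shown in this diff); the value uses the SlowApi/limits string notation linked above:

```yaml
auth:
  # assumed option name; the string follows the limits notation, e.g. "1/second;5/minute;20/hour"
  failed_login_rate_limit: "1/second;5/minute;20/hour"
```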
@@ -41,6 +41,7 @@ cameras:
     ...
     onvif:
       # Required: host of the camera being connected to.
+      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
       host: 0.0.0.0
       # Optional: ONVIF port for device (default: shown below).
       port: 8000

@@ -49,6 +50,8 @@ cameras:
       user: admin
       # Optional: password for login.
       password: admin
+      # Optional: Skip TLS verification from the ONVIF server (default: shown below)
+      tls_insecure: False
       # Optional: PTZ camera object autotracking. Keeps a moving object in
       # the center of the frame by automatically moving the PTZ camera.
       autotracking:
@@ -65,6 +65,18 @@ ffmpeg:

 ## Model/vendor specific setup

+### Amcrest & Dahua
+
+Amcrest & Dahua cameras should be connected to via RTSP using the following format:
+
+```
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=0 # this is the main stream
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=1 # this is the sub stream, typically supporting low resolutions only
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=2 # higher end cameras support a third stream with a mid resolution (1280x720, 1920x1080)
+rtsp://USERNAME:PASSWORD@CAMERA-IP/cam/realmonitor?channel=1&subtype=3 # new higher end cameras support a fourth stream with another mid resolution (1280x720, 1920x1080)
+
+```
+
 ### Annke C800

 This camera is H.265 only. To be able to play clips on some devices (like MacOs or iPhone) the H.265 stream has to be repackaged and the audio stream has to be converted to aac. Unfortunately direct playback of in the browser is not working (yet), but the downloaded clip can be played locally.

@@ -77,7 +89,7 @@ cameras:
       record: -f segment -segment_time 10 -segment_format mp4 -reset_timestamps 1 -strftime 1 -c:v copy -tag:v hvc1 -bsf:v hevc_mp4toannexb -c:a aac

     inputs:
-      - path: rtsp://user:password@camera-ip:554/H264/ch1/main/av_stream # <----- Update for your camera
+      - path: rtsp://USERNAME:PASSWORD@CAMERA-IP/H264/ch1/main/av_stream # <----- Update for your camera
        roles:
          - detect
          - record

@@ -95,6 +107,29 @@ ffmpeg:
   input_args: preset-rtsp-blue-iris
 ```

+### Hikvision Cameras
+
+Hikvision cameras should be connected to via RTSP using the following format:
+
+```
+rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/101 # this is the main stream
+rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/102 # this is the sub stream, typically supporting low resolutions only
+rtsp://USERNAME:PASSWORD@CAMERA-IP/streaming/channels/103 # higher end cameras support a third stream with a mid resolution (1280x720, 1920x1080)
+```
+
+:::note
+
+[Some users have reported](https://www.reddit.com/r/frigate_nvr/comments/1hg4ze7/hikvision_security_settings) that newer Hikvision cameras require adjustments to the security settings:
+
+```
+RTSP Authentication - digest/basic
+RTSP Digest Algorithm - MD5
+WEB Authentication - digest/basic
+WEB Digest Algorithm - MD5
+```
+
+:::
+
 ### Reolink Cameras

 Reolink has older cameras (ex: 410 & 520) as well as newer camera (ex: 520a & 511wa) which support different subsets of options. In both cases using the http stream is recommended.

@@ -156,7 +191,9 @@ cameras:

 #### Reolink Doorbell

-The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
+The reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
+
+Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).

 ```yaml
 go2rtc:

@@ -194,3 +231,38 @@ ffmpeg:
 ### TP-Link VIGI Cameras

 TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.

+## USB Cameras (aka Webcams)
+
+To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's [FFmpeg Device](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#source-ffmpeg-device) support:
+
+- Preparation outside of Frigate:
+  - Get USB camera path. Run `v4l2-ctl --list-devices` to get a listing of locally-connected cameras available. (You may need to install `v4l-utils` in a way appropriate for your Linux distribution). In the sample configuration below, we use `video=0` to correlate with a detected device path of `/dev/video0`
+  - Get USB camera formats & resolutions. Run `ffmpeg -f v4l2 -list_formats all -i /dev/video0` to get an idea of what formats and resolutions the USB Camera supports. In the sample configuration below, we use a width of 1024 and height of 576 in the stream and detection settings based on what was reported back.
+  - If using Frigate in a container (e.g. Docker on TrueNAS), ensure you have USB Passthrough support enabled, along with a specific Host Device (`/dev/video0`) + Container Device (`/dev/video0`) listed.
+
+- In your Frigate Configuration File, add the go2rtc stream and roles as appropriate:
+
+```
+go2rtc:
+  streams:
+    usb_camera:
+      - "ffmpeg:device?video=0&video_size=1024x576#video=h264"
+
+cameras:
+  usb_camera:
+    enabled: true
+    ffmpeg:
+      inputs:
+        - path: rtsp://127.0.0.1:8554/usb_camera
+          input_args: preset-rtsp-restream
+          roles:
+            - detect
+            - record
+    detect:
+      enabled: false # <---- disable detection until you have a working camera feed
+      width: 1024
+      height: 576
+```
@@ -100,6 +100,8 @@ This list of working and non-working PTZ cameras is based on user feedback.
 | Ctronics PTZ           | ✅ | ❌ | |
 | Dahua                  | ✅ | ✅ | |
 | Dahua DH-SD2A500HB     | ✅ | ❌ | |
 | Dahua DH-SD49825GB-HNR | ✅ | ✅ | |
 | Dahua DH-P5AE-PV       | ❌ | ❌ | |
 | Foscam R5              | ✅ | ❌ | |
 | Hanwha XNP-6550RH      | ✅ | ❌ | |
 | Hikvision              | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others |
@@ -5,6 +5,8 @@ title: Generative AI

 Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.

+Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.
+
 :::info

 Semantic Search must be enabled to use Generative AI.

@@ -13,9 +15,9 @@ Semantic Search must be enabled to use Generative AI.

 ## Configuration

-Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 providers available to integrate with Frigate.
+Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.

-If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
+To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.

 ```yaml
 genai:

@@ -25,12 +27,23 @@ genai:
   model: gemini-1.5-flash

 cameras:
-  front_camera: ...
+  front_camera:
+    genai:
+      enabled: True # <- enable GenAI for your front camera
+      use_snapshot: True
+      objects:
+        - person
+      required_zones:
+        - steps
   indoor_camera:
-    genai: # <- disable GenAI for your indoor camera
-      enabled: False
+    genai:
+      enabled: False # <- disable GenAI for your indoor camera
 ```

 By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.

 Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.

 ## Ollama

 :::warning

@@ -92,6 +105,12 @@ genai:
   model: gemini-1.5-flash
 ```

+:::note
+
+To use a different Gemini-compatible API endpoint, set the `GEMINI_BASE_URL` environment variable to your provider's API URL.
+
+:::
+
 ## OpenAI

 OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.

@@ -114,6 +133,12 @@ genai:
   model: gpt-4o
 ```

+:::note
+
+To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
+
+:::
+
 ## Azure OpenAI

 Microsoft offers several vision models through Azure OpenAI. A subscription is required.

@@ -174,9 +199,7 @@ genai:
     car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
 ```

-Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
-
-Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
+Prompts can also be overriden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire.

 ```yaml
 cameras:
@@ -203,14 +203,13 @@ detectors:
   ov:
     type: openvino
     device: AUTO
-    model:
-      path: /openvino-model/ssdlite_mobilenet_v2.xml

 model:
   width: 300
   height: 300
   input_tensor: nhwc
   input_pixel_format: bgr
+  path: /openvino-model/ssdlite_mobilenet_v2.xml
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt

 record:
@@ -23,13 +23,13 @@ If you are using go2rtc, you should adjust the following settings in your camera

 - Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_. as these non-standard codecs remove keyframes (see below).
 - Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio.
-- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes.
+- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well.

 The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information.

 ### Audio Support

-MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
+MSE Requires PCMA/PCMU or AAC audio, WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.

 ```yaml
 go2rtc:

@@ -138,3 +138,13 @@ services:
 :::

 See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
+
+### Two way talk
+
+For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
+
+- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
+- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
+- For the Home Assistant Frigate card, [follow the docs](http://card.camera/#/usage/2-way-audio) for the correct source.
+
+To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell)
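The go2rtc block under "Audio Support" in the hunk above is truncated by this compare view. A minimal sketch of the kind of restream config that paragraph describes, providing both AAC (for MSE) and opus (for WebRTC); the stream name, camera URL, and use of go2rtc's `#audio=` transcoding options are assumptions for illustration:

```yaml
go2rtc:
  streams:
    my_camera:
      - rtsp://USERNAME:PASSWORD@CAMERA-IP/stream
      # re-encode audio so both AAC (MSE) and opus (WebRTC) tracks are available
      - "ffmpeg:my_camera#audio=aac#audio=opus"
```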
@@ -33,6 +33,14 @@ Frigate supports multiple different detectors that work on different types of ha

 :::

+:::note
+
+Multiple detectors can not be mixed for object detection (ex: OpenVINO and Coral EdgeTPU can not be used for object detection at the same time).
+
+This does not affect using hardware for accelerating other tasks such as [semantic search](./semantic_search.md)
+
+:::
+
 # Officially Supported Detectors

 Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.

@@ -116,6 +124,30 @@ detectors:
     device: pci
 ```

+## Hailo-8l
+
+This detector is available for use with Hailo-8 AI Acceleration Module.
+
+See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the hailo8.
+
+### Configuration
+
+```yaml
+detectors:
+  hailo8l:
+    type: hailo8l
+    device: PCIe
+
+model:
+  width: 300
+  height: 300
+  input_tensor: nhwc
+  input_pixel_format: bgr
+  model_type: ssd
+  path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
+```
+
+
 ## OpenVINO Detector

 The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.

@@ -144,7 +176,9 @@ detectors:

 #### SSDLite MobileNet v2

-An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
+An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
+
+Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:

 ```yaml
 detectors:

@@ -254,6 +288,7 @@ yolov4x-mish-640
 yolov7-tiny-288
 yolov7-tiny-416
 yolov7-640
+yolov7-416
 yolov7-320
 yolov7x-640
 yolov7x-320

@@ -282,6 +317,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type

 The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. These model path and dimensions used will depend on which model you have generated.

+Use the config below to work with generated TRT models:
+
 ```yaml
 detectors:
   tensorrt:

@@ -290,6 +327,7 @@ detectors:

 model:
   path: /config/model_cache/tensorrt/yolov7-320.trt
+  labelmap_path: /labelmap/coco-80.txt
   input_tensor: nchw
   input_pixel_format: rgb
   width: 320

@@ -501,11 +539,12 @@ detectors:
   cpu1:
     type: cpu
     num_threads: 3
-    model:
-      path: "/custom_model.tflite"
   cpu2:
     type: cpu
     num_threads: 3
+
+model:
+  path: "/custom_model.tflite"
 ```

 When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.

@@ -618,27 +657,3 @@ $ cat /sys/kernel/debug/rknpu/load

 - All models are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
 - You can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder, store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format see the `rknn-toolkit2` (requires a x86 machine). Note, that there is only post-processing for the supported models.
-
-## Hailo-8l
-
-This detector is available for use with Hailo-8 AI Acceleration Module.
-
-See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the hailo8.
-
-### Configuration
-
-```yaml
-detectors:
-  hailo8l:
-    type: hailo8l
-    device: PCIe
-    model:
-      path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
-
-model:
-  width: 300
-  height: 300
-  input_tensor: nhwc
-  input_pixel_format: bgr
-  model_type: ssd
-```
@@ -20,5 +20,5 @@ In order to install Frigate as a PWA, the following requirements must be met:
 Installation varies slightly based on the device that is being used:

 - Desktop: Use the install button typically found in right edge of the address bar
-- Android: Use the `Install as App` button in the more options menu
-- iOS: Use the `Add to Homescreen` button in the share menu
+- Android: Use the `Install as App` button in the more options menu for Chrome, and the `Add app to Home screen` button for Firefox
+- iOS: Use the `Add to Homescreen` button in the share menu
@@ -52,7 +52,7 @@ detectors:
 # Required: name of the detector
 detector_name:
   # Required: type of the detector
-  # Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
+  # Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
   # Additional detector types can also be plugged in.
   # Detectors may require additional configuration.
   # Refer to the Detectors configuration page for more information.

@@ -117,25 +117,27 @@ auth:
   hash_iterations: 600000

 # Optional: model modifications
+# NOTE: The default values are for the EdgeTPU detector.
+# Other detectors will require the model config to be set.
 model:
-  # Optional: path to the model (default: automatic based on detector)
+  # Required: path to the model (default: automatic based on detector)
   path: /edgetpu_model.tflite
-  # Optional: path to the labelmap (default: shown below)
+  # Required: path to the labelmap (default: shown below)
   labelmap_path: /labelmap.txt
   # Required: Object detection model input width (default: shown below)
   width: 320
   # Required: Object detection model input height (default: shown below)
   height: 320
-  # Optional: Object detection model input colorspace
+  # Required: Object detection model input colorspace
   # Valid values are rgb, bgr, or yuv. (default: shown below)
   input_pixel_format: rgb
-  # Optional: Object detection model input tensor format
+  # Required: Object detection model input tensor format
   # Valid values are nhwc or nchw (default: shown below)
   input_tensor: nhwc
-  # Optional: Object detection model type, currently only used with the OpenVINO detector
+  # Required: Object detection model type, currently only used with the OpenVINO detector
   # Valid values are ssd, yolox, yolonas (default: shown below)
   model_type: ssd
-  # Optional: Label name modifications. These are merged into the standard labelmap.
+  # Required: Label name modifications. These are merged into the standard labelmap.
   labelmap:
     2: vehicle
   # Optional: Map of object labels to their attribute labels (default: depends on model)

@@ -546,6 +548,8 @@ genai:

 # Optional: Restream configuration
 # Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
+# NOTE: The default go2rtc API port (1984) must be used,
+# changing this port for the integrated go2rtc instance is not supported.
 go2rtc:

 # Optional: Live stream configuration for WebUI.

@@ -686,6 +690,7 @@ cameras:
     # to enable PTZ controls.
     onvif:
       # Required: host of the camera being connected to.
+      # NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
       host: 0.0.0.0
       # Optional: ONVIF port for device (default: shown below).
       port: 8000

@@ -694,6 +699,8 @@ cameras:
       user: admin
       # Optional: password for login.
       password: admin
+      # Optional: Skip TLS verification from the ONVIF server (default: shown below)
+      tls_insecure: False
      # Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
      # Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
      ignore_time_mismatch: False

@@ -757,6 +764,8 @@ cameras:
        - cat
      # Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
      required_zones: []
+      # Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
+      debug_save_thumbnails: False

 # Optional
 ui:
|
@ -21,6 +21,21 @@ In 0.14 and later, all of that is bundled into a single review item which starts
|
||||
|
||||
Not every segment of video captured by Frigate may be of the same level of interest to you. Video of people who enter your property may be a different priority than those walking by on the sidewalk. For this reason, Frigate 0.14 categorizes review items as _alerts_ and _detections_. By default, all person and car objects are considered alerts. You can refine categorization of your review items by configuring required zones for them.
|
||||
|
||||
:::note
|
||||
|
||||
Alerts and detections categorize the tracked objects in review items, but Frigate must first detect those objects with your configured object detector (Coral, OpenVINO, etc). By default, the object tracker only detects `person`. Setting `labels` for `alerts` and `detections` does not automatically enable detection of new objects. To detect more than `person`, you should add the following to your config:
|
||||
|
||||
```yaml
|
||||
objects:
|
||||
track:
|
||||
- person
|
||||
- car
|
||||
- ...
|
||||
```
|
||||
|
||||
See the [objects documentation](objects.md) for the list of objects that Frigate's default model tracks.
|
||||
:::
|
||||
|
||||
## Restricting alerts to specific labels
|
||||
|
||||
By default a review item will only be marked as an alert if a person or car is detected. This can be configured to include any object or audio label using the following config:
|
||||
|
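The config referenced at the end of the review.md hunk above is cut off by this compare view. A minimal sketch of what such a label restriction could look like, assuming the `review` / `alerts` / `labels` structure described in Frigate's review docs; the label list is illustrative:

```yaml
review:
  alerts:
    labels:
      - person
      - car
      - speech
```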
@ -5,7 +5,7 @@ title: Using Semantic Search
|
||||
|
||||
Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.
|
||||
|
||||
Frigate has support for [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create embeddings, which runs locally. Embeddings are then saved to Frigate's database.
|
||||
Frigate uses [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create and save embeddings to Frigate's database. All of this runs locally.
|
||||
|
||||
Semantic Search is accessed via the _Explore_ view in the Frigate UI.
|
||||
|
||||
@ -19,7 +19,7 @@ For best performance, 16GB or more of RAM and a dedicated GPU are recommended.
|
||||
|
||||
## Configuration
|
||||
|
||||
Semantic Search is disabled by default, and must be enabled in your config file before it can be used. Semantic Search is a global configuration setting.
|
||||
Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Settings page before it can be used. Semantic Search is a global configuration setting.
|
||||
|
||||
```yaml
|
||||
semantic_search:
|
||||
@ -29,9 +29,9 @@ semantic_search:
|
||||
|
||||
:::tip
|
||||
|
||||
The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to set the config back to `False` before restarting Frigate again.
|
||||
The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration or by toggling the switch on the Search Settings page in the UI and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to turn the UI's switch off or set the config back to `False` before restarting Frigate again.
|
||||
|
||||
If you are enabling the Search feature for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.
|
||||
If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.
|
||||
|
||||
:::
|
||||
|
||||
@ -39,9 +39,9 @@ If you are enabling the Search feature for the first time, be advised that Friga
|
||||
|
||||
The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
|
||||
|
||||
The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Search page when clicking on the gray tracked object chip at the top left of each review item. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
|
||||
The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on the thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
|
||||
|
||||
Differently weighted CLIP models are available and can be selected by setting the `model_size` config option as `small` or `large`:
|
||||
Differently weighted versions of the Jina model are available and can be selected by setting the `model_size` config option as `small` or `large`:
|
||||
|
||||
```yaml
|
||||
semantic_search:
|
||||
@ -50,7 +50,7 @@ semantic_search:
|
||||
```
|
||||
|
||||
- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
|
||||
- Configuring the `small` model employs a quantized version of the model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
|
||||
- Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
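Putting the options above together, a complete Semantic Search block might look like the following sketch (values are illustrative; adjust to your hardware):

```yaml
semantic_search:
  # Semantic Search is a global setting and is disabled by default
  enabled: True
  # set to True once to index existing tracked objects, then set back to False
  reindex: False
  # "small" runs a quantized model on CPU; "large" uses the full Jina model and the GPU when available
  model_size: large
```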
|
||||
|
||||
### GPU Acceleration
|
||||
|
||||
@ -84,7 +84,7 @@ If the correct build is used for your GPU and the `large` model is configured, t
|
||||
|
||||
## Usage and Best Practices
|
||||
|
||||
1. Semantic Search is used in conjunction with the other filters available on the Search page. Use a combination of traditional filtering and Semantic Search for the best results.
|
||||
1. Semantic Search is used in conjunction with the other filters available on the Explore page. Use a combination of traditional filtering and Semantic Search for the best results.
|
||||
2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
|
||||
3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
|
||||
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
|
||||
|
@ -36,8 +36,8 @@ Note that certbot uses symlinks, and those can't be followed by the container un
|
||||
frigate:
|
||||
...
|
||||
volumes:
|
||||
- /etc/letsencrypt/live/frigate:/etc/letsencrypt/live/frigate:ro
|
||||
- /etc/letsencrypt/archive/frigate:/etc/letsencrypt/archive/frigate:ro
|
||||
- /etc/letsencrypt/live/your.fqdn.net:/etc/letsencrypt/live/frigate:ro
|
||||
- /etc/letsencrypt/archive/your.fqdn.net:/etc/letsencrypt/archive/your.fqdn.net:ro
|
||||
...
|
||||
|
||||
```
|
||||
|
@ -3,7 +3,7 @@ id: camera_setup
|
||||
title: Camera setup
|
||||
---
|
||||
|
||||
Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
|
||||
Cameras configured to output H.264 video and AAC audio will offer the most compatibility with all features of Frigate and Home Assistant. H.265 has better compression, but less compatibility. Firefox 134+/136+/137+ (Windows/Mac/Linux & Android), Chrome 108+, Safari and Edge are the only browsers able to play H.265 and only support a limited number of H.265 profiles. Ideally, cameras should be configured directly for the desired resolutions and frame rates you want to use in Frigate. Reducing frame rates within Frigate will waste CPU resources decoding extra frames that are discarded. There are three different goals that you want to tune your stream configurations around.
|
||||
|
||||
- **Detection**: This is the only stream that Frigate will decode for processing. Also, this is the stream where snapshots will be generated from. The resolution for detection should be tuned for the size of the objects you want to detect. See [Choosing a detect resolution](#choosing-a-detect-resolution) for more details. The recommended frame rate is 5fps, but may need to be higher (10fps is the recommended maximum for most users) for very fast moving objects. Higher resolutions and frame rates will drive higher CPU usage on your server.
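To make the guidance above concrete, a detect stream tuned along these lines might be configured like the sketch below (the camera name, credentials, and stream URL are placeholders):

```yaml
cameras:
  front_door:
    ffmpeg:
      inputs:
        # lower-resolution sub stream used only for detection
        - path: rtsp://user:password@192.168.1.10:554/sub_stream
          roles:
            - detect
    detect:
      width: 1280
      height: 720
      fps: 5
```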
|
||||
|
||||
@ -28,7 +28,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings:
|
||||
- Encode Mode: H.264
|
||||
- Resolution: 2688\*1520
|
||||
- Frame Rate(FPS): 15
|
||||
- I Frame Interval: 30
|
||||
- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](../configuration/live) for more info)
|
||||
|
||||
**Sub Stream (Detection)**
|
||||
|
||||
|
@ -66,4 +66,4 @@ The time period starting when a tracked object entered the frame and ending when
|
||||
|
||||
## Zone
|
||||
|
||||
Zones are areas of interest, zones can be used for notifications and for limiting the areas where Frigate will create an [event](#event). [See the zone docs for more info](/configuration/zones)
|
||||
Zones are areas of interest. Zones can be used for notifications and for limiting the areas where Frigate will create a [review item](#review-item). [See the zone docs for more info](/configuration/zones)
|
||||
|
@ -9,24 +9,36 @@ Cameras that output H.264 video and AAC audio will offer the most compatibility
|
||||
|
||||
I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions; especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or less configuration options.
|
||||
|
||||
Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data.
|
||||
WiFi cameras are not recommended as [their streams are less reliable and cause connection loss and/or lost video data](https://ipcamtalk.com/threads/camera-conflicts.68142/#post-738821), especially when more than a few WiFi cameras will be used at the same time.
|
||||
|
||||
Here are some of the camera's I recommend:
|
||||
Many users have reported various issues with 4K-plus Reolink cameras, so it is best to stick with 5MP and lower for Reolink cameras. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras).
|
||||
|
||||
- <a href="https://amzn.to/3uFLtxB" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) T5442TM-AS-LED</a> (affiliate link)
|
||||
- <a href="https://amzn.to/3isJ3gU" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T5442TM-AS</a> (affiliate link)
|
||||
- <a href="https://amzn.to/2ZWNWIA" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-28MM</a> (affiliate link)
|
||||
Here are some of the cameras I recommend:
|
||||
|
||||
- <a href="https://amzn.to/4fwoNWA" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T549M-ALED-S3</a> (affiliate link)
|
||||
- <a href="https://amzn.to/3YXpcMw" target="_blank" rel="nofollow noopener sponsored">Loryta(Dahua) IPC-T54IR-AS</a> (affiliate link)
|
||||
- <a href="https://amzn.to/3AvBHoY" target="_blank" rel="nofollow noopener sponsored">Amcrest IP5M-T1179EW-AI-V3</a> (affiliate link)
|
||||
- <a href="https://amzn.to/4ltOpaC" target="_blank" rel="nofollow noopener sponsored">HIKVISION DS-2CD2387G2P-LSU/SL ColorVu 8MP Panoramic Turret IP Camera</a> (affiliate link)
|
||||
|
||||
I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
|
||||
## Server
|
||||
|
||||
My current favorite is the Beelink EQ12 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
My current favorite is the Beelink EQ13 because of the efficient N100 CPU and dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral, Hailo, or other AI accelerators.
|
||||
|
||||
| Name | Coral Inference Speed | Coral Compatibility | Notes |
|
||||
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Beelink EQ12 (<a href="https://amzn.to/3OlTMJY" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
|
||||
| Intel NUC (<a href="https://amzn.to/3psFlHi" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. |
|
||||
Note that many of these mini PCs come with Windows pre-installed, and you will need to install Linux according to the [getting started guide](../guides/getting_started.md).
|
||||
|
||||
I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website.
|
||||
|
||||
:::warning
|
||||
|
||||
If the EQ13 is out of stock, the link below may take you to a suggested alternative on Amazon. The Beelink EQ14 has some known compatibility issues, so you should avoid that model for now.
|
||||
|
||||
:::
|
||||
|
||||
| Name | Coral Inference Speed | Coral Compatibility | Notes |
|
||||
| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | ----------------------------------------------------------------------------------------- |
|
||||
| Beelink EQ13 (<a href="https://amzn.to/4jn2qVr" target="_blank" rel="nofollow noopener sponsored">Amazon</a>) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. |
|
||||
|
||||
## Detectors
|
||||
|
||||
@ -52,24 +64,26 @@ The OpenVINO detector type is able to run on:
|
||||
|
||||
More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)
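As a quick illustration, enabling an OpenVINO detector on an integrated or discrete Intel GPU generally looks like the sketch below (see the detector docs linked above for the authoritative options; the detector name `ov` is arbitrary):

```yaml
detectors:
  ov:
    type: openvino
    device: GPU
```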
|
||||
|
||||
Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known examples are below:
|
||||
Inference speeds vary greatly depending on the CPU or GPU used; some known examples of GPU inference times are below:
|
||||
|
||||
| Name | Inference Speed | Notes |
|
||||
| -------------------- | --------------- | --------------------------------------------------------------------- |
|
||||
| Intel NCS2 VPU | 60 - 65 ms | May vary based on host device |
|
||||
| Intel Celeron J4105 | ~ 25 ms | Inference speeds on CPU were 150 - 200 ms |
|
||||
| Intel Celeron N3060 | 130 - 150 ms | Inference speeds on CPU were ~ 550 ms |
|
||||
| Intel Celeron N3205U | ~ 120 ms | Inference speeds on CPU were ~ 380 ms |
|
||||
| Intel Celeron N4020 | 50 - 200 ms | Inference speeds on CPU were ~ 800 ms, greatly depends on other loads |
|
||||
| Intel i3 6100T | 15 - 35 ms | Inference speeds on CPU were 60 - 120 ms |
|
||||
| Intel i3 8100 | ~ 15 ms | Inference speeds on CPU were ~ 65 ms |
|
||||
| Intel i5 4590 | ~ 20 ms | Inference speeds on CPU were ~ 230 ms |
|
||||
| Intel i5 6500 | ~ 15 ms | Inference speeds on CPU were ~ 150 ms |
|
||||
| Intel i5 7200u | 15 - 25 ms | Inference speeds on CPU were ~ 150 ms |
|
||||
| Intel i5 7500 | ~ 15 ms | Inference speeds on CPU were ~ 260 ms |
|
||||
| Intel i5 1135G7 | 10 - 15 ms | |
|
||||
| Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms |
|
||||
| Intel Arc A750 | ~ 4 ms | |
|
||||
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | Notes |
|
||||
| --------------------- | --------------------------- | --------------------------- | --------------------------------------- |
|
||||
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | |
|
||||
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | |
|
||||
| Intel Ultra 5 125H | | 320: ~ 10 ms 640: ~ 22 ms | |
|
||||
| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | |
|
||||
| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | |
|
||||
| Intel i5 1135G7 | 10 - 15 ms | | |
|
||||
| Intel i5 7500 | ~ 15 ms | | |
|
||||
| Intel i5 7200u | 15 - 25 ms | | |
|
||||
| Intel i5 6500 | ~ 15 ms | | |
|
||||
| Intel i5 4590 | ~ 20 ms | | |
|
||||
| Intel i3 8100 | ~ 15 ms | | |
|
||||
| Intel i3 6100T | 15 - 35 ms | | Can only run one detector instance |
|
||||
| Intel Celeron N4020 | 50 - 200 ms | | Inference speed depends on other loads |
|
||||
| Intel Celeron N3205U | ~ 120 ms | | Can only run one detector instance |
|
||||
| Intel Celeron N3060 | 130 - 150 ms | | Can only run one detector instance |
|
||||
| Intel Celeron J4105 | ~ 25 ms | | Can only run one detector instance |
|
||||
|
||||
### TensorRT - Nvidia GPU
|
||||
|
||||
@ -78,29 +92,35 @@ The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which
|
||||
Inference speeds will vary greatly depending on the GPU and the model used.
|
||||
`tiny` variants are faster than the equivalent non-tiny model; some known examples are below:
|
||||
|
||||
| Name | Inference Speed |
|
||||
| --------------- | --------------- |
|
||||
| GTX 1060 6GB | ~ 7 ms |
|
||||
| GTX 1070 | ~ 6 ms |
|
||||
| GTX 1660 SUPER | ~ 4 ms |
|
||||
| RTX 3050 | 5 - 7 ms |
|
||||
| RTX 3070 Mobile | ~ 5 ms |
|
||||
| Quadro P400 2GB | 20 - 25 ms |
|
||||
| Quadro P2000 | ~ 12 ms |
|
||||
| Name | YoloV7 Inference Time | YOLO-NAS Inference Time |
|
||||
| --------------- | ---------------------- | --------------------------- |
|
||||
| Quadro P2000 | ~ 12 ms | |
|
||||
| Quadro P400 2GB | 20 - 25 ms | |
|
||||
| RTX 3070 Mobile | ~ 5 ms | |
|
||||
| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms |
|
||||
| GTX 1660 SUPER | ~ 4 ms | |
|
||||
| GTX 1070 | ~ 6 ms | |
|
||||
| GTX 1060 6GB | ~ 7 ms | |
|
||||
|
||||
#### AMD GPUs
|
||||
### AMD GPUs
|
||||
|
||||
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs.
|
||||
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
|
||||
|
||||
### Community Supported:
|
||||
### Hailo-8l PCIe
|
||||
|
||||
#### Nvidia Jetson
|
||||
Frigate supports the Hailo-8l M.2 card on any hardware, but currently it is only tested on the Raspberry Pi 5 PCIe hat from the AI kit.
|
||||
|
||||
The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model.
|
||||
|
||||
## Community Supported Detectors
|
||||
|
||||
### Nvidia Jetson
|
||||
|
||||
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
|
||||
|
||||
Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
|
||||
|
||||
#### Rockchip platform
|
||||
### Rockchip platform
|
||||
|
||||
Frigate supports hardware video processing on all Rockchip boards. However, hardware object detection is only supported on these boards:
|
||||
|
||||
@ -112,12 +132,6 @@ Frigate supports hardware video processing on all Rockchip boards. However, hard
|
||||
|
||||
The inference time of a rk3588 with all 3 cores enabled is typically 25-30 ms for yolo-nas s.
|
||||
|
||||
#### Hailo-8l PCIe
|
||||
|
||||
Frigate supports the Hailo-8l M.2 card on any hardware but currently it is only tested on the Raspberry Pi5 PCIe hat from the AI kit.
|
||||
|
||||
The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model.
|
||||
|
||||
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
|
||||
|
||||
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
|
||||
@ -138,4 +152,4 @@ Basically - When you increase the resolution and/or the frame rate of the stream
|
||||
|
||||
YES! The Coral does not help with decoding video streams.
|
||||
|
||||
Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://blog.video.ibm.com/streaming-video-tips/keyframes-interframe-video-compression/). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.
|
||||
Decompressing video streams takes a significant amount of CPU power. Video compression uses key frames (also known as I-frames) to send a full frame in the video stream. The following frames only include the difference from the key frame, and the CPU has to compile each frame by merging the differences with the key frame. [More detailed explanation](https://support.video.ibm.com/hc/en-us/articles/18106203580316-Keyframes-InterFrame-Video-Compression). Higher resolutions and frame rates mean more processing power is needed to decode the video stream, so try and set them on the camera to avoid unnecessary decoding work.
|
||||
|
@ -111,7 +111,7 @@ For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simpl
|
||||
For other installations, follow these steps for installation:
|
||||
|
||||
1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it.
|
||||
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/41c9b13d2fffce508b32dfc971fa529b49295fbd/docker/hailo8l/user_installation.sh).
|
||||
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/hailo8l/user_installation.sh).
|
||||
3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
|
||||
4. Run the script with `./user_installation.sh`
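For reference, steps 2-4 above boil down to something like the following (a sketch; the raw-download URL is an assumption based on the link above — downloading the script from that page in a browser works just as well):

```bash
# fetch the user installation script from the Frigate repo (raw URL assumed from the link above)
wget https://raw.githubusercontent.com/blakeblackshear/frigate/dev/docker/hailo8l/user_installation.sh
# make it executable and run it
sudo chmod +x user_installation.sh
./user_installation.sh
```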
|
||||
|
||||
@ -187,7 +187,6 @@ Next, you should configure [hardware object detection](/configuration/object_det
|
||||
Running in Docker with compose is the recommended install method.
|
||||
|
||||
```yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
frigate:
|
||||
container_name: frigate
|
||||
@ -305,8 +304,15 @@ To install make sure you have the [community app plugin here](https://forums.unr
|
||||
|
||||
## Proxmox
|
||||
|
||||
It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include:
|
||||
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn’t possible with containers.
|
||||
|
||||
:::warning
|
||||
|
||||
If you choose to run Frigate via LXC in Proxmox, the setup can be complex, so be prepared to read the Proxmox and LXC documentation. Frigate does not officially support running inside of an LXC.
|
||||
|
||||
:::
|
||||
|
||||
Suggestions include:
|
||||
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
|
||||
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
|
||||
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`
|
||||
|
74
docs/docs/frigate/planning_setup.md
Normal file
74
docs/docs/frigate/planning_setup.md
Normal file
@ -0,0 +1,74 @@
|
||||
---
|
||||
id: planning_setup
|
||||
title: Planning a New Installation
|
||||
---
|
||||
|
||||
Choosing the right hardware for your Frigate NVR setup is important for optimal performance and a smooth experience. This guide will walk you through the key considerations, focusing on the number of cameras and the hardware required for efficient object detection.
|
||||
|
||||
## Key Considerations
|
||||
|
||||
### Number of Cameras and Simultaneous Activity
|
||||
|
||||
The most fundamental factor in your hardware decision is the number of cameras you plan to use. However, it's not just about the raw count; it's also about how many of those cameras are likely to see activity and require object detection simultaneously.
|
||||
|
||||
When motion is detected in a camera's feed, regions of that frame are sent to your chosen [object detection hardware](/configuration/object_detectors).
|
||||
|
||||
- **Low Simultaneous Activity (1-6 cameras with occasional motion)**: If you have a few cameras in areas with infrequent activity (e.g., a seldom-used backyard, a quiet interior), the demand on your object detection hardware will be lower. A single, entry-level AI accelerator will suffice.
|
||||
- **Moderate Simultaneous Activity (6-12 cameras with some overlapping motion)**: For setups with more cameras, especially in areas like a busy street or a property with multiple access points, it's more likely that several cameras will capture activity at the same time. This increases the load on your object detection hardware, requiring more processing power.
|
||||
- **High Simultaneous Activity (12+ cameras or highly active zones)**: Large installations or scenarios where many cameras frequently capture activity (e.g., busy street with overview, identification, dedicated LPR cameras, etc.) will necessitate robust object detection capabilities. You'll likely need multiple entry-level AI accelerators or a more powerful single unit such as a discrete GPU.
|
||||
- **Commercial Installations (40+ cameras)**: Commercial installations or scenarios where a substantial number of cameras capture activity (e.g., a commercial property, an active public space) will necessitate robust object detection capabilities. You'll likely need a modern discrete GPU.
|
||||
|
||||
### Video Decoding
|
||||
|
||||
Modern CPUs with integrated GPUs (Intel Quick Sync, AMD VCN) or dedicated GPUs can significantly offload video decoding from the main CPU, freeing up resources. This is highly recommended, especially for multiple cameras.
|
||||
|
||||
:::tip
|
||||
|
||||
For commercial installations it is important to verify the number of supported concurrent streams on your GPU, as many consumer GPUs max out at ~20 concurrent camera streams.
|
||||
|
||||
:::
|
||||
|
||||
## Hardware Considerations
|
||||
|
||||
### Object Detection
|
||||
|
||||
There are many different hardware options for object detection depending on priorities and available hardware. See [the recommended hardware page](./hardware.md#detectors) for more specifics on what hardware is recommended for object detection.
|
||||
|
||||
### Storage
|
||||
|
||||
Storage is an important consideration when planning a new installation. To get a more precise estimate of your storage requirements, you can use an IP camera storage calculator. Websites like [IPConfigure Storage Calculator](https://calculator.ipconfigure.com/) can help you determine the necessary disk space based on your camera settings.
|
||||
|
||||
|
||||
#### SSDs (Solid State Drives)
|
||||
|
||||
SSDs are an excellent choice for Frigate, offering high speed and responsiveness. The older concern that SSDs would quickly "wear out" from constant video recording is largely no longer valid for modern consumer and enterprise-grade SSDs.
|
||||
|
||||
- Longevity: Modern SSDs are designed with advanced wear-leveling algorithms and significantly higher "Terabytes Written" (TBW) ratings than earlier models. For typical home NVR use, a good quality SSD will likely outlast the useful life of your NVR hardware itself.
|
||||
- Performance: SSDs excel at handling the numerous small write operations that occur during continuous video recording and can significantly improve the responsiveness of the Frigate UI and clip retrieval.
|
||||
- Silence and Efficiency: SSDs produce no noise and consume less power than traditional HDDs.
|
||||
|
||||
#### HDDs (Hard Disk Drives)
|
||||
|
||||
Traditional Hard Disk Drives (HDDs) remain a great and often more cost-effective option for long-term video storage, especially for larger setups where raw capacity is prioritized.
|
||||
|
||||
- Cost-Effectiveness: HDDs offer the best cost per gigabyte, making them ideal for storing many days, weeks, or months of continuous footage.
|
||||
- Capacity: HDDs are available in much larger capacities than most consumer SSDs, which is beneficial for extensive video archives.
|
||||
- NVR-Rated Drives: If choosing an HDD, consider drives specifically designed for surveillance (NVR) use, such as Western Digital Purple or Seagate SkyHawk. These drives are engineered for 24/7 operation and continuous write workloads, offering improved reliability compared to standard desktop drives.
|
||||
|
||||
#### Determining Your Storage Needs
|
||||
The amount of storage you need will depend on several factors:
|
||||
|
||||
- Number of Cameras: More cameras naturally require more space.
|
||||
- Resolution and Framerate: Higher resolution (e.g., 4K) and higher framerate (e.g., 30fps) streams consume significantly more storage.
|
||||
- Recording Method: Continuous recording uses the most space. Motion-only recording or object-triggered recording can save space, but may miss some footage.
|
||||
- Retention Period: How many days, weeks, or months of footage do you want to keep?
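As a rough worked example of how these factors combine: a single camera recording continuously at about 4 Mbps consumes roughly 4 Mbps × 86,400 s ≈ 43 GB per day, so eight such cameras kept for 14 days need on the order of 4.8 TB. Treat these numbers as a ballpark only; the calculator linked above will give a more precise estimate for your specific resolution, framerate, and codec.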
|
||||
|
||||
#### Network Storage (NFS/SMB)
|
||||
|
||||
While supported, using network-attached storage (NAS) for recordings can introduce latency and network dependency considerations. For optimal performance and reliability, it is generally recommended to have local storage for your Frigate recordings. If using a NAS, ensure your network connection to it is robust and fast (Gigabit Ethernet at minimum) and that the NAS itself can handle the continuous write load.
|
||||
|
||||
### RAM (Memory)
|
||||
|
||||
- **Basic Minimum: 4GB RAM**: This is generally sufficient for a very basic Frigate setup with a few cameras and a dedicated object detection accelerator, without running any enrichments. Performance might be tight, especially with higher resolution streams or numerous detections.
|
||||
- **Minimum for Enrichments: 8GB RAM**: If you plan to utilize Frigate's enrichment features (e.g., facial recognition, license plate recognition, or other AI models that run alongside standard object detection), 8GB of RAM should be considered the minimum. Enrichments require additional memory to load and process their respective models and data.
|
||||
- **Recommended: 16GB RAM**: For most users, especially those with many cameras (8+) or who plan to heavily leverage enrichments, 16GB of RAM is highly recommended. This provides ample headroom for smooth operation, reduces the likelihood of swapping to disk (which can impact performance), and allows for future expansion.
|
119
docs/docs/frigate/updating.md
Normal file
119
docs/docs/frigate/updating.md
Normal file
@ -0,0 +1,119 @@
|
||||
---
|
||||
id: updating
|
||||
title: Updating
|
||||
---
|
||||
|
||||
# Updating Frigate
|
||||
|
||||
The current stable version of Frigate is **0.15.0**. The release notes and any breaking changes for this version can be found on the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases/tag/v0.15.0).
|
||||
|
||||
Keeping Frigate up to date ensures you benefit from the latest features, performance improvements, and bug fixes. The update process varies slightly depending on your installation method (Docker, Home Assistant Addon, etc.). Below are instructions for the most common setups.
|
||||
|
||||
## Before You Begin
|
||||
|
||||
- **Stop Frigate**: For most methods, you’ll need to stop the running Frigate instance before backing up and updating.
|
||||
- **Backup Your Configuration**: Always back up your `/config` directory (e.g., `config.yml` and `frigate.db`, the SQLite database) before updating. This ensures you can roll back if something goes wrong.
|
||||
- **Check Release Notes**: Carefully review the [Frigate GitHub releases page](https://github.com/blakeblackshear/frigate/releases) for breaking changes or configuration updates that might affect your setup.
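A simple way to back up the `/config` directory mentioned above is to copy it with a dated suffix before pulling the new image (the paths are assumptions; use whatever host path you map into the container):

```bash
# copy the Frigate config directory (including config.yml and frigate.db) before updating
cp -a /opt/frigate/config /opt/frigate/config.bak-$(date +%Y%m%d)
```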
|
||||
|
||||
## Updating with Docker
|
||||
|
||||
If you’re running Frigate via Docker (recommended method), follow these steps:
|
||||
|
||||
1. **Stop the Container**:
|
||||
|
||||
- If using Docker Compose:
|
||||
```bash
|
||||
docker compose down frigate
|
||||
```
|
||||
- If using `docker run`:
|
||||
```bash
|
||||
docker stop frigate
|
||||
```
|
||||
|
||||
2. **Update and Pull the Latest Image**:
|
||||
|
||||
- If using Docker Compose:
|
||||
- Edit your `docker-compose.yml` file to specify the desired version tag (e.g., `0.15.0` instead of `0.14.1`). For example:
|
||||
```yaml
|
||||
services:
|
||||
frigate:
|
||||
image: ghcr.io/blakeblackshear/frigate:0.15.0
|
||||
```
|
||||
- Then pull the image:
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.15.0
|
||||
```
|
||||
- **Note for `stable` Tag Users**: If your `docker-compose.yml` uses the `stable` tag (e.g., `ghcr.io/blakeblackshear/frigate:stable`), you don’t need to update the tag manually. The `stable` tag always points to the latest stable release after pulling.
|
||||
- If using `docker run`:
|
||||
- Pull the image with the appropriate tag (e.g., `0.15.0`, `0.15.0-tensorrt`, or `stable`):
|
||||
```bash
|
||||
docker pull ghcr.io/blakeblackshear/frigate:0.15.0
|
||||
```
|
||||
|
||||
3. **Start the Container**:
|
||||
|
||||
- If using Docker Compose:
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
- If using `docker run`, re-run your original command (e.g., from the [Installation](./installation.md#docker) section) with the updated image tag.
|
||||
|
||||
4. **Verify the Update**:
|
||||
- Check the container logs to ensure Frigate starts successfully:
|
||||
```bash
|
||||
docker logs frigate
|
||||
```
|
||||
- Visit the Frigate Web UI (default: `http://<your-ip>:5000`) to confirm the new version is running. The version number is displayed at the top of the System Metrics page.
|
||||
|
||||
### Notes
|
||||
|
||||
- If you’ve customized other settings (e.g., `shm-size`), ensure they’re still appropriate after the update.
|
||||
- Docker will automatically use the updated image when you restart the container, as long as you pulled the correct version.
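If you track a tag such as `stable`, the update cycle described above can usually be collapsed into a pull followed by a restart (a sketch; the service name is assumed to be `frigate` as in the compose examples):

```bash
docker compose pull frigate && docker compose up -d frigate
```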
|
||||
|
||||
## Updating the Home Assistant Addon
|
||||
|
||||
For users running Frigate as a Home Assistant Addon:
|
||||
|
||||
1. **Check for Updates**:
|
||||
|
||||
- Navigate to **Settings > Add-ons** in Home Assistant.
|
||||
- Find your installed Frigate addon (e.g., "Frigate NVR" or "Frigate NVR (Full Access)").
|
||||
- If an update is available, you’ll see an "Update" button.
|
||||
|
||||
2. **Update the Addon**:
|
||||
|
||||
- Click the "Update" button next to the Frigate addon.
|
||||
- Wait for the process to complete. Home Assistant will handle downloading and installing the new version.
|
||||
|
||||
3. **Restart the Addon**:
|
||||
|
||||
- After updating, go to the addon’s page and click "Restart" to apply the changes.
|
||||
|
||||
4. **Verify the Update**:
|
||||
- Check the addon logs (under the "Log" tab) to ensure Frigate starts without errors.
|
||||
- Access the Frigate Web UI to confirm the new version is running.
|
||||
|
||||
### Notes
|
||||
|
||||
- Ensure your `/config/frigate.yml` is compatible with the new version by reviewing the [Release notes](https://github.com/blakeblackshear/frigate/releases).
|
||||
- If using custom hardware (e.g., Coral or GPU), verify that configurations still work, as addon updates don’t modify your hardware settings.
|
||||
|
||||
## Rolling Back
|
||||
|
||||
If an update causes issues:
|
||||
|
||||
1. Stop Frigate.
|
||||
2. Restore your backed-up config file and database.
|
||||
3. Revert to the previous image version:
|
||||
- For Docker: Specify an older tag (e.g., `ghcr.io/blakeblackshear/frigate:0.14.1`) in your `docker run` command.
|
||||
- For Docker Compose: Edit your `docker-compose.yml`, specify the older version tag (e.g., `ghcr.io/blakeblackshear/frigate:0.14.1`), and re-run `docker compose up -d`.
|
||||
- For Home Assistant: Reinstall the previous addon version manually via the repository if needed and restart the addon.
|
||||
4. Verify the old version is running again.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- **Container Fails to Start**: Check logs (`docker logs frigate`) for errors.
|
||||
- **UI Not Loading**: Ensure ports (e.g., 5000, 8971) are still mapped correctly and the service is running.
|
||||
- **Hardware Issues**: Revisit hardware-specific setup (e.g., Coral, GPU) if detection or decoding fails post-update.
|
||||
|
||||
Common questions are often answered in the [FAQ](https://github.com/blakeblackshear/frigate/discussions), pinned at the top of the support discussions.
|
@ -7,7 +7,7 @@ title: Configuring go2rtc
|
||||
|
||||
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
|
||||
|
||||
- WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream
|
||||
- WebRTC or MSE for live viewing with audio, higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream and does not support audio
|
||||
- Live stream support for cameras in Home Assistant Integration
|
||||
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
|
||||
|
||||
|
@ -35,6 +35,7 @@ There are many solutions available to implement reverse proxies and the communit
|
||||
* [Apache2](#apache2-reverse-proxy)
|
||||
* [Nginx](#nginx-reverse-proxy)
|
||||
* [Traefik](#traefik-reverse-proxy)
|
||||
* [Caddy](#caddy-reverse-proxy)
|
||||
|
||||
## Apache2 Reverse Proxy
|
||||
|
||||
@ -117,7 +118,8 @@ server {
|
||||
set $port 8971;
|
||||
|
||||
listen 80;
|
||||
listen 443 ssl http2;
|
||||
listen 443 ssl;
|
||||
http2 on;
|
||||
|
||||
server_name frigate.domain.com;
|
||||
}
|
||||
@ -177,3 +179,33 @@ The above configuration will create a "service" in Traefik, automatically adding
|
||||
It will also add a router, routing requests to "traefik.example.com" to your local container.
|
||||
|
||||
Note that with this approach, you don't need to expose any ports for the Frigate instance since all traffic will be routed over the internal Docker network.
|
||||
|
||||
## Caddy Reverse Proxy
|
||||
|
||||
This example shows Frigate running under a subdomain with logging and a TLS cert (in this case a wildcard domain cert obtained independently of Caddy) handled via imports.
|
||||
|
||||
```caddy
|
||||
(logging) {
|
||||
log {
|
||||
output file /var/log/caddy/{args[0]}.log {
|
||||
roll_size 10MiB
|
||||
roll_keep 5
|
||||
roll_keep_for 10d
|
||||
}
|
||||
format json
|
||||
level INFO
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
(tls) {
|
||||
tls /var/lib/caddy/wildcard.YOUR_DOMAIN.TLD.fullchain.pem /var/lib/caddy/wildcard.YOUR_DOMAIN.TLD.privkey.pem
|
||||
}
|
||||
|
||||
frigate.YOUR_DOMAIN.TLD {
|
||||
reverse_proxy http://localhost:8971
|
||||
import tls
|
||||
import logging frigate.YOUR_DOMAIN.TLD
|
||||
}
|
||||
|
||||
```
|
||||
|
@ -47,7 +47,7 @@ that card.
|
||||
|
||||
## Configuration
|
||||
|
||||
When configuring the integration, you will be asked for the `URL` of your Frigate instance which needs to be pointed at the internal unauthenticated port (`5000`) for your instance. This may look like `http://<host>:5000/`.
|
||||
When configuring the integration, you will be asked for the `URL` of your Frigate instance which can be pointed at the internal unauthenticated port (`5000`) or the authenticated port (`8971`) for your instance. This may look like `http://<host>:5000/`.
|
||||
|
||||
### Docker Compose Examples
|
||||
|
||||
@ -55,7 +55,7 @@ If you are running Home Assistant Core and Frigate with Docker Compose on the sa
|
||||
|
||||
#### Home Assistant running with host networking
|
||||
|
||||
It is not recommended to run Frigate in host networking mode. In this example, you would use `http://172.17.0.1:5000` when configuring the integration.
|
||||
It is not recommended to run Frigate in host networking mode. In this example, you would use `http://172.17.0.1:5000` or `http://172.17.0.1:8971` when configuring the integration.
|
||||
|
||||
```yaml
|
||||
services:
|
||||
@ -75,7 +75,7 @@ services:
|
||||
|
||||
#### Home Assistant _not_ running with host networking or in a separate compose file
|
||||
|
||||
In this example, you would use `http://frigate:5000` when configuring the integration. There is no need to map the port for the Frigate container.
|
||||
In this example, it is recommended to connect to the authenticated port, for example, `http://frigate:8971` when configuring the integration. There is no need to map the port for the Frigate container.
|
||||
|
||||
```yaml
|
||||
services:
|
||||
@ -97,20 +97,29 @@ services:
|
||||
|
||||
If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network.
|
||||
|
||||
| Addon Version | URL |
|
||||
| ------------------------------ | -------------------------------------- |
|
||||
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
|
||||
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
|
||||
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
|
||||
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
|
||||
| Addon Version | URL |
|
||||
| ------------------------------ | ----------------------------------------- |
|
||||
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
|
||||
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
|
||||
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
|
||||
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
|
||||
| Frigate NVR HailoRT Beta | `http://ccab4aaf-frigate-hailo-beta:5000` |
|
||||
|
||||
### Frigate running on a separate machine
|
||||
|
||||
If you run Frigate on a separate device within your local network, Home Assistant will need access to port 5000.
|
||||
If you run Frigate on a separate device within your local network, Home Assistant will need access to port 8971.
|
||||
|
||||
#### Local network
|
||||
|
||||
Use `http://<frigate_device_ip>:5000` as the URL for the integration. If you want to protect access to port 5000, you can use firewall rules to limit access to the device running Home Assistant.
|
||||
Use `http://<frigate_device_ip>:8971` as the URL for the integration so that authentication is required.
|
||||
|
||||
:::tip
|
||||
|
||||
The above URL assumes you have [disabled TLS](../configuration/tls).
|
||||
By default, TLS is enabled and Frigate will be using a self-signed certificate. Home Assistant will fail to connect to port 8971 over HTTPS because it cannot verify the self-signed certificate.
|
||||
Either disable TLS and use HTTP from Home Assistant, or configure Frigate to be accessible with a valid certificate.
|
||||
|
||||
:::
|
||||
|
||||
```yaml
|
||||
services:
|
||||
@ -118,7 +127,7 @@ services:
|
||||
image: ghcr.io/blakeblackshear/frigate:stable
|
||||
...
|
||||
ports:
|
||||
- "5000:5000"
|
||||
- "8971:8971"
|
||||
...
|
||||
```
|
||||
|
||||
@ -195,12 +204,30 @@ To load a snapshot for a tracked object:
|
||||
https://HA_URL/api/frigate/notifications/<event-id>/snapshot.jpg
|
||||
```
|
||||
|
||||
To load a video clip of a tracked object:
|
||||
To load a video clip of a tracked object using an Android device:
|
||||
|
||||
```
|
||||
https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
|
||||
```
|
||||
|
||||
To load a video clip of a tracked object using an iOS device:
|
||||
|
||||
```
|
||||
https://HA_URL/api/frigate/notifications/<event-id>/master.m3u8
|
||||
```
|
||||
|
||||
To load a preview gif of a tracked object:
|
||||
|
||||
```
|
||||
https://HA_URL/api/frigate/notifications/<event-id>/event_preview.gif
|
||||
```
|
||||
|
||||
To load a preview gif of a review item:
|
||||
|
||||
```
|
||||
https://HA_URL/api/frigate/notifications/<review-id>/review_preview.gif
|
||||
```
|
||||
|
||||
<a name="streams"></a>
|
||||
|
||||
## RTSP stream
|
||||
@ -282,3 +309,7 @@ which server they are referring to.
|
||||
#### If I am detecting multiple objects, how do I assign the correct `binary_sensor` to the camera in HomeKit?
|
||||
|
||||
The [HomeKit integration](https://www.home-assistant.io/integrations/homekit/) randomly links one of the binary sensors (motion sensor entities) grouped with the camera device in Home Assistant. You can specify a `linked_motion_sensor` in the Home Assistant [HomeKit configuration](https://www.home-assistant.io/integrations/homekit/#linked_motion_sensor) for each camera.
|
||||
|
||||
#### I have set up automations based on the occupancy sensors. Sometimes the automation runs because the sensors are turned on, but then I look at Frigate I can't find the object that triggered the sensor. Is this a bug?
|
||||
|
||||
No. The occupancy sensors have fewer checks in place because they are often used for things like turning the lights on where latency needs to be as low as possible. So false positives can sometimes trigger these sensors. If you want false positive filtering, you should use an mqtt sensor on the `frigate/events` or `frigate/reviews` topic.
|
||||
|
@ -28,7 +28,14 @@ Message published for each changed tracked object. The first message is publishe
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"camera": "front_door",
|
||||
"frame_time": 1607123961.837752,
|
||||
"snapshot_time": 1607123961.837752,
|
||||
"snapshot": {
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": [],
|
||||
},
|
||||
"label": "person",
|
||||
"sub_label": null,
|
||||
"top_score": 0.958984375,
|
||||
@ -58,7 +65,14 @@ Message published for each changed tracked object. The first message is publishe
|
||||
"id": "1607123955.475377-mxklsc",
|
||||
"camera": "front_door",
|
||||
"frame_time": 1607123962.082975,
|
||||
"snapshot_time": 1607123961.837752,
|
||||
"snapshot": {
|
||||
"frame_time": 1607123965.975463,
|
||||
"box": [415, 489, 528, 700],
|
||||
"area": 12728,
|
||||
"region": [260, 446, 660, 846],
|
||||
"score": 0.77546,
|
||||
"attributes": [],
|
||||
},
|
||||
"label": "person",
|
||||
"sub_label": ["John Smith", 0.79],
|
||||
"top_score": 0.958984375,
|
||||
|
@ -29,7 +29,9 @@ You cannot use the `environment_vars` section of your Frigate configuration file
|
||||
|
||||
## Submit examples
|
||||
|
||||
Once your API key is configured, you can submit examples directly from the Explore page in Frigate using the `Frigate+` button.
|
||||
Once your API key is configured, you can submit examples directly from the Explore page in Frigate. From the More Filters menu, select "Has a Snapshot - Yes" and "Submitted to Frigate+ - No", and press Apply at the bottom of the pane. Then, click on a thumbnail and select the Snapshot tab.
|
||||
|
||||
You can use your keyboard's left and right arrow keys to quickly navigate between the tracked object snapshots.
|
||||
|
||||
:::note
|
||||
|
||||
@ -37,13 +39,11 @@ Snapshots must be enabled to be able to submit examples to Frigate+
|
||||
|
||||
:::
|
||||
|
||||

|
||||
|
||||

|
||||
|
||||
### Annotate and verify
|
||||
|
||||
You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image. For more detailed information about labeling, see the documentation on [improving your model](../plus/improving_model.md).
|
||||
You can view all of your submitted images at [https://plus.frigate.video](https://plus.frigate.video). Annotations can be added by clicking an image. For more detailed information about labeling, see the documentation on [annotating](../plus/annotating.md).
|
||||
|
||||

|
||||
|
||||
|
@ -13,12 +13,20 @@ Please use your own knowledge to assess and vet them before you install anything
|
||||
|
||||
:::
|
||||
|
||||
## [Advanced Camera Card (formerly known as Frigate Card)](https://card.camera/#/README)
|
||||
|
||||
The [Advanced Camera Card](https://card.camera/#/README) is a Home Assistant dashboard card with deep Frigate integration.
|
||||
|
||||
## [Double Take](https://github.com/skrashevich/double-take)
|
||||
|
||||
[Double Take](https://github.com/skrashevich/double-take) provides a unified UI and API for processing and training images for facial recognition.
|
||||
It supports automatically setting the sub labels in Frigate for person objects that are detected and recognized.
|
||||
This is a fork (with fixed errors and new features) of the [original Double Take](https://github.com/jakowenko/double-take) project, which, unfortunately, is no longer maintained by its author.
|
||||
|
||||
## [Frigate Notify](https://github.com/0x2142/frigate-notify)
|
||||
|
||||
[Frigate Notify](https://github.com/0x2142/frigate-notify) is a simple app designed to send notifications from Frigate NVR to your favorite platforms. Intended to be used with standalone Frigate installations - Home Assistant not required, MQTT is optional but recommended.
|
||||
|
||||
## [Frigate telegram](https://github.com/OldTyT/frigate-telegram)
|
||||
|
||||
[Frigate telegram](https://github.com/OldTyT/frigate-telegram) makes it possible to send events from Frigate to Telegram. Events are sent as a message with a text description, video, and thumbnail.
|
||||
|
52
docs/docs/plus/annotating.md
Normal file
52
docs/docs/plus/annotating.md
Normal file
@ -0,0 +1,52 @@
|
||||
---
|
||||
id: annotating
|
||||
title: Annotating your images
|
||||
---
|
||||
|
||||
For the best results, follow these guidelines. You may also want to review the documentation on [improving your model](./index.md#improving-your-model).
|
||||
|
||||
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused. You can exclude labels that you don't want detected on any of your cameras.
|
||||
|
||||
**Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
|
||||
|
||||
**Label the full object even when occluded**: If you have a person standing behind a car, label the full person even though a portion of their body may be hidden behind the car. This helps predict accurate bounding boxes and improves zone accuracy and filters at runtime. If an object is partly out of frame, label it only when a person would reasonably be able to recognize the object from the visible parts.
|
||||
|
||||
**Label objects hard to identify as difficult**: When objects are truly difficult to make out, such as a car barely visible through a bush, or a dog that is hard to distinguish from the background at night, flag it as 'difficult'. This is not used in the model training as of now, but will in the future.
|
||||
|
||||
**Label delivery logos such as `amazon`, `ups`, and `fedex`**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
|
||||
|
||||

|
||||
|
||||
## AI suggested labels
|
||||
|
||||
If you have an active Frigate+ subscription, new uploads will be scanned for the objects configured for your camera and you will see suggested labels as light blue boxes when annotating in Frigate+. These suggestions are processed via a queue and typically complete within a minute after uploading, but processing times can be longer.
|
||||
|
||||

|
||||
|
||||
Suggestions are converted to labels when saving, so you should remove any errant suggestions. There is already some logic designed to avoid duplicate labels, but you may still occasionally see some duplicate suggestions. You should keep the most accurate bounding box and delete any duplicates so that you have just one label per object remaining.
|
||||
|
||||
## False positive labels
|
||||
|
||||
False positives will be shown with a red box and the label will have a strikethrough. These can't be adjusted, but they can be deleted if you accidentally submit a true positive as a false positive from Frigate.
|
||||

|
||||
|
||||
Misidentified objects should have a correct label added. For example, if a person was mistakenly detected as a cat, you should submit it as a false positive in Frigate and add a label for the person. The boxes will overlap.
|
||||
|
||||

|
||||
|
||||
## Shortcuts for a faster workflow
|
||||
|
||||
| Shortcut Key | Description |
|
||||
| ----------------- | ----------------------------- |
|
||||
| `?` | Show all keyboard shortcuts |
|
||||
| `w` | Add box |
|
||||
| `d` | Toggle difficult |
|
||||
| `s` | Switch to the next label |
|
||||
| `tab` | Select next largest box |
|
||||
| `del` | Delete current box |
|
||||
| `esc` | Deselect/Cancel |
|
||||
| `← ↑ → ↓` | Move box |
|
||||
| `Shift + ← ↑ → ↓` | Resize box |
|
||||
| `scrollwheel` | Zoom in/out |
|
||||
| `f` | Hide/show all but current box |
|
||||
| `spacebar` | Verify and save |
|
@ -5,15 +5,15 @@ title: Requesting your first model
|
||||
|
||||
## Step 1: Upload and annotate your images
|
||||
|
||||
Before requesting your first model, you will need to upload and verify at least 1 image to Frigate+. The more images you upload, annotate, and verify the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
|
||||
Before requesting your first model, you will need to upload and verify at least 10 images to Frigate+. The more images you upload, annotate, and verify the better your results will be. Most users start to see very good results once they have at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. Refer to the [integration docs](../integrations/plus.md#generate-an-api-key) for instructions on how to easily submit images to Frigate+ directly from Frigate.
|
||||
|
||||
It is recommended to submit **both** true positives and false positives. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
|
||||
|
||||
For more detailed recommendations, you can refer to the docs on [improving your model](./improving_model.md).
|
||||
For more detailed recommendations, you can refer to the docs on [annotating](./annotating.md).
|
||||
|
||||
## Step 2: Submit a model request
|
||||
|
||||
Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
|
||||
Once you have an initial set of verified images, you can request a model on the Models page. For guidance on choosing a model type, refer to [this part of the documentation](./index.md#available-model-types). If you are unsure which type to request, you can test the base model for each version from the "Base Models" tab. Each model request requires 1 of the 12 trainings that you receive with your annual subscription. This model will support all [label types available](./index.md#available-label-types) even if you do not submit any examples for those labels. Model creation can take up to 36 hours.
|
||||

|
||||
|
||||
## Step 3: Set your model id in the config
|
||||
|
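Once training completes, the model id shown on the Models page goes into the `model` section of your Frigate config. A minimal sketch, assuming your Frigate+ API key is already configured per the integration docs and using a placeholder id:

```yaml
model:
  # replace <your_model_id> with the id shown for your trained model on the Models page
  path: plus://<your_model_id>
```

After a restart, Frigate downloads the model and uses it with your configured detector.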
@ -1,52 +0,0 @@
|
||||
---
|
||||
id: improving_model
|
||||
title: Improving your model
|
||||
---
|
||||
|
||||
You may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
|
||||
|
||||
- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
|
||||
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores.
|
||||
- **Submit diverse images**. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.
|
||||
|
||||
## Properly labeling images
|
||||
|
||||
For the best results, follow these guidelines.
|
||||
|
||||
**Label every object in the image**: It is important that you label all objects in each image before verifying. If you don't label a car for example, the model will be taught that part of the image is _not_ a car and it will start to get confused.
|
||||
|
||||
**Make tight bounding boxes**: Tighter bounding boxes improve the recognition and ensure that accurate bounding boxes are predicted at runtime.
|
||||
|
||||
**Label the full object even when occluded**: If you have a person standing behind a car, label the full person even though a portion of their body may be hidden behind the car. This helps predict accurate bounding boxes and improves zone accuracy and filters at runtime. If an object is partly out of frame, label it only when a person would reasonably be able to recognize the object from the visible parts.
|
||||
|
||||
**Label objects hard to identify as difficult**: When objects are truly difficult to make out, such as a car barely visible through a bush, or a dog that is hard to distinguish from the background at night, flag it as 'difficult'. This is not used in the model training as of now, but will in the future.
|
||||
|
||||
**`amazon`, `ups`, and `fedex` should label the logo**: For a Fedex truck, label the truck as a `car` and make a different bounding box just for the Fedex logo. If there are multiple logos, label each of them.
|
||||
|
||||

|
||||
|
||||
## False positive labels
|
||||
|
||||
False positives will be shown with a red box and the label will have a strikethrough.
|
||||

|
||||
|
||||
Misidentified objects should have a correct label added. For example, if a person was mistakenly detected as a cat, you should submit it as a false positive in Frigate and add a label for the person. The boxes will overlap.
|
||||
|
||||

|
||||
|
||||
## Shortcuts for a faster workflow
|
||||
|
||||
| Shortcut Key | Description |
|
||||
| ----------------- | ----------------------------- |
|
||||
| `?` | Show all keyboard shortcuts |
|
||||
| `w` | Add box |
|
||||
| `d` | Toggle difficult |
|
||||
| `s` | Switch to the next label |
|
||||
| `tab` | Select next largest box |
|
||||
| `del` | Delete current box |
|
||||
| `esc` | Deselect/Cancel |
|
||||
| `← ↑ → ↓` | Move box |
|
||||
| `Shift + ← ↑ → ↓` | Resize box |
|
||||
| `scrollwheel` | Zoom in/out |
|
||||
| `f` | Hide/show all but current box |
|
||||
| `spacebar` | Verify and save |
|
@ -3,23 +3,17 @@ id: index
|
||||
title: Models
|
||||
---
|
||||
|
||||
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a baseline model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
|
||||
<a href="https://frigate.video/plus" target="_blank" rel="nofollow">Frigate+</a> offers models trained on images submitted by Frigate+ users from their security cameras and is specifically designed for the way Frigate NVR analyzes video footage. These models offer higher accuracy with less resources. The images you upload are used to fine tune a base model trained from images uploaded by all Frigate+ users. This fine tuning process results in a model that is optimized for accuracy in your specific conditions.
|
||||
|
||||
:::info
|
||||
|
||||
The baseline model isn't directly available after subscribing. This may change in the future, but for now you will need to submit a model request with the minimum number of images.
|
||||
|
||||
:::
|
||||
|
||||
With a subscription, 12 model trainings per year are included. If you cancel your subscription, you will retain access to any trained models. An active subscription is required to submit model requests or purchase additional trainings.
|
||||
With a subscription, 12 model trainings to fine tune your model per year are included. In addition, you will have access to any base models published while your subscription is active. If you cancel your subscription, you will retain access to any trained and base models in your account. An active subscription is required to submit model requests or purchase additional trainings. New base models are published quarterly with target dates of January 15th, April 15th, July 15th, and October 15th.
|
||||
|
||||
Information on how to integrate Frigate+ with Frigate can be found in the [integration docs](../integrations/plus.md).
|
||||
|
||||
## Available model types
|
||||
|
||||
There are two model types offered in Frigate+: `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
|
||||
There are two model types offered in Frigate+, `mobiledet` and `yolonas`. Both of these models are object detection models and are trained to detect the same set of labels [listed below](#available-label-types).
|
||||
|
||||
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types).
|
||||
Not all model types are supported by all detectors, so it's important to choose a model type to match your detector as shown in the table under [supported detector types](#supported-detector-types). You can test model types for compatibility and speed on your hardware by using the base models.
|
||||
|
||||
| Model Type | Description |
|
||||
| ----------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
@ -32,27 +26,53 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi
|
||||
|
||||
:::warning
|
||||
|
||||
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.
|
||||
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 and later.
|
||||
|
||||
:::
|
||||
|
||||
| Hardware | Recommended Detector Type | Recommended Model Type |
|
||||
| ---------------------------------------------------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
|
||||
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
|
||||
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
|
||||
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
|
||||
| [NVidia GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
|
||||
| [AMD ROCm GPU](https://deploy-preview-13787--frigate-docs.netlify.app/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` |
|
||||
| Hardware | Recommended Detector Type | Recommended Model Type |
|
||||
| -------------------------------------------------------------------------------- | ------------------------- | ---------------------- |
|
||||
| [CPU](/configuration/object_detectors.md#cpu-detector-not-recommended) | `cpu` | `mobiledet` |
|
||||
| [Coral (all form factors)](/configuration/object_detectors.md#edge-tpu-detector) | `edgetpu` | `mobiledet` |
|
||||
| [Intel](/configuration/object_detectors.md#openvino-detector) | `openvino` | `yolonas` |
|
||||
| [NVidia GPU](/configuration/object_detectors#onnx)\* | `onnx` | `yolonas` |
|
||||
| [AMD ROCm GPU](/configuration/object_detectors#amdrocm-gpu-detector)\* | `rocm` | `yolonas` |
|
||||
|
||||
_\* Requires Frigate 0.15_
|
||||
|
||||
## Improving your model
|
||||
|
||||
Some users may find that Frigate+ models result in more false positives initially, but by submitting true and false positives, the model will improve. With all the new images now being submitted by subscribers, future base models will improve as more and more examples are incorporated. Note that only images with at least one verified label will be used when training your model. Submitting an image from Frigate as a true or false positive will not verify the image. You still must verify the image in Frigate+ in order for it to be used in training.
|
||||
|
||||
- **Submit both true positives and false positives**. This will help the model differentiate between what is and isn't correct. You should aim for a target of 80% true positive submissions and 20% false positives across all of your images. If you are experiencing false positives in a specific area, submitting true positives for any object type near that area in similar lighting conditions will help teach the model what that area looks like when no objects are present.
|
||||
- **Lower your thresholds a little in order to generate more false/true positives near the threshold value**. For example, if you have some false positives that are scoring at 68% and some true positives scoring at 72%, you can try lowering your threshold to 65% and submitting both true and false positives within that range. This will help the model learn and widen the gap between true and false positive scores (see the sketch after this list).
|
||||
- **Submit diverse images**. For the best results, you should provide at least 100 verified images per camera. Keep in mind that varying conditions should be included. You will want images from cloudy days, sunny days, dawn, dusk, and night. As circumstances change, you may need to submit new examples to address new types of false positives. For example, the change from summer days to snowy winter days or other changes such as a new grill or patio furniture may require additional examples and training.
|
||||
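As a rough sketch of the threshold tip above (camera name and values are placeholders, not recommendations), you could temporarily lower the `person` threshold for one camera while you collect submissions:

```yaml
cameras:
  front_door:
    objects:
      filters:
        person:
          min_score: 0.5
          threshold: 0.65
```

Once the new model widens the gap between true and false positive scores, raise the threshold back up.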
|
||||
## Available label types
|
||||
|
||||
Frigate+ models support a more relevant set of objects for security cameras. Currently, only the following objects are supported: `person`, `face`, `car`, `license_plate`, `amazon`, `ups`, `fedex`, `package`, `dog`, `cat`, `deer`. Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
|
||||
Frigate+ models support a more relevant set of objects for security cameras. The labels for annotation in Frigate+ are configurable by editing the camera in the Cameras section of Frigate+. Currently, the following objects are supported:
|
||||
|
||||
- **People**: `person`, `face`
|
||||
- **Vehicles**: `car`, `motorcycle`, `bicycle`, `boat`, `school_bus`, `license_plate`
|
||||
- **Delivery Logos**: `amazon`, `usps`, `ups`, `fedex`, `dhl`, `an_post`, `purolator`, `postnl`, `nzpost`, `postnord`, `gls`, `dpd`, `canada_post`, `royal_mail`
|
||||
- **Animals**: `dog`, `cat`, `deer`, `horse`, `bird`, `raccoon`, `fox`, `bear`, `cow`, `squirrel`, `goat`, `rabbit`, `skunk`, `kangaroo`
|
||||
- **Other**: `package`, `waste_bin`, `bbq_grill`, `robot_lawnmower`, `umbrella`
|
||||
|
||||
Other object types available in the default Frigate model are not available. Additional object types will be added in future releases.
|
||||
|
||||
### Candidate labels
|
||||
|
||||
Candidate labels are also available for annotation. These labels don't have enough data to be included in the model yet, but using them will help add support sooner. You can enable these labels by editing the camera settings.
|
||||
|
||||
Where possible, these labels are mapped to existing labels during training. For example, any `baby` labels are mapped to `person` until support for new labels is added.
|
||||
|
||||
The candidate labels are: `baby`, `bpost`, `badger`, `possum`, `rodent`, `chicken`, `groundhog`, `boar`, `hedgehog`, `tractor`, `golf cart`, `garbage truck`, `bus`, `sports ball`
|
||||
|
||||
Candidate labels are not available for automatic suggestions.
|
||||
|
||||
### Label attributes
|
||||
|
||||
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
|
||||
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, and delivery logos such as `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
|
||||
|
||||
In order to have Frigate start using these attribute labels, you will need to add them to the list of objects to track:
|
||||
|
||||
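A minimal sketch of adding attribute labels to the tracked objects (track only the labels you actually need):

```yaml
objects:
  track:
    - person
    - face
    - car
    - license_plate
```

Because `threshold` is ignored for attribute labels, adjust `min_score` under `objects -> filters` for these labels if needed.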
@ -75,6 +95,6 @@ When using Frigate+ models, Frigate will choose the snapshot of a person object
|
||||
|
||||

|
||||
|
||||
`amazon`, `ups`, and `fedex` labels are used to automatically assign a sub label to car objects.
|
||||
Delivery logos such as `amazon`, `ups`, and `fedex` labels are used to automatically assign a sub label to car objects.
|
||||
|
||||

|
||||
|
@ -40,6 +40,17 @@ Some users have reported that this older device runs an older kernel causing iss
|
||||
6. Open the control panel - info screen. The Coral TPU will now be recognised as a USB Device - Google Inc.
|
||||
7. Start the Frigate container. Everything should work now!
|
||||
|
||||
### QNAP NAS
|
||||
|
||||
QNAP NAS devices, such as the TS-253A, may use connected Coral TPU devices if [QuMagie](https://www.qnap.com/en/software/qumagie) is installed along with its QNAP AI Core extension. If any of the features—`facial recognition`, `object recognition`, or `similar photo recognition`—are enabled, Container Station applications such as `Frigate` or `CodeProject.AI Server` will be unable to initialize the TPU device in use.
|
||||
To allow the Coral TPU device to be discovered, you must do one of the following:
|
||||
|
||||
1. [Disable the AI recognition features in QuMagie](https://docs.qnap.com/application/qumagie/2.x/en-us/configuring-qnap-ai-core-settings-FB13CE03.html),
|
||||
2. Remove the QNAP AI Core extension, or
|
||||
3. Manually start the QNAP AI Core extension after Frigate has fully started (not recommended).
|
||||
|
||||
It is also recommended to restart the NAS once the changes have been made.
|
||||
|
||||
## USB Coral Detection Appears to be Stuck
|
||||
|
||||
The USB Coral can become stuck and need to be restarted. This can happen for a number of reasons depending on hardware and software setup. Some common reasons are:
|
||||
@ -54,6 +65,17 @@ The most common reason for the PCIe Coral not being detected is that the driver
|
||||
- In most cases [the Coral docs](https://coral.ai/docs/m2/get-started/#2-install-the-pcie-driver-and-edge-tpu-runtime) show how to install the driver for the PCIe based Coral.
|
||||
- For Ubuntu 22.04+, https://github.com/jnicolson/gasket-builder can be used to build and install the latest version of the driver.
|
||||
|
||||
### Not detected on Raspberry Pi5
|
||||
|
||||
A kernel update to the RPi5 means an update to config.txt is required; see [the Raspberry Pi forum for more info](https://forums.raspberrypi.com/viewtopic.php?t=363682&sid=cb59b026a412f0dc041595951273a9ca&start=25)
|
||||
|
||||
Specifically, add the following to config.txt
|
||||
|
||||
```
|
||||
dtoverlay=pciex1-compat-pi5,no-mip
|
||||
dtoverlay=pcie-32bit-dma-pi5
|
||||
```
|
||||
|
||||
## Only One PCIe Coral Is Detected With Coral Dual EdgeTPU
|
||||
|
||||
Coral Dual EdgeTPU is one card with two identical TPU cores. Each core has its own PCIe interface, and the motherboard needs to have two PCIe buses on the M.2 slot to make them both work.
|
||||
|
@ -17,6 +17,10 @@ ffmpeg:
|
||||
record: preset-record-generic-audio-aac
|
||||
```
|
||||
|
||||
### How can I get sound in live view?
|
||||
|
||||
Audio is only supported for live view when go2rtc is configured, see [the live docs](../configuration/live.md) for more information.
|
||||
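For reference, a minimal go2rtc sketch that transcodes a camera's audio to AAC so the browser can play it in live view (stream name, address, and credentials are placeholders):

```yaml
go2rtc:
  streams:
    back_yard:
      - rtsp://user:password@192.168.1.10:554/stream1
      - "ffmpeg:back_yard#audio=aac"
```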
|
||||
### I can't view recordings in the Web UI.
|
||||
|
||||
Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
|
||||
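If a camera only outputs H.265 and your browser cannot play the recordings, one approach (a sketch with placeholder names, not the only option) is to restream through go2rtc, transcode to H.264, and point the `record` role at the restream:

```yaml
go2rtc:
  streams:
    front_door_h264:
      - "ffmpeg:rtsp://user:password@192.168.1.11:554/stream1#video=h264"

cameras:
  front_door:
    ffmpeg:
      inputs:
        - path: rtsp://127.0.0.1:8554/front_door_h264
          input_args: preset-rtsp-restream
          roles:
            - record
```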
@ -98,3 +102,11 @@ docker run -d \
|
||||
-p 8555:8555/udp \
|
||||
ghcr.io/blakeblackshear/frigate:stable
|
||||
```
|
||||
|
||||
### My RTSP stream works fine in VLC, but it does not work when I put the same URL in my Frigate config. Is this a bug?
|
||||
|
||||
No. Frigate uses the TCP protocol to connect to your camera's RTSP URL. VLC automatically switches between UDP and TCP depending on network conditions and stream availability. So a stream that works in VLC but not in Frigate is likely due to VLC selecting UDP as the transport protocol.
|
||||
|
||||
TCP ensures that all data packets arrive in the correct order. This is crucial for video recording, decoding, and stream processing, which is why Frigate enforces a TCP connection. UDP is faster but less reliable, as it does not guarantee packet delivery or order, and VLC does not have the same requirements as Frigate.
|
||||
|
||||
You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.
|
||||
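If you do want UDP, a minimal sketch for a single camera input (camera name and URL are placeholders):

```yaml
cameras:
  driveway:
    ffmpeg:
      inputs:
        - path: rtsp://user:password@192.168.1.12:554/stream1
          input_args: preset-rtsp-udp
          roles:
            - detect
```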
|
@ -3,7 +3,15 @@ id: recordings
|
||||
title: Troubleshooting Recordings
|
||||
---
|
||||
|
||||
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
|
||||
## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
|
||||
|
||||
You'll want to:
|
||||
|
||||
- Make sure your camera's timestamp is masked out with a motion mask (see the sketch after this list). Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
|
||||
- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
|
||||
- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
|
||||
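As referenced in the first item above, a rough sketch of masking a camera's on-screen timestamp out of motion detection (camera name and coordinates are placeholders; use the mask editor in the Frigate UI to generate real ones):

```yaml
cameras:
  front_door:
    motion:
      mask:
        - 0,0,400,0,400,50,0,50
```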
|
||||
## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
|
||||
|
||||
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
|
||||
|
||||
@ -40,6 +48,7 @@ On linux, some helpful tools/commands in diagnosing would be:
|
||||
On modern Linux kernels, the system will utilize some swap if it is enabled. Setting `vm.swappiness=1` no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to the same value and disable swapping by setting the following docker/podman run parameters:
|
||||
|
||||
**Compose example**
|
||||
|
||||
```yaml
|
||||
version: "3.9"
|
||||
services:
|
||||
@ -54,6 +63,7 @@ services:
|
||||
```
|
||||
|
||||
**Run command example**
|
||||
|
||||
```
|
||||
--memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
|
||||
```
|
||||
|
@ -1,56 +1,101 @@
|
||||
import type * as Preset from '@docusaurus/preset-classic';
|
||||
import * as path from 'node:path';
|
||||
import type { Config, PluginConfig } from '@docusaurus/types';
|
||||
import type * as OpenApiPlugin from 'docusaurus-plugin-openapi-docs';
|
||||
import type * as Preset from "@docusaurus/preset-classic";
|
||||
import * as path from "node:path";
|
||||
import type { Config, PluginConfig } from "@docusaurus/types";
|
||||
import type * as OpenApiPlugin from "docusaurus-plugin-openapi-docs";
|
||||
|
||||
const config: Config = {
|
||||
title: 'Frigate',
|
||||
tagline: 'NVR With Realtime Object Detection for IP Cameras',
|
||||
url: 'https://docs.frigate.video',
|
||||
baseUrl: '/',
|
||||
onBrokenLinks: 'throw',
|
||||
onBrokenMarkdownLinks: 'warn',
|
||||
favicon: 'img/favicon.ico',
|
||||
organizationName: 'blakeblackshear',
|
||||
projectName: 'frigate',
|
||||
themes: ['@docusaurus/theme-mermaid', 'docusaurus-theme-openapi-docs'],
|
||||
title: "Frigate",
|
||||
tagline: "NVR With Realtime Object Detection for IP Cameras",
|
||||
url: "https://docs.frigate.video",
|
||||
baseUrl: "/",
|
||||
onBrokenLinks: "throw",
|
||||
onBrokenMarkdownLinks: "warn",
|
||||
favicon: "img/favicon.ico",
|
||||
organizationName: "blakeblackshear",
|
||||
projectName: "frigate",
|
||||
themes: [
|
||||
"@docusaurus/theme-mermaid",
|
||||
"docusaurus-theme-openapi-docs",
|
||||
"@inkeep/docusaurus/chatButton",
|
||||
"@inkeep/docusaurus/searchBar",
|
||||
],
|
||||
markdown: {
|
||||
mermaid: true,
|
||||
},
|
||||
themeConfig: {
|
||||
algolia: {
|
||||
appId: 'WIURGBNBPY',
|
||||
apiKey: 'd02cc0a6a61178b25da550212925226b',
|
||||
indexName: 'frigate',
|
||||
announcementBar: {
|
||||
id: 'frigate_plus',
|
||||
content: `
|
||||
<span style="margin-right: 8px; display: inline-block; animation: pulse 2s infinite;">🚀</span>
|
||||
Get more relevant and accurate detections with Frigate+ models.
|
||||
<a style="margin-left: 12px; padding: 3px 10px; background: #94d2bd; color: #001219; text-decoration: none; border-radius: 4px; font-weight: 500; " target="_blank" rel="noopener noreferrer" href="https://frigate.video/plus/">Learn more</a>
|
||||
<span style="margin-left: 8px; display: inline-block; animation: pulse 2s infinite;">✨</span>
|
||||
<style>
|
||||
@keyframes pulse {
|
||||
0%, 100% { transform: scale(1); }
|
||||
50% { transform: scale(1.1); }
|
||||
}
|
||||
</style>`,
|
||||
backgroundColor: '#005f73',
|
||||
textColor: '#e0fbfc',
|
||||
isCloseable: false,
|
||||
},
|
||||
docs: {
|
||||
sidebar: {
|
||||
hideable: true,
|
||||
},
|
||||
},
|
||||
inkeepConfig: {
|
||||
baseSettings: {
|
||||
apiKey: "b1a4c4d73c9b48aa5b3cdae6e4c81f0bb3d1134eeb5a7100",
|
||||
integrationId: "cm6xmhn9h000gs601495fkkdx",
|
||||
organizationId: "org_map2JQEOco8U1ZYY",
|
||||
primaryBrandColor: "#010101",
|
||||
},
|
||||
aiChatSettings: {
|
||||
chatSubjectName: "Frigate",
|
||||
botAvatarSrcUrl: "https://frigate.video/images/favicon.png",
|
||||
getHelpCallToActions: [
|
||||
{
|
||||
name: "GitHub",
|
||||
url: "https://github.com/blakeblackshear/frigate",
|
||||
icon: {
|
||||
builtIn: "FaGithub",
|
||||
},
|
||||
},
|
||||
],
|
||||
quickQuestions: [
|
||||
"How to configure and setup camera settings?",
|
||||
"How to setup notifications?",
|
||||
"Supported builtin detectors?",
|
||||
"How to restream video feed?",
|
||||
"How can I get sound or audio in my recordings?",
|
||||
],
|
||||
},
|
||||
},
|
||||
prism: {
|
||||
additionalLanguages: ['bash', 'json'],
|
||||
additionalLanguages: ["bash", "json"],
|
||||
},
|
||||
languageTabs: [
|
||||
{
|
||||
highlight: 'python',
|
||||
language: 'python',
|
||||
logoClass: 'python',
|
||||
highlight: "python",
|
||||
language: "python",
|
||||
logoClass: "python",
|
||||
},
|
||||
{
|
||||
highlight: 'javascript',
|
||||
language: 'nodejs',
|
||||
logoClass: 'nodejs',
|
||||
highlight: "javascript",
|
||||
language: "nodejs",
|
||||
logoClass: "nodejs",
|
||||
},
|
||||
{
|
||||
highlight: 'javascript',
|
||||
language: 'javascript',
|
||||
logoClass: 'javascript',
|
||||
highlight: "javascript",
|
||||
language: "javascript",
|
||||
logoClass: "javascript",
|
||||
},
|
||||
{
|
||||
highlight: 'bash',
|
||||
language: 'curl',
|
||||
logoClass: 'curl',
|
||||
highlight: "bash",
|
||||
language: "curl",
|
||||
logoClass: "curl",
|
||||
},
|
||||
{
|
||||
highlight: "rust",
|
||||
@ -59,49 +104,49 @@ const config: Config = {
|
||||
},
|
||||
],
|
||||
navbar: {
|
||||
title: 'Frigate',
|
||||
title: "Frigate",
|
||||
logo: {
|
||||
alt: 'Frigate',
|
||||
src: 'img/logo.svg',
|
||||
srcDark: 'img/logo-dark.svg',
|
||||
alt: "Frigate",
|
||||
src: "img/logo.svg",
|
||||
srcDark: "img/logo-dark.svg",
|
||||
},
|
||||
items: [
|
||||
{
|
||||
to: '/',
|
||||
activeBasePath: 'docs',
|
||||
label: 'Docs',
|
||||
position: 'left',
|
||||
to: "/",
|
||||
activeBasePath: "docs",
|
||||
label: "Docs",
|
||||
position: "left",
|
||||
},
|
||||
{
|
||||
href: 'https://frigate.video',
|
||||
label: 'Website',
|
||||
position: 'right',
|
||||
href: "https://frigate.video",
|
||||
label: "Website",
|
||||
position: "right",
|
||||
},
|
||||
{
|
||||
href: 'http://demo.frigate.video',
|
||||
label: 'Demo',
|
||||
position: 'right',
|
||||
href: "http://demo.frigate.video",
|
||||
label: "Demo",
|
||||
position: "right",
|
||||
},
|
||||
{
|
||||
href: 'https://github.com/blakeblackshear/frigate',
|
||||
label: 'GitHub',
|
||||
position: 'right',
|
||||
href: "https://github.com/blakeblackshear/frigate",
|
||||
label: "GitHub",
|
||||
position: "right",
|
||||
},
|
||||
],
|
||||
},
|
||||
footer: {
|
||||
style: 'dark',
|
||||
style: "dark",
|
||||
links: [
|
||||
{
|
||||
title: 'Community',
|
||||
title: "Community",
|
||||
items: [
|
||||
{
|
||||
label: 'GitHub',
|
||||
href: 'https://github.com/blakeblackshear/frigate',
|
||||
label: "GitHub",
|
||||
href: "https://github.com/blakeblackshear/frigate",
|
||||
},
|
||||
{
|
||||
label: 'Discussions',
|
||||
href: 'https://github.com/blakeblackshear/frigate/discussions',
|
||||
label: "Discussions",
|
||||
href: "https://github.com/blakeblackshear/frigate/discussions",
|
||||
},
|
||||
],
|
||||
},
|
||||
@ -110,19 +155,19 @@ const config: Config = {
|
||||
},
|
||||
},
|
||||
plugins: [
|
||||
path.resolve(__dirname, 'plugins', 'raw-loader'),
|
||||
path.resolve(__dirname, "plugins", "raw-loader"),
|
||||
[
|
||||
'docusaurus-plugin-openapi-docs',
|
||||
"docusaurus-plugin-openapi-docs",
|
||||
{
|
||||
id: 'openapi',
|
||||
docsPluginId: 'classic', // configured for preset-classic
|
||||
id: "openapi",
|
||||
docsPluginId: "classic", // configured for preset-classic
|
||||
config: {
|
||||
frigateApi: {
|
||||
specPath: 'static/frigate-api.yaml',
|
||||
outputDir: 'docs/integrations/api',
|
||||
specPath: "static/frigate-api.yaml",
|
||||
outputDir: "docs/integrations/api",
|
||||
sidebarOptions: {
|
||||
groupPathsBy: 'tag',
|
||||
categoryLinkSource: 'tag',
|
||||
groupPathsBy: "tag",
|
||||
categoryLinkSource: "tag",
|
||||
sidebarCollapsible: true,
|
||||
sidebarCollapsed: true,
|
||||
},
|
||||
@ -130,23 +175,24 @@ const config: Config = {
|
||||
} satisfies OpenApiPlugin.Options,
|
||||
},
|
||||
},
|
||||
]
|
||||
],
|
||||
] as PluginConfig[],
|
||||
presets: [
|
||||
[
|
||||
'classic',
|
||||
"classic",
|
||||
{
|
||||
docs: {
|
||||
routeBasePath: '/',
|
||||
sidebarPath: './sidebars.ts',
|
||||
routeBasePath: "/",
|
||||
sidebarPath: "./sidebars.ts",
|
||||
// Please change this to your repo.
|
||||
editUrl: 'https://github.com/blakeblackshear/frigate/edit/master/docs/',
|
||||
editUrl:
|
||||
"https://github.com/blakeblackshear/frigate/edit/master/docs/",
|
||||
sidebarCollapsible: false,
|
||||
docItemComponent: '@theme/ApiItem', // Derived from docusaurus-theme-openapi
|
||||
docItemComponent: "@theme/ApiItem", // Derived from docusaurus-theme-openapi
|
||||
},
|
||||
|
||||
theme: {
|
||||
customCss: './src/css/custom.css',
|
||||
customCss: "./src/css/custom.css",
|
||||
},
|
||||
} satisfies Preset.Options,
|
||||
],
|
||||
|
7076
docs/package-lock.json
generated
7076
docs/package-lock.json
generated
File diff suppressed because it is too large
@ -17,15 +17,16 @@
|
||||
"write-heading-ids": "docusaurus write-heading-ids"
|
||||
},
|
||||
"dependencies": {
|
||||
"@docusaurus/core": "^3.5.2",
|
||||
"@docusaurus/preset-classic": "^3.5.2",
|
||||
"@docusaurus/theme-mermaid": "^3.5.2",
|
||||
"@docusaurus/plugin-content-docs": "^3.5.2",
|
||||
"@mdx-js/react": "^3.0.1",
|
||||
"@docusaurus/core": "^3.6.3",
|
||||
"@docusaurus/plugin-content-docs": "^3.6.3",
|
||||
"@docusaurus/preset-classic": "^3.6.3",
|
||||
"@docusaurus/theme-mermaid": "^3.6.3",
|
||||
"@inkeep/docusaurus": "^2.0.16",
|
||||
"@mdx-js/react": "^3.1.0",
|
||||
"clsx": "^2.1.1",
|
||||
"docusaurus-plugin-openapi-docs": "^4.1.0",
|
||||
"docusaurus-theme-openapi-docs": "^4.1.0",
|
||||
"prism-react-renderer": "^2.4.0",
|
||||
"docusaurus-plugin-openapi-docs": "^4.3.1",
|
||||
"docusaurus-theme-openapi-docs": "^4.3.1",
|
||||
"prism-react-renderer": "^2.4.1",
|
||||
"raw-loader": "^4.0.2",
|
||||
"react": "^18.3.1",
|
||||
"react-dom": "^18.3.1"
|
||||
|
@ -7,7 +7,9 @@ const sidebars: SidebarsConfig = {
|
||||
Frigate: [
|
||||
'frigate/index',
|
||||
'frigate/hardware',
|
||||
'frigate/planning_setup',
|
||||
'frigate/installation',
|
||||
'frigate/updating',
|
||||
'frigate/camera_setup',
|
||||
'frigate/video_pipeline',
|
||||
'frigate/glossary',
|
||||
@ -86,8 +88,8 @@ const sidebars: SidebarsConfig = {
|
||||
],
|
||||
'Frigate+': [
|
||||
'plus/index',
|
||||
'plus/annotating',
|
||||
'plus/first_model',
|
||||
'plus/improving_model',
|
||||
'plus/faq',
|
||||
],
|
||||
Troubleshooting: [
|
||||
|
BIN
docs/static/img/plus/suggestions.webp
vendored
Normal file
BIN
docs/static/img/plus/suggestions.webp
vendored
Normal file
Binary file not shown.
After Width: | Height: | Size: 71 KiB |
@ -21,13 +21,13 @@ from frigate.api.defs.query.app_query_parameters import AppTimelineHourlyQueryPa
|
||||
from frigate.api.defs.request.app_body import AppConfigSetBody
|
||||
from frigate.api.defs.tags import Tags
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.const import CONFIG_DIR
|
||||
from frigate.models import Event, Timeline
|
||||
from frigate.util.builtin import (
|
||||
clean_camera_user_pass,
|
||||
get_tz_modifiers,
|
||||
update_yaml_from_url,
|
||||
)
|
||||
from frigate.util.config import find_config_file
|
||||
from frigate.util.services import (
|
||||
ffprobe_stream,
|
||||
get_nvidia_driver_info,
|
||||
@ -134,9 +134,28 @@ def config(request: Request):
|
||||
for zone_name, zone in config_obj.cameras[camera_name].zones.items():
|
||||
camera_dict["zones"][zone_name]["color"] = zone.color
|
||||
|
||||
# remove go2rtc stream passwords
|
||||
go2rtc: dict[str, any] = config_obj.go2rtc.model_dump(
|
||||
mode="json", warnings="none", exclude_none=True
|
||||
)
|
||||
for stream_name, stream in go2rtc.get("streams", {}).items():
|
||||
if stream is None:
|
||||
continue
|
||||
if isinstance(stream, str):
|
||||
cleaned = clean_camera_user_pass(stream)
|
||||
else:
|
||||
cleaned = []
|
||||
|
||||
for item in stream:
|
||||
cleaned.append(clean_camera_user_pass(item))
|
||||
|
||||
config["go2rtc"]["streams"][stream_name] = cleaned
|
||||
|
||||
config["plus"] = {"enabled": request.app.frigate_config.plus_api.is_active()}
|
||||
config["model"]["colormap"] = config_obj.model.colormap
|
||||
config["model"]["all_attributes"] = config_obj.model.all_attributes
|
||||
|
||||
# use merged labelmap
|
||||
for detector_config in config["detectors"].values():
|
||||
detector_config["model"]["labelmap"] = (
|
||||
request.app.frigate_config.model.merged_labelmap
|
||||
@ -147,13 +166,7 @@ def config(request: Request):
|
||||
|
||||
@router.get("/config/raw")
|
||||
def config_raw():
|
||||
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
|
||||
|
||||
# Check if we can use .yaml instead of .yml
|
||||
config_file_yaml = config_file.replace(".yml", ".yaml")
|
||||
|
||||
if os.path.isfile(config_file_yaml):
|
||||
config_file = config_file_yaml
|
||||
config_file = find_config_file()
|
||||
|
||||
if not os.path.isfile(config_file):
|
||||
return JSONResponse(
|
||||
@ -198,13 +211,7 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
|
||||
|
||||
# Save the config to file
|
||||
try:
|
||||
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
|
||||
|
||||
# Check if we can use .yaml instead of .yml
|
||||
config_file_yaml = config_file.replace(".yml", ".yaml")
|
||||
|
||||
if os.path.isfile(config_file_yaml):
|
||||
config_file = config_file_yaml
|
||||
config_file = find_config_file()
|
||||
|
||||
with open(config_file, "w") as f:
|
||||
f.write(new_config)
|
||||
@ -253,13 +260,7 @@ def config_save(save_option: str, body: Any = Body(media_type="text/plain")):
|
||||
|
||||
@router.put("/config/set")
|
||||
def config_set(request: Request, body: AppConfigSetBody):
|
||||
config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")
|
||||
|
||||
# Check if we can use .yaml instead of .yml
|
||||
config_file_yaml = config_file.replace(".yml", ".yaml")
|
||||
|
||||
if os.path.isfile(config_file_yaml):
|
||||
config_file = config_file_yaml
|
||||
config_file = find_config_file()
|
||||
|
||||
with open(config_file, "r") as f:
|
||||
old_raw_config = f.read()
|
||||
|
@ -26,14 +26,13 @@ from frigate.storage import StorageMaintainer
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def check_csrf(request: Request):
|
||||
def check_csrf(request: Request) -> bool:
|
||||
if request.method in ["GET", "HEAD", "OPTIONS", "TRACE"]:
|
||||
pass
|
||||
return True
|
||||
if "origin" in request.headers and "x-csrf-token" not in request.headers:
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Missing CSRF header"},
|
||||
status_code=401,
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
# Used to retrieve the remote-user header: https://starlette-context.readthedocs.io/en/latest/plugins.html#easy-mode
|
||||
@ -71,7 +70,12 @@ def create_fastapi_app(
|
||||
@app.middleware("http")
|
||||
async def frigate_middleware(request: Request, call_next):
|
||||
# Before request
|
||||
check_csrf(request)
|
||||
if not check_csrf(request):
|
||||
return JSONResponse(
|
||||
content={"success": False, "message": "Missing CSRF header"},
|
||||
status_code=401,
|
||||
)
|
||||
|
||||
if database.is_closed():
|
||||
database.connect()
|
||||
|
||||
|
@ -133,6 +133,15 @@ def latest_frame(
|
||||
"regions": params.regions,
|
||||
}
|
||||
quality = params.quality
|
||||
mime_type = extension
|
||||
|
||||
if extension == "png":
|
||||
quality_params = None
|
||||
elif extension == "webp":
|
||||
quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality]
|
||||
else:
|
||||
quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
|
||||
mime_type = "jpeg"
|
||||
|
||||
if camera_name in request.app.frigate_config.cameras:
|
||||
frame = frame_processor.get_current_frame(camera_name, draw_options)
|
||||
@ -173,13 +182,11 @@ def latest_frame(
|
||||
|
||||
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
||||
|
||||
ret, img = cv2.imencode(
|
||||
f".{extension}", frame, [int(cv2.IMWRITE_WEBP_QUALITY), quality]
|
||||
)
|
||||
ret, img = cv2.imencode(f".{extension}", frame, quality_params)
|
||||
return Response(
|
||||
content=img.tobytes(),
|
||||
media_type=f"image/{extension}",
|
||||
headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
|
||||
media_type=f"image/{mime_type}",
|
||||
headers={"Content-Type": f"image/{mime_type}", "Cache-Control": "no-store"},
|
||||
)
|
||||
elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream:
|
||||
frame = cv2.cvtColor(
|
||||
@ -192,13 +199,11 @@ def latest_frame(
|
||||
|
||||
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
|
||||
|
||||
ret, img = cv2.imencode(
|
||||
f".{extension}", frame, [int(cv2.IMWRITE_WEBP_QUALITY), quality]
|
||||
)
|
||||
ret, img = cv2.imencode(f".{extension}", frame, quality_params)
|
||||
return Response(
|
||||
content=img.tobytes(),
|
||||
media_type=f"image/{extension}",
|
||||
headers={"Content-Type": f"image/{extension}", "Cache-Control": "no-store"},
|
||||
media_type=f"image/{mime_type}",
|
||||
headers={"Content-Type": f"image/{mime_type}", "Cache-Control": "no-store"},
|
||||
)
|
||||
else:
|
||||
return JSONResponse(
|
||||
@ -241,6 +246,7 @@ def get_snapshot_from_recording(
|
||||
recording: Recordings = recording_query.get()
|
||||
time_in_segment = frame_time - recording.start_time
|
||||
codec = "png" if format == "png" else "mjpeg"
|
||||
mime_type = "png" if format == "png" else "jpeg"
|
||||
config: FrigateConfig = request.app.frigate_config
|
||||
|
||||
image_data = get_image_from_recording(
|
||||
@ -257,7 +263,7 @@ def get_snapshot_from_recording(
|
||||
),
|
||||
status_code=404,
|
||||
)
|
||||
return Response(image_data, headers={"Content-Type": f"image/{format}"})
|
||||
return Response(image_data, headers={"Content-Type": f"image/{mime_type}"})
|
||||
except DoesNotExist:
|
||||
return JSONResponse(
|
||||
content={
|
||||
|
@ -490,8 +490,6 @@ def set_not_reviewed(review_id: str):
|
||||
review.save()
|
||||
|
||||
return JSONResponse(
|
||||
content=(
|
||||
{"success": True, "message": "Set Review " + review_id + " as not viewed"}
|
||||
),
|
||||
content=({"success": True, "message": f"Set Review {review_id} as not viewed"}),
|
||||
status_code=200,
|
||||
)
|
||||
|
@ -71,6 +71,7 @@ from frigate.timeline import TimelineProcessor
|
||||
from frigate.util.builtin import empty_and_close_queue
|
||||
from frigate.util.image import SharedMemoryFrameManager, UntrackedSharedMemory
|
||||
from frigate.util.object import get_camera_regions_grid
|
||||
from frigate.util.services import set_file_limit
|
||||
from frigate.version import VERSION
|
||||
from frigate.video import capture_camera, track_camera
|
||||
from frigate.watchdog import FrigateWatchdog
|
||||
@ -437,7 +438,7 @@ class FrigateApp:
|
||||
# pre-create shms
|
||||
for i in range(shm_frame_count):
|
||||
frame_size = config.frame_shape_yuv[0] * config.frame_shape_yuv[1]
|
||||
self.frame_manager.create(f"{config.name}_{i}", frame_size)
|
||||
self.frame_manager.create(f"{config.name}_frame{i}", frame_size)
|
||||
|
||||
capture_process = util.Process(
|
||||
target=capture_camera,
|
||||
@ -587,6 +588,9 @@ class FrigateApp:
|
||||
# Ensure global state.
|
||||
self.ensure_dirs()
|
||||
|
||||
# Set soft file limits.
|
||||
set_file_limit()
|
||||
|
||||
# Start frigate services.
|
||||
self.init_camera_metrics()
|
||||
self.init_queues()
|
||||
|
@ -151,7 +151,7 @@ class WebPushClient(Communicator): # type: ignore[misc]
|
||||
camera: str = payload["after"]["camera"]
|
||||
title = f"{', '.join(sorted_objects).replace('_', ' ').title()}{' was' if state == 'end' else ''} detected in {', '.join(payload['after']['data']['zones']).replace('_', ' ').title()}"
|
||||
message = f"Detected on {camera.replace('_', ' ').title()}"
|
||||
image = f'{payload["after"]["thumb_path"].replace("/media/frigate", "")}'
|
||||
image = f"{payload['after']['thumb_path'].replace('/media/frigate', '')}"
|
||||
|
||||
# if event is ongoing open to live view otherwise open to recordings view
|
||||
direct_url = f"/review?id={reviewId}" if state == "end" else f"/#{camera}"
|
||||
|
@ -38,6 +38,10 @@ class GenAICameraConfig(BaseModel):
|
||||
default_factory=list,
|
||||
title="List of required zones to be entered in order to run generative AI.",
|
||||
)
|
||||
debug_save_thumbnails: bool = Field(
|
||||
default=False,
|
||||
title="Save thumbnails sent to generative AI for debugging purposes.",
|
||||
)
|
||||
|
||||
@field_validator("required_zones", mode="before")
|
||||
@classmethod
|
||||
|
@ -74,6 +74,7 @@ class OnvifConfig(FrigateBaseModel):
|
||||
port: int = Field(default=8000, title="Onvif Port")
|
||||
user: Optional[EnvString] = Field(default=None, title="Onvif Username")
|
||||
password: Optional[EnvString] = Field(default=None, title="Onvif Password")
|
||||
tls_insecure: bool = Field(default=False, title="Onvif Disable TLS verification")
|
||||
autotracking: PtzAutotrackConfig = Field(
|
||||
default_factory=PtzAutotrackConfig,
|
||||
title="PTZ auto tracking config.",
|
||||
|
@ -4,6 +4,7 @@ from typing import Optional
|
||||
from pydantic import Field
|
||||
|
||||
from frigate.const import MAX_PRE_CAPTURE
|
||||
from frigate.review.types import SeverityEnum
|
||||
|
||||
from ..base import FrigateBaseModel
|
||||
|
||||
@ -101,3 +102,15 @@ class RecordConfig(FrigateBaseModel):
|
||||
self.alerts.pre_capture,
|
||||
self.detections.pre_capture,
|
||||
)
|
||||
|
||||
def get_review_pre_capture(self, severity: SeverityEnum) -> int:
|
||||
if severity == SeverityEnum.alert:
|
||||
return self.alerts.pre_capture
|
||||
else:
|
||||
return self.detections.pre_capture
|
||||
|
||||
def get_review_post_capture(self, severity: SeverityEnum) -> int:
|
||||
if severity == SeverityEnum.alert:
|
||||
return self.alerts.post_capture
|
||||
else:
|
||||
return self.detections.post_capture
|
||||
|
@ -85,7 +85,7 @@ class ZoneConfig(BaseModel):
|
||||
if explicit:
|
||||
self.coordinates = ",".join(
|
||||
[
|
||||
f'{round(int(p.split(",")[0]) / frame_shape[1], 3)},{round(int(p.split(",")[1]) / frame_shape[0], 3)}'
|
||||
f"{round(int(p.split(',')[0]) / frame_shape[1], 3)},{round(int(p.split(',')[1]) / frame_shape[0], 3)}"
|
||||
for p in coordinates
|
||||
]
|
||||
)
|
||||
|
@ -29,6 +29,7 @@ from frigate.util.builtin import (
|
||||
)
|
||||
from frigate.util.config import (
|
||||
StreamInfoRetriever,
|
||||
find_config_file,
|
||||
get_relative_coordinates,
|
||||
migrate_frigate_config,
|
||||
)
|
||||
@ -67,7 +68,6 @@ logger = logging.getLogger(__name__)
|
||||
|
||||
yaml = YAML()
|
||||
|
||||
DEFAULT_CONFIG_FILE = "/config/config.yml"
|
||||
DEFAULT_CONFIG = """
|
||||
mqtt:
|
||||
enabled: False
|
||||
@ -594,35 +594,27 @@ class FrigateConfig(FrigateBaseModel):
|
||||
if isinstance(detector, dict)
|
||||
else detector.model_dump(warnings="none")
|
||||
)
|
||||
detector_config: DetectorConfig = adapter.validate_python(model_dict)
|
||||
if detector_config.model is None:
|
||||
detector_config.model = self.model.model_copy()
|
||||
else:
|
||||
path = detector_config.model.path
|
||||
detector_config.model = self.model.model_copy()
|
||||
detector_config.model.path = path
|
||||
detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
|
||||
|
||||
if "path" not in model_dict or len(model_dict.keys()) > 1:
|
||||
logger.warning(
|
||||
"Customizing more than a detector model path is unsupported."
|
||||
)
|
||||
# users should not set model themselves
|
||||
if detector_config.model:
|
||||
detector_config.model = None
|
||||
|
||||
merged_model = deep_merge(
|
||||
detector_config.model.model_dump(exclude_unset=True, warnings="none"),
|
||||
self.model.model_dump(exclude_unset=True, warnings="none"),
|
||||
)
|
||||
model_config = self.model.model_dump(exclude_unset=True, warnings="none")
|
||||
|
||||
if "path" not in merged_model:
|
||||
if detector_config.model_path:
|
||||
model_config["path"] = detector_config.model_path
|
||||
|
||||
if "path" not in model_config:
|
||||
if detector_config.type == "cpu":
|
||||
merged_model["path"] = "/cpu_model.tflite"
|
||||
model_config["path"] = "/cpu_model.tflite"
|
||||
elif detector_config.type == "edgetpu":
|
||||
merged_model["path"] = "/edgetpu_model.tflite"
|
||||
model_config["path"] = "/edgetpu_model.tflite"
|
||||
|
||||
detector_config.model = ModelConfig.model_validate(merged_model)
|
||||
detector_config.model.check_and_load_plus_model(
|
||||
self.plus_api, detector_config.type
|
||||
)
|
||||
detector_config.model.compute_model_hash()
|
||||
model = ModelConfig.model_validate(model_config)
|
||||
model.check_and_load_plus_model(self.plus_api, detector_config.type)
|
||||
model.compute_model_hash()
|
||||
detector_config.model = model
|
||||
self.detectors[key] = detector_config
|
||||
|
||||
return self
|
||||
@ -638,16 +630,13 @@ class FrigateConfig(FrigateBaseModel):
|
||||
|
||||
@classmethod
|
||||
def load(cls, **kwargs):
|
||||
config_path = os.environ.get("CONFIG_FILE", DEFAULT_CONFIG_FILE)
|
||||
|
||||
if not os.path.isfile(config_path):
|
||||
config_path = config_path.replace("yml", "yaml")
|
||||
config_path = find_config_file()
|
||||
|
||||
# No configuration file found, create one.
|
||||
new_config = False
|
||||
if not os.path.isfile(config_path):
|
||||
logger.info("No config file found, saving default config")
|
||||
config_path = DEFAULT_CONFIG_FILE
|
||||
config_path = config_path
|
||||
new_config = True
|
||||
else:
|
||||
# Check if the config file needs to be migrated.
|
||||
|
@ -194,6 +194,9 @@ class BaseDetectorConfig(BaseModel):
|
||||
model: Optional[ModelConfig] = Field(
|
||||
default=None, title="Detector specific model configuration."
|
||||
)
|
||||
model_path: Optional[str] = Field(
|
||||
default=None, title="Detector specific model path."
|
||||
)
|
||||
model_config = ConfigDict(
|
||||
extra="allow", arbitrary_types_allowed=True, protected_namespaces=()
|
||||
)
|
||||
|
@ -32,6 +32,7 @@ class DeepStack(DetectionApi):
|
||||
self.api_timeout = detector_config.api_timeout
|
||||
self.api_key = detector_config.api_key
|
||||
self.labels = detector_config.model.merged_labelmap
|
||||
self.session = requests.Session()
|
||||
|
||||
def get_label_index(self, label_value):
|
||||
if label_value.lower() == "truck":
|
||||
@ -51,7 +52,7 @@ class DeepStack(DetectionApi):
|
||||
data = {"api_key": self.api_key}
|
||||
|
||||
try:
|
||||
response = requests.post(
|
||||
response = self.session.post(
|
||||
self.api_url,
|
||||
data=data,
|
||||
files={"image": image_bytes},
|
||||
|
@ -136,17 +136,17 @@ class Rknn(DetectionApi):
|
||||
def check_config(self, config):
|
||||
if (config.model.width != 320) or (config.model.height != 320):
|
||||
raise Exception(
|
||||
"Make sure to set the model width and height to 320 in your config.yml."
|
||||
"Make sure to set the model width and height to 320 in your config."
|
||||
)
|
||||
|
||||
if config.model.input_pixel_format != "bgr":
|
||||
raise Exception(
|
||||
'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
|
||||
'Make sure to set the model input_pixel_format to "bgr" in your config.'
|
||||
)
|
||||
|
||||
if config.model.input_tensor != "nhwc":
|
||||
raise Exception(
|
||||
'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
|
||||
'Make sure to set the model input_tensor to "nhwc" in your config.'
|
||||
)
|
||||
|
||||
def detect_raw(self, tensor_input):
|
||||
|
@ -219,19 +219,19 @@ class TensorRtDetector(DetectionApi):
|
||||
]
|
||||
|
||||
def __init__(self, detector_config: TensorRTDetectorConfig):
|
||||
assert (
|
||||
TRT_SUPPORT
|
||||
), f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
|
||||
assert TRT_SUPPORT, (
|
||||
f"TensorRT libraries not found, {DETECTOR_KEY} detector not present"
|
||||
)
|
||||
|
||||
(cuda_err,) = cuda.cuInit(0)
|
||||
assert (
|
||||
cuda_err == cuda.CUresult.CUDA_SUCCESS
|
||||
), f"Failed to initialize cuda {cuda_err}"
|
||||
assert cuda_err == cuda.CUresult.CUDA_SUCCESS, (
|
||||
f"Failed to initialize cuda {cuda_err}"
|
||||
)
|
||||
err, dev_count = cuda.cuDeviceGetCount()
|
||||
logger.debug(f"Num Available Devices: {dev_count}")
|
||||
assert (
|
||||
detector_config.device < dev_count
|
||||
), f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
|
||||
assert detector_config.device < dev_count, (
|
||||
f"Invalid TensorRT Device Config. Device {detector_config.device} Invalid."
|
||||
)
|
||||
err, self.cu_ctx = cuda.cuCtxCreate(
|
||||
cuda.CUctx_flags.CU_CTX_MAP_HOST, detector_config.device
|
||||
)
|
||||
|
@ -5,6 +5,7 @@ import logging
|
||||
import os
|
||||
import threading
|
||||
from multiprocessing.synchronize import Event as MpEvent
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import cv2
|
||||
@ -217,16 +218,47 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
_, buffer = cv2.imencode(".jpg", cropped_image)
|
||||
snapshot_image = buffer.tobytes()
|
||||
|
||||
num_thumbnails = len(self.tracked_events.get(event_id, []))
|
||||
|
||||
embed_image = (
|
||||
[snapshot_image]
|
||||
if event.has_snapshot and camera_config.genai.use_snapshot
|
||||
else (
|
||||
[thumbnail for data in self.tracked_events[event_id]]
|
||||
if len(self.tracked_events.get(event_id, [])) > 0
|
||||
[
|
||||
data["thumbnail"]
|
||||
for data in self.tracked_events[event_id]
|
||||
]
|
||||
if num_thumbnails > 0
|
||||
else [thumbnail]
|
||||
)
|
||||
)
|
||||
|
||||
if camera_config.genai.debug_save_thumbnails and num_thumbnails > 0:
|
||||
logger.debug(
|
||||
f"Saving {num_thumbnails} thumbnails for event {event.id}"
|
||||
)
|
||||
|
||||
Path(
|
||||
os.path.join(CLIPS_DIR, f"genai-requests/{event.id}")
|
||||
).mkdir(parents=True, exist_ok=True)
|
||||
|
||||
for idx, data in enumerate(self.tracked_events[event_id], 1):
|
||||
jpg_bytes: bytes = data["thumbnail"]
|
||||
|
||||
if jpg_bytes is None:
|
||||
logger.warning(
|
||||
f"Unable to save thumbnail {idx} for {event.id}."
|
||||
)
|
||||
else:
|
||||
with open(
|
||||
os.path.join(
|
||||
CLIPS_DIR,
|
||||
f"genai-requests/{event.id}/{idx}.jpg",
|
||||
),
|
||||
"wb",
|
||||
) as j:
|
||||
j.write(jpg_bytes)
|
||||
|
||||
# Generate the description. Call happens in a thread since it is network bound.
|
||||
threading.Thread(
|
||||
target=self._embed_description,
|
||||
@ -325,18 +357,25 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
)
|
||||
|
||||
if event.has_snapshot and source == "snapshot":
|
||||
with open(
|
||||
os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg"),
|
||||
"rb",
|
||||
) as image_file:
|
||||
snapshot_file = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.jpg")
|
||||
|
||||
if not os.path.isfile(snapshot_file):
|
||||
logger.error(
|
||||
f"Cannot regenerate description for {event.id}, snapshot file not found: {snapshot_file}"
|
||||
)
|
||||
return
|
||||
|
||||
with open(snapshot_file, "rb") as image_file:
|
||||
snapshot_image = image_file.read()
|
||||
img = cv2.imdecode(
|
||||
np.frombuffer(snapshot_image, dtype=np.int8), cv2.IMREAD_COLOR
|
||||
)
|
||||
|
||||
# crop snapshot based on region before sending off to genai
|
||||
# provide full image if region doesn't exist (manual events)
|
||||
region = event.data.get("region", [0, 0, 1, 1])
|
||||
height, width = img.shape[:2]
|
||||
x1_rel, y1_rel, width_rel, height_rel = event.data["region"]
|
||||
x1_rel, y1_rel, width_rel, height_rel = region
|
||||
|
||||
x1, y1 = int(x1_rel * width), int(y1_rel * height)
|
||||
cropped_image = img[
|
||||
@ -350,7 +389,7 @@ class EmbeddingMaintainer(threading.Thread):
|
||||
[snapshot_image]
|
||||
if event.has_snapshot and source == "snapshot"
|
||||
else (
|
||||
[thumbnail for data in self.tracked_events[event_id]]
|
||||
[data["thumbnail"] for data in self.tracked_events[event_id]]
|
||||
if len(self.tracked_events.get(event_id, [])) > 0
|
||||
else [thumbnail]
|
||||
)
|
||||
|
@ -121,8 +121,8 @@ class EventCleanup(threading.Thread):
|
||||
|
||||
events_to_update = []
|
||||
|
||||
for batch in query.iterator():
|
||||
events_to_update.extend([event.id for event in batch])
|
||||
for event in query.iterator():
|
||||
events_to_update.append(event.id)
|
||||
if len(events_to_update) >= CHUNK_SIZE:
|
||||
logger.debug(
|
||||
f"Updating {update_params} for {len(events_to_update)} events"
|
||||
@ -256,8 +256,9 @@ class EventCleanup(threading.Thread):
|
||||
|
||||
events_to_update = []
|
||||
|
||||
for batch in query.iterator():
|
||||
events_to_update.extend([event.id for event in batch])
|
||||
for event in query.iterator():
|
||||
events_to_update.append(event.id)
|
||||
|
||||
if len(events_to_update) >= CHUNK_SIZE:
|
||||
logger.debug(
|
||||
f"Updating {update_params} for {len(events_to_update)} events"
|
||||
@ -330,9 +331,8 @@ class EventCleanup(threading.Thread):
|
||||
|
||||
def run(self) -> None:
|
||||
# only expire events every 5 minutes
|
||||
while not self.stop_event.wait(1):
|
||||
while not self.stop_event.wait(300):
|
||||
events_with_expired_clips = self.expire_clips()
|
||||
return
|
||||
|
||||
# delete timeline entries for events that have expired recordings
|
||||
# delete up to 100,000 at a time
|
||||
|
@ -82,18 +82,23 @@ class EventProcessor(threading.Thread):
|
||||
)
|
||||
|
||||
if source_type == EventTypeEnum.tracked_object:
|
||||
id = event_data["id"]
|
||||
self.timeline_queue.put(
|
||||
(
|
||||
camera,
|
||||
source_type,
|
||||
event_type,
|
||||
self.events_in_process.get(event_data["id"]),
|
||||
self.events_in_process.get(id),
|
||||
event_data,
|
||||
)
|
||||
)
|
||||
|
||||
if event_type == EventStateEnum.start:
|
||||
self.events_in_process[event_data["id"]] = event_data
|
||||
# if this is the first message, just store it and continue, its not time to insert it in the db
|
||||
if (
|
||||
event_type == EventStateEnum.start
|
||||
or id not in self.events_in_process
|
||||
):
|
||||
self.events_in_process[id] = event_data
|
||||
continue
|
||||
|
||||
self.handle_object_detection(event_type, camera, event_data)
|
||||
@ -123,10 +128,6 @@ class EventProcessor(threading.Thread):
|
||||
"""handle tracked object event updates."""
|
||||
updated_db = False
|
||||
|
||||
# if this is the first message, just store it and continue, its not time to insert it in the db
|
||||
if event_type == EventStateEnum.start:
|
||||
self.events_in_process[event_data["id"]] = event_data
|
||||
|
||||
if should_update_db(self.events_in_process[event_data["id"]], event_data):
|
||||
updated_db = True
|
||||
camera_config = self.config.cameras[camera]
|
||||
|
@ -50,16 +50,9 @@ class LibvaGpuSelector:
return ""

FPS_VFR_PARAM = (
"-fps_mode vfr"
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") >= 59
else "-vsync 2"
)
TIMEOUT_PARAM = (
"-timeout"
if int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") >= 59
else "-stimeout"
)
LIBAV_VERSION = int(os.getenv("LIBAVFORMAT_VERSION_MAJOR", "59") or "59")
FPS_VFR_PARAM = "-fps_mode vfr" if LIBAV_VERSION >= 59 else "-vsync 2"
TIMEOUT_PARAM = "-timeout" if LIBAV_VERSION >= 59 else "-stimeout"

_gpu_selector = LibvaGpuSelector()
_user_agent_args = [

@ -71,8 +64,8 @@ PRESETS_HW_ACCEL_DECODE = {
"preset-rpi-64-h264": "-c:v:1 h264_v4l2m2m",
"preset-rpi-64-h265": "-c:v:1 hevc_v4l2m2m",
FFMPEG_HWACCEL_VAAPI: f"-hwaccel_flags allow_profile_mismatch -hwaccel vaapi -hwaccel_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format vaapi",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv",
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v hevc_qsv",
"preset-intel-qsv-h264": f"-hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv -c:v h264_qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}", # https://trac.ffmpeg.org/ticket/9766#comment:17
"preset-intel-qsv-h265": f"-load_plugin hevc_hw -hwaccel qsv -qsv_device {_gpu_selector.get_selected_gpu()} -hwaccel_output_format qsv{' -bsf:v dump_extra' if LIBAV_VERSION >= 61 else ''}", # https://trac.ffmpeg.org/ticket/9766#comment:17
FFMPEG_HWACCEL_NVIDIA: "-hwaccel cuda -hwaccel_output_format cuda",
"preset-jetson-h264": "-c:v h264_nvmpi -resize {1}x{2}",
"preset-jetson-h265": "-c:v hevc_nvmpi -resize {1}x{2}",

@ -118,12 +111,12 @@ PRESETS_HW_ACCEL_ENCODE_BIRDSEYE = {
"preset-rpi-64-h265": "{0} -hide_banner {1} -c:v hevc_v4l2m2m {2}",
FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi -g 50 -bf 0 -profile:v high -level:v 4.1 -sei:v 0 -an -vf format=vaapi|nv12,hwupload {2}",
"preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
"preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
"preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v h264_qsv -g 50 -bf 0 -profile:v main -level:v 4.1 -async_depth:v 1 {2}",
FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner {1} -c:v h264_nvenc -g 50 -profile:v high -level:v auto -preset:v p2 -tune:v ll {2}",
"preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
"preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
"preset-jetson-h265": "{0} -hide_banner {1} -c:v h264_nvmpi -profile main {2}",
"preset-rk-h264": "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
"preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v high {2}",
"preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v main {2}",
"default": "{0} -hide_banner {1} -c:v libx264 -g 50 -profile:v high -level:v 4.1 -preset:v superfast -tune:v zerolatency {2}",
}
PRESETS_HW_ACCEL_ENCODE_BIRDSEYE["preset-nvidia-h264"] = (

@ -138,13 +131,13 @@ PRESETS_HW_ACCEL_ENCODE_TIMELAPSE = {
"preset-rpi-64-h265": "{0} -hide_banner {1} -c:v hevc_v4l2m2m -pix_fmt yuv420p {2}",
FFMPEG_HWACCEL_VAAPI: "{0} -hide_banner -hwaccel vaapi -hwaccel_output_format vaapi -hwaccel_device {3} {1} -c:v h264_vaapi {2}",
"preset-intel-qsv-h264": "{0} -hide_banner {1} -c:v h264_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
"preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v hevc_qsv -profile:v high -level:v 4.1 -async_depth:v 1 {2}",
"preset-intel-qsv-h265": "{0} -hide_banner {1} -c:v hevc_qsv -profile:v main -level:v 4.1 -async_depth:v 1 {2}",
FFMPEG_HWACCEL_NVIDIA: "{0} -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {1} -c:v h264_nvenc {2}",
"preset-nvidia-h265": "{0} -hide_banner -hwaccel cuda -hwaccel_output_format cuda -extra_hw_frames 8 {1} -c:v hevc_nvenc {2}",
"preset-jetson-h264": "{0} -hide_banner {1} -c:v h264_nvmpi -profile high {2}",
"preset-jetson-h265": "{0} -hide_banner {1} -c:v hevc_nvmpi -profile high {2}",
"preset-jetson-h265": "{0} -hide_banner {1} -c:v hevc_nvmpi -profile main {2}",
"preset-rk-h264": "{0} -hide_banner {1} -c:v h264_rkmpp -profile:v high {2}",
"preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v high {2}",
"preset-rk-h265": "{0} -hide_banner {1} -c:v hevc_rkmpp -profile:v main {2}",
"default": "{0} -hide_banner {1} -c:v libx264 -preset:v ultrafast -tune:v zerolatency {2}",
}
PRESETS_HW_ACCEL_ENCODE_TIMELAPSE["preset-nvidia-h264"] = (
@ -38,6 +38,11 @@ class OllamaClient(GenAIClient):

def _send(self, prompt: str, images: list[bytes]) -> Optional[str]:
"""Submit a request to Ollama"""
if self.provider is None:
logger.warning(
"Ollama provider has not been initialized, a description will not be generated. Check your Ollama configuration."
)
return None
try:
result = self.provider.generate(
self.genai_config.model,
@ -473,7 +473,7 @@ class CameraState:

if current_frame is not None:
self.current_frame_time = frame_time
self._current_frame = current_frame
self._current_frame = np.copy(current_frame)

if self.previous_frame_id is not None:
self.frame_manager.close(self.previous_frame_id)
@ -68,11 +68,13 @@ class PlusApi:
or self._token_data["expires"] - datetime.datetime.now().timestamp() < 60
):
if self.key is None:
raise Exception("Plus API not activated")
raise Exception(
"Plus API key not set. See https://docs.frigate.video/integrations/plus#set-your-api-key"
)
parts = self.key.split(":")
r = requests.get(f"{self.host}/v1/auth/token", auth=(parts[0], parts[1]))
if not r.ok:
raise Exception("Unable to refresh API token")
raise Exception(f"Unable to refresh API token: {r.text}")
self._token_data = r.json()

def _get_authorization_header(self) -> dict:

@ -116,15 +118,6 @@ class PlusApi:
logger.error(f"Failed to upload original: {r.status_code} {r.text}")
raise Exception(r.text)

# resize and submit annotate
files = {"file": get_jpg_bytes(image, 640, 70)}
data = presigned_urls["annotate"]["fields"]
data["content-type"] = "image/jpeg"
r = requests.post(presigned_urls["annotate"]["url"], files=files, data=data)
if not r.ok:
logger.error(f"Failed to upload annotate: {r.status_code} {r.text}")
raise Exception(r.text)

# resize and submit thumbnail
files = {"file": get_jpg_bytes(image, 200, 70)}
data = presigned_urls["thumbnail"]["fields"]
@ -2,7 +2,6 @@

import copy
import logging
import os
import queue
import threading
import time

@ -29,11 +28,11 @@ from frigate.const import (
AUTOTRACKING_ZOOM_EDGE_THRESHOLD,
AUTOTRACKING_ZOOM_IN_HYSTERESIS,
AUTOTRACKING_ZOOM_OUT_HYSTERESIS,
CONFIG_DIR,
)
from frigate.ptz.onvif import OnvifController
from frigate.track.tracked_object import TrackedObject
from frigate.util.builtin import update_yaml_file
from frigate.util.config import find_config_file
from frigate.util.image import SharedMemoryFrameManager, intersection_over_union

logger = logging.getLogger(__name__)

@ -136,7 +135,7 @@ class PtzMotionEstimator:

try:
logger.debug(
f"{camera}: Motion estimator transformation: {self.coord_transformations.rel_to_abs([[0,0]])}"
f"{camera}: Motion estimator transformation: {self.coord_transformations.rel_to_abs([[0, 0]])}"
)
except Exception:
pass

@ -328,13 +327,7 @@ class PtzAutoTracker:
self.autotracker_init[camera] = True

def _write_config(self, camera):
config_file = os.environ.get("CONFIG_FILE", f"{CONFIG_DIR}/config.yml")

# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")

if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
config_file = find_config_file()

logger.debug(
f"{camera}: Writing new config with autotracker motion coefficients: {self.config.cameras[camera].onvif.autotracking.movement_weights}"

@ -478,7 +471,7 @@ class PtzAutoTracker:
self.onvif.get_camera_status(camera)

logger.info(
f"Calibration for {camera} in progress: {round((step/num_steps)*100)}% complete"
f"Calibration for {camera} in progress: {round((step / num_steps) * 100)}% complete"
)

self.calibrating[camera] = False

@ -697,7 +690,7 @@ class PtzAutoTracker:
f"{camera}: Predicted movement time: {self._predict_movement_time(camera, pan, tilt)}"
)
logger.debug(
f"{camera}: Actual movement time: {self.ptz_metrics[camera].stop_time.value-self.ptz_metrics[camera].start_time.value}"
f"{camera}: Actual movement time: {self.ptz_metrics[camera].stop_time.value - self.ptz_metrics[camera].start_time.value}"
)

# save metrics for better estimate calculations

@ -990,10 +983,10 @@ class PtzAutoTracker:
logger.debug(f"{camera}: Zoom test: at max zoom: {at_max_zoom}")
logger.debug(f"{camera}: Zoom test: at min zoom: {at_min_zoom}")
logger.debug(
f'{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
f"{camera}: Zoom test: zoom in hysteresis limit: {zoom_in_hysteresis} value: {AUTOTRACKING_ZOOM_IN_HYSTERESIS} original: {self.tracked_object_metrics[camera]['original_target_box']} max: {self.tracked_object_metrics[camera]['max_target_box']} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]['target_box']}"
)
logger.debug(
f'{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]["original_target_box"]} max: {self.tracked_object_metrics[camera]["max_target_box"]} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]["target_box"]}'
f"{camera}: Zoom test: zoom out hysteresis limit: {zoom_out_hysteresis} value: {AUTOTRACKING_ZOOM_OUT_HYSTERESIS} original: {self.tracked_object_metrics[camera]['original_target_box']} max: {self.tracked_object_metrics[camera]['max_target_box']} target: {calculated_target_box if calculated_target_box else self.tracked_object_metrics[camera]['target_box']}"
)

# Zoom in conditions (and)

@ -1076,7 +1069,7 @@ class PtzAutoTracker:
pan = ((centroid_x / camera_width) - 0.5) * 2
tilt = (0.5 - (centroid_y / camera_height)) * 2

logger.debug(f'{camera}: Original box: {obj.obj_data["box"]}')
logger.debug(f"{camera}: Original box: {obj.obj_data['box']}")
logger.debug(f"{camera}: Predicted box: {tuple(predicted_box)}")
logger.debug(
f"{camera}: Velocity: {tuple(np.round(average_velocity).flatten().astype(int))}"

@ -1186,7 +1179,7 @@ class PtzAutoTracker:
)
zoom = (ratio - 1) / (ratio + 1)
logger.debug(
f'{camera}: limit: {self.tracked_object_metrics[camera]["max_target_box"]}, ratio: {ratio} zoom calculation: {zoom}'
f"{camera}: limit: {self.tracked_object_metrics[camera]['max_target_box']}, ratio: {ratio} zoom calculation: {zoom}"
)
if not result:
# zoom out with special condition if zooming out because of velocity, edges, etc.
@ -6,6 +6,7 @@ from importlib.util import find_spec
from pathlib import Path

import numpy
import requests
from onvif import ONVIFCamera, ONVIFError
from zeep.exceptions import Fault, TransportError
from zeep.transports import Transport

@ -48,7 +49,11 @@ class OnvifController:

if cam.onvif.host:
try:
transport = Transport(timeout=10, operation_timeout=10)
session = requests.Session()
session.verify = not cam.onvif.tls_insecure
transport = Transport(
timeout=10, operation_timeout=10, session=session
)
self.cams[cam_name] = {
"onvif": ONVIFCamera(
cam.onvif.host,

@ -406,19 +411,19 @@ class OnvifController:
# The onvif spec says this can report as +INF and -INF, so this may need to be modified
pan = numpy.interp(
pan,
[-1, 1],
[
self.cams[camera_name]["relative_fov_range"]["XRange"]["Min"],
self.cams[camera_name]["relative_fov_range"]["XRange"]["Max"],
],
[-1, 1],
)
tilt = numpy.interp(
tilt,
[-1, 1],
[
self.cams[camera_name]["relative_fov_range"]["YRange"]["Min"],
self.cams[camera_name]["relative_fov_range"]["YRange"]["Max"],
],
[-1, 1],
)

move_request.Speed = {

@ -531,11 +536,11 @@ class OnvifController:
# function takes in 0 to 1 for zoom, interpolate to the values of the camera.
zoom = numpy.interp(
zoom,
[0, 1],
[
self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Min"],
self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Max"],
],
[0, 1],
)

move_request.Speed = {"Zoom": speed}

@ -558,22 +563,26 @@ class OnvifController:
if not self._init_onvif(camera_name):
return

if command == OnvifCommandEnum.init:
# already init
return
elif command == OnvifCommandEnum.stop:
self._stop(camera_name)
elif command == OnvifCommandEnum.preset:
self._move_to_preset(camera_name, param)
elif command == OnvifCommandEnum.move_relative:
_, pan, tilt = param.split("_")
self._move_relative(camera_name, float(pan), float(tilt), 0, 1)
elif (
command == OnvifCommandEnum.zoom_in or command == OnvifCommandEnum.zoom_out
):
self._zoom(camera_name, command)
else:
self._move(camera_name, command)
try:
if command == OnvifCommandEnum.init:
# already init
return
elif command == OnvifCommandEnum.stop:
self._stop(camera_name)
elif command == OnvifCommandEnum.preset:
self._move_to_preset(camera_name, param)
elif command == OnvifCommandEnum.move_relative:
_, pan, tilt = param.split("_")
self._move_relative(camera_name, float(pan), float(tilt), 0, 1)
elif (
command == OnvifCommandEnum.zoom_in
or command == OnvifCommandEnum.zoom_out
):
self._zoom(camera_name, command)
else:
self._move(camera_name, command)
except ONVIFError as e:
logger.error(f"Unable to handle onvif command: {e}")

def get_camera_info(self, camera_name: str) -> dict[str, any]:
if camera_name not in self.cams.keys():
@ -121,22 +121,29 @@ class RecordingCleanup(threading.Thread):
review_start = 0
deleted_recordings = set()
kept_recordings: list[tuple[float, float]] = []
recording: Recordings
for recording in recordings:
keep = False
mode = None
# Now look for a reason to keep this recording segment
for idx in range(review_start, len(reviews)):
review: ReviewSegment = reviews[idx]
severity = review.severity
pre_capture = config.record.get_review_pre_capture(severity)
post_capture = config.record.get_review_post_capture(severity)

# if the review starts in the future, stop checking reviews
# and let this recording segment expire
if review.start_time > recording.end_time:
if review.start_time - pre_capture > recording.end_time:
keep = False
break

# if the review is in progress or ends after the recording starts, keep it
# and stop looking at reviews
if review.end_time is None or review.end_time >= recording.start_time:
if (
review.end_time is None
or review.end_time + post_capture >= recording.start_time
):
keep = True
mode = (
config.record.alerts.retain.mode

@ -149,7 +156,7 @@ class RecordingCleanup(threading.Thread):
# this review and check the next review for an overlap.
# since the review and recordings are sorted, we can skip review
# that end before the previous recording segment started on future segments
if review.end_time < recording.start_time:
if review.end_time + post_capture < recording.start_time:
review_start = idx

# Delete recordings outside of the retention window or based on the retention mode
@ -29,6 +29,7 @@ from frigate.const import (
RECORD_DIR,
)
from frigate.models import Recordings, ReviewSegment
from frigate.review.types import SeverityEnum
from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)

@ -194,6 +195,7 @@ class RecordingMaintainer(threading.Thread):
ReviewSegment.select(
ReviewSegment.start_time,
ReviewSegment.end_time,
ReviewSegment.severity,
ReviewSegment.data,
)
.where(

@ -219,11 +221,15 @@ class RecordingMaintainer(threading.Thread):
[r for r in recordings_to_insert if r is not None],
)

def drop_segment(self, cache_path: str) -> None:
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)

async def validate_and_move_segment(
self, camera: str, reviews: list[ReviewSegment], recording: dict[str, any]
) -> None:
cache_path = recording["cache_path"]
start_time = recording["start_time"]
cache_path: str = recording["cache_path"]
start_time: datetime.datetime = recording["start_time"]
record_config = self.config.cameras[camera].record

# Just delete files if recordings are turned off

@ -231,8 +237,7 @@ class RecordingMaintainer(threading.Thread):
camera not in self.config.cameras
or not self.config.cameras[camera].record.enabled
):
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
self.drop_segment(cache_path)
return

if cache_path in self.end_time_cache:

@ -260,24 +265,34 @@ class RecordingMaintainer(threading.Thread):
return

# if cached file's start_time is earlier than the retain days for the camera
# meaning continuous recording is not enabled
if start_time <= (
datetime.datetime.now().astimezone(datetime.timezone.utc)
- datetime.timedelta(days=self.config.cameras[camera].record.retain.days)
):
# if the cached segment overlaps with the events:
# if the cached segment overlaps with the review items:
overlaps = False
for review in reviews:
# if the event starts in the future, stop checking events
severity = SeverityEnum[review.severity]

# if the review item starts in the future, stop checking review items
# and remove this segment
if review.start_time > end_time.timestamp():
if (
review.start_time - record_config.get_review_pre_capture(severity)
) > end_time.timestamp():
overlaps = False
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
break

# if the event is in progress or ends after the recording starts, keep it
# and stop looking at events
if review.end_time is None or review.end_time >= start_time.timestamp():
# if the review item is in progress or ends after the recording starts, keep it
# and stop looking at review items
if (
review.end_time is None
or (
review.end_time
+ record_config.get_review_post_capture(severity)
)
>= start_time.timestamp()
):
overlaps = True
break

@ -296,7 +311,7 @@ class RecordingMaintainer(threading.Thread):
cache_path,
record_mode,
)
# if it doesn't overlap with an event, go ahead and drop the segment
# if it doesn't overlap with an review item, go ahead and drop the segment
# if it ends more than the configured pre_capture for the camera
else:
camera_info = self.object_recordings_info[camera]

@ -307,9 +322,9 @@ class RecordingMaintainer(threading.Thread):
most_recently_processed_frame_time - record_config.event_pre_capture
).astimezone(datetime.timezone.utc)
if end_time < retain_cutoff:
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
self.drop_segment(cache_path)
# else retain days includes this segment
# meaning continuous recording is enabled
else:
# assume that empty means the relevant recording info has not been received yet
camera_info = self.object_recordings_info[camera]

@ -390,8 +405,7 @@ class RecordingMaintainer(threading.Thread):

# check if the segment shouldn't be stored
if segment_info.should_discard_segment(store_mode):
Path(cache_path).unlink(missing_ok=True)
self.end_time_cache.pop(cache_path, None)
self.drop_segment(cache_path)
return

# directory will be in utc due to start_time being in utc

@ -435,7 +449,7 @@ class RecordingMaintainer(threading.Thread):
return None
else:
logger.debug(
f"Copied {file_path} in {datetime.datetime.now().timestamp()-start_frame} seconds."
f"Copied {file_path} in {datetime.datetime.now().timestamp() - start_frame} seconds."
)

try:
@ -256,7 +256,7 @@ class ReviewSegmentMaintainer(threading.Thread):
elif object["sub_label"][0] in self.config.model.all_attributes:
segment.detections[object["id"]] = object["sub_label"][0]
else:
segment.detections[object["id"]] = f'{object["label"]}-verified'
segment.detections[object["id"]] = f"{object['label']}-verified"
segment.sub_labels[object["id"]] = object["sub_label"][0]

# if object is alert label

@ -352,7 +352,7 @@ class ReviewSegmentMaintainer(threading.Thread):
elif object["sub_label"][0] in self.config.model.all_attributes:
detections[object["id"]] = object["sub_label"][0]
else:
detections[object["id"]] = f'{object["label"]}-verified'
detections[object["id"]] = f"{object['label']}-verified"
sub_labels[object["id"]] = object["sub_label"][0]

# if object is alert label

@ -527,7 +527,9 @@ class ReviewSegmentMaintainer(threading.Thread):

if event_id in self.indefinite_events[camera]:
self.indefinite_events[camera].pop(event_id)
current_segment.last_update = manual_info["end_time"]

if len(self.indefinite_events[camera]) == 0:
current_segment.last_update = manual_info["end_time"]
else:
logger.error(
f"Event with ID {event_id} has a set duration and can not be ended manually."
@ -72,8 +72,7 @@ class BaseServiceProcess(Service, ABC):
running = False
except TimeoutError:
self.manager.logger.warning(
f"{self.name} is still running after "
f"{timeout} seconds. Killing."
f"{self.name} is still running after {timeout} seconds. Killing."
)

if running:
@ -293,7 +293,7 @@ def stats_snapshot(
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
try:
storage_stats = shutil.disk_usage(path)
except FileNotFoundError:
except (FileNotFoundError, OSError):
stats["service"]["storage"][path] = {}
continue
@ -17,6 +17,8 @@ bandwidth_equation = Recordings.segment_size / (
Recordings.end_time - Recordings.start_time
)

MAX_CALCULATED_BANDWIDTH = 10000 # 10Gb/hr

class StorageMaintainer(threading.Thread):
"""Maintain frigates recording storage."""

@ -52,6 +54,12 @@ class StorageMaintainer(threading.Thread):
* 3600,
2,
)

if bandwidth > MAX_CALCULATED_BANDWIDTH:
logger.warning(
f"{camera} has a bandwidth of {bandwidth} MB/hr which exceeds the expected maximum. This typically indicates an issue with the cameras recordings."
)
bandwidth = MAX_CALCULATED_BANDWIDTH
except TypeError:
bandwidth = 0
@ -6,6 +6,7 @@ import unittest
from peewee_migrate import Router
from playhouse.sqlite_ext import SqliteExtDatabase
from playhouse.sqliteq import SqliteQueueDatabase
from pydantic import Json

from frigate.api.fastapi_app import create_fastapi_app
from frigate.config import FrigateConfig

@ -123,7 +124,12 @@ class BaseTestHttp(unittest.TestCase):
def insert_mock_event(
self,
id: str,
start_time: datetime.datetime = datetime.datetime.now().timestamp(),
start_time: float = datetime.datetime.now().timestamp(),
end_time: float = datetime.datetime.now().timestamp() + 20,
has_clip: bool = True,
top_score: int = 100,
score: int = 0,
data: Json = {},
) -> Event:
"""Inserts a basic event model with a given id."""
return Event.insert(

@ -131,16 +137,18 @@ class BaseTestHttp(unittest.TestCase):
label="Mock",
camera="front_door",
start_time=start_time,
end_time=start_time + 20,
top_score=100,
end_time=end_time,
top_score=top_score,
score=score,
false_positive=False,
zones=list(),
thumbnail="",
region=[],
box=[],
area=0,
has_clip=True,
has_clip=has_clip,
has_snapshot=True,
data=data,
).execute()

def insert_mock_review_segment(

@ -150,6 +158,7 @@ class BaseTestHttp(unittest.TestCase):
end_time: float = datetime.datetime.now().timestamp() + 20,
severity: SeverityEnum = SeverityEnum.alert,
has_been_reviewed: bool = False,
data: Json = {},
) -> Event:
"""Inserts a review segment model with a given id."""
return ReviewSegment.insert(

@ -160,7 +169,7 @@ class BaseTestHttp(unittest.TestCase):
has_been_reviewed=has_been_reviewed,
severity=severity,
thumb_path=False,
data={},
data=data,
).execute()

def insert_mock_recording(

@ -168,6 +177,7 @@ class BaseTestHttp(unittest.TestCase):
id: str,
start_time: float = datetime.datetime.now().timestamp(),
end_time: float = datetime.datetime.now().timestamp() + 20,
motion: int = 0,
) -> Event:
"""Inserts a recording model with a given id."""
return Recordings.insert(

@ -177,4 +187,5 @@ class BaseTestHttp(unittest.TestCase):
start_time=start_time,
end_time=end_time,
duration=end_time - start_time,
motion=motion,
).execute()
frigate/test/http_api/test_http_app.py (new file, 26 lines)
@ -0,0 +1,26 @@
from unittest.mock import Mock

from fastapi.testclient import TestClient

from frigate.models import Event, Recordings, ReviewSegment
from frigate.stats.emitter import StatsEmitter
from frigate.test.http_api.base_http_test import BaseTestHttp


class TestHttpApp(BaseTestHttp):
def setUp(self):
super().setUp([Event, Recordings, ReviewSegment])
self.app = super().create_app()

####################################################################################################################
################################### GET /stats Endpoint #########################################################
####################################################################################################################
def test_stats_endpoint(self):
stats = Mock(spec=StatsEmitter)
stats.get_latest_stats.return_value = self.test_stats
app = super().create_app(stats)

with TestClient(app) as client:
response = client.get("/stats")
response_json = response.json()
assert response_json == self.test_stats
frigate/test/http_api/test_http_event.py (new file, 137 lines)
@ -0,0 +1,137 @@
from datetime import datetime

from fastapi.testclient import TestClient

from frigate.models import Event, Recordings, ReviewSegment
from frigate.test.http_api.base_http_test import BaseTestHttp


class TestHttpApp(BaseTestHttp):
def setUp(self):
super().setUp([Event, Recordings, ReviewSegment])
self.app = super().create_app()

####################################################################################################################
################################### GET /events Endpoint #########################################################
####################################################################################################################
def test_get_event_list_no_events(self):
with TestClient(self.app) as client:
events = client.get("/events").json()
assert len(events) == 0

def test_get_event_list_no_match_event_id(self):
id = "123456.random"
with TestClient(self.app) as client:
super().insert_mock_event(id)
events = client.get("/events", params={"event_id": "abc"}).json()
assert len(events) == 0

def test_get_event_list_match_event_id(self):
id = "123456.random"
with TestClient(self.app) as client:
super().insert_mock_event(id)
events = client.get("/events", params={"event_id": id}).json()
assert len(events) == 1
assert events[0]["id"] == id

def test_get_event_list_match_length(self):
now = int(datetime.now().timestamp())

id = "123456.random"
with TestClient(self.app) as client:
super().insert_mock_event(id, now, now + 1)
events = client.get(
"/events", params={"max_length": 1, "min_length": 1}
).json()
assert len(events) == 1
assert events[0]["id"] == id

def test_get_event_list_no_match_max_length(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, now, now + 2)
events = client.get("/events", params={"max_length": 1}).json()
assert len(events) == 0

def test_get_event_list_no_match_min_length(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, now, now + 2)
events = client.get("/events", params={"min_length": 3}).json()
assert len(events) == 0

def test_get_event_list_limit(self):
id = "123456.random"
id2 = "54321.random"

with TestClient(self.app) as client:
super().insert_mock_event(id)
events = client.get("/events").json()
assert len(events) == 1
assert events[0]["id"] == id

super().insert_mock_event(id2)
events = client.get("/events").json()
assert len(events) == 2

events = client.get("/events", params={"limit": 1}).json()
assert len(events) == 1
assert events[0]["id"] == id

events = client.get("/events", params={"limit": 3}).json()
assert len(events) == 2

def test_get_event_list_no_match_has_clip(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, now, now + 2)
events = client.get("/events", params={"has_clip": 0}).json()
assert len(events) == 0

def test_get_event_list_has_clip(self):
with TestClient(self.app) as client:
id = "123456.random"
super().insert_mock_event(id, has_clip=True)
events = client.get("/events", params={"has_clip": 1}).json()
assert len(events) == 1
assert events[0]["id"] == id

def test_get_event_list_sort_score(self):
with TestClient(self.app) as client:
id = "123456.random"
id2 = "54321.random"
super().insert_mock_event(id, top_score=37, score=37, data={"score": 50})
super().insert_mock_event(id2, top_score=47, score=47, data={"score": 20})
events = client.get("/events", params={"sort": "score_asc"}).json()
assert len(events) == 2
assert events[0]["id"] == id2
assert events[1]["id"] == id

events = client.get("/events", params={"sort": "score_des"}).json()
assert len(events) == 2
assert events[0]["id"] == id
assert events[1]["id"] == id2

def test_get_event_list_sort_start_time(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
id = "123456.random"
id2 = "54321.random"
super().insert_mock_event(id, start_time=now + 3)
super().insert_mock_event(id2, start_time=now)
events = client.get("/events", params={"sort": "date_asc"}).json()
assert len(events) == 2
assert events[0]["id"] == id2
assert events[1]["id"] == id

events = client.get("/events", params={"sort": "date_desc"}).json()
assert len(events) == 2
assert events[0]["id"] == id
assert events[1]["id"] == id2
@ -569,3 +569,177 @@ class TestHttpReview(BaseTestHttp):
recording_ids_in_db_after = self._get_recordings(ids)
assert len(review_ids_in_db_after) == 0
assert len(recording_ids_in_db_after) == 0

####################################################################################################################
################################### GET /review/activity/motion Endpoint ########################################
####################################################################################################################
def test_review_activity_motion_no_data_for_time_range(self):
now = datetime.now().timestamp()

with TestClient(self.app) as client:
params = {
"after": now,
"before": now + 3,
}
response = client.get("/review/activity/motion", params=params)
assert response.status_code == 200
response_json = response.json()
assert len(response_json) == 0

def test_review_activity_motion(self):
now = int(datetime.now().timestamp())

with TestClient(self.app) as client:
one_m = int((datetime.now() + timedelta(minutes=1)).timestamp())
id = "123456.random"
id2 = "123451.random"
super().insert_mock_recording(id, now + 1, now + 2, motion=101)
super().insert_mock_recording(id2, one_m + 1, one_m + 2, motion=200)
params = {
"after": now,
"before": one_m + 3,
"scale": 1,
}
response = client.get("/review/activity/motion", params=params)
assert response.status_code == 200
response_json = response.json()
assert len(response_json) == 61
self.assertDictEqual(
{"motion": 50.5, "camera": "front_door", "start_time": now + 1},
response_json[0],
)
for item in response_json[1:-1]:
self.assertDictEqual(
{"motion": 0.0, "camera": "", "start_time": item["start_time"]},
item,
)
self.assertDictEqual(
{"motion": 100.0, "camera": "front_door", "start_time": one_m + 1},
response_json[len(response_json) - 1],
)

####################################################################################################################
################################### GET /review/event/{event_id} Endpoint #######################################
####################################################################################################################
def test_review_event_not_found(self):
with TestClient(self.app) as client:
response = client.get("/review/event/123456.random")
assert response.status_code == 404
response_json = response.json()
self.assertDictEqual(
{"success": False, "message": "Review item not found"},
response_json,
)

def test_review_event_not_found_in_data(self):
now = datetime.now().timestamp()

with TestClient(self.app) as client:
id = "123456.random"
super().insert_mock_review_segment(id, now + 1, now + 2)
response = client.get(f"/review/event/{id}")
assert response.status_code == 404
response_json = response.json()
self.assertDictEqual(
{"success": False, "message": "Review item not found"},
response_json,
)

def test_review_get_specific_event(self):
now = datetime.now().timestamp()

with TestClient(self.app) as client:
event_id = "123456.event.random"
super().insert_mock_event(event_id)
review_id = "123456.review.random"
super().insert_mock_review_segment(
review_id, now + 1, now + 2, data={"detections": {"event_id": event_id}}
)
response = client.get(f"/review/event/{event_id}")
assert response.status_code == 200
response_json = response.json()
self.assertDictEqual(
{
"id": review_id,
"camera": "front_door",
"start_time": now + 1,
"end_time": now + 2,
"has_been_reviewed": False,
"severity": SeverityEnum.alert,
"thumb_path": "False",
"data": {"detections": {"event_id": event_id}},
},
response_json,
)

####################################################################################################################
################################### GET /review/{review_id} Endpoint #######################################
####################################################################################################################
def test_review_not_found(self):
with TestClient(self.app) as client:
response = client.get("/review/123456.random")
assert response.status_code == 404
response_json = response.json()
self.assertDictEqual(
{"success": False, "message": "Review item not found"},
response_json,
)

def test_get_review(self):
now = datetime.now().timestamp()

with TestClient(self.app) as client:
review_id = "123456.review.random"
super().insert_mock_review_segment(review_id, now + 1, now + 2)
response = client.get(f"/review/{review_id}")
assert response.status_code == 200
response_json = response.json()
self.assertDictEqual(
{
"id": review_id,
"camera": "front_door",
"start_time": now + 1,
"end_time": now + 2,
"has_been_reviewed": False,
"severity": SeverityEnum.alert,
"thumb_path": "False",
"data": {},
},
response_json,
)

####################################################################################################################
################################### DELETE /review/{review_id}/viewed Endpoint ##################################
####################################################################################################################
def test_delete_review_viewed_review_not_found(self):
with TestClient(self.app) as client:
review_id = "123456.random"
response = client.delete(f"/review/{review_id}/viewed")
assert response.status_code == 404
response_json = response.json()
self.assertDictEqual(
{"success": False, "message": f"Review {review_id} not found"},
response_json,
)

def test_delete_review_viewed(self):
now = datetime.now().timestamp()

with TestClient(self.app) as client:
review_id = "123456.review.random"
super().insert_mock_review_segment(
review_id, now + 1, now + 2, has_been_reviewed=True
)
review_before = ReviewSegment.get(ReviewSegment.id == review_id)
assert review_before.has_been_reviewed == True

response = client.delete(f"/review/{review_id}/viewed")
assert response.status_code == 200
response_json = response.json()
self.assertDictEqual(
{"success": True, "message": f"Set Review {review_id} as not viewed"},
response_json,
)

review_after = ReviewSegment.get(ReviewSegment.id == review_id)
assert review_after.has_been_reviewed == False
@ -75,11 +75,11 @@ class TestConfig(unittest.TestCase):
"detectors": {
"cpu": {
"type": "cpu",
"model": {"path": "/cpu_model.tflite"},
"model_path": "/cpu_model.tflite",
},
"edgetpu": {
"type": "edgetpu",
"model": {"path": "/edgetpu_model.tflite"},
"model_path": "/edgetpu_model.tflite",
},
"openvino": {
"type": "openvino",
@ -2,7 +2,6 @@ import datetime
import logging
import os
import unittest
from unittest.mock import Mock

from fastapi.testclient import TestClient
from peewee_migrate import Router

@ -13,7 +12,6 @@ from playhouse.sqliteq import SqliteQueueDatabase
from frigate.api.fastapi_app import create_fastapi_app
from frigate.config import FrigateConfig
from frigate.models import Event, Recordings, Timeline
from frigate.stats.emitter import StatsEmitter
from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS

@ -111,43 +109,6 @@ class TestHttp(unittest.TestCase):
except OSError:
pass

def test_get_event_list(self):
app = create_fastapi_app(
FrigateConfig(**self.minimal_config),
self.db,
None,
None,
None,
None,
None,
None,
None,
)
id = "123456.random"
id2 = "7890.random"

with TestClient(app) as client:
_insert_mock_event(id)
events = client.get("/events").json()
assert events
assert len(events) == 1
assert events[0]["id"] == id
_insert_mock_event(id2)
events = client.get("/events").json()
assert events
assert len(events) == 2
events = client.get(
"/events",
params={"limit": 1},
).json()
assert events
assert len(events) == 1
events = client.get(
"/events",
params={"has_clip": 0},
).json()
assert not events

def test_get_good_event(self):
app = create_fastapi_app(
FrigateConfig(**self.minimal_config),

@ -381,25 +342,6 @@ class TestHttp(unittest.TestCase):
assert recording
assert recording[0]["id"] == id

def test_stats(self):
stats = Mock(spec=StatsEmitter)
stats.get_latest_stats.return_value = self.test_stats
app = create_fastapi_app(
FrigateConfig(**self.minimal_config),
self.db,
None,
None,
None,
None,
None,
stats,
None,
)

with TestClient(app) as client:
full_stats = client.get("/stats").json()
assert full_stats == self.test_stats


def _insert_mock_event(
id: str,
@ -339,7 +339,7 @@ class TrackedObject:
box[2],
box[3],
self.obj_data["label"],
f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
f"{int(self.thumbnail_data['score'] * 100)}% {int(self.thumbnail_data['area'])}",
thickness=thickness,
color=color,
)
@ -13,7 +13,17 @@ from frigate.util.services import get_video_properties

logger = logging.getLogger(__name__)

CURRENT_CONFIG_VERSION = "0.15-0"
CURRENT_CONFIG_VERSION = "0.15-1"
DEFAULT_CONFIG_FILE = "/config/config.yml"


def find_config_file() -> str:
config_path = os.environ.get("CONFIG_FILE", DEFAULT_CONFIG_FILE)

if not os.path.isfile(config_path):
config_path = config_path.replace("yml", "yaml")

return config_path


def migrate_frigate_config(config_file: str):

@ -67,6 +77,13 @@ def migrate_frigate_config(config_file: str):
yaml.dump(new_config, f)
previous_version = "0.15-0"

if previous_version < "0.15-1":
logger.info(f"Migrating frigate config from {previous_version} to 0.15-1...")
new_config = migrate_015_1(config)
with open(config_file, "w") as f:
yaml.dump(new_config, f)
previous_version = "0.15-1"

logger.info("Finished frigate config migration...")

@ -257,6 +274,21 @@ def migrate_015_0(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]
return new_config


def migrate_015_1(config: dict[str, dict[str, any]]) -> dict[str, dict[str, any]]:
"""Handle migrating frigate config to 0.15-1"""
new_config = config.copy()

for detector, detector_config in config.get("detectors", {}).items():
path = detector_config.get("model", {}).get("path")

if path:
new_config["detectors"][detector]["model_path"] = path
del new_config["detectors"][detector]["model"]

new_config["version"] = "0.15-1"
return new_config


def get_relative_coordinates(
mask: Optional[Union[str, list]], frame_shape: tuple[int, int]
) -> Union[str, list]:

@ -282,7 +314,7 @@ def get_relative_coordinates(
continue

rel_points.append(
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
)

relative_masks.append(",".join(rel_points))

@ -305,7 +337,7 @@ def get_relative_coordinates(
return []

rel_points.append(
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
f"{round(x / frame_shape[1], 3)},{round(y / frame_shape[0], 3)}"
)

mask = ",".join(rel_points)
@ -5,6 +5,7 @@ import json
import logging
import os
import re
import resource
import signal
import subprocess as sp
import traceback

@ -390,12 +391,22 @@ def try_get_info(f, h, default="N/A"):


def get_nvidia_gpu_stats() -> dict[int, dict]:
names: dict[str, int] = {}
results = {}
try:
nvml.nvmlInit()
deviceCount = nvml.nvmlDeviceGetCount()
for i in range(deviceCount):
handle = nvml.nvmlDeviceGetHandleByIndex(i)
gpu_name = nvml.nvmlDeviceGetName(handle)

# handle case where user has multiple of same GPU
if gpu_name in names:
names[gpu_name] += 1
gpu_name += f" ({names.get(gpu_name)})"
else:
names[gpu_name] = 1

meminfo = try_get_info(nvml.nvmlDeviceGetMemoryInfo, handle)
util = try_get_info(nvml.nvmlDeviceGetUtilizationRates, handle)
enc = try_get_info(nvml.nvmlDeviceGetEncoderUtilization, handle)

@ -423,7 +434,7 @@ def get_nvidia_gpu_stats() -> dict[int, dict]:
dec_util = -1

results[i] = {
"name": nvml.nvmlDeviceGetName(handle),
"name": gpu_name,
"gpu": gpu_util,
"mem": gpu_mem_util,
"enc": enc_util,

@ -622,3 +633,19 @@ async def get_video_properties(
result["fourcc"] = fourcc

return result


def set_file_limit() -> None:
# Newer versions of containerd 2.X+ impose a very low soft file limit of 1024
# This applies to OSs like HA OS (see https://github.com/home-assistant/operating-system/issues/4110)
# Attempt to increase this limit
soft_limit = int(os.getenv("SOFT_FILE_LIMIT", "65536") or "65536")

current_soft, current_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
logger.info(f"Current file limits - Soft: {current_soft}, Hard: {current_hard}")

new_soft = min(soft_limit, current_hard)
resource.setrlimit(resource.RLIMIT_NOFILE, (new_soft, current_hard))
logger.info(
f"File limit set. New soft limit: {new_soft}, Hard limit remains: {current_hard}"
)
@ -113,7 +113,7 @@ def capture_frames(
fps.value = frame_rate.eps()
skipped_fps.value = skipped_eps.eps()
current_frame.value = datetime.datetime.now().timestamp()
frame_name = f"{config.name}_{frame_index}"
frame_name = f"{config.name}_frame{frame_index}"
frame_buffer = frame_manager.write(frame_name)
try:
frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
@ -11,6 +11,18 @@
"! pip install -q super_gradients==3.7.1"
]
},
{
"cell_type": "code",
"source": [
"! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.10/dist-packages/super_gradients/training/pretrained_models.py\n",
"! sed -i 's/sghub.deci.ai/sg-hub-nv.s3.amazonaws.com/' /usr/local/lib/python3.10/dist-packages/super_gradients/training/utils/checkpoint_utils.py"
],
"metadata": {
"id": "NiRCt917KKcL"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"execution_count": null,

@ -72,4 +84,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
}
}
@ -208,7 +208,7 @@ class ProcessClip:
box[2],
box[3],
obj["id"],
f"{int(obj['score']*100)}% {int(obj['area'])}",
f"{int(obj['score'] * 100)}% {int(obj['area'])}",
thickness=thickness,
color=color,
)

@ -227,7 +227,7 @@ class ProcessClip:
)

cv2.imwrite(
f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time * 1000000)}.jpg",
current_frame,
)

@ -290,7 +290,7 @@ def process(path, label, output, debug_path):
1 for result in results if result[1]["true_positive_objects"] > 0
)
print(
f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s)."
f"Objects were detected in {positive_count}/{len(results)}({positive_count / len(results) * 100:.2f}%) clip(s)."
)

if output:
@ -1,4 +1,4 @@
import { useMemo } from "react";
import { useCallback, useMemo } from "react";
import { useApiHost } from "@/api";
import { getIconForLabel } from "@/utils/iconUtil";
import useSWR from "swr";

@ -33,6 +33,16 @@ export default function SearchThumbnail({
onClick(searchResult, true, false);
});

const handleOnClick = useCallback(
(e: React.MouseEvent<HTMLDivElement>) => {
if (e.metaKey) {
e.stopPropagation();
onClick(searchResult, true, false);
}
},
[searchResult, onClick],
);

const objectLabel = useMemo(() => {
if (
!config ||

@ -57,6 +67,7 @@ export default function SearchThumbnail({
<div className={`size-full ${imgLoaded ? "visible" : "invisible"}`}>
<img
ref={imgRef}
onClick={handleOnClick}
className={cn(
"size-full select-none object-cover object-center opacity-100 transition-opacity",
)}
@ -755,7 +755,11 @@ export function CameraGroupEdit({
<FormMessage />
{[
...(birdseyeConfig?.enabled ? ["birdseye"] : []),
...Object.keys(config?.cameras ?? {}),
...Object.keys(config?.cameras ?? {}).sort(
(a, b) =>
(config?.cameras[a]?.ui?.order ?? 0) -
(config?.cameras[b]?.ui?.order ?? 0),
),
].map((camera) => (
<FormControl key={camera}>
<FilterSwitch
Some files were not shown because too many files have changed in this diff.