Merge pull request #13787 from blakeblackshear/dev

0.15 Release
Commit dc79af2d98 by Blake Blackshear, 2025-02-08 12:44:47 -06:00, committed via GitHub (GPG Key ID B5690EEEBB952194; no known key found for this signature in the database).
400 changed files with 43,665 additions and 12,325 deletions

View File

@ -1,168 +1,311 @@
rtmp
edgetpu
labelmap
rockchip
jetson
rocm
vaapi
CUDA
hwaccel
RTSP
Hikvision
Dahua
Amcrest
Reolink
Loryta
Beelink
Celeron
vaapi
blakeblackshear
workdir
onvif
autotracking
openvino
tflite
deepstack
codeproject
udev
tailscale
restream
restreaming
webrtc
ssdlite
mobilenet
mosquitto
datasheet
Jellyfin
Radeon
libva
Ubiquiti
Unifi
Tapo
Annke
autotracker
autotracked
variations
ONVIF
traefik
devcontainer
rootfs
ffprobe
autotrack
logpipe
imread
imwrite
imencode
imutils
thresholded
timelapse
ultrafast
sleeptime
radeontop
vainfo
tmpfs
homography
websockets
LIBAVFORMAT
NTSC
onnxruntime
fourcc
radeonsi
paho
imagestream
jsonify
cgroups
sysconf
memlimit
gpuload
nvml
setproctitle
psutil
Kalman
frontdoor
namedtuples
zeep
fflags
probesize
wallclock
rknn
socs
pydantic
shms
imdecode
colormap
webui
mse
jsmpeg
unreviewed
Chromecast
Swipeable
flac
scroller
cmdline
toggleable
bottombar
opencv
apexcharts
buildx
mqtt
rawvideo
defragment
Norfair
subclassing
yolo
tensorrt
blackshear
stylelint
HACS
homeassistant
hass
castable
mobiledet
framebuffer
mjpeg
substream
codeowner
noninteractive
restreamed
mountpoint
fstype
OWASP
iotop
letsencrypt
fullchain
lsusb
iostat
usermod
balena
passwordless
debconf
dpkg
poweroff
surveillance
qnap
homekit
colorspace
quantisation
skylake
Cuvid
foscam
onnx
numpy
protobuf
aarch
absdiff
airockchip
Alloc
Amcrest
amdgpu
chipset
referer
mpegts
webp
analyzeduration
Annke
apexcharts
arange
argmax
argmin
argpartition
ascontiguousarray
astype
authelia
authentik
unichip
rebranded
udevadm
autodetected
automations
unraid
hideable
autotrack
autotracked
autotracker
autotracking
balena
Beelink
BGRA
BHWC
blackshear
blakeblackshear
bottombar
buildx
castable
cdist
Celeron
cgroups
chipset
chromadb
Chromecast
cmdline
codeowner
CODEOWNERS
codeproject
colormap
colorspace
comms
coro
ctypeslib
CUDA
Cuvid
Dahua
datasheet
debconf
deci
deepstack
defragment
devcontainer
DEVICEMAP
discardcorrupt
dpkg
dsize
dtype
ECONNRESET
edgetpu
fastapi
faststart
fflags
ffprobe
fillna
flac
foscam
fourcc
framebuffer
fregate
frégate
fromarray
frombuffer
frontdoor
fstype
fullchain
fullscreen
genai
generativeai
genpts
getpid
gpuload
HACS
Hailo
hass
hconcat
healthcheck
keepalive
hideable
Hikvision
homeassistant
homekit
homography
hsize
hstack
httpx
hwaccel
hwdownload
hwmap
hwupload
iloc
imagestream
imdecode
imencode
imread
imutils
imwrite
interp
iostat
iotop
itemsize
Jellyfin
jetson
jetsons
joserfc
jsmpeg
jsonify
Kalman
keepalive
keepdims
labelmap
letsencrypt
levelname
LIBAVFORMAT
libedgetpu
libnvinfer
libva
libwebp
libx
libyolo
linalg
localzone
logpipe
Loryta
lstsq
lsusb
markupsafe
maxsplit
MEMHOSTALLOC
memlimit
meshgrid
metadatas
migraphx
minilm
mjpeg
mkfifo
mobiledet
mobilenet
modelpath
mosquitto
mountpoint
movflags
mpegts
mqtt
mse
msenc
namedtuples
nbytes
nchw
ndarray
ndimage
nethogs
newaxis
nhwc
NOBLOCK
nobuffer
nokey
NONBLOCK
noninteractive
noprint
Norfair
nptype
NTSC
numpy
nvenc
nvhost
nvml
nvmpi
ollama
onnx
onnxruntime
onvif
ONVIF
openai
opencv
openvino
OWASP
paho
passwordless
popleft
posthog
postprocess
poweroff
preexec
probesize
protobuf
pstate
psutil
pubkey
putenv
pycache
pydantic
pyobj
pysqlite
pytz
pywebpush
qnap
quantisation
Radeon
radeonsi
radeontop
rawvideo
rcond
RDONLY
rebranded
referer
reindex
Reolink
restream
restreamed
restreaming
rkmpp
rknn
rkrga
rockchip
rocm
rocminfo
rootfs
rtmp
RTSP
ruamel
scroller
setproctitle
setpts
shms
SIGUSR
skylake
sleeptime
SNDMORE
socs
sqliteq
sqlitevecq
ssdlite
statm
stimeout
stylelint
subclassing
substream
superfast
surveillance
svscan
Swipeable
sysconf
tailscale
Tapo
tensorrt
tflite
thresholded
timelapse
tmpfs
tobytes
toggleable
traefik
tzlocal
Ubiquiti
udev
udevadm
ultrafast
unichip
unidecode
Unifi
unixepoch
unraid
unreviewed
userdata
usermod
uvicorn
vaapi
vainfo
variations
vbios
vconcat
vitb
vstream
vsync
wallclock
webp
webpush
webrtc
websockets
webui
werkzeug
workdir
WRONLY
wsgirefserver
wsgiutils
wsize
xaddr
xmaxs
xmins
XPUB
XSUB
ymaxs
ymins
yolo
yolonas
yolox
zeep
zerolatency

View File

@ -52,7 +52,8 @@
"csstools.postcss",
"blanu.vscode-styled-jsx",
"bradlc.vscode-tailwindcss",
"charliermarsh.ruff"
"charliermarsh.ruff",
"eamodio.gitlens"
],
"settings": {
"remote.autoForwardPorts": false,

View File

@ -3,10 +3,12 @@
set -euxo pipefail
# Cleanup the old github host key
sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
# Add new github host key
curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
if [[ -f ~/.ssh/known_hosts ]]; then
# Add new github host key
sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts
curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \
sed -e 's/^/github.com /' >> ~/.ssh/known_hosts
fi
# The normal Frigate container runs as root, so it has permission to create
# the folders. But the devcontainer runs as the host user, so we need to
@ -17,7 +19,7 @@ sudo chown -R "$(id -u):$(id -g)" /media/frigate
# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the
# s6 service file. For dev, where frigate is started from an interactive
# shell, we define it in .bashrc instead.
echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
echo 'export LIBAVFORMAT_VERSION_MAJOR=$(/usr/lib/ffmpeg/7.0/bin/ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc
make version

View File

@ -90,6 +90,9 @@ body:
- HassOS Addon
- Docker Compose
- Docker CLI
- Proxmox via Docker
- Proxmox via TTeck Script
- Windows WSL2
validations:
required: true
- type: dropdown
@ -102,7 +105,7 @@ body:
- TensorRT
- RKNN
- Other
- CPU (no Coral)
- CPU (no coral)
validations:
required: true
- type: dropdown

View File

@ -76,6 +76,17 @@ body:
- HassOS Addon
- Docker Compose
- Docker CLI
- Proxmox via Docker
- Proxmox via TTeck Script
- Windows WSL2
validations:
required: true
- type: textarea
id: docker
attributes:
label: docker-compose file or Docker CLI command
description: This will be automatically formatted into code, so no need for backticks.
render: yaml
validations:
required: true
- type: dropdown

View File

@ -48,28 +48,6 @@ body:
render: shell
validations:
required: true
- type: textarea
id: go2rtclogs
attributes:
label: Relevant go2rtc log output
description: Please copy and paste any relevant go2rtc log output. Include logs before and after your exact error when possible. Logs can be viewed via the Frigate UI, Docker, or the go2rtc dashboard. This will be automatically formatted into code, so no need for backticks.
render: shell
validations:
required: true
- type: dropdown
id: os
attributes:
label: Operating system
options:
- HassOS
- Debian
- Other Linux
- Proxmox
- UNRAID
- Windows
- Other
validations:
required: true
- type: dropdown
id: install-method
attributes:
@ -78,6 +56,9 @@ body:
- HassOS Addon
- Docker Compose
- Docker CLI
- Proxmox via Docker
- Proxmox via TTeck Script
- Windows WSL2
validations:
required: true
- type: dropdown

View File

@ -68,20 +68,6 @@ body:
label: Frigate stats
description: Output from frigate's /api/stats endpoint
render: json
- type: dropdown
id: os
attributes:
label: Operating system
options:
- HassOS
- Debian
- Other Linux
- Proxmox
- UNRAID
- Windows
- Other
validations:
required: true
- type: dropdown
id: install-method
attributes:
@ -90,6 +76,17 @@ body:
- HassOS Addon
- Docker Compose
- Docker CLI
- Proxmox via Docker
- Proxmox via TTeck Script
- Windows WSL2
validations:
required: true
- type: textarea
id: docker
attributes:
label: docker-compose file or Docker CLI command
description: This will be automatically formatted into code, so no need for backticks.
render: yaml
validations:
required: true
- type: dropdown

View File

@ -24,12 +24,6 @@ body:
description: Visible on the System page in the Web UI. Please include the full version including the build identifier (eg. 0.14.0-ea36ds1)
validations:
required: true
- type: input
attributes:
label: In which browser(s) are you experiencing the issue with?
placeholder: Google Chrome 88.0.4324.150
description: >
Provide the full name and don't forget to add the version!
- type: textarea
id: config
attributes:
@ -70,20 +64,6 @@ body:
render: shell
validations:
required: true
- type: dropdown
id: os
attributes:
label: Operating system
options:
- HassOS
- Debian
- Other Linux
- Proxmox
- UNRAID
- Windows
- Other
validations:
required: true
- type: dropdown
id: install-method
attributes:
@ -92,6 +72,22 @@ body:
- HassOS Addon
- Docker Compose
- Docker CLI
- Proxmox via Docker
- Proxmox via TTeck Script
- Windows WSL2
validations:
required: true
- type: dropdown
id: object-detector
attributes:
label: Object Detector
options:
- Coral
- OpenVino
- TensorRT
- RKNN
- Other
- CPU (no coral)
validations:
required: true
- type: dropdown

View File

@ -33,9 +33,9 @@ runs:
with:
string: ${{ github.repository }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc
with:

.github/pull_request_template.md (vendored, new file, 32 lines)
View File

@ -0,0 +1,32 @@
## Proposed change
<!--
Describe what this pull request does and how it will benefit users of Frigate.
Please describe in detail any considerations, breaking changes, etc. that are
made in this pull request.
-->
## Type of change
- [ ] Dependency upgrade
- [ ] Bugfix (non-breaking change which fixes an issue)
- [ ] New feature
- [ ] Breaking change (fix/feature causing existing functionality to break)
- [ ] Code quality improvements to existing code
- [ ] Documentation Update
## Additional information
- This PR fixes or closes issue: fixes #
- This PR is related to issue:
## Checklist
<!--
Put an `x` in the boxes that apply.
-->
- [ ] The code change is tested and works locally.
- [ ] Local tests pass. **Your PR cannot be merged unless tests pass**
- [ ] There is no commented out code in this PR.
- [ ] The code has been formatted using Ruff (`ruff format frigate`)

View File

@ -6,6 +6,8 @@ on:
branches:
- dev
- master
paths-ignore:
- "docs/**"
# only run the latest commit to avoid cache overwrites
concurrency:
@ -17,11 +19,13 @@ env:
jobs:
amd64_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: AMD64 Build
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
@ -38,11 +42,13 @@ jobs:
tags: ${{ steps.setup.outputs.image-name }}-amd64
cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
arm64_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: ARM Build
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
@ -60,8 +66,9 @@ jobs:
${{ steps.setup.outputs.image-name }}-standard-arm64
cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
- name: Build and push RPi build
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: rpi
files: docker/rpi/rpi.hcl
@ -69,21 +76,14 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
- name: Build and push Rockchip build
uses: docker/bake-action@v3
with:
push: true
targets: rk
files: docker/rockchip/rk.hcl
set: |
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
jetson_jp4_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: Jetson Jetpack 4
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
@ -95,8 +95,9 @@ jobs:
BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest
SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
@ -105,11 +106,13 @@ jobs:
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max
jetson_jp5_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: Jetson Jetpack 5
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
@ -121,8 +124,9 @@ jobs:
BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
@ -131,13 +135,15 @@ jobs:
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max
amd64_extra_builds:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: AMD64 Extra Build
needs:
- amd64_build
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
@ -146,8 +152,9 @@ jobs:
- name: Build and push TensorRT (x86 GPU)
env:
COMPUTE_LEVEL: "50 60 70 80 90"
uses: docker/bake-action@v4
uses: docker/bake-action@v6
with:
source: .
push: true
targets: tensorrt
files: docker/tensorrt/trt.hcl
@ -155,61 +162,75 @@ jobs:
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
#- name: AMD/ROCm general build
# env:
# AMDGPU: gfx
# HSA_OVERRIDE: 0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
# *.cache-from=type=gha
#- name: AMD/ROCm gfx900
# env:
# AMDGPU: gfx900
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 9.0.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx900
# *.cache-from=type=gha
#- name: AMD/ROCm gfx1030
# env:
# AMDGPU: gfx1030
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 10.3.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1030
# *.cache-from=type=gha
#- name: AMD/ROCm gfx1100
# env:
# AMDGPU: gfx1100
# HSA_OVERRIDE: 1
# HSA_OVERRIDE_GFX_VERSION: 11.0.0
# uses: docker/bake-action@v3
# with:
# push: true
# targets: rocm
# files: docker/rocm/rocm.hcl
# set: |
# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1100
# *.cache-from=type=gha
arm64_extra_builds:
runs-on: ubuntu-22.04
name: ARM Extra Build
needs:
- arm64_build
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Rockchip build
uses: docker/bake-action@v6
with:
source: .
push: true
targets: rk
files: docker/rockchip/rk.hcl
set: |
rk.tags=${{ steps.setup.outputs.image-name }}-rk
*.cache-from=type=gha
combined_extra_builds:
runs-on: ubuntu-22.04
name: Combined Extra Builds
needs:
- amd64_build
- arm64_build
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up QEMU and Buildx
id: setup
uses: ./.github/actions/setup
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Build and push Hailo-8l build
uses: docker/bake-action@v6
with:
source: .
push: true
targets: h8l
files: docker/hailo8l/h8l.hcl
set: |
h8l.tags=${{ steps.setup.outputs.image-name }}-h8l
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-h8l,mode=max
- name: AMD/ROCm general build
env:
AMDGPU: gfx
HSA_OVERRIDE: 0
uses: docker/bake-action@v6
with:
source: .
push: true
targets: rocm
files: docker/rocm/rocm.hcl
set: |
rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
*.cache-from=type=gha
# The majority of users running arm64 are rpi users, so the rpi
# build should be the primary arm64 image
assemble_default_build:
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
name: Assemble and push default build
needs:
- amd64_build
@ -220,7 +241,7 @@ jobs:
with:
string: ${{ github.repository }}
- name: Log in to the Container registry
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
with:
registry: ghcr.io
username: ${{ github.actor }}

View File

@ -1,24 +0,0 @@
name: dependabot-auto-merge
on: pull_request
permissions:
contents: write
jobs:
dependabot-auto-merge:
runs-on: ubuntu-latest
if: github.actor == 'dependabot[bot]'
steps:
- name: Get Dependabot metadata
id: metadata
uses: dependabot/fetch-metadata@v2
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
- name: Enable auto-merge for Dependabot PRs
if: steps.metadata.outputs.dependency-type == 'direct:development' && (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch')
run: |
gh pr review --approve "$PR_URL"
gh pr merge --auto --squash "$PR_URL"
env:
PR_URL: ${{ github.event.pull_request.html_url }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,6 +1,9 @@
name: On pull request
on: pull_request
on:
pull_request:
paths-ignore:
- "docs/**"
env:
DEFAULT_PYTHON: 3.9
@ -16,6 +19,8 @@ jobs:
DOCKER_BUILDKIT: "1"
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/setup-node@master
with:
node-version: 16.x
@ -35,6 +40,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/setup-node@master
with:
node-version: 16.x
@ -49,6 +56,8 @@ jobs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/setup-node@master
with:
node-version: 20.x
@ -64,8 +73,10 @@ jobs:
steps:
- name: Check out the repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Set up Python ${{ env.DEFAULT_PYTHON }}
uses: actions/setup-python@v5.1.0
uses: actions/setup-python@v5.3.0
with:
python-version: ${{ env.DEFAULT_PYTHON }}
- name: Install requirements
@ -85,6 +96,8 @@ jobs:
steps:
- name: Check out code
uses: actions/checkout@v4
with:
persist-credentials: false
- uses: actions/setup-node@master
with:
node-version: 16.x

View File

@ -11,21 +11,26 @@ jobs:
steps:
- uses: actions/checkout@v4
with:
persist-credentials: false
- id: lowercaseRepo
uses: ASzc/change-string-case-action@v6
with:
string: ${{ github.repository }}
- name: Log in to the Container registry
uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Create tag variables
env:
TAG: ${{ github.ref_name }}
LOWERCASE_REPO: ${{ steps.lowercaseRepo.outputs.lowercase }}
run: |
BUILD_TYPE=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
BUILD_TYPE=$([[ "${TAG}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "stable" || echo "beta")
echo "BUILD_TYPE=${BUILD_TYPE}" >> $GITHUB_ENV
echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV
echo "BASE=ghcr.io/${LOWERCASE_REPO}" >> $GITHUB_ENV
echo "BUILD_TAG=${GITHUB_SHA::7}" >> $GITHUB_ENV
echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV
- name: Tag and push the main image
@ -34,14 +39,14 @@ jobs:
STABLE_TAG=${BASE}:stable
PULL_TAG=${BASE}:${BUILD_TAG}
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant}
done
# stable tag
if [[ "${BUILD_TYPE}" == "stable" ]]; then
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${STABLE_TAG}
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do
for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk h8l rocm; do
docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${STABLE_TAG}-${variant}
done
fi

View File

@ -23,7 +23,9 @@ jobs:
exempt-pr-labels: "pinned,security,dependencies"
operations-per-run: 120
- name: Print outputs
run: echo ${{ join(steps.stale.outputs.*, ',') }}
env:
STALE_OUTPUT: ${{ join(steps.stale.outputs.*, ',') }}
run: echo "$STALE_OUTPUT"
# clean_ghcr:
# name: Delete outdated dev container images
@ -38,4 +40,3 @@ jobs:
# account-type: personal
# token: ${{ secrets.GITHUB_TOKEN }}
# token-type: github-token

.gitignore (vendored, 3 changed lines)
View File

@ -1,5 +1,6 @@
.DS_Store
*.pyc
__pycache__
.mypy_cache
*.swp
debug
.vscode/*

.vscode/launch.json (vendored, 5 changed lines)
View File

@ -3,10 +3,9 @@
"configurations": [
{
"name": "Python: Launch Frigate",
"type": "python",
"type": "debugpy",
"request": "launch",
"module": "frigate",
"justMyCode": true
"module": "frigate"
}
]
}

View File

@ -4,3 +4,4 @@
/docker/tensorrt/*jetson* @madsciencetist
/docker/rockchip/ @MarcA711
/docker/rocm/ @harakas
/docker/hailo8l/ @spanner3003

View File

@ -1,11 +1,9 @@
default_target: local
COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1)
VERSION = 0.14.1
VERSION = 0.15.0
IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate
GITHUB_REF_NAME ?= $(shell git rev-parse --abbrev-ref HEAD)
CURRENT_UID := $(shell id -u)
CURRENT_GID := $(shell id -g)
BOARDS= #Initialized empty
include docker/*/*.mk
@ -18,25 +16,38 @@ version:
echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py
local: version
docker buildx build --target=frigate --tag frigate:latest --load --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag frigate:latest \
--load
amd64:
docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
--platform linux/amd64
arm64:
docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
--platform linux/arm64
build: version amd64 arm64
docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) \
--platform linux/arm64/v8,linux/amd64
push: push-boards
docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) --file docker/main/Dockerfile .
docker buildx build --target=frigate --file docker/main/Dockerfile . \
--tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) \
--platform linux/arm64/v8,linux/amd64 \
--push
run: local
docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest
run_tests: local
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m unittest
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m mypy --config-file frigate/mypy.ini frigate
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
python3 -u -m unittest
docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest \
python3 -u -m mypy --config-file frigate/mypy.ini frigate
.PHONY: run_tests
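
The local development targets keep their previous behavior; only the recipe formatting changed. For reference, a typical local loop with these targets might look like the sketch below (assuming Docker Buildx is available and a config directory exists at ./config):

# build and exercise the image locally (sketch; not part of the Makefile diff above)
make local      # builds frigate:latest for the host architecture
make run        # runs it with ./config mounted at /config and port 5000 published
make run_tests  # runs unittest and mypy inside the freshly built image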

View File

@ -4,6 +4,7 @@ from statistics import mean
import numpy as np
import frigate.util as util
from frigate.config import DetectorTypeEnum
from frigate.object_detection import (
ObjectDetectProcess,
@ -60,7 +61,7 @@ def start(id, num_detections, detection_queue, event):
object_detector.cleanup()
print(f"{id} - Processed for {duration:.2f} seconds.")
print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
print(f"{id} - Average frame processing time: {mean(frame_times) * 1000:.2f}ms")
######
@ -90,7 +91,7 @@ edgetpu_process_2 = ObjectDetectProcess(
)
for x in range(0, 10):
camera_process = mp.Process(
camera_process = util.Process(
target=start, args=(x, 300, detection_queue, events[str(x)])
)
camera_process.daemon = True

View File

@ -7,7 +7,8 @@
"*.db",
"node_modules",
"__pycache__",
"dist"
"dist",
"/audio-labelmap.txt"
],
"language": "en",
"dictionaryDefinitions": [

View File

@ -23,7 +23,7 @@ services:
# count: 1
# capabilities: [gpu]
environment:
YOLO_MODELS: yolov7-320
YOLO_MODELS: ""
devices:
- /dev/bus/usb:/dev/bus/usb
# - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware

docker/hailo8l/Dockerfile (new file, 40 lines)
View File

@ -0,0 +1,40 @@
# syntax=docker/dockerfile:1.6
ARG DEBIAN_FRONTEND=noninteractive
# Build Python wheels
FROM wheels AS h8l-wheels
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
COPY docker/hailo8l/requirements-wheels-h8l.txt /requirements-wheels-h8l.txt
RUN sed -i "/https:\/\//d" /requirements-wheels.txt
# Create a directory to store the built wheels
RUN mkdir /h8l-wheels
# Build the wheels
RUN pip3 wheel --wheel-dir=/h8l-wheels -c /requirements-wheels.txt -r /requirements-wheels-h8l.txt
FROM wget AS hailort
ARG TARGETARCH
RUN --mount=type=bind,source=docker/hailo8l/install_hailort.sh,target=/deps/install_hailort.sh \
/deps/install_hailort.sh
# Use deps as the base image
FROM deps AS h8l-frigate
# Copy the wheels from the wheels stage
COPY --from=h8l-wheels /h8l-wheels /deps/h8l-wheels
COPY --from=hailort /hailo-wheels /deps/hailo-wheels
COPY --from=hailort /rootfs/ /
# Install the wheels
RUN pip3 install -U /deps/h8l-wheels/*.whl
RUN pip3 install -U /deps/hailo-wheels/*.whl
# Copy base files from the rootfs stage
COPY --from=rootfs / /
# Set workdir
WORKDIR /opt/frigate/

docker/hailo8l/h8l.hcl (new file, 34 lines)
View File

@ -0,0 +1,34 @@
target wget {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64","linux/amd64"]
target = "wget"
}
target wheels {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64","linux/amd64"]
target = "wheels"
}
target deps {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64","linux/amd64"]
target = "deps"
}
target rootfs {
dockerfile = "docker/main/Dockerfile"
platforms = ["linux/arm64","linux/amd64"]
target = "rootfs"
}
target h8l {
dockerfile = "docker/hailo8l/Dockerfile"
contexts = {
wget = "target:wget"
wheels = "target:wheels"
deps = "target:deps"
rootfs = "target:rootfs"
}
platforms = ["linux/arm64","linux/amd64"]
}

docker/hailo8l/h8l.mk (new file, 15 lines)
View File

@ -0,0 +1,15 @@
BOARDS += h8l
local-h8l: version
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
--set h8l.tags=frigate:latest-h8l \
--load
build-h8l: version
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l
push-h8l: build-h8l
docker buildx bake --file=docker/hailo8l/h8l.hcl h8l \
--set h8l.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-h8l \
--push
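
These targets mirror the per-board Makefiles already used for rpi, rk and rocm. A sketch of how the new target might be exercised locally follows (assumes a configured Buildx builder; the smoke-test import is an assumption about the module name installed by the HailoRT wheel):

# build the Hailo-8L variant and load it into the local Docker daemon
make local-h8l
# optional smoke test; 'hailo_platform' is assumed to be the module provided by
# the hailort wheel fetched in install_hailort.sh; adjust if it differs
docker run --rm --entrypoint= frigate:latest-h8l \
  python3 -c "import hailo_platform; print('HailoRT wheel importable')"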

View File

@ -0,0 +1,19 @@
#!/bin/bash
set -euxo pipefail
hailo_version="4.19.0"
if [[ "${TARGETARCH}" == "amd64" ]]; then
arch="x86_64"
elif [[ "${TARGETARCH}" == "arm64" ]]; then
arch="aarch64"
fi
wget -qO- "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${TARGETARCH}.tar.gz" |
tar -C / -xzf -
mkdir -p /hailo-wheels
wget -P /hailo-wheels/ "https://github.com/frigate-nvr/hailort/releases/download/v${hailo_version}/hailort-${hailo_version}-cp39-cp39-linux_${arch}.whl"

View File

@ -0,0 +1,12 @@
appdirs==1.4.*
argcomplete==2.0.*
contextlib2==0.6.*
distlib==0.3.*
filelock==3.8.*
future==0.18.*
importlib-metadata==5.1.*
importlib-resources==5.1.*
netaddr==0.8.*
netifaces==0.10.*
verboselogs==1.7.*
virtualenv==20.17.*

View File

@ -0,0 +1,48 @@
#!/bin/bash
# Update package list and install dependencies
sudo apt-get update
sudo apt-get install -y build-essential cmake git wget
arch=$(uname -m)
if [[ $arch == "x86_64" ]]; then
sudo apt install -y linux-headers-$(uname -r);
else
sudo apt install -y linux-modules-extra-$(uname -r);
fi
# Clone the HailoRT driver repository
git clone --depth 1 --branch v4.19.0 https://github.com/hailo-ai/hailort-drivers.git
# Build and install the HailoRT driver
cd hailort-drivers/linux/pcie
sudo make all
sudo make install
# Load the Hailo PCI driver
sudo modprobe hailo_pci
if [ $? -ne 0 ]; then
echo "Unable to load hailo_pci module, common reasons for this are:"
echo "- Key was rejected by service: Secure Boot is enabling disallowing install."
echo "- Permissions are not setup correctly."
exit 1
fi
# Download and install the firmware
cd ../../
./download_firmware.sh
# verify the firmware folder is present
if [ ! -d /lib/firmware/hailo ]; then
sudo mkdir /lib/firmware/hailo
fi
sudo mv hailo8_fw.*.bin /lib/firmware/hailo/hailo8_fw.bin
# Install udev rules
sudo cp ./linux/pcie/51-hailo-udev.rules /etc/udev/rules.d/
sudo udevadm control --reload-rules && sudo udevadm trigger
echo "HailoRT driver installation complete."
echo "reboot your system to load the firmware!"

View File

@ -30,6 +30,16 @@ RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=cache,target=/root/.ccache \
/deps/build_nginx.sh
FROM wget AS sqlite-vec
ARG DEBIAN_FRONTEND
# Build sqlite_vec from source
COPY docker/main/build_sqlite_vec.sh /deps/build_sqlite_vec.sh
RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \
--mount=type=bind,source=docker/main/build_sqlite_vec.sh,target=/deps/build_sqlite_vec.sh \
--mount=type=cache,target=/root/.ccache \
/deps/build_sqlite_vec.sh
FROM scratch AS go2rtc
ARG TARGETARCH
WORKDIR /rootfs/usr/local/go2rtc/bin
@ -148,6 +158,8 @@ RUN apt-get -qq update \
gfortran openexr libatlas-base-dev libssl-dev\
libtbb2 libtbb-dev libdc1394-22-dev libopenexr-dev \
libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev \
# sqlite3 dependencies
tclsh \
# scipy dependencies
gcc gfortran libopenblas-dev liblapack-dev && \
rm -rf /var/lib/apt/lists/*
@ -161,6 +173,10 @@ RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \
COPY docker/main/requirements.txt /requirements.txt
RUN pip3 install -r /requirements.txt
# Build pysqlite3 from source
COPY docker/main/build_pysqlite3.sh /build_pysqlite3.sh
RUN /build_pysqlite3.sh
COPY docker/main/requirements-wheels.txt /requirements-wheels.txt
RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt
@ -168,6 +184,7 @@ RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt
# Collect deps in a single layer
FROM scratch AS deps-rootfs
COPY --from=nginx /usr/local/nginx/ /usr/local/nginx/
COPY --from=sqlite-vec /usr/local/lib/ /usr/local/lib/
COPY --from=go2rtc /rootfs/ /
COPY --from=libusb-build /usr/local/lib /usr/local/lib
COPY --from=tempio /rootfs/ /
@ -188,7 +205,16 @@ ARG APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn
ENV NVIDIA_VISIBLE_DEVICES=all
ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility"
ENV PATH="/usr/lib/btbn-ffmpeg/bin:/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
# Disable tokenizer parallelism warning
# https://stackoverflow.com/questions/62691279/how-to-disable-tokenizers-parallelism-true-false-warning/72926996#72926996
ENV TOKENIZERS_PARALLELISM=true
# https://github.com/huggingface/transformers/issues/27214
ENV TRANSFORMERS_NO_ADVISORY_WARNINGS=1
# Set OpenCV ffmpeg loglevel to fatal: https://ffmpeg.org/doxygen/trunk/log_8h.html
ENV OPENCV_FFMPEG_LOGLEVEL=8
ENV PATH="/usr/local/go2rtc/bin:/usr/local/tempio/bin:/usr/local/nginx/sbin:${PATH}"
# Install dependencies
RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \
@ -214,7 +240,7 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
ENTRYPOINT ["/init"]
CMD []
HEALTHCHECK --start-period=120s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
HEALTHCHECK --start-period=300s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1
# Frigate deps with Node.js and NPM for devcontainer

docker/main/build_pysqlite3.sh (new executable file, 35 lines)
View File

@ -0,0 +1,35 @@
#!/bin/bash
set -euxo pipefail
SQLITE3_VERSION="96c92aba00c8375bc32fafcdf12429c58bd8aabfcadab6683e35bbb9cdebf19e" # 3.46.0
PYSQLITE3_VERSION="0.5.3"
# Fetch the source code for the latest release of Sqlite.
if [[ ! -d "sqlite" ]]; then
wget https://www.sqlite.org/src/tarball/sqlite.tar.gz?r=${SQLITE3_VERSION} -O sqlite.tar.gz
tar xzf sqlite.tar.gz
cd sqlite/
LIBS="-lm" ./configure --disable-tcl --enable-tempstore=always
make sqlite3.c
cd ../
rm sqlite.tar.gz
fi
# Grab the pysqlite3 source code.
if [[ ! -d "./pysqlite3" ]]; then
git clone https://github.com/coleifer/pysqlite3.git
fi
cd pysqlite3/
git checkout ${PYSQLITE3_VERSION}
# Copy the sqlite3 source amalgamation into the pysqlite3 directory so we can
# create a self-contained extension module.
cp "../sqlite/sqlite3.c" ./
cp "../sqlite/sqlite3.h" ./
# Create the wheel and put it in the /wheels dir.
sed -i "s|name='pysqlite3-binary'|name=PACKAGE_NAME|g" setup.py
python3 setup.py build_static
pip3 wheel . -w /wheels

docker/main/build_sqlite_vec.sh (new executable file, 31 lines)
View File

@ -0,0 +1,31 @@
#!/bin/bash
set -euxo pipefail
SQLITE_VEC_VERSION="0.1.3"
cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list
sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list
apt-get update
apt-get -yqq build-dep sqlite3 gettext git
mkdir /tmp/sqlite_vec
# Grab the sqlite_vec source code.
wget -nv https://github.com/asg017/sqlite-vec/archive/refs/tags/v${SQLITE_VEC_VERSION}.tar.gz
tar -zxf v${SQLITE_VEC_VERSION}.tar.gz -C /tmp/sqlite_vec
cd /tmp/sqlite_vec/sqlite-vec-${SQLITE_VEC_VERSION}
mkdir -p vendor
wget -O sqlite-amalgamation.zip https://www.sqlite.org/2024/sqlite-amalgamation-3450300.zip
unzip sqlite-amalgamation.zip
mv sqlite-amalgamation-3450300/* vendor/
rmdir sqlite-amalgamation-3450300
rm sqlite-amalgamation.zip
# build loadable module
make loadable
# install it
cp dist/vec0.* /usr/local/lib
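
A quick way to verify that the installed module loads is sketched below; this assumes the sqlite3 CLI is available in the environment doing the check, which the build stage itself does not guarantee:

# load the freshly installed extension and print its version (illustrative)
sqlite3 :memory: ".load /usr/local/lib/vec0" "SELECT vec_version();"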

View File

@ -8,11 +8,13 @@ apt-get -qq install --no-install-recommends -y \
apt-transport-https \
gnupg \
wget \
lbzip2 \
procps vainfo \
unzip locales tzdata libxml2 xz-utils \
python3.9 \
python3-pip \
curl \
lsof \
jq \
nethogs
@ -39,39 +41,68 @@ apt-get -qq install --no-install-recommends --no-install-suggests -y \
# btbn-ffmpeg -> amd64
if [[ "${TARGETARCH}" == "amd64" ]]; then
mkdir -p /usr/lib/btbn-ffmpeg
mkdir -p /usr/lib/ffmpeg/5.0
mkdir -p /usr/lib/ffmpeg/7.0
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linux64-gpl-5.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/btbn-ffmpeg --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linux64-gpl-7.0.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
fi
# ffmpeg -> arm64
if [[ "${TARGETARCH}" == "arm64" ]]; then
mkdir -p /usr/lib/btbn-ffmpeg
mkdir -p /usr/lib/ffmpeg/5.0
mkdir -p /usr/lib/ffmpeg/7.0
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/btbn-ffmpeg --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/5.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/5.0/doc /usr/lib/ffmpeg/5.0/bin/ffplay
wget -qO btbn-ffmpeg.tar.xz "https://github.com/NickM-27/FFmpeg-Builds/releases/download/autobuild-2024-09-19-12-51/ffmpeg-n7.0.2-18-g3e6cec1286-linuxarm64-gpl-7.0.tar.xz"
tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/ffmpeg/7.0 --strip-components 1
rm -rf btbn-ffmpeg.tar.xz /usr/lib/ffmpeg/7.0/doc /usr/lib/ffmpeg/7.0/bin/ffplay
fi
# arch specific packages
if [[ "${TARGETARCH}" == "amd64" ]]; then
# use debian bookworm for hwaccel packages
# use debian bookworm for amd / intel-i965 driver packages
echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd \
mesa-va-drivers radeontop libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 intel-gpu-tools
i965-va-driver intel-gpu-tools onevpl-tools \
libva-drm2 \
mesa-va-drivers radeontop
# something about this dependency requires it to be installed in a separate call rather than in the line above
apt-get -qq install --no-install-recommends --no-install-suggests -y \
i965-va-driver-shaders
# intel packages use zst compression so we need to update dpkg
apt-get install -y dpkg
rm -f /etc/apt/sources.list.d/debian-bookworm.list
# use the intel apt repository for intel gpu packages
wget -qO - https://repositories.intel.com/gpu/intel-graphics.key | gpg --yes --dearmor --output /usr/share/keyrings/intel-graphics.gpg
echo "deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/gpu/ubuntu jammy client" | tee /etc/apt/sources.list.d/intel-gpu-jammy.list
apt-get -qq update
apt-get -qq install --no-install-recommends --no-install-suggests -y \
intel-opencl-icd=24.35.30872.31-996~22.04 intel-level-zero-gpu=1.3.29735.27-914~22.04 intel-media-va-driver-non-free=24.3.3-996~22.04 \
libmfx1=23.2.2-880~22.04 libmfxgen1=24.2.4-914~22.04 libvpl2=1:2.13.0.0-996~22.04
rm -f /usr/share/keyrings/intel-graphics.gpg
rm -f /etc/apt/sources.list.d/intel-gpu-jammy.list
fi
if [[ "${TARGETARCH}" == "arm64" ]]; then
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libva-drm2 mesa-va-drivers
libva-drm2 mesa-va-drivers radeontop
fi
# install vulkan
apt-get -qq install --no-install-recommends --no-install-suggests -y \
libvulkan1 mesa-vulkan-drivers
apt-get purge gnupg apt-transport-https xz-utils -y
apt-get clean autoclean -y
apt-get autoremove --purge -y

View File

@ -1,8 +1,14 @@
click == 8.1.*
Flask == 3.0.*
Flask_Limiter == 3.7.*
# FastAPI
aiohttp == 3.11.2
starlette == 0.41.2
starlette-context == 0.3.6
fastapi == 0.115.*
uvicorn == 0.30.*
slowapi == 0.1.*
imutils == 0.5.*
joserfc == 0.11.*
joserfc == 1.0.*
pathvalidate == 3.2.*
markupsafe == 2.1.*
mypy == 1.6.1
numpy == 1.26.*
@ -11,16 +17,14 @@ opencv-python-headless == 4.9.0.*
paho-mqtt == 2.1.*
pandas == 2.2.*
peewee == 3.17.*
peewee_migrate == 1.12.*
psutil == 5.9.*
pydantic == 2.7.*
peewee_migrate == 1.13.*
psutil == 6.1.*
pydantic == 2.8.*
git+https://github.com/fbcotter/py3nvml#egg=py3nvml
PyYAML == 6.0.*
pytz == 2024.1
pyzmq == 26.0.*
pytz == 2024.*
pyzmq == 26.2.*
ruamel.yaml == 0.18.*
tzlocal == 5.2
types-PyYAML == 6.0.*
requests == 2.32.*
types-requests == 2.32.*
scipy == 1.13.*
@ -28,5 +32,16 @@ norfair == 2.2.*
setproctitle == 1.3.*
ws4py == 0.5.*
unidecode == 1.3.*
onnxruntime == 1.18.*
openvino == 2024.1.*
# OpenVino & ONNX
openvino == 2024.3.*
onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
onnxruntime == 1.19.* ; platform_machine == 'aarch64'
# Embeddings
transformers == 4.45.*
# Generative AI
google-generativeai == 0.8.*
ollama == 0.3.*
openai == 1.51.*
# push notifications
py-vapid == 1.9.*
pywebpush == 2.0.*

View File

@ -42,10 +42,14 @@ function migrate_db_path() {
fi
}
function set_libva_version() {
local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
}
echo "[INFO] Preparing Frigate..."
migrate_db_path
export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
set_libva_version
echo "[INFO] Starting Frigate..."
cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate"

View File

@ -43,7 +43,10 @@ function get_ip_and_port_from_supervisor() {
export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}"
}
export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+')
function set_libva_version() {
local ffmpeg_path=$(python3 /usr/local/ffmpeg/get_ffmpeg_path.py)
export LIBAVFORMAT_VERSION_MAJOR=$($ffmpeg_path -version | grep -Po "libavformat\W+\K\d+")
}
if [[ -f "/dev/shm/go2rtc.yaml" ]]; then
echo "[INFO] Removing stale config from last run..."
@ -63,6 +66,8 @@ else
echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually."
fi
set_libva_version
readonly config_path="/config"
if [[ -x "${config_path}/go2rtc" ]]; then

View File

@ -0,0 +1,45 @@
import json
import os
import shutil
import sys
from ruamel.yaml import YAML
sys.path.insert(0, "/opt/frigate")
from frigate.const import (
DEFAULT_FFMPEG_VERSION,
INCLUDED_FFMPEG_VERSIONS,
)
sys.path.remove("/opt/frigate")
yaml = YAML()
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
# Check if we can use .yaml instead of .yml
config_file_yaml = config_file.replace(".yml", ".yaml")
if os.path.isfile(config_file_yaml):
config_file = config_file_yaml
try:
with open(config_file) as f:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config: dict[str, any] = yaml.load(raw_config)
elif config_file.endswith(".json"):
config: dict[str, any] = json.loads(raw_config)
except FileNotFoundError:
config: dict[str, any] = {}
path = config.get("ffmpeg", {}).get("path", "default")
if path == "default":
if shutil.which("ffmpeg") is None:
print(f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg")
else:
print("ffmpeg")
elif path in INCLUDED_FFMPEG_VERSIONS:
print(f"/usr/lib/ffmpeg/{path}/bin/ffmpeg")
else:
print(f"{path}/bin/ffmpeg")

View File

@ -2,19 +2,23 @@
import json
import os
import shutil
import sys
from pathlib import Path
import yaml
from ruamel.yaml import YAML
sys.path.insert(0, "/opt/frigate")
from frigate.const import BIRDSEYE_PIPE # noqa: E402
from frigate.ffmpeg_presets import ( # noqa: E402
parse_preset_hardware_acceleration_encode,
from frigate.const import (
BIRDSEYE_PIPE,
DEFAULT_FFMPEG_VERSION,
INCLUDED_FFMPEG_VERSIONS,
)
from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode
sys.path.remove("/opt/frigate")
yaml = YAML()
FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")}
# read docker secret files as env vars too
@ -37,7 +41,7 @@ try:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config: dict[str, any] = yaml.safe_load(raw_config)
config: dict[str, any] = yaml.load(raw_config)
elif config_file.endswith(".json"):
config: dict[str, any] = json.loads(raw_config)
except FileNotFoundError:
@ -105,16 +109,32 @@ else:
**FRIGATE_ENV_VARS
)
# ensure ffmpeg path is set correctly
path = config.get("ffmpeg", {}).get("path", "default")
if path == "default":
if shutil.which("ffmpeg") is None:
ffmpeg_path = f"/usr/lib/ffmpeg/{DEFAULT_FFMPEG_VERSION}/bin/ffmpeg"
else:
ffmpeg_path = "ffmpeg"
elif path in INCLUDED_FFMPEG_VERSIONS:
ffmpeg_path = f"/usr/lib/ffmpeg/{path}/bin/ffmpeg"
else:
ffmpeg_path = f"{path}/bin/ffmpeg"
if go2rtc_config.get("ffmpeg") is None:
go2rtc_config["ffmpeg"] = {"bin": ffmpeg_path}
elif go2rtc_config["ffmpeg"].get("bin") is None:
go2rtc_config["ffmpeg"]["bin"] = ffmpeg_path
# need to replace ffmpeg command when using ffmpeg4
if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59:
if go2rtc_config.get("ffmpeg") is None:
go2rtc_config["ffmpeg"] = {
"rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
}
elif go2rtc_config["ffmpeg"].get("rtsp") is None:
if int(os.environ.get("LIBAVFORMAT_VERSION_MAJOR", "59") or "59") < 59:
if go2rtc_config["ffmpeg"].get("rtsp") is None:
go2rtc_config["ffmpeg"]["rtsp"] = (
"-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}"
)
else:
if go2rtc_config.get("ffmpeg") is None:
go2rtc_config["ffmpeg"] = {"path": ""}
for name in go2rtc_config.get("streams", {}):
stream = go2rtc_config["streams"][name]
@ -145,7 +165,7 @@ if config.get("birdseye", {}).get("restream", False):
birdseye: dict[str, any] = config.get("birdseye")
input = f"-f rawvideo -pix_fmt yuv420p -video_size {birdseye.get('width', 1280)}x{birdseye.get('height', 720)} -r 10 -i {BIRDSEYE_PIPE}"
ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(config.get('ffmpeg', {}).get('hwaccel_args'), input, '-rtsp_transport tcp -f rtsp {output}')}"
ffmpeg_cmd = f"exec:{parse_preset_hardware_acceleration_encode(ffmpeg_path, config.get('ffmpeg', {}).get('hwaccel_args', ''), input, '-rtsp_transport tcp -f rtsp {output}')}"
if go2rtc_config.get("streams"):
go2rtc_config["streams"]["birdseye"] = ffmpeg_cmd

View File

@ -104,6 +104,8 @@ http {
add_header Cache-Control "no-store";
expires off;
keepalive_disable safari;
}
location /stream/ {
@ -224,7 +226,7 @@ http {
location ~* /api/.*\.(jpg|jpeg|png|webp|gif)$ {
include auth_request.conf;
rewrite ^/api/(.*)$ $1 break;
rewrite ^/api/(.*)$ /$1 break;
proxy_pass http://frigate_api;
include proxy.conf;
}

View File

@ -3,7 +3,9 @@
import json
import os
import yaml
from ruamel.yaml import YAML
yaml = YAML()
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
@ -17,7 +19,7 @@ try:
raw_config = f.read()
if config_file.endswith((".yaml", ".yml")):
config: dict[str, any] = yaml.safe_load(raw_config)
config: dict[str, any] = yaml.load(raw_config)
elif config_file.endswith(".json"):
config: dict[str, any] = json.loads(raw_config)
except FileNotFoundError:

View File

@ -22,5 +22,6 @@ ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffmpeg /usr/lib/btbn-ffmpeg/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-5/ffprobe /usr/lib/btbn-ffmpeg/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffmpeg /usr/lib/ffmpeg/6.0/bin/
ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.1-7/ffprobe /usr/lib/ffmpeg/6.0/bin/
ENV PATH="/usr/lib/ffmpeg/6.0/bin/:${PATH}"

View File

@ -1,10 +1,15 @@
BOARDS += rk
local-rk: version
docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk
docker buildx bake --file=docker/rockchip/rk.hcl rk \
--set rk.tags=frigate:latest-rk \
--load
build-rk: version
docker buildx bake --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk
docker buildx bake --file=docker/rockchip/rk.hcl rk \
--set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk
push-rk: build-rk
docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk
docker buildx bake --file=docker/rockchip/rk.hcl rk \
--set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk \
--push

View File

@ -23,11 +23,11 @@ COPY docker/rocm/rocm-pin-600 /etc/apt/preferences.d/
RUN apt-get update
RUN apt-get -y install --no-install-recommends migraphx
RUN apt-get -y install --no-install-recommends migraphx hipfft roctracer
RUN apt-get -y install --no-install-recommends migraphx-dev
RUN mkdir -p /opt/rocm-dist/opt/rocm-$ROCM/lib
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
RUN cd /opt/rocm-$ROCM/lib && cp -dpr libMIOpen*.so* libamd*.so* libhip*.so* libhsa*.so* libmigraphx*.so* librocm*.so* librocblas*.so* libroctracer*.so* librocfft*.so* /opt/rocm-dist/opt/rocm-$ROCM/lib/
RUN cd /opt/rocm-dist/opt/ && ln -s rocm-$ROCM rocm
RUN mkdir -p /opt/rocm-dist/etc/ld.so.conf.d/
@ -69,7 +69,11 @@ RUN apt-get -y install libnuma1
WORKDIR /opt/frigate/
COPY --from=rootfs / /
COPY docker/rocm/rootfs/ /
COPY docker/rocm/requirements-wheels-rocm.txt /requirements.txt
RUN python3 -m pip install --upgrade pip \
&& pip3 uninstall -y onnxruntime-openvino \
&& pip3 install -r /requirements.txt
#######################################################################
FROM scratch AS rocm-dist
@ -79,6 +83,7 @@ ARG AMDGPU
COPY --from=rocm /opt/rocm-$ROCM/bin/rocminfo /opt/rocm-$ROCM/bin/migraphx-driver /opt/rocm-$ROCM/bin/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*$AMDGPU* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/share/miopen/db/*gfx908* /opt/rocm-$ROCM/share/miopen/db/
COPY --from=rocm /opt/rocm-$ROCM/lib/rocblas/library/*$AMDGPU* /opt/rocm-$ROCM/lib/rocblas/library/
COPY --from=rocm /opt/rocm-dist/ /
COPY --from=debian-build /opt/rocm/lib/migraphx.cpython-39-x86_64-linux-gnu.so /opt/rocm-$ROCM/lib/
@ -101,6 +106,3 @@ ENV HSA_OVERRIDE_GFX_VERSION=$HSA_OVERRIDE_GFX_VERSION
#######################################################################
FROM rocm-prelim-hsa-override$HSA_OVERRIDE as rocm-deps
# Request yolov8 download at startup
ENV DOWNLOAD_YOLOV8=1

View File

@ -0,0 +1 @@
onnxruntime-rocm @ https://github.com/NickM-27/frigate-onnxruntime-rocm/releases/download/v1.0.0/onnxruntime_rocm-1.17.3-cp39-cp39-linux_x86_64.whl

View File

@ -4,14 +4,50 @@ BOARDS += rocm
ROCM_CHIPSETS:=gfx900:9.0.0 gfx1030:10.3.0 gfx1100:11.0.0
local-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --load --file=docker/rocm/rocm.hcl --set rocm.tags=frigate:latest-rocm rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm-$(word 1,$(subst :, ,$(chipset))) \
--load \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=frigate:latest-rocm \
--load
build-rocm: version
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm
push-rocm: build-rocm
$(foreach chipset,$(ROCM_CHIPSETS),AMDGPU=$(word 1,$(subst :, ,$(chipset))) HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) HSA_OVERRIDE=1 docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) rocm;)
unset HSA_OVERRIDE_GFX_VERSION && HSA_OVERRIDE=0 AMDGPU=gfx docker buildx bake --push --file=docker/rocm/rocm.hcl --set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm rocm
$(foreach chipset,$(ROCM_CHIPSETS), \
AMDGPU=$(word 1,$(subst :, ,$(chipset))) \
HSA_OVERRIDE_GFX_VERSION=$(word 2,$(subst :, ,$(chipset))) \
HSA_OVERRIDE=1 \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm-$(chipset) \
--push \
&&) true
unset HSA_OVERRIDE_GFX_VERSION && \
HSA_OVERRIDE=0 \
AMDGPU=gfx \
docker buildx bake --file=docker/rocm/rocm.hcl rocm \
--set rocm.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rocm \
--push

View File

@ -1,20 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Compile YoloV8 ONNX files into ROCm MIGraphX files
OVERRIDE=$(cd /opt/frigate && python3 -c 'import frigate.detectors.plugins.rocm as rocm; print(rocm.auto_override_gfx_version())')
if ! test -z "$OVERRIDE"; then
echo "Using HSA_OVERRIDE_GFX_VERSION=${OVERRIDE}"
export HSA_OVERRIDE_GFX_VERSION=$OVERRIDE
fi
for onnx in /config/model_cache/yolov8/*.onnx
do
mxr="${onnx%.onnx}.mxr"
if ! test -f $mxr; then
echo "processing $onnx into $mxr"
/opt/rocm/bin/migraphx-driver compile $onnx --optimize --gpu --enable-offload-copy --binary -o $mxr
fi
done

View File

@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/compile-rocm-models/run

View File

@ -1,10 +1,15 @@
BOARDS += rpi
local-rpi: version
docker buildx bake --load --file=docker/rpi/rpi.hcl --set rpi.tags=frigate:latest-rpi rpi
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
--set rpi.tags=frigate:latest-rpi \
--load
build-rpi: version
docker buildx bake --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
--set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi
push-rpi: build-rpi
docker buildx bake --push --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi
docker buildx bake --file=docker/rpi/rpi.hcl rpi \
--set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi \
--push

View File

@ -18,6 +18,7 @@ RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels
pip3 install -U /deps/trt-wheels/*.whl && \
ldconfig
ENV LD_LIBRARY_PATH=/usr/local/lib/python3.9/dist-packages/tensorrt:/usr/local/cuda/lib64:/usr/local/lib/python3.9/dist-packages/nvidia/cufft/lib
WORKDIR /opt/frigate/
COPY --from=rootfs / /
@ -26,6 +27,7 @@ FROM devcontainer AS devcontainer-trt
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.1 /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \

View File

@ -10,8 +10,8 @@ ARG DEBIAN_FRONTEND
# Use a separate container to build wheels to prevent build dependencies in final image
RUN apt-get -qq update \
&& apt-get -qq install -y --no-install-recommends \
python3.9 python3.9-dev \
wget build-essential cmake git \
python3.9 python3.9-dev \
wget build-essential cmake git \
&& rm -rf /var/lib/apt/lists/*
# Ensure python3 defaults to python3.9
@ -41,7 +41,11 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
&& TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
ADD https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
RUN pip3 uninstall -y onnxruntime-openvino \
&& pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt \
&& pip3 install --no-deps /tmp/onnxruntime_gpu-1.15.1-cp39-cp39-linux_aarch64.whl
FROM build-wheels AS trt-model-wheels
ARG DEBIAN_FRONTEND

View File

@ -24,8 +24,9 @@ ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0
COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so
COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos
COPY --from=trt-deps /usr/local/cuda-12.* /usr/local/cuda
COPY docker/tensorrt/detector/rootfs/ /
ENV YOLO_MODELS="yolov7-320"
ENV YOLO_MODELS=""
HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \
CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1

View File

@ -11,6 +11,7 @@ set -o errexit -o nounset -o pipefail
MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"}
TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)}
OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}"
YOLO_MODELS=${YOLO_MODELS:-""}
# Create output folder
mkdir -p ${OUTPUT_FOLDER}
@ -19,6 +20,11 @@ FIRST_MODEL=true
MODEL_DOWNLOAD=""
MODEL_CONVERT=""
if [ -z "$YOLO_MODELS"]; then
echo "tensorrt model preparation disabled"
exit 0
fi
for model in ${YOLO_MODELS//,/ }
do
# Remove old link in case path/version changed

View File

@ -8,5 +8,7 @@ nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64'
nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64'
nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64'
nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64'
onnx==1.14.0; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'
nvidia-cufft-cu11==10.*; platform_machine == 'x86_64'
onnx==1.16.*; platform_machine == 'x86_64'
onnxruntime-gpu==1.18.*; platform_machine == 'x86_64'
protobuf==3.20.3; platform_machine == 'x86_64'

View File

@ -1 +1 @@
cuda-python == 11.7; platform_machine == 'aarch64'
cuda-python == 11.7; platform_machine == 'aarch64'

View File

@ -7,20 +7,35 @@ JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BAS
JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE)
local-trt: version
$(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt \
--load
local-trt-jp4: version
$(JETPACK4_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp4 tensorrt
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt-jp4 \
--load
local-trt-jp5: version
$(JETPACK5_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp5 tensorrt
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=frigate:latest-tensorrt-jp5 \
--load
build-trt:
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5
push-trt: build-trt
$(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt
$(JETPACK4_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt
$(JETPACK5_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt
$(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt \
--push
$(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 \
--push
$(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl tensorrt \
--set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 \
--push

View File

@ -1,5 +1,10 @@
# Website
This website is built using [Docusaurus 2](https://v2.docusaurus.io/), a modern static website generator.
This website is built using [Docusaurus 3.5](https://docusaurus.io/docs), a modern static website generator.
For installation and contributing instructions, please follow the [Contributing Docs](https://docs.frigate.video/development/contributing).
# Development
1. Run `npm i` to install dependencies
2. Run `npm run start` to start the website

View File

@ -55,7 +55,7 @@ environment_vars:
### `database`
Event and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
Tracked object and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary.
@ -176,19 +176,19 @@ listen [::]:5000 ipv6only=off;
### Custom ffmpeg build
Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, a docker volume mapping can be used to overwrite the included ffmpeg build with an ffmpeg build that works for your specific hardware setup.
Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, some hardware setups have incompatibilities with the included build. In this case, a statically built ffmpeg binary can be downloaded to `/config` and used.
To do this:
1. Download your ffmpeg build and uncompress to a folder on the host (let's use `/home/appdata/frigate/custom-ffmpeg` for this example).
1. Download your ffmpeg build and uncompress it to the Frigate config folder.
2. Update your docker-compose or docker CLI to include `'/home/appdata/frigate/custom-ffmpeg':'/usr/lib/btbn-ffmpeg':'ro'` in the volume mappings.
3. Restart Frigate and the custom version will be used if the mapping was done correctly.
NOTE: The folder that is mapped from the host needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then `/home/appdata/frigate/custom-ffmpeg` needs to be mapped to `/usr/lib/btbn-ffmpeg`.
NOTE: The folder set in the config needs to be the folder that contains `/bin`. So if the full structure is `/home/appdata/frigate/custom-ffmpeg/bin/ffmpeg` then the `ffmpeg -> path` field should be `/config/custom-ffmpeg/bin`.
### Custom go2rtc version
Frigate currently includes go2rtc v1.9.4, there may be certain cases where you want to run a different version of go2rtc.
Frigate currently includes go2rtc v1.9.2; there may be certain cases where you want to run a different version of go2rtc.
To do this:
@ -225,5 +225,5 @@ docker run \
--entrypoint python3 \
ghcr.io/blakeblackshear/frigate:stable \
-u -m frigate \
--validate_config
--validate-config
```

View File

@ -31,7 +31,7 @@ auth:
## Login failure rate limiting
In order to limit the risk of brute force attacks, rate limiting is available for login failures. This is implemented with Flask-Limiter, and the string notation for valid values is available in [the documentation](https://flask-limiter.readthedocs.io/en/stable/configuration.html#rate-limit-string-notation).
In order to limit the risk of brute force attacks, rate limiting is available for login failures. This is implemented with SlowApi, and the string notation for valid values is available in [the documentation](https://limits.readthedocs.io/en/stable/quickstart.html#examples).
For example, `1/second;5/minute;20/hour` will rate limit the login endpoint when failures occur more than:

View File

@ -41,6 +41,7 @@ cameras:
...
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@ -49,6 +50,8 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: PTZ camera object autotracking. Keeps a moving object in
# the center of the frame by automatically moving the PTZ camera.
autotracking:

View File

@ -191,7 +191,9 @@ cameras:
#### Reolink Doorbell
The reolink doorbell supports 2-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability, a secondary rtsp stream can be added that will be using for the two way audio only.
The Reolink doorbell supports two way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary rtsp stream can be added that will be used for the two way audio only.
Ensure HTTP is enabled in the camera's advanced network settings. To use two way talk with Frigate, see the [Live view documentation](/configuration/live#two-way-talk).
```yaml
go2rtc:
@ -216,7 +218,7 @@ go2rtc:
- rtspx://192.168.1.1:7441/abcdefghijk
```
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-rtsp)
[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-rtsp)
In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record if used directly with unifi protect.
@ -228,4 +230,4 @@ ffmpeg:
### TP-Link VIGI Cameras
TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded events. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.
TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded footage. For example, Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`.

View File

@ -7,7 +7,7 @@ title: Camera Configuration
Several inputs can be configured for each camera and the role of each input can be mixed and matched based on your needs. This allows you to use a lower resolution stream for object detection, but create recordings from a higher resolution stream, or vice versa.
A camera is enabled by default but can be temporarily disabled by using `enabled: False`. Existing events and recordings can still be accessed. Live streams, recording and detecting are not working. Camera specific configurations will be used.
A camera is enabled by default but can be temporarily disabled by using `enabled: False`. Existing tracked objects and recordings can still be accessed, but live streams, recording, and detecting will not work while the camera is disabled. Camera specific configurations will still be used.
Each role can only be assigned to one input per camera. The options for roles are as follows:
@ -109,7 +109,7 @@ This list of working and non-working PTZ cameras is based on user feedback.
| Reolink E1 Zoom | ✅ | ❌ | |
| Reolink RLC-823A 16x | ✅ | ❌ | |
| Speco O8P32X | ✅ | ❌ | |
| Sunba 405-D20X | ✅ | ❌ | |
| Sunba 405-D20X | ✅ | ❌ | Incomplete ONVIF support reported on the original and 4K models. All models are suspected incompatible. |
| Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 |
| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands |
| Uniview IPC6612SR-X33-VG | ✅ | ✅ | Leave `calibrate_on_startup` as `False`. A user has reported that zooming with `absolute` is working. |

View File

@ -0,0 +1,211 @@
---
id: genai
title: Generative AI
---
Generative AI can be used to automatically generate descriptive text based on the thumbnails of your tracked objects. This helps with [Semantic Search](/configuration/semantic_search) in Frigate to provide more context about your tracked objects. Descriptions are accessed via the _Explore_ view in the Frigate UI by clicking on a tracked object's thumbnail.
Requests for a description are sent off automatically to your AI provider at the end of the tracked object's lifecycle. Descriptions can also be regenerated manually via the Frigate UI.
:::info
Semantic Search must be enabled to use Generative AI.
:::
## Configuration
Generative AI can be enabled for all cameras or only for specific cameras. There are currently 3 native providers available to integrate with Frigate. Other providers that support the OpenAI standard API can also be used. See the OpenAI section below.
To use Generative AI, you must define a single provider at the global level of your Frigate configuration. If the provider you choose requires an API key, you may either directly paste it in your configuration, or store it in an environment variable prefixed with `FRIGATE_`.
```yaml
genai:
enabled: True
provider: gemini
api_key: "{FRIGATE_GEMINI_API_KEY}"
model: gemini-1.5-flash
cameras:
front_camera: ...
indoor_camera:
genai: # <- disable GenAI for your indoor camera
enabled: False
```
## Ollama
:::warning
Using Ollama on CPU is not recommended; high inference times make using Generative AI impractical.
:::
[Ollama](https://ollama.com/) allows you to self-host large language models and keep everything running locally. It provides a nice API over [llama.cpp](https://github.com/ggerganov/llama.cpp). It is highly recommended to host this server on a machine with an Nvidia graphics card, or on an Apple silicon Mac for best performance.
Most of the 7b parameter 4-bit vision models will fit inside 8GB of VRAM. There is also a [Docker container](https://hub.docker.com/r/ollama/ollama) available.
Parallel requests also come with some caveats. You will need to set `OLLAMA_NUM_PARALLEL=1` and choose `OLLAMA_MAX_QUEUE` and `OLLAMA_MAX_LOADED_MODELS` values that are appropriate for your hardware and preferences. See the [Ollama documentation](https://github.com/ollama/ollama/blob/main/docs/faq.md#how-does-ollama-handle-concurrent-requests).
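As a minimal sketch (not an official example), an Ollama service in docker compose might set these variables as shown below. The queue and loaded-model values are illustrative and should be tuned for your hardware; GPU passthrough flags are omitted for brevity.
```yaml
services:
  ollama:
    image: ollama/ollama
    ports:
      - "11434:11434"
    volumes:
      - ./ollama:/root/.ollama # persist downloaded models
    environment:
      OLLAMA_NUM_PARALLEL: "1"
      OLLAMA_MAX_QUEUE: "100" # illustrative value, tune for your setup
      OLLAMA_MAX_LOADED_MODELS: "1" # illustrative value, tune for your setup
```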
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their model library](https://ollama.com/library). At the time of writing, this includes `llava`, `llava-llama3`, `llava-phi3`, and `moondream`. Note that Frigate will not automatically download the model you specify in your config; you must download the model to your local instance of Ollama first, e.g. by running `ollama pull llava:7b` on your Ollama server/Docker container. Note that the model specified in Frigate's config must match the downloaded model tag.
:::note
You should have at least 8 GB of RAM available (or VRAM if running on GPU) to run the 7B models, 16 GB to run the 13B models, and 32 GB to run the 33B models.
:::
### Configuration
```yaml
genai:
enabled: True
provider: ollama
base_url: http://localhost:11434
model: llava:7b
```
## Google Gemini
Google Gemini has a free tier allowing [15 queries per minute](https://ai.google.dev/pricing) to the API, which is more than sufficient for standard Frigate usage.
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://ai.google.dev/gemini-api/docs/models/gemini). At the time of writing, this includes `gemini-1.5-pro` and `gemini-1.5-flash`.
### Get API Key
To start using Gemini, you must first get an API key from [Google AI Studio](https://aistudio.google.com).
1. Accept the Terms of Service
2. Click "Get API Key" from the right hand navigation
3. Click "Create API key in new project"
4. Copy the API key for use in your config
### Configuration
```yaml
genai:
enabled: True
provider: gemini
api_key: "{FRIGATE_GEMINI_API_KEY}"
model: gemini-1.5-flash
```
## OpenAI
OpenAI does not have a free tier for their API. With the release of gpt-4o, pricing has been reduced and each generation should cost fractions of a cent if you choose to go this route.
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://platform.openai.com/docs/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
### Get API Key
To start using OpenAI, you must first [create an API key](https://platform.openai.com/api-keys) and [configure billing](https://platform.openai.com/settings/organization/billing/overview).
### Configuration
```yaml
genai:
enabled: True
provider: openai
api_key: "{FRIGATE_OPENAI_API_KEY}"
model: gpt-4o
```
:::note
To use a different OpenAI-compatible API endpoint, set the `OPENAI_BASE_URL` environment variable to your provider's API URL.
:::
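As a hedged sketch, the environment variable could be supplied through docker compose; the endpoint URL below is a placeholder for your provider's actual API URL.
```yaml
services:
  frigate:
    environment:
      OPENAI_BASE_URL: "https://your-provider.example.com/v1" # hypothetical endpoint
```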
## Azure OpenAI
Microsoft offers several vision models through Azure OpenAI. A subscription is required.
### Supported Models
You must use a vision capable model with Frigate. Current model variants can be found [in their documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models). At the time of writing, this includes `gpt-4o` and `gpt-4-turbo`.
### Create Resource and Get API Key
To start using Azure OpenAI, you must first [create a resource](https://learn.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource). You'll need your API key and resource URL, which must include the `api-version` parameter (see the example below). The model field is not required in your configuration as the model is part of the deployment name you chose when deploying the resource.
### Configuration
```yaml
genai:
enabled: True
provider: azure_openai
base_url: https://example-endpoint.openai.azure.com/openai/deployments/gpt-4o/chat/completions?api-version=2023-03-15-preview
api_key: "{FRIGATE_OPENAI_API_KEY}"
```
## Usage and Best Practices
Frigate's thumbnail search excels at identifying specific details about tracked objects: for example, using an "image caption" approach to find a "person wearing a yellow vest," "a white dog running across the lawn," or "a red car on a residential street." To enhance this further, Frigate's default prompts are designed to ask your AI provider about the intent behind the object's actions, rather than just describing its appearance.
While generating simple descriptions of detected objects is useful, understanding intent provides a deeper layer of insight. Instead of just recognizing "what" is in a scene, Frigate's default prompts aim to infer "why" it might be there or "what" it could do next. Descriptions tell you what's happening, but intent gives context. For instance, a person walking toward a door might seem like a visitor, but if they're moving quickly after hours, you can infer a potential break-in attempt. Detecting a person loitering near a door at night can trigger an alert sooner than simply noting "a person standing by the door," helping you respond based on the situation's context.
### Using GenAI for notifications
Frigate provides an [MQTT topic](/integrations/mqtt), `frigate/tracked_object_update`, that is updated with a JSON payload containing `event_id` and `description` when your AI provider returns a description for a tracked object. This description could be used directly in notifications, such as sending alerts to your phone or making audio announcements. If additional details from the tracked object are needed, you can query the [HTTP API](/integrations/api/event-events-event-id-get) using the `event_id`, eg: `http://frigate_ip:5000/api/events/<event_id>`.
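As an illustration only (not part of Frigate), a small script could subscribe to that topic and act on the payload. This sketch assumes paho-mqtt 2.x and uses placeholder broker/host names.
```python
# Minimal sketch: print GenAI descriptions as Frigate publishes them.
import json

import paho.mqtt.client as mqtt


def on_message(client, userdata, msg):
    payload = json.loads(msg.payload)
    # The payload contains "event_id" and "description" as described above.
    print(f"{payload['event_id']}: {payload['description']}")
    # Further details could be fetched from
    # http://frigate_ip:5000/api/events/<event_id> if needed.


client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.on_message = on_message
client.connect("mqtt-broker.local", 1883)  # placeholder broker address
client.subscribe("frigate/tracked_object_update")
client.loop_forever()
```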
## Custom Prompts
Frigate sends multiple frames from the tracked object along with a prompt to your Generative AI provider asking it to generate a description. The default prompt is as follows:
```
Analyze the sequence of images containing the {label}. Focus on the likely intent or behavior of the {label} based on its actions and movement, rather than describing its appearance or the surroundings. Consider what the {label} is doing, why, and what it might do next.
```
:::tip
Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.
:::
You are also able to define custom prompts in your configuration.
```yaml
genai:
enabled: True
provider: ollama
base_url: http://localhost:11434
model: llava
prompt: "Analyze the {label} in these images from the {camera} security camera. Focus on the actions, behavior, and potential intent of the {label}, rather than just describing its appearance."
object_prompts:
person: "Examine the main person in these images. What are they doing and what might their actions suggest about their intent (e.g., approaching a door, leaving an area, standing still)? Do not describe the surroundings or static details."
car: "Observe the primary vehicle in these images. Focus on its movement, direction, or purpose (e.g., parking, approaching, circling). If it's a delivery vehicle, mention the company."
```
Prompts can also be overridden at the camera level to provide a more detailed prompt to the model about your specific camera, if you desire. By default, descriptions will be generated for all tracked objects and all zones. But you can also optionally specify `objects` and `required_zones` to only generate descriptions for certain tracked objects or zones.
Optionally, you can generate the description using a snapshot (if enabled) by setting `use_snapshot` to `True`. By default, this is set to `False`, which sends the uncompressed images from the `detect` stream collected over the object's lifetime to the model. Once the object lifecycle ends, only a single compressed and cropped thumbnail is saved with the tracked object. Using a snapshot might be useful when you want to _regenerate_ a tracked object's description as it will provide the AI with a higher-quality image (typically downscaled by the AI itself) than the cropped/compressed thumbnail. Using a snapshot otherwise has a trade-off in that only a single image is sent to your provider, which will limit the model's ability to determine object movement or direction.
```yaml
cameras:
front_door:
genai:
use_snapshot: True
prompt: "Analyze the {label} in these images from the {camera} security camera at the front door. Focus on the actions and potential intent of the {label}."
object_prompts:
person: "Examine the person in these images. What are they doing, and how might their actions suggest their purpose (e.g., delivering something, approaching, leaving)? If they are carrying or interacting with a package, include details about its source or destination."
cat: "Observe the cat in these images. Focus on its movement and intent (e.g., wandering, hunting, interacting with objects). If the cat is near the flower pots or engaging in any specific actions, mention it."
objects:
- person
- cat
required_zones:
- steps
```
### Experiment with prompts
Many providers also have a public facing chat interface for their models. Download a couple of different thumbnails or snapshots from Frigate and try new things in the playground to get descriptions to your liking before updating the prompt in Frigate.
- OpenAI - [ChatGPT](https://chatgpt.com)
- Gemini - [Google AI Studio](https://aistudio.google.com)
- Ollama - [Open WebUI](https://docs.openwebui.com/)

View File

@ -65,24 +65,37 @@ Or map in all the `/dev/video*` devices.
## Intel-based CPUs
:::info
**Recommended hwaccel Preset**
| CPU Generation | Intel Driver | Recommended Preset | Notes |
| -------------- | ------------ | ------------------ | ----------------------------------- |
| gen1 - gen7 | i965 | preset-vaapi | qsv is not supported |
| gen8 - gen12 | iHD | preset-vaapi | preset-intel-qsv-* can also be used |
| gen13+ | iHD / Xe | preset-intel-qsv-* | |
| Intel Arc GPU | iHD / Xe | preset-intel-qsv-* | |
:::
:::note
The default driver is `iHD`. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
See [The Intel Docs](https://www.intel.com/content/www/us/en/support/articles/000005505/processors.html) to figure out what generation your CPU is.
:::
### Via VAAPI
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs.
VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams.
```yaml
ffmpeg:
hwaccel_args: preset-vaapi
```
:::note
With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars).
:::
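As a minimal sketch, the driver override mentioned above might be passed via docker compose like this:
```yaml
services:
  frigate:
    environment:
      LIBVA_DRIVER_NAME: "i965"
```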
### Via Quicksync (>=10th Generation only)
If VAAPI does not work for you, you can try QSV if your processor supports it. QSV must be set specifically based on the video encoding of the stream.
### Via Quicksync
#### H.264 streams
@ -218,28 +231,11 @@ docker run -d \
### Setup Decoder
The decoder you need to pass in the `hwaccel_args` will depend on the input video.
A list of supported codecs (you can use `ffmpeg -decoders | grep cuvid` in the container to get the ones your card supports)
```
V..... h263_cuvid Nvidia CUVID H263 decoder (codec h263)
V..... h264_cuvid Nvidia CUVID H264 decoder (codec h264)
V..... hevc_cuvid Nvidia CUVID HEVC decoder (codec hevc)
V..... mjpeg_cuvid Nvidia CUVID MJPEG decoder (codec mjpeg)
V..... mpeg1_cuvid Nvidia CUVID MPEG1VIDEO decoder (codec mpeg1video)
V..... mpeg2_cuvid Nvidia CUVID MPEG2VIDEO decoder (codec mpeg2video)
V..... mpeg4_cuvid Nvidia CUVID MPEG4 decoder (codec mpeg4)
V..... vc1_cuvid Nvidia CUVID VC1 decoder (codec vc1)
V..... vp8_cuvid Nvidia CUVID VP8 decoder (codec vp8)
V..... vp9_cuvid Nvidia CUVID VP9 decoder (codec vp9)
```
For example, for H264 video, you'll select `preset-nvidia-h264`.
Using `preset-nvidia`, ffmpeg will automatically select the necessary profile for the incoming video, and will log an error if the profile is not supported by your GPU.
```yaml
ffmpeg:
hwaccel_args: preset-nvidia-h264
hwaccel_args: preset-nvidia
```
If everything is working correctly, you should see a significant improvement in performance.

View File

@ -56,6 +56,11 @@ go2rtc:
password: "{FRIGATE_GO2RTC_RTSP_PASSWORD}"
```
```yaml
genai:
api_key: "{FRIGATE_GENAI_API_KEY}"
```
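As a hedged sketch, the referenced variables would then be provided to the container, for example via docker compose (the values below are placeholders):
```yaml
services:
  frigate:
    environment:
      FRIGATE_GO2RTC_RTSP_PASSWORD: "your-rtsp-password" # placeholder
      FRIGATE_GENAI_API_KEY: "your-api-key" # placeholder
```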
## Common configuration examples
Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values.
@ -67,7 +72,7 @@ Here are some common starter configuration examples. Refer to the [reference con
- Hardware acceleration for decoding video
- USB Coral detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
- Continue to keep all video if it was during any event for 30 days
- Continue to keep all video if it qualified as an alert or detection for 30 days
- Save snapshots for 30 days
- Motion mask for the camera timestamp
@ -90,10 +95,12 @@ record:
retain:
days: 7
mode: motion
events:
alerts:
retain:
default: 30
mode: motion
days: 30
detections:
retain:
days: 30
snapshots:
enabled: True
@ -123,7 +130,7 @@ cameras:
- VAAPI hardware acceleration for decoding video
- USB Coral detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
- Continue to keep all video if it was during any event for 30 days
- Continue to keep all video if it qualified as an alert or detection for 30 days
- Save snapshots for 30 days
- Motion mask for the camera timestamp
@ -144,10 +151,12 @@ record:
retain:
days: 7
mode: motion
events:
alerts:
retain:
default: 30
mode: motion
days: 30
detections:
retain:
days: 30
snapshots:
enabled: True
@ -177,7 +186,7 @@ cameras:
- VAAPI hardware acceleration for decoding video
- OpenVino detector
- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not
- Continue to keep all video if it was during any event for 30 days
- Continue to keep all video if it qualified as an alert or detection for 30 days
- Save snapshots for 30 days
- Motion mask for the camera timestamp
@ -194,14 +203,13 @@ detectors:
ov:
type: openvino
device: AUTO
model:
path: /openvino-model/ssdlite_mobilenet_v2.xml
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
path: /openvino-model/ssdlite_mobilenet_v2.xml
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
record:
@ -209,10 +217,12 @@ record:
retain:
days: 7
mode: motion
events:
alerts:
retain:
default: 30
mode: motion
days: 30
detections:
retain:
days: 30
snapshots:
enabled: True

View File

@ -23,13 +23,13 @@ If you are using go2rtc, you should adjust the following settings in your camera
- Video codec: **H.264** - provides the most compatible video codec with all Live view technologies and browsers. Avoid any kind of "smart codec" or "+" codec like _H.264+_ or _H.265+_. as these non-standard codecs remove keyframes (see below).
- Audio codec: **AAC** - provides the most compatible audio codec with all Live view technologies and browsers that support audio.
- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes.
- I-frame interval (sometimes called the keyframe interval, the interframe space, or the GOP length): match your camera's frame rate, or choose "1x" (for interframe space on Reolink cameras). For example, if your stream outputs 20fps, your i-frame interval should be 20 (or 1x on Reolink). Values higher than the frame rate will cause the stream to take longer to begin playback. See [this page](https://gardinal.net/understanding-the-keyframe-interval/) for more on keyframes. For many users this may not be an issue, but it should be noted that a 1x i-frame interval will cause more storage utilization if you are using the stream for the `record` role as well.
The default video and audio codec on your camera may not always be compatible with your browser, which is why setting them to H.264 and AAC is recommended. See the [go2rtc docs](https://github.com/AlexxIT/go2rtc?tab=readme-ov-file#codecs-madness) for codec support information.
### Audio Support
MSE Requires AAC audio, WebRTC requires PCMU/PCMA, or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
MSE requires PCMA/PCMU or AAC audio; WebRTC requires PCMA/PCMU or opus audio. If you want to support both MSE and WebRTC then your restream config needs to make sure both are enabled.
```yaml
go2rtc:
@ -138,3 +138,13 @@ services:
:::
See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this.
### Two way talk
For devices that support two way talk, Frigate can be configured to use the feature from the camera's Live view in the Web UI. You should:
- Set up go2rtc with [WebRTC](#webrtc-extra-configuration).
- Ensure you access Frigate via https (may require [opening port 8971](/frigate/installation/#ports)).
- For the Home Assistant Frigate card, [follow the docs](https://github.com/dermotduffy/frigate-hass-card?tab=readme-ov-file#using-2-way-audio) for the correct source.
To use the Reolink Doorbell with two way talk, you should use the [recommended Reolink configuration](/configuration/camera_specific#reolink-doorbell).

View File

@ -13,11 +13,11 @@ Once motion is detected, it tries to group up nearby areas of motion together in
The default motion settings should work well for the majority of cameras, however there are cases where tuning motion detection can lead to better and more optimal results. Each camera has its own environment with different variables that affect motion, this means that the same motion settings will not fit all of your cameras.
Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss events.
Before tuning motion it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss objects that you want to track.
## Create Motion Masks
First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md).
First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want alerts or detections**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md).
## Prepare For Testing
@ -29,7 +29,7 @@ Now that things are set up, find a time to tune that represents normal circumsta
:::note
Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss events from objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected.
Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected.
:::
@ -92,9 +92,15 @@ motion:
lightning_threshold: 0.8
```
:::tip
:::warning
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these events are not missed.
Some cameras like doorbell cameras may have missed detections when someone walks directly in front of the camera and the lightning_threshold causes motion detection to be re-calibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these objects are not missed.
:::
:::note
Lightning threshold does not stop motion based recordings from being saved.
:::

View File

@ -0,0 +1,42 @@
---
id: notifications
title: Notifications
---
# Notifications
Frigate offers native notifications using the [WebPush Protocol](https://web.dev/articles/push-notifications-web-push-protocol) which uses the [VAPID spec](https://tools.ietf.org/html/draft-thomson-webpush-vapid) to deliver notifications to web apps using encryption.
## Setting up Notifications
In order to use notifications the following requirements must be met:
- Frigate must be accessed via a secure https connection
- A supported browser must be used. Currently Chrome, Firefox, and Safari are known to be supported.
- In order for notifications to be usable externally, Frigate must be accessible externally
### Configuration
To configure notifications, go to the Frigate WebUI -> Settings -> Notifications, enable them, then fill out the fields and save.
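If you prefer to set this in the config file instead, a minimal sketch might look like the following; check the reference config for the exact field names, as they are assumed here.
```yaml
notifications:
  enabled: True
  email: "you@example.com" # contact email used for the WebPush/VAPID claim
```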
### Registration
Once notifications are enabled, press the `Register for Notifications` button on all devices that you would like to receive notifications on. This will register the background worker. After this, Frigate must be restarted, and then notifications will begin to be sent.
## Supported Notifications
Currently notifications are only supported for review alerts. More notifications will be supported in the future.
:::note
Currently, only Chrome supports images in notifications. Safari and Firefox will only show a title and message in the notification.
:::
## Reduce Notification Latency
Different platforms handle notifications differently, some settings changes may be required to get optimal notification delivery.
### Android
Most Android phones have battery optimization settings. To get reliable notification delivery, the browser (Chrome, Firefox) should have battery optimizations disabled. If Frigate is running as a PWA then the Frigate app should have battery optimizations disabled as well.

View File

@ -3,37 +3,47 @@ id: object_detectors
title: Object Detectors
---
# Officially Supported Detectors
# Supported Hardware
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt`, and `rknn`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
:::info
## CPU Detector (not recommended)
Frigate supports multiple different detectors that work on different types of hardware:
The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`.
**Most Hardware**
- [Coral EdgeTPU](#edge-tpu-detector): The Google Coral EdgeTPU is available in USB and m.2 format allowing for a wide range of compatibility with devices.
- [Hailo](#hailo-8l): The Hailo8 AI Acceleration module is available in m.2 format with a HAT for RPi devices, offering a wide range of compatibility with devices.
:::tip
**AMD**
- [ROCm](#amdrocm-gpu-detector): ROCm can run on AMD Discrete GPUs to provide efficient object detection.
- [ONNX](#onnx): ROCm will automatically be detected and used as a detector in the `-rocm` Frigate image when a supported ONNX model is configured.
If you do not have GPU or Edge TPU hardware, using the [OpenVINO Detector](#openvino-detector) is often more efficient than using the CPU detector.
**Intel**
- [OpenVino](#openvino-detector): OpenVino can run on Intel Arc GPUs, Intel integrated GPUs, and Intel CPUs to provide efficient object detection.
- [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.
**Nvidia**
- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.
**Rockchip**
- [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
**For Testing**
- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a tflite model. This is not recommended; in most cases OpenVINO can be used in CPU mode with better results.
:::
The number of threads used by the interpreter can be specified using the `"num_threads"` attribute, and defaults to `3.`
:::note
A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.
Multiple detectors can not be mixed for object detection (ex: OpenVINO and Coral EdgeTPU can not be used for object detection at the same time).
```yaml
detectors:
cpu1:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
cpu2:
type: cpu
num_threads: 3
```
This does not affect using hardware for accelerating other tasks such as [semantic search](./semantic_search.md).
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
:::
# Officially Supported Detectors
Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `hailo8l`, `onnx`, `openvino`, `rknn`, `rocm`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras.
## Edge TPU Detector
@ -114,6 +124,30 @@ detectors:
device: pci
```
## Hailo-8l
This detector is available for use with the Hailo-8 AI Acceleration Module.
See the [installation docs](../frigate/installation.md#hailo-8l) for information on configuring the hailo8.
### Configuration
```yaml
detectors:
hailo8l:
type: hailo8l
device: PCIe
model:
width: 300
height: 300
input_tensor: nhwc
input_pixel_format: bgr
model_type: ssd
path: /config/model_cache/h8l_cache/ssd_mobilenet_v1.hef
```
## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
@ -122,11 +156,29 @@ The OpenVINO device to be used is specified using the `"device"` attribute accor
OpenVINO is supported on 6th Gen Intel platforms (Skylake) and newer. It will also run on AMD CPUs despite having no official support for it. A supported Intel platform is required to use the `GPU` device with OpenVINO. For detailed system requirements, see [OpenVINO System Requirements](https://docs.openvino.ai/2024/about-openvino/release-notes-openvino/system-requirements.html)
:::tip
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
```yaml
detectors:
ov_0:
type: openvino
device: GPU
ov_1:
type: openvino
device: GPU
```
:::
### Supported Models
#### SSDLite MobileNet v2
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model. Use the model configuration shown below when using the OpenVINO detector with the default model.
An OpenVINO model is provided in the container at `/openvino-model/ssdlite_mobilenet_v2.xml` and is used by this detector type by default. The model comes from Intel's Open Model Zoo [SSDLite MobileNet V2](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/ssdlite_mobilenet_v2) and is converted to an FP16 precision IR model.
Use the model configuration shown below when using the OpenVINO detector with the default OpenVINO model:
```yaml
detectors:
@ -205,7 +257,7 @@ The model used for TensorRT must be preprocessed on the same hardware platform t
The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host.
By default, the `yolov7-320` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
By default, no models will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder.
If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU.
@ -236,6 +288,7 @@ yolov4x-mish-640
yolov7-tiny-288
yolov7-tiny-416
yolov7-640
yolov7-416
yolov7-320
yolov7x-640
yolov7x-320
@ -246,7 +299,7 @@ An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yol
```yml
frigate:
environment:
- YOLO_MODELS=yolov4-608,yolov7x-640
- YOLO_MODELS=yolov7-320,yolov7x-640
- USE_FP16=false
```
@ -264,6 +317,8 @@ The TensorRT detector can be selected by specifying `tensorrt` as the model type
The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. These model path and dimensions used will depend on which model you have generated.
Use the config below to work with generated TRT models:
```yaml
detectors:
tensorrt:
@ -278,6 +333,221 @@ model:
height: 320
```
## AMD/ROCm GPU detector
### Setup
The `rocm` detector supports running YOLO-NAS models on AMD GPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
### Docker settings for GPU access
ROCm needs access to the `/dev/kfd` and `/dev/dri` devices. When docker or frigate is not run under root, the `video` (and possibly `render` and `ssl/_ssl`) groups should also be added.
When running docker directly the following flags should be added for device access:
```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
...
```
When using docker compose:
```yaml
services:
frigate:
---
devices:
- /dev/dri
- /dev/kfd
```
For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
### Docker settings for overriding the GPU chipset
Your GPU might work just fine without any special configuration, but in many cases manual settings are needed. The AMD/ROCm software stack comes with a limited set of GPU drivers, and for newer or missing models you will have to override the chipset version to an older/generic version to get things working.
Also, AMD/ROCm does not "officially" support integrated GPUs. It still works with most of them just fine, but this requires special settings. You have to configure the `HSA_OVERRIDE_GFX_VERSION` environment variable. See the [ROCm bug report](https://github.com/ROCm/ROCm/issues/1743) for context and examples.
For the rocm frigate build there is some automatic detection:
- gfx90c -> 9.0.0
- gfx1031 -> 10.3.0
- gfx1103 -> 11.0.0
If you have something else you might need to override the `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `9.0.0`, then you should configure it from command line as:
```bash
$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
...
```
When using docker compose:
```yaml
services:
frigate:
...
environment:
HSA_OVERRIDE_GFX_VERSION: "9.0.0"
```
Figuring out what version you need can be complicated as you can't tell the chipset name and driver from the AMD brand name.
- First make sure that the ROCm environment is running properly by running `/opt/rocm/bin/rocminfo` in the frigate container -- it should list both the CPU and the GPU with their properties
- Find the chipset version you have (gfxNNN) in the output of `rocminfo` (see below)
- Use a search engine to query what `HSA_OVERRIDE_GFX_VERSION` you need for the given gfx name ("gfxNNN ROCm HSA_OVERRIDE_GFX_VERSION")
- Override the `HSA_OVERRIDE_GFX_VERSION` with the relevant value
- If things are not working, check the frigate docker logs
#### Figuring out if AMD/ROCm is working and found your GPU
```bash
$ docker exec -it frigate /opt/rocm/bin/rocminfo
```
#### Figuring out your AMD GPU chipset version:
We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from messing up the result:
```bash
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
```
### Supported Models
There is no default model provided; the following formats are supported:
#### YOLO-NAS
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).
:::warning
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
:::
The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
rocm:
type: rocm
model:
model_type: yolonas
width: 320 # <--- should match whatever was set in notebook
height: 320 # <--- should match whatever was set in notebook
input_pixel_format: bgr
path: /config/yolo_nas_s.onnx
labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## ONNX
ONNX is an open format for building machine learning models; Frigate supports running ONNX models on CPU, OpenVINO, and TensorRT. On startup Frigate will automatically try to use a GPU if one is available.
:::info
If the correct build is used for your GPU then the GPU will be detected and used automatically.
- **AMD**
- ROCm will automatically be detected and used with the ONNX detector in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used with the ONNX detector in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used with the ONNX detector in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used with the ONNX detector in the `-tensorrt-jp(4/5)` Frigate image.
:::
:::tip
When using many cameras one detector may not be enough to keep up. Multiple detectors can be defined assuming GPU resources are available. An example configuration would be:
```yaml
detectors:
onnx_0:
type: onnx
onnx_1:
type: onnx
```
:::
### Supported Models
There is no default model provided; the following formats are supported:
#### YOLO-NAS
[YOLO-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) models are supported, but not included by default. You can build and download a compatible model with pre-trained weights using [this notebook](https://github.com/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/blakeblackshear/frigate/blob/dev/notebooks/YOLO_NAS_Pretrained_Export.ipynb).
:::warning
The pre-trained YOLO-NAS weights from DeciAI are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
:::
The input image size in this notebook is set to 320x320. This results in lower CPU usage and faster inference times without impacting performance in most cases due to the way Frigate crops video frames to areas of interest before running detection. The notebook and config can be updated to 640x640 if desired.
After placing the downloaded onnx model in your config folder, you can use the following configuration:
```yaml
detectors:
onnx:
type: onnx
model:
model_type: yolonas
width: 320 # <--- should match whatever was set in notebook
height: 320 # <--- should match whatever was set in notebook
input_pixel_format: bgr
input_tensor: nchw
path: /config/yolo_nas_s.onnx
labelmap_path: /labelmap/coco-80.txt
```
Note that the labelmap uses a subset of the complete COCO label set that has only 80 objects.
## CPU Detector (not recommended)
The CPU detector type runs a TensorFlow Lite model utilizing the CPU without hardware acceleration. It is recommended to use a hardware accelerated detector type instead for better performance. To configure a CPU based detector, set the `"type"` attribute to `"cpu"`.
:::danger
The CPU detector is not recommended for general use. If you do not have GPU or Edge TPU hardware, using the [OpenVINO Detector](#openvino-detector) in CPU mode is often more efficient than using the CPU detector.
:::
The number of threads used by the interpreter can be specified using the `"num_threads"` attribute, and defaults to `3`.
A TensorFlow Lite model is provided in the container at `/cpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`.
```yaml
detectors:
cpu1:
type: cpu
num_threads: 3
cpu2:
type: cpu
num_threads: 3
model:
path: "/custom_model.tflite"
```
When using CPU detectors, you can add one CPU detector per camera. Adding more detectors than the number of cameras should not improve performance.
## Deepstack / CodeProject.AI Server Detector
The Deepstack / CodeProject.AI Server detector for Frigate allows you to integrate Deepstack and CodeProject.AI object detection capabilities into Frigate. CodeProject.AI and DeepStack are open-source AI platforms that can be run on various devices such as the Raspberry Pi, Nvidia Jetson, and other compatible hardware. It is important to note that the integration is performed over the network, so the inference times may not be as fast as native Frigate detectors, but it still provides an efficient and reliable solution for object detection and tracking.
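As a hedged illustration (the server URL, port, and timeout below are placeholders to adjust for your deployment), a minimal detector configuration might look like:
```yaml
detectors:
  deepstack:
    type: deepstack
    api_url: http://deepstack_host:5000/v1/vision/detection # placeholder host/port
    api_timeout: 0.1 # seconds
```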

View File

@ -20,15 +20,13 @@ For object filters in your configuration, any single detection below `min_score`
In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example.
show image of snapshot vs event with differing scores
### Minimum Score
Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid events to be lost or disjointed.
Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low then false positives may be detected and tracked which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high then lower scoring true positives like objects that are further away or partially occluded may be thrown out which can also confuse the tracker and cause valid tracked objects to be lost or disjointed.
### Threshold
`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold` object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create an event. If `threshold` is too high then true positive events may be missed due to the object never scoring high enough.
`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold`, the object is considered a true positive. If `threshold` is too low then some higher scoring false positives may create a tracked object. If `threshold` is too high then true positive tracked objects may be missed due to the object never scoring high enough.
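As a hedged illustration, both values can be tuned per object under the object filters (the numbers below are placeholders to adjust for your cameras):
```yaml
objects:
  track:
    - person
  filters:
    person:
      min_score: 0.5 # detections below this are discarded outright
      threshold: 0.7 # computed (median) score required to become a true positive
```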
## Object Shape
@ -52,7 +50,7 @@ Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a "
### Zones
[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create events for objects that enter the zone.
[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create tracked objects for objects that enter the zone.
### Object Masks

View File

@ -3,7 +3,7 @@ id: record
title: Recording
---
Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH/<camera_name>/MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed.
Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH/<camera_name>/MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the tracked object retention when determining if a recording should be removed.
New recording segments are written from the camera stream to cache, they are only moved to disk if they match the setup recording retention policy.
@ -13,7 +13,7 @@ H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other br
### Most conservative: Ensure all video is saved
For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with events will be retained until 30 days have passed.
For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with alerts or detections will be retained until 30 days have passed.
```yaml
record:
@ -21,9 +21,13 @@ record:
retain:
days: 3
mode: all
events:
alerts:
retain:
default: 30
days: 30
mode: motion
detections:
retain:
days: 30
mode: motion
```
@ -37,25 +41,28 @@ record:
retain:
days: 3
mode: motion
events:
alerts:
retain:
default: 30
days: 30
mode: motion
detections:
retain:
days: 30
mode: motion
```
### Minimum: Events only
### Minimum: Alerts only
If you only want to retain video that occurs during an event, this config will discard video unless an event is ongoing.
If you only want to retain video while a tracked object is active, this config will discard video unless an alert is ongoing.
```yaml
record:
enabled: True
retain:
days: 0
mode: all
events:
alerts:
retain:
default: 30
days: 30
mode: motion
```
@ -65,7 +72,7 @@ As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 h
## Configuring Recording Retention
Frigate supports both continuous and event based recordings with separate retention modes and retention periods.
Frigate supports both continuous and tracked object based recordings with separate retention modes and retention periods.
:::tip
@ -86,25 +93,28 @@ record:
Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean)
### Event Recording
### Object Recording
If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled.
The number of days to record review items can be specified for review items classified as alerts as well as detections.
```yaml
record:
enabled: True
events:
alerts:
retain:
default: 10 # <- number of days to keep event recordings
days: 10 # <- number of days to keep alert recordings
detections:
retain:
days: 10 # <- number of days to keep detections recordings
```
This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs.
This configuration will retain recording segments that overlap with alerts and detections for 10 days. Because multiple tracked objects can reference the same recording segments, this avoids storing duplicate footage for overlapping tracked objects and reduces overall storage needs.
**WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect.
## What do the different retain modes mean?
Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect events).
Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for continuous recording (but can also affect tracked objects).
Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording.
@ -112,11 +122,7 @@ Let's say you have Frigate configured so that your doorbell camera would retain
- With the `motion` option the only parts of those 48 hours would be segments that Frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments.
- With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary.
The same options are available with events. Let's consider a scenario where you drive up and park in your driveway, go inside, then come back out 4 hours later.
- With the `all` option all segments for the duration of the event would be saved for the event. This event would have 4 hours of footage.
- With the `motion` option all segments for the duration of the event with motion would be saved. This means any segment where a car drove by in the street, person walked by, lighting changed, etc. would be saved.
- With the `active_objects` it would only keep segments where the object was active. In this case the only segments that would be saved would be the ones where the car was driving up, you going inside, you coming outside, and the car driving away. Essentially reducing the 4 hours to a minute or two of event footage.
The same options are available with alerts and detections, except it will only save the recordings when it overlaps with a review item of that type.
A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active objects` are stored for 14 days would be as follows:
@ -126,33 +132,18 @@ record:
retain:
days: 7
mode: motion
events:
alerts:
retain:
default: 14
days: 14
mode: active_objects
detections:
retain:
days: 14
mode: active_objects
```
The above configuration example can be added globally or on a per camera basis.
### Object Specific Retention
You can also set specific retention length for an object type. The below configuration example builds on from above but also specifies that recordings of dogs only need to be kept for 2 days and recordings of cars should be kept for 7 days.
```yaml
record:
enabled: True
retain:
days: 7
mode: motion
events:
retain:
default: 14
mode: active_objects
objects:
dog: 2
car: 7
```
## Can I have "continuous" recordings, but only at certain times?
Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times.
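As one hedged example using the Home Assistant integration (the `switch.front_door_recordings` entity name is hypothetical and depends on your camera name), recording could be limited to overnight hours:
```yaml
automation:
  - alias: "Enable Frigate recording overnight"
    trigger:
      - platform: time
        at: "22:00:00"
    action:
      - service: switch.turn_on
        target:
          entity_id: switch.front_door_recordings # hypothetical entity name
  - alias: "Disable Frigate recording in the morning"
    trigger:
      - platform: time
        at: "07:00:00"
    action:
      - service: switch.turn_off
        target:
          entity_id: switch.front_door_recordings # hypothetical entity name
```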
@ -163,7 +154,7 @@ Footage can be exported from Frigate by right-clicking (desktop) or long pressin
### Time-lapse export
Time lapse exporting is available only via the [HTTP API](../integrations/api.md#post-apiexportcamerastartstart-timestampendend-timestamp).
Time lapse exporting is available only via the [HTTP API](../integrations/api/export-recording-export-camera-name-start-start-time-end-end-time-post.api.mdx).
When exporting a time-lapse the default speed-up is 25x with 30 FPS. This means that every 25 seconds of (real-time) recording is condensed into 1 second of time-lapse video (always without audio) with a smoothness of 30 FPS.

View File

@ -52,7 +52,7 @@ detectors:
# Required: name of the detector
detector_name:
# Required: type of the detector
# Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below)
# Frigate provides many types, see https://docs.frigate.video/configuration/object_detectors for more details (default: shown below)
# Additional detector types can also be plugged in.
# Detectors may require additional configuration.
# Refer to the Detectors configuration page for more information.
@ -117,27 +117,39 @@ auth:
hash_iterations: 600000
# Optional: model modifications
# NOTE: The default values are for the EdgeTPU detector.
# Other detectors will require the model config to be set.
model:
# Optional: path to the model (default: automatic based on detector)
# Required: path to the model (default: automatic based on detector)
path: /edgetpu_model.tflite
# Optional: path to the labelmap (default: shown below)
# Required: path to the labelmap (default: shown below)
labelmap_path: /labelmap.txt
# Required: Object detection model input width (default: shown below)
width: 320
# Required: Object detection model input height (default: shown below)
height: 320
# Optional: Object detection model input colorspace
# Required: Object detection model input colorspace
# Valid values are rgb, bgr, or yuv. (default: shown below)
input_pixel_format: rgb
# Optional: Object detection model input tensor format
# Required: Object detection model input tensor format
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Optional: Object detection model type, currently only used with the OpenVINO detector
# Required: Object detection model type, currently only used with the OpenVINO detector
# Valid values are ssd, yolox, yolonas (default: shown below)
model_type: ssd
# Optional: Label name modifications. These are merged into the standard labelmap.
# Required: Label name modifications. These are merged into the standard labelmap.
labelmap:
2: vehicle
# Optional: Map of object labels to their attribute labels (default: depends on model)
attributes_map:
person:
- amazon
- face
car:
- amazon
- fedex
- license_plate
- ups
# Optional: Audio Events Configuration
# NOTE: Can be overridden at the camera level
@ -210,6 +222,10 @@ birdseye:
# Optional: ffmpeg configuration
# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets
ffmpeg:
# Optional: ffmpeg binary path (default: shown below)
# can also be set to `7.0` or `5.0` to specify one of the included versions
# or can be set to any path that holds `bin/ffmpeg` & `bin/ffprobe`
path: "default"
# Optional: global ffmpeg args (default: shown below)
global_args: -hide_banner -loglevel warning -threads 2
# Optional: global hwaccel args (default: auto detect)
@ -271,13 +287,13 @@ detect:
# especially when using separate streams for detect and record.
# Use this setting to make the timeline bounding boxes more closely align
# with the recording. The value can be positive or negative.
# TIP: Imagine there is an event clip with a person walking from left to right.
# If the event timeline bounding box is consistently to the left of the person
# TIP: Imagine there is a tracked object clip with a person walking from left to right.
# If the tracked object lifecycle bounding box is consistently to the left of the person
# then the value should be decreased. Similarly, if a person is walking from
# left to right and the bounding box is consistently ahead of the person
# then the value should be increased.
# TIP: This offset is dynamic so you can change the value and it will update existing
# events, this makes it easy to tune.
# tracked objects, this makes it easy to tune.
# WARNING: Fast moving objects will likely not have the bounding box align.
annotation_offset: 0
@ -383,6 +399,14 @@ motion:
# Optional: Delay when updating camera motion through MQTT from ON -> OFF (default: shown below).
mqtt_off_delay: 30
# Optional: Notification Configuration
notifications:
# Optional: Enable notification service (default: shown below)
enabled: False
# Optional: Email for push service to reach out to
# NOTE: This is required to use notifications
email: "admin@example.com"
# Optional: Record configuration
# NOTE: Can be overridden at the camera level
record:
@ -397,9 +421,9 @@ record:
sync_recordings: False
# Optional: Retention settings for recording
retain:
# Optional: Number of days to retain recordings regardless of events (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in events section below
# if you only want to retain recordings of events.
# Optional: Number of days to retain recordings regardless of tracked objects (default: shown below)
# NOTE: This should be set to 0 and retention should be defined in alerts and detections section below
# if you only want to retain recordings of alerts and detections.
days: 0
# Optional: Mode for retention. Available options are: all, motion, and active_objects
# all - save all recording segments regardless of activity
@ -422,34 +446,48 @@ record:
# Optional: Quality of recording preview (default: shown below).
# Options are: very_low, low, medium, high, very_high
quality: medium
# Optional: Event recording settings
events:
# Optional: Number of seconds before the event to include (default: shown below)
# Optional: alert recording settings
alerts:
# Optional: Number of seconds before the alert to include (default: shown below)
pre_capture: 5
# Optional: Number of seconds after the event to include (default: shown below)
# Optional: Number of seconds after the alert to include (default: shown below)
post_capture: 5
# Optional: Objects to save recordings for. (default: all tracked objects)
objects:
- person
# Optional: Retention settings for recordings of events
# Optional: Retention settings for recordings of alerts
retain:
# Required: Default retention days (default: shown below)
default: 10
# Required: Retention days (default: shown below)
days: 14
# Optional: Mode for retention. (default: shown below)
# all - save all recording segments for events regardless of activity
# motion - save all recordings segments for events with any detected motion
# active_objects - save all recording segments for event with active/moving objects
# all - save all recording segments for alerts regardless of activity
# motion - save all recordings segments for alerts with any detected motion
# active_objects - save all recording segments for alerts with active/moving objects
#
# NOTE: If the retain mode for the camera is more restrictive than the mode configured
# here, the segments will already be gone by the time this mode is applied.
# For example, if the camera retain mode is "motion", the segments without motion are
# never stored, so setting the mode to "all" here won't bring them back.
mode: motion
# Optional: detection recording settings
detections:
# Optional: Number of seconds before the detection to include (default: shown below)
pre_capture: 5
# Optional: Number of seconds after the detection to include (default: shown below)
post_capture: 5
# Optional: Retention settings for recordings of detections
retain:
# Required: Retention days (default: shown below)
days: 14
# Optional: Mode for retention. (default: shown below)
# all - save all recording segments for detections regardless of activity
# motion - save all recordings segments for detections with any detected motion
# active_objects - save all recording segments for detections with active/moving objects
#
# NOTE: If the retain mode for the camera is more restrictive than the mode configured
# here, the segments will already be gone by the time this mode is applied.
# For example, if the camera retain mode is "motion", the segments without motion are
# never stored, so setting the mode to "all" here won't bring them back.
mode: motion
# Optional: Per object retention days
objects:
person: 15
# Optional: Configuration for the jpg snapshots written to the clips directory for each event
# Optional: Configuration for the jpg snapshots written to the clips directory for each tracked object
# NOTE: Can be overridden at the camera level
snapshots:
# Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below)
@ -476,8 +514,42 @@ snapshots:
# Optional: quality of the encoded jpeg, 0-100 (default: shown below)
quality: 70
# Optional: Configuration for semantic search capability
semantic_search:
# Optional: Enable semantic search (default: shown below)
enabled: False
# Optional: Re-index embeddings database from historical tracked objects (default: shown below)
reindex: False
# Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Configuration for AI generated tracked object descriptions
# NOTE: Semantic Search must be enabled for this to do anything.
# WARNING: Depending on the provider, this will send thumbnails over the internet
# to Google or OpenAI's LLMs to generate descriptions. It can be overridden at
# the camera level (enabled: False) to enhance privacy for indoor cameras.
genai:
# Optional: Enable AI description generation (default: shown below)
enabled: False
# Required if enabled: Provider must be one of ollama, gemini, or openai
provider: ollama
# Required if provider is ollama. May also be used for an OpenAI API compatible backend with the openai provider.
base_url: http://localhost:11434
# Required if gemini or openai
api_key: "{FRIGATE_GENAI_API_KEY}"
# Optional: The default prompt for generating descriptions. Can use replacement
# variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below)
prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background."
# Optional: Object specific prompts to customize description results
# Format: {label}: {prompt}
object_prompts:
person: "My special person prompt."
# Optional: Restream configuration
# Uses https://github.com/AlexxIT/go2rtc (v1.9.2)
# NOTE: The default go2rtc API port (1984) must be used,
# changing this port for the integrated go2rtc instance is not supported.
go2rtc:
# Optional: Live stream configuration for WebUI.
@ -618,6 +690,7 @@ cameras:
# to enable PTZ controls.
onvif:
# Required: host of the camera being connected to.
# NOTE: HTTP is assumed by default; HTTPS is supported if you specify the scheme, ex: "https://0.0.0.0".
host: 0.0.0.0
# Optional: ONVIF port for device (default: shown below).
port: 8000
@ -626,6 +699,8 @@ cameras:
user: admin
# Optional: password for login.
password: admin
# Optional: Skip TLS verification from the ONVIF server (default: shown below)
tls_insecure: False
# Optional: Ignores time synchronization mismatches between the camera and the server during authentication.
# Using NTP on both ends is recommended and this should only be set to True in a "safe" environment due to the security risk it represents.
ignore_time_mismatch: False
@ -670,6 +745,28 @@ cameras:
# By default the cameras are sorted alphabetically.
order: 0
# Optional: Configuration for AI generated tracked object descriptions
genai:
# Optional: Enable AI description generation (default: shown below)
enabled: False
# Optional: Use the object snapshot instead of thumbnails for description generation (default: shown below)
use_snapshot: False
# Optional: The default prompt for generating descriptions. Can use replacement
# variables like "label", "sub_label", "camera" to make more dynamic. (default: shown below)
prompt: "Describe the {label} in the sequence of images with as much detail as possible. Do not describe the background."
# Optional: Object specific prompts to customize description results
# Format: {label}: {prompt}
object_prompts:
person: "My special person prompt."
# Optional: objects to generate descriptions for (default: all objects that are tracked)
objects:
- person
- cat
# Optional: Restrict generation to objects that entered any of the listed zones (default: none, all zones qualify)
required_zones: []
# Optional: Save thumbnails sent to generative AI for review/debugging purposes (default: shown below)
debug_save_thumbnails: False
# Optional
ui:
# Optional: Set a timezone to use in the UI (default: use browser local time)

View File

@ -7,7 +7,7 @@ title: Restream
Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#configuration) for more advanced configurations and features.
Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.9.2) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration) for more advanced configurations and features.
:::note
@ -21,7 +21,7 @@ Birdseye RTSP restream can be accessed at `rtsp://<frigate_host>:8554/birdseye`.
```yaml
birdseye:
restream: true
restream: True
```
### Securing Restream With Authentication
@ -132,9 +132,31 @@ cameras:
- detect
```
## Handling Complex Passwords
go2rtc expects URL-encoded passwords in the config, [urlencoder.org](https://urlencoder.org) can be used for this purpose.
For example:
```yaml
go2rtc:
streams:
my_camera: rtsp://username:$@foo%@192.168.1.100
```
becomes
```yaml
go2rtc:
streams:
my_camera: rtsp://username:$%40foo%25@192.168.1.100
```
See [this comment](https://github.com/AlexxIT/go2rtc/issues/1217#issuecomment-2242296489) for more information.
## Advanced Restream Configurations
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below:
NOTE: The output will need to be passed with two curly braces `{{output}}`

View File

@ -7,13 +7,13 @@ The Review page of the Frigate UI is for quickly reviewing historical footage of
Review items are filterable by date, object type, and camera.
### Review items vs. events
### Review items vs. tracked objects (formerly "events")
In Frigate 0.13 and earlier versions, the UI presented "events". An event was synonymous with a tracked or detected object. In Frigate 0.14 and later, a review item is a time period where any number of tracked objects were active.
For example, consider a situation where two people walked past your house. One was walking a dog. At the same time, a car drove by on the street behind them.
In this scenario, Frigate 0.13 and earlier would show 4 events in the UI - one for each person, another for the dog, and yet another for the car. You would have had 4 separate videos to watch even though they would have all overlapped.
In this scenario, Frigate 0.13 and earlier would show 4 "events" in the UI - one for each person, another for the dog, and yet another for the car. You would have had 4 separate videos to watch even though they would have all overlapped.
In 0.14 and later, all of that is bundled into a single review item which starts and ends to capture all of that activity. Reviews for a single camera cannot overlap. Once you have watched that time period on that camera, it is marked as reviewed.

View File

@ -0,0 +1,92 @@
---
id: semantic_search
title: Using Semantic Search
---
Semantic Search in Frigate allows you to find tracked objects within your review items using either the image itself, a user-defined text description, or an automatically generated one. This feature works by creating _embeddings_ — numerical vector representations — for both the images and text descriptions of your tracked objects. By comparing these embeddings, Frigate assesses their similarities to deliver relevant search results.
Frigate uses [Jina AI's CLIP model](https://huggingface.co/jinaai/jina-clip-v1) to create and save embeddings to Frigate's database. All of this runs locally.
Semantic Search is accessed via the _Explore_ view in the Frigate UI.
## Minimum System Requirements
Semantic Search works by running a large AI model locally on your system. Small or underpowered systems like a Raspberry Pi will not run Semantic Search reliably or at all.
A minimum of 8GB of RAM is required to use Semantic Search. A GPU is not strictly required but will provide a significant performance increase over CPU-only systems.
For best performance, 16GB or more of RAM and a dedicated GPU are recommended.
## Configuration
Semantic Search is disabled by default, and must be enabled in your config file or in the UI's Settings page before it can be used. Semantic Search is a global configuration setting.
```yaml
semantic_search:
enabled: True
reindex: False
```
:::tip
The embeddings database can be re-indexed from the existing tracked objects in your database by adding `reindex: True` to your `semantic_search` configuration or by toggling the switch on the Search Settings page in the UI and restarting Frigate. Depending on the number of tracked objects you have, it can take a long while to complete and may max out your CPU while indexing. Make sure to turn the UI's switch off or set the config back to `False` before restarting Frigate again.
If you are enabling Semantic Search for the first time, be advised that Frigate does not automatically index older tracked objects. You will need to enable the `reindex` feature in order to do that.
:::
### Jina AI CLIP
The vision model is able to embed both images and text into the same vector space, which allows `image -> image` and `text -> image` similarity searches. Frigate uses this model on tracked objects to encode the thumbnail image and store it in the database. When searching for tracked objects via text in the search box, Frigate will perform a `text -> image` similarity search against this embedding. When clicking "Find Similar" in the tracked object detail pane, Frigate will perform an `image -> image` similarity search to retrieve the closest matching thumbnails.
The text model is used to embed tracked object descriptions and perform searches against them. Descriptions can be created, viewed, and modified on the Explore page when clicking on thumbnail of a tracked object. See [the Generative AI docs](/configuration/genai.md) for more information on how to automatically generate tracked object descriptions.
Differently weighted versions of the Jina model are available and can be selected by setting the `model_size` config option as `small` or `large`:
```yaml
semantic_search:
enabled: True
model_size: small
```
- Configuring the `large` model employs the full Jina model and will automatically run on the GPU if applicable.
- Configuring the `small` model employs a quantized version of the Jina model that uses less RAM and runs on CPU with a very negligible difference in embedding quality.
### GPU Acceleration
The CLIP models are downloaded in ONNX format, and the `large` model can be accelerated using GPU hardware, when available. This depends on the Docker build that is used.
```yaml
semantic_search:
enabled: True
model_size: large
```
:::info
If the correct build is used for your GPU and the `large` model is configured, then the GPU will be detected and used automatically.
**NOTE:** Object detection and Semantic Search are independent features. If you want to use your GPU with Semantic Search, you must choose the appropriate Frigate Docker image for your GPU.
- **AMD**
- ROCm will automatically be detected and used for Semantic Search in the `-rocm` Frigate image.
- **Intel**
- OpenVINO will automatically be detected and used for Semantic Search in the default Frigate image.
- **Nvidia**
- Nvidia GPUs will automatically be detected and used for Semantic Search in the `-tensorrt` Frigate image.
- Jetson devices will automatically be detected and used for Semantic Search in the `-tensorrt-jp(4/5)` Frigate image.
:::
## Usage and Best Practices
1. Semantic Search is used in conjunction with the other filters available on the Explore page. Use a combination of traditional filtering and Semantic Search for the best results.
2. Use the thumbnail search type when searching for particular objects in the scene. Use the description search type when attempting to discern the intent of your object.
3. Because of how the AI models Frigate uses have been trained, the comparison between text and image embedding distances generally means that with multi-modal (`thumbnail` and `description`) searches, results matching `description` will appear first, even if a `thumbnail` embedding may be a better match. Play with the "Search Type" setting to help find what you are looking for. Note that if you are generating descriptions for specific objects or zones only, this may cause search results to prioritize the objects with descriptions even if the ones without them are more relevant.
4. Make your search language and tone closely match exactly what you're looking for. If you are using thumbnail search, **phrase your query as an image caption**. Searching for "red car" may not work as well as "red sedan driving down a residential street on a sunny day".
5. Semantic search on thumbnails tends to return better results when matching large subjects that take up most of the frame. Small things like "cat" tend to not work well.
6. Experiment! Find a tracked object you want to test and start typing keywords and phrases to see what works for you.

View File

@ -3,7 +3,7 @@ id: snapshots
title: Snapshots
---
Frigate can save a snapshot image to `/media/frigate/clips` for each object that is detected named as `<camera>-<id>.jpg`. They are also accessible [via the api](../integrations/api.md#get-apieventsidsnapshotjpg)
Frigate can save a snapshot image to `/media/frigate/clips` for each object that is detected named as `<camera>-<id>.jpg`. They are also accessible [via the api](../integrations/api/event-snapshot-events-event-id-snapshot-jpg-get.api.mdx)
For users with Frigate+ enabled, snapshots are accessible in the UI in the Frigate+ pane to allow for quick submission to the Frigate+ service.
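A hedged example of enabling snapshots with a retention period (values are placeholders; the full option list is in the configuration reference):
```yaml
snapshots:
  enabled: True
  bounding_box: True
  retain:
    default: 10 # days to keep snapshots
```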

View File

@ -64,7 +64,7 @@ cameras:
### Restricting zones to specific objects
Sometimes you want to limit a zone to specific object types to have more granular control of when events/snapshots are saved. The following example will limit one zone to person objects and the other to cars.
Sometimes you want to limit a zone to specific object types to have more granular control of when alerts, detections, and snapshots are saved. The following example will limit one zone to person objects and the other to cars.
```yaml
cameras:
@ -80,7 +80,7 @@ cameras:
- car
```
Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. You will get events for person objects that enter anywhere in the yard, and events for cars only if they enter the street.
Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. Objects will be tracked for any `person` that enters anywhere in the yard, and for cars only if they enter the street.
### Zone Loitering

View File

@ -193,7 +193,7 @@ npm run test
#### 1. Installation
```console
npm install
cd docs && npm install
```
#### 2. Local Development

View File

@ -28,7 +28,7 @@ For the Dahua/Loryta 5442 camera, I use the following settings:
- Encode Mode: H.264
- Resolution: 2688\*1520
- Frame Rate(FPS): 15
- I Frame Interval: 30
- I Frame Interval: 30 (15 can also be used to prioritize streaming performance - see the [camera settings recommendations](../configuration/live) for more info)
**Sub Stream (Detection)**

View File

@ -16,10 +16,6 @@ A box returned from the object detection model that outlines an object in the fr
- A gray thin line indicates that the object is detected as being stationary
- A thick line indicates that the object is the subject of autotracking (when enabled).
## Event
The time period starting when a tracked object entered the frame and ending when it left the frame, including any time that the object remained still. Events are saved when it is considered a [true positive](#threshold) and meets the requirements for a snapshot or recording to be saved.
## False Positive
An incorrect detection of an object type. For example a dog being detected as a person, a chair being detected as a dog, etc. A person being detected in an area you want to ignore is not a false positive.
@ -64,6 +60,10 @@ The threshold is the median score that an object must reach in order to be consi
The top score for an object is the highest median score for an object.
## Tracked Object ("event" in previous versions)
The time period starting when a tracked object entered the frame and ending when it left the frame, including any time that the object remained still. A tracked object is saved when it is considered a [true positive](#threshold) and meets the requirements for a snapshot or recording to be saved.
## Zone
Zones are areas of interest; zones can be used for notifications and for limiting the areas where Frigate will create an [event](#event). [See the zone docs for more info](/configuration/zones)

View File

@ -51,24 +51,25 @@ The OpenVINO detector type is able to run on:
More information is available [in the detector docs](/configuration/object_detectors#openvino-detector)
Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known examples are below:
Inference speeds vary greatly depending on the CPU or GPU used, some known examples of GPU inference times are below:
| Name | Inference Speed | Notes |
| -------------------- | --------------- | --------------------------------------------------------------------- |
| Intel NCS2 VPU | 60 - 65 ms | May vary based on host device |
| Intel Celeron J4105 | ~ 25 ms | Inference speeds on CPU were 150 - 200 ms |
| Intel Celeron N3060 | 130 - 150 ms | Inference speeds on CPU were ~ 550 ms |
| Intel Celeron N3205U | ~ 120 ms | Inference speeds on CPU were ~ 380 ms |
| Intel Celeron N4020 | 50 - 200 ms | Inference speeds on CPU were ~ 800 ms, greatly depends on other loads |
| Intel i3 6100T | 15 - 35 ms | Inference speeds on CPU were 60 - 120 ms |
| Intel i3 8100 | ~ 15 ms | Inference speeds on CPU were ~ 65 ms |
| Intel i5 4590 | ~ 20 ms | Inference speeds on CPU were ~ 230 ms |
| Intel i5 6500 | ~ 15 ms | Inference speeds on CPU were ~ 150 ms |
| Intel i5 7200u | 15 - 25 ms | Inference speeds on CPU were ~ 150 ms |
| Intel i5 7500 | ~ 15 ms | Inference speeds on CPU were ~ 260 ms |
| Intel i5 1135G7 | 10 - 15 ms | |
| Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms |
| Intel Arc A750 | ~ 4 ms | |
| Name | MobileNetV2 Inference Time | YOLO-NAS Inference Time | Notes |
| -------------------- | -------------------------- | ------------------------- | -------------------------------------- |
| Intel Celeron J4105 | ~ 25 ms | | Can only run one detector instance |
| Intel Celeron N3060 | 130 - 150 ms | | Can only run one detector instance |
| Intel Celeron N3205U | ~ 120 ms | | Can only run one detector instance |
| Intel Celeron N4020 | 50 - 200 ms | | Inference speed depends on other loads |
| Intel i3 6100T | 15 - 35 ms | | Can only run one detector instance |
| Intel i3 8100 | ~ 15 ms | | |
| Intel i5 4590 | ~ 20 ms | | |
| Intel i5 6500 | ~ 15 ms | | |
| Intel i5 7200u | 15 - 25 ms | | |
| Intel i5 7500 | ~ 15 ms | | |
| Intel i5 1135G7 | 10 - 15 ms | | |
| Intel i3 12000 | | 320: ~ 19 ms 640: ~ 54 ms | |
| Intel i5 12600K | ~ 15 ms | 320: ~ 20 ms 640: ~ 46 ms | |
| Intel Arc A380 | ~ 6 ms | 320: ~ 10 ms | |
| Intel Arc A750 | ~ 4 ms | 320: ~ 8 ms | |
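For reference, a minimal OpenVINO detector sketch (the `GPU` device value is an assumption for a typical Intel iGPU; model options are covered in the detector docs):
```yaml
detectors:
  ov:
    type: openvino
    device: GPU # assumed device for an Intel iGPU
```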
### TensorRT - Nvidia GPU
@ -77,25 +78,35 @@ The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which
Inference speeds will vary greatly depending on the GPU and the model used.
`tiny` variants are faster than the equivalent non-tiny model, some known examples are below:
| Name | Inference Speed |
| --------------- | --------------- |
| GTX 1060 6GB | ~ 7 ms |
| GTX 1070 | ~ 6 ms |
| GTX 1660 SUPER | ~ 4 ms |
| RTX 3050 | 5 - 7 ms |
| RTX 3070 Mobile | ~ 5 ms |
| Quadro P400 2GB | 20 - 25 ms |
| Quadro P2000 | ~ 12 ms |
| Name | YoloV7 Inference Time | YOLO-NAS Inference Time |
| --------------- | --------------------- | ------------------------- |
| GTX 1060 6GB | ~ 7 ms | |
| GTX 1070 | ~ 6 ms | |
| GTX 1660 SUPER | ~ 4 ms | |
| RTX 3050 | 5 - 7 ms | 320: ~ 10 ms 640: ~ 16 ms |
| RTX 3070 Mobile | ~ 5 ms | |
| Quadro P400 2GB | 20 - 25 ms | |
| Quadro P2000 | ~ 12 ms | |
### Community Supported:
### AMD GPUs
#### Nvidia Jetson
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many discrete AMD GPUs.
### Hailo-8l PCIe
Frigate supports the Hailo-8l M.2 card on any hardware but currently it is only tested on the Raspberry Pi5 PCIe hat from the AI kit.
The inference time for the Hailo-8L chip at time of writing is around 17-21 ms for the SSD MobileNet Version 1 model.
## Community Supported Detectors
### Nvidia Jetson
Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector).
Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
#### Rockchip platform
### Rockchip platform
Frigate supports hardware video processing on all Rockchip boards. However, hardware object detection is only supported on these boards:

View File

@ -73,23 +73,23 @@ Users of the Snapcraft build of Docker cannot use storage locations outside your
Frigate utilizes shared memory to store frames during processing. The default `shm-size` provided by Docker is **64MB**.
The default shm size of **64MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose).
The default shm size of **128MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose).
The Frigate container also stores logs in shm, which can take up to **30MB**, so make sure to take this into account in your math as well.
The Frigate container also stores logs in shm, which can take up to **40MB**, so make sure to take this into account in your math as well.
You can calculate the necessary shm size for each camera with the following formula using the resolution specified for detect:
You can calculate the **minimum** shm size for each camera with the following formula using the resolution specified for detect:
```console
# Replace <width> and <height>
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 9 + 270480) / 1048576))'
$ python -c 'print("{:.2f}MB".format((<width> * <height> * 1.5 * 20 + 270480) / 1048576))'
# Example for 1280x720
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 9 + 270480) / 1048576))'
12.12MB
# Example for 1280x720, including logs
$ python -c 'print("{:.2f}MB".format((1280 * 720 * 1.5 * 20 + 270480) / 1048576 + 40))'
66.63MB
# Example for eight cameras detecting at 1280x720, including logs
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 9 + 270480) / 1048576) * 8 + 30))'
126.99MB
$ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 20 + 270480) / 1048576) * 8 + 40))'
253MB
```
The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.
@ -100,6 +100,38 @@ By default, the Raspberry Pi limits the amount of memory available to the GPU. I
Additionally, the USB Coral draws a considerable amount of power. If using any other USB devices such as an SSD, you will experience instability due to the Pi not providing enough power to USB devices. You will need to purchase an external USB hub with its own power supply. Some have reported success with <a href="https://amzn.to/3a2mH0P" target="_blank" rel="nofollow noopener sponsored">this</a> (affiliate link).
### Hailo-8L
The Hailo-8L is an M.2 card typically connected to a carrier board for PCIe, which then connects to the Raspberry Pi 5 as part of the AI Kit. However, it can also be used on other boards equipped with an M.2 M key edge connector.
#### Installation
For Raspberry Pi 5 users with the AI Kit, installation is straightforward. Simply follow this [guide](https://www.raspberrypi.com/documentation/accessories/ai-kit.html#ai-kit-installation) to install the driver and software.
For other installations, follow these steps for installation:
1. Install the driver from the [Hailo GitHub repository](https://github.com/hailo-ai/hailort-drivers). A convenient script for Linux is available to clone the repository, build the driver, and install it.
2. Copy or download [this script](https://github.com/blakeblackshear/frigate/blob/dev/docker/hailo8l/user_installation.sh).
3. Ensure it has execution permissions with `sudo chmod +x user_installation.sh`
4. Run the script with `./user_installation.sh`
#### Setup
To set up Frigate, follow the default installation instructions, but use a Docker image with the `-h8l` suffix, for example: `ghcr.io/blakeblackshear/frigate:stable-h8l`
Next, grant Docker permissions to access your hardware by adding the following lines to your `docker-compose.yml` file:
```yaml
devices:
- /dev/hailo0
```
If you are using `docker run`, add this option to your command `--device /dev/hailo0`
#### Configuration
Finally, configure [hardware object detection](/configuration/object_detectors#hailo-8l) to complete the setup.
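As a rough sketch (the detector type and device value are assumptions based on the detector docs; model options are omitted):
```yaml
detectors:
  hailo8l:
    type: hailo8l
    device: PCIe # assumed value for the M.2/PCIe card
```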
### Rockchip platform
Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and necessary drivers (especially rkvdec2 and rknpu). To check, enter the following commands:
@ -161,8 +193,9 @@ services:
container_name: frigate
privileged: true # this may not be necessary for all setups
restart: unless-stopped
stop_grace_period: 30s # allow enough time to shut down the various services
image: ghcr.io/blakeblackshear/frigate:stable
shm_size: "64mb" # update for your cameras based on calculation above
shm_size: "512mb" # update for your cameras based on calculation above
devices:
- /dev/bus/usb:/dev/bus/usb # Passes the USB Coral, needs to be modified for other versions
- /dev/apex_0:/dev/apex_0 # Passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
@ -192,6 +225,7 @@ If you can't use docker compose, you can run the container with something simila
docker run -d \
--name frigate \
--restart=unless-stopped \
--stop-timeout 30 \
--mount type=tmpfs,target=/tmp/cache,tmpfs-size=1000000000 \
--device /dev/bus/usb:/dev/bus/usb \
--device /dev/dri/renderD128 \
@ -218,10 +252,8 @@ The community supported docker image tags for the current stable version are:
- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
- `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6
- `stable-rk` - Frigate build for SBCs with Rockchip SoC
- `stable-rocm` - Frigate build for [AMD GPUs and iGPUs](../configuration/object_detectors.md#amdrocm-gpu-detector), all drivers
- `stable-rocm-gfx900` - AMD gfx900 driver only
- `stable-rocm-gfx1030` - AMD gfx1030 driver only
- `stable-rocm-gfx1100` - AMD gfx1100 driver only
- `stable-rocm` - Frigate build for [AMD GPUs](../configuration/object_detectors.md#amdrocm-gpu-detector)
- `stable-h8l` - Frigate build for the Hailo-8L M.2 PICe Raspberry Pi 5 hat
## Home Assistant Addon
@ -273,8 +305,15 @@ To install make sure you have the [community app plugin here](https://forums.unr
## Proxmox
It is recommended to run Frigate in LXC, rather than in a VM, for maximum performance. The setup can be complex so be prepared to read the Proxmox and LXC documentation. Suggestions include:
[According to Proxmox documentation](https://pve.proxmox.com/pve-docs/pve-admin-guide.html#chapter_pct) it is recommended that you run application containers like Frigate inside a Proxmox QEMU VM. This will give you all the advantages of application containerization, while also providing the benefits that VMs offer, such as strong isolation from the host and the ability to live-migrate, which otherwise isn't possible with containers.
:::warning
If you choose to run Frigate via LXC in Proxmox the setup can be complex so be prepared to read the Proxmox and LXC documentation, Frigate does not officially support running inside of an LXC.
:::
Suggestions include:
- For Intel-based hardware acceleration, to allow access to the `/dev/dri/renderD128` device with major number 226 and minor number 128, add the following lines to the `/etc/pve/lxc/<id>.conf` LXC configuration:
- `lxc.cgroup2.devices.allow: c 226:128 rwm`
- `lxc.mount.entry: /dev/dri/renderD128 dev/dri/renderD128 none bind,optional,create=file`

View File

@ -7,13 +7,13 @@ title: Configuring go2rtc
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:
- WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream
- WebRTC or MSE for live viewing with audio, higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream and does not support audio
- Live stream support for cameras in Home Assistant Integration
- RTSP relay for use with other consumers to reduce the number of connections to your camera streams
# Setup a go2rtc stream
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#module-streams), not just rtsp.
First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#module-streams), not just rtsp.
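A minimal sketch (the stream name and RTSP URL are placeholders for your camera):
```yaml
go2rtc:
  streams:
    back: # placeholder stream name
      - rtsp://user:password@10.0.10.10:554/stream # placeholder URL
```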
:::tip
@ -47,8 +47,8 @@ After adding this to the config, restart Frigate and try to watch the live strea
- Check Video Codec:
- If the camera stream works in go2rtc but not in your browser, the video codec might be unsupported.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
- If using H265, switch to H264. Refer to [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#codecs-madness) in go2rtc documentation.
- If unable to switch from H265 to H264, or if the stream format is different (e.g., MJPEG), re-encode the video using [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.9.2#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view.
```yaml
go2rtc:
streams:

View File

@ -115,6 +115,7 @@ services:
frigate:
container_name: frigate
restart: unless-stopped
stop_grace_period: 30s
image: ghcr.io/blakeblackshear/frigate:stable
volumes:
- ./config:/config
@ -238,7 +239,7 @@ Now that you know where you need to mask, use the "Mask & Zone creator" in the o
:::warning
Note that motion masks should not be used to mark out areas where you do not want objects to be detected or to reduce false positives. They do not alter the image sent to object detection, so you can still get events and detections in areas with motion masks. These only prevent motion in these areas from initiating object detection.
Note that motion masks should not be used to mark out areas where you do not want objects to be detected or to reduce false positives. They do not alter the image sent to object detection, so you can still get tracked objects, alerts, and detections in areas with motion masks. These only prevent motion in these areas from initiating object detection.
:::
@ -298,11 +299,11 @@ If you don't have separate streams for detect and record, you would just add the
If you only define one stream in your `inputs` and do not assign a `detect` role to it, Frigate will automatically assign it the `detect` role. Frigate will always decode a stream to support motion detection, Birdseye, the API image endpoints, and other features, even if you have disabled object detection with `enabled: False` in your config's `detect` section.
If you plan to use Frigate for recording only, it is still recommended to define a `detect` role for a low resolution stream to minimize resource usage from the required stream decoding.
If you only plan to use Frigate for recording, it is still recommended to define a `detect` role for a low resolution stream to minimize resource usage from the required stream decoding.
:::
By default, Frigate will retain video of all events for 10 days. The full set of options for recording can be found [here](../configuration/reference.md).
By default, Frigate will retain video of all tracked objects for 10 days. The full set of options for recording can be found [here](../configuration/reference.md).
### Step 7: Complete config


@ -7,11 +7,11 @@ The best way to get started with notifications for Frigate is to use the [Bluepr
It is generally recommended to trigger notifications based on the `frigate/reviews` mqtt topic. This provides the event_id(s) needed to fetch [thumbnails/snapshots/clips](../integrations/home-assistant.md#notification-api) and other useful information to customize when and where you want to receive alerts. The data is published in the form of a change feed, which means you can reference the "previous state" of the object in the `before` section and the "current state" of the object in the `after` section. You can see an example [here](../integrations/mqtt.md#frigateevents).
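If you want to experiment with this payload outside of Home Assistant first, the following minimal Python sketch (assuming the `paho-mqtt` 2.x package and an unauthenticated broker at `localhost:1883`; adjust the host and credentials for your setup) subscribes to `frigate/reviews` and prints the severity and detection IDs from each change message:

```python
import json

import paho.mqtt.client as mqtt


def on_message(client, userdata, msg):
    # Each message is a change-feed entry with "before" and "after" states.
    payload = json.loads(msg.payload)
    after = payload["after"]
    print(
        f"{payload['type']}: {after['severity']} on {after['camera']} "
        f"-> detections {after['data']['detections']}"
    )


# paho-mqtt 2.x requires an explicit callback API version.
client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2)
client.on_message = on_message
client.connect("localhost", 1883)  # placeholder broker address
client.subscribe("frigate/reviews")
client.loop_forever()
```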
Here is a simple example of a notification automation of events which will update the existing notification for each change. This means the image you see in the notification will update as Frigate finds a "better" image.
Here is a simple example of a notification automation of tracked objects which will update the existing notification for each change. This means the image you see in the notification will update as Frigate finds a "better" image.
```yaml
automation:
- alias: Notify of events
- alias: Notify of tracked object
trigger:
platform: mqtt
topic: frigate/events


@ -1,562 +0,0 @@
---
id: api
title: HTTP API
---
A web server is available on port 5000 with the following endpoints.
## Management & Information
### `GET /api/config`
A json representation of your configuration
### `POST /api/restart`
Restarts Frigate process.
### `GET /api/stats`
Contains some granular debug info that can be used for sensors in Home Assistant.
Sample response:
```json
{
/* Per Camera Stats */
"cameras": {
"back": {
/***************
* Frames per second being consumed from your camera. If this is higher
* than it is supposed to be, you should set -r FPS in your input_args.
* camera_fps = process_fps + skipped_fps
***************/
"camera_fps": 5.0,
/***************
* Number of times detection is run per second. This can be higher than
* your camera FPS because Frigate often looks at the same frame multiple times
* or in multiple locations
***************/
"detection_fps": 1.5,
/***************
* PID for the ffmpeg process that consumes this camera
***************/
"capture_pid": 27,
/***************
* PID for the process that runs detection for this camera
***************/
"pid": 34,
/***************
* Frames per second being processed by Frigate.
***************/
"process_fps": 5.1,
/***************
 * Frames per second being skipped for processing by Frigate.
***************/
"skipped_fps": 0.0
}
},
/***************
* Sum of detection_fps across all cameras and detectors.
* This should be the sum of all detection_fps values from cameras.
***************/
"detection_fps": 5.0,
/* Detectors Stats */
"detectors": {
"coral": {
/***************
* Timestamp when object detection started. If this value stays non-zero and constant
* for a long time, that means the detection process is stuck.
***************/
"detection_start": 0.0,
/***************
* Time spent running object detection in milliseconds.
***************/
"inference_speed": 10.48,
/***************
* PID for the shared process that runs object detection on the Coral.
***************/
"pid": 25321
}
},
"service": {
/* Uptime in seconds */
"uptime": 10,
"version": "0.10.1-8883709",
"latest_version": "0.10.1",
/* Storage data in MB for important locations */
"storage": {
"/media/frigate/clips": {
"total": 1000,
"used": 700,
"free": 300,
"mnt_type": "ext4"
},
"/media/frigate/recordings": {
"total": 1000,
"used": 700,
"free": 300,
"mnt_type": "ext4"
},
"/tmp/cache": {
"total": 256,
"used": 100,
"free": 156,
"mnt_type": "tmpfs"
},
"/dev/shm": {
"total": 256,
"used": 100,
"free": 156,
"mnt_type": "tmpfs"
}
}
},
"cpu_usages": {
"pid": {
"cmdline": "ffmpeg...",
"cpu": "5.0",
"cpu_average": "3.0",
"mem": "0.5"
}
},
"gpu_usages": {
"gpu-type": {
"gpu": "17%",
"mem": "18%"
}
}
}
```
### `GET /api/version`
Version info
### `GET /api/ffprobe`
Get ffprobe output for camera feed paths.
| param | Type | Description |
| ------- | ------ | ---------------------------------- |
| `paths` | string | `,` separated list of camera paths |
### `GET /api/<camera_name>/ptz/info`
Get PTZ info for the camera.
## Camera Media
### `GET /api/<camera_name>`
An mjpeg stream for debugging. Keep in mind the mjpeg endpoint is for debugging only and will put additional load on the system when in use.
Accepts the following query string parameters:
| param | Type | Description |
| ----------- | ---- | ------------------------------------------------------------------ |
| `fps` | int | Frame rate |
| `h` | int | Height in pixels |
| `bbox` | int | Show bounding boxes for detected objects (0 or 1) |
| `timestamp` | int | Print the timestamp in the upper left (0 or 1) |
| `zones` | int | Draw the zones on the image (0 or 1) |
| `mask` | int | Overlay the mask on the image (0 or 1) |
| `motion` | int | Draw blue boxes for areas with detected motion (0 or 1) |
| `regions` | int | Draw green boxes for areas where object detection was run (0 or 1) |
You can access a higher resolution mjpeg stream by appending `h=height-in-pixels` to the endpoint. For example `/api/back?h=1080`. You can also increase the FPS by appending `fps=frame-rate` to the URL such as `/api/back?fps=10` or both with `?fps=10&h=1000`.
### `GET /api/<camera_name>/latest.jpg[?h=300]`
The most recent frame that Frigate has finished processing. It is a full resolution image by default.
Accepts the following query string parameters:
| param | Type | Description |
| ----------- | ---- | ------------------------------------------------------------------ |
| `h` | int | Height in pixels |
| `bbox` | int | Show bounding boxes for detected objects (0 or 1) |
| `timestamp` | int | Print the timestamp in the upper left (0 or 1) |
| `zones` | int | Draw the zones on the image (0 or 1) |
| `mask` | int | Overlay the mask on the image (0 or 1) |
| `motion` | int | Draw blue boxes for areas with detected motion (0 or 1) |
| `regions` | int | Draw green boxes for areas where object detection was run (0 or 1) |
| `quality` | int | Jpeg encoding quality (0-100). Defaults to 70. |
Example parameters:
- `h=300`: resizes the image to 300 pixels tall
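As an illustrative sketch (not part of the original endpoint description), the same request can be made from Python with the `requests` library, assuming Frigate is reachable at `http://frigate.local:5000` and a camera named `back`:

```python
import requests

# Fetch the latest processed frame from the hypothetical camera "back",
# resized to 480 pixels tall with bounding boxes and the timestamp drawn in.
params = {"h": 480, "bbox": 1, "timestamp": 1}
resp = requests.get(
    "http://frigate.local:5000/api/back/latest.jpg", params=params, timeout=10
)
resp.raise_for_status()

with open("back_latest.jpg", "wb") as f:
    f.write(resp.content)
```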
### `GET /api/<camera_name>/<label>/thumbnail.jpg`
Returns the thumbnail from the latest event for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type.
### `GET /api/<camera_name>/<label>/clip.mp4`
Returns the clip from the latest event for the given camera and label combo. Using `any` as the label will return the latest clip regardless of type.
### `GET /api/<camera_name>/<label>/snapshot.jpg`
Returns the snapshot image from the latest event for the given camera and label combo. Using `any` as the label will return the latest snapshot regardless of type.
### `GET /api/<camera_name>/grid.jpg`
Returns the latest camera image with the regions grid overlaid.
| param | Type | Description |
| ------------ | ----- | ------------------------------------------------------------------------------------------ |
| `color` | str | The color of the grid (red,green,blue,black,white). Defaults to "green". |
| `font_scale` | float | Font scale. Can be used to increase font size on high resolution cameras. Defaults to 0.5. |
### `GET /clips/<camera>-<id>.jpg`
JPG snapshot for the given camera and event id.
## Events
### `GET /api/events`
Events from the database. Accepts the following query string parameters:
| param | Type | Description |
| -------------------- | ----- | ----------------------------------------------------- |
| `before` | int | Epoch time |
| `after` | int | Epoch time |
| `cameras` | str | , separated list of cameras |
| `labels` | str | , separated list of labels |
| `zones` | str | , separated list of zones |
| `limit` | int | Limit the number of events returned |
| `has_snapshot` | int | Filter to events that have snapshots (0 or 1) |
| `has_clip` | int | Filter to events that have clips (0 or 1) |
| `include_thumbnails` | int | Include thumbnails in the response (0 or 1) |
| `in_progress` | int | Limit to events in progress (0 or 1) |
| `time_range` | str | Time range in format after,before (00:00,24:00) |
| `timezone` | str | Timezone to use for time range |
| `min_score` | float | Minimum score of the event |
| `max_score` | float | Maximum score of the event |
| `is_submitted` | int | Filter events that are submitted to Frigate+ (0 or 1) |
| `min_length` | float | Minimum length of the event |
| `max_length` | float | Maximum length of the event |
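As a hedged example (the host name and camera below are placeholders), querying this endpoint from Python with the `requests` library for recent `person` events on `front_cam` that have a clip:

```python
import requests

params = {
    "cameras": "front_cam",  # comma-separated list of cameras
    "labels": "person",      # comma-separated list of labels
    "has_clip": 1,           # only events that have clips
    "limit": 10,
}
resp = requests.get("http://frigate.local:5000/api/events", params=params, timeout=10)
resp.raise_for_status()

for event in resp.json():
    print(event["id"], event["camera"], event["label"])
```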
### `GET /api/events/summary`
Returns summary data for events in the database. Used by the Home Assistant integration.
### `GET /api/events/<id>`
Returns data for a single event.
### `DELETE /api/events/<id>`
Permanently deletes the event along with any clips/snapshots.
### `POST /api/events/<id>/retain`
Sets retain to true for the event id.
### `POST /api/events/<id>/plus`
Submits the snapshot of the event to Frigate+ for labeling.
| param | Type | Description |
| -------------------- | ---- | ---------------------------------- |
| `include_annotation` | int | Submit annotation to Frigate+ too. |
### `PUT /api/events/<id>/false_positive`
Submits the snapshot of the event to Frigate+ for labeling and adds the detection as a false positive.
### `DELETE /api/events/<id>/retain`
Sets retain to false for the event id (event may be deleted quickly after removing).
### `POST /api/events/<id>/sub_label`
Set a sub label for an event. For example to update `person` -> `person's name` if they were recognized with facial recognition.
Sub labels must be 100 characters or shorter.
```json
{
"subLabel": "some_string",
"subLabelScore": 0.79
}
```
### `GET /api/events/<id>/thumbnail.jpg`
Returns a thumbnail for the event id optimized for notifications. Works while the event is in progress and after completion. Passing `?format=android` will convert the thumbnail to 2:1 aspect ratio.
### `GET /api/events/<id>/clip.mp4`
Returns the clip for the event id. Works after the event has ended.
### `GET /api/events/<id>/snapshot-clean.png`
Returns the clean snapshot image for the event id. Only works if `snapshots` and `clean_copy` are enabled in the config.
| param | Type | Description |
| ---------- | ---- | ------------------ |
| `download` | bool | Download the image |
### `GET /api/events/<id>/snapshot.jpg`
Returns the snapshot image for the event id. Works while the event is in progress and after completion.
Accepts the following query string parameters, but they are only applied when an event is in progress. After the event is completed, the saved snapshot is returned from disk without modification:
| param | Type | Description |
| ----------- | ---- | ------------------------------------------------- |
| `h` | int | Height in pixels |
| `bbox` | int | Show bounding boxes for detected objects (0 or 1) |
| `timestamp` | int | Print the timestamp in the upper left (0 or 1) |
| `crop` | int | Crop the snapshot (0 or 1) |
| `quality` | int | Jpeg encoding quality (0-100). Defaults to 70. |
| `download` | bool | Download the image |
### `POST /api/events/<camera_name>/<label>/create`
Create a manual event with a given `label` (e.g., a doorbell press) to capture something other than a detected object.
:::warning
Recording retention config still applies to manual events. If Frigate is configured with `mode: motion`, the manual event will only keep recording segments where motion occurred.
:::
**Optional Body:**
```json
{
"sub_label": "some_string", // add sub label to event
"duration": 30, // predetermined length of event (default: 30 seconds) or can be to null for indeterminate length event
"include_recording": true, // whether the event should save recordings along with the snapshot that is taken
"draw": {
// optional annotations that will be drawn on the snapshot
"boxes": [
{
"box": [0.5, 0.5, 0.25, 0.25], // box consists of x, y, width, height which are on a scale between 0 - 1
"color": [255, 0, 0], // color of the box, default is red
"score": 100 // optional score associated with the box
}
]
}
}
```
**Success Response:**
```json
{
"event_id": "1682970645.13116-1ug7ns",
"message": "Successfully created event.",
"success": true
}
```
### `PUT /api/events/<event_id>/end`
End a specific manual event without a predetermined length.
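As a sketch of how these two endpoints fit together (the host and camera name below are placeholders), a manual event with no predetermined length can be created and later ended from Python:

```python
import requests

BASE = "http://frigate.local:5000"  # placeholder Frigate address

# Create a manual event labeled "doorbell_press" on the hypothetical "doorbell" camera.
body = {"duration": None, "include_recording": True}  # null duration -> indeterminate length
resp = requests.post(f"{BASE}/api/events/doorbell/doorbell_press/create", json=body, timeout=10)
resp.raise_for_status()
event_id = resp.json()["event_id"]

# ... later, end the manual event explicitly since no duration was set.
requests.put(f"{BASE}/api/events/{event_id}/end", timeout=10).raise_for_status()
```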
### `GET /api/events/<id>/preview.gif`
Gif covering the first 20 seconds of a specific event.
## Previews
Previews are low res / fps videos that are quickly scrubbable and can be used for notifications or time-lapses.
### `GET /api/preview/<camera>/start/<start-timestamp>/end/<end-timestamp>`
Metadata about previews for this time range.
### `GET /api/preview/<year>-<month>/<day>/<hour>/<camera>/<timezone>`
Metadata about previews for this hour
### `GET /api/preview/<camera>/start/<start-timestamp>/end/<end-timestamp>/frames`
List of frames in the preview cache for the time range. Previews are only kept in the cache until they are combined into an mp4 at the end of the hour.
### `GET /api/preview/<file_name>/thumbnail.jpg`
Specific preview frame from preview cache.
### `GET /review/<review_id>/preview`
Looping image made from preview video / frames during this review item.
| param | Type | Description |
| --------- | ---- | -------------------------------- |
| `format` | str | Format of preview [`gif`, `mp4`] |
### `GET /<camera>/start/<start-timestamp>/end/<end-timestamp>/preview`
Looping image made from preview video / frames during this time range.
| param | Type | Description |
| --------- | ---- | -------------------------------- |
| `format` | str | Format of preview [`gif`, `mp4`] |
## Recordings
### `GET /vod/<year>-<month>/<day>/<hour>/<camera>/master.m3u8`
HTTP Live Streaming Video on Demand URL for the specified hour and camera. Can be viewed in an application like VLC.
### `GET /vod/event/<event-id>/index.m3u8`
HTTP Live Streaming Video on Demand URL for the specified event. Can be viewed in an application like VLC.
### `GET /vod/<camera>/start/<start-timestamp>/end/<end-timestamp>/index.m3u8`
HTTP Live Streaming Video on Demand URL for the camera with the specified time range. Can be viewed in an application like VLC.
### `GET /api/exports`
Fetch a list of all export recordings
Sample response:
```json
[
{
"camera": "doorbell",
"date": 12800057,
"id": "doorbell_pjis54",
"in_progress": false,
"name": "2024-10-04 fox visit",
"thumb_path": "/media/frigate/clips/export/doorbell_pjis54.webp",
"video_path": "/media/frigate/exports/doorbell_pjis54.mp4"
}
]
```
### `POST /api/export/<camera>/start/<start-timestamp>/end/<end-timestamp>`
Export recordings from `start-timestamp` to `end-timestamp` for `camera` as a single mp4 file. These recordings will be exported to the `/media/frigate/exports` folder.
It is also possible to export this recording as a time-lapse using the "playback" key in the json body, or specify a custom export filename, using the "name" key.
**Optional Body:**
```json
{
"playback": "realtime", // playback factor: realtime or timelapse_25x
"name": "custom export name" // override the default export filename with a custom name
}
```
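For example, a minimal Python sketch (the camera name, timestamps, and host below are placeholders) that requests a 25x timelapse export for a one-hour window:

```python
import requests

BASE = "http://frigate.local:5000"   # placeholder Frigate address
camera = "doorbell"                   # placeholder camera name
start, end = 1696399200, 1696402800  # placeholder epoch timestamps (one hour apart)

body = {"playback": "timelapse_25x", "name": "morning timelapse"}
resp = requests.post(f"{BASE}/api/export/{camera}/start/{start}/end/{end}", json=body, timeout=30)
resp.raise_for_status()
print(resp.json())
```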
### `DELETE /api/export/<export_name>`
Delete an export from disk.
### `PATCH /api/export/<export_name_current>/<export_name_new>`
Renames an export.
### `GET /api/<camera_name>/recordings/summary`
Hourly summary of recordings data for a camera.
### `GET /api/<camera_name>/recordings`
Get recording segment details for the given timestamp range.
| param | Type | Description |
| -------- | ---- | ------------------------------------- |
| `after` | int | Unix timestamp for beginning of range |
| `before` | int | Unix timestamp for end of range |
### `GET /api/<camera_name>/recordings/<frame_time>/snapshot.png`
Returns the snapshot image from the specific point in that cameras recordings.
## Reviews
### `GET /api/review`
Reviews from the database. Accepts the following query string parameters:
| param | Type | Description |
| ---------- | ---- | -------------------------------------------------------------- |
| `before` | int | Epoch time |
| `after` | int | Epoch time |
| `cameras` | str | , separated list of cameras |
| `labels` | str | , separated list of labels |
| `zones` | str | , separated list of zones |
| `reviewed` | int | Include items that have been reviewed (0 or 1) |
| `limit` | int | Limit the number of events returned |
| `severity` | str | Limit items to severity (alert, detection, significant_motion) |
### `GET /api/review/<id>`
Get review with `id` from the database.
### `GET /api/review/summary`
Summary of reviews for the last 30 days. Accepts the following query string parameters:
| param | Type | Description |
| ---------- | ---- | --------------------------- |
| `cameras` | str | , separated list of cameras |
| `labels` | str | , separated list of labels |
| `timezone` | str | Timezone name |
### `POST /api/reviews/viewed`
Mark item(s) as reviewed.
**Required Body:**
```json
{
"ids": ["123", "456"] // , separated list of review IDs
}
```
### `DELETE /api/review/<id>/viewed`
Mark an item as not reviewed.
### `POST /api/reviews/delete`
Delete review items.
**Required Body:**
```json
{
"ids": ["123", "456"] // , separated list of review IDs
}
```
### `GET /review/activity/motion`
Get the motion activity for camera(s) during a specified time period.
| param | Type | Description |
| --------- | ---- | --------------------------- |
| `before` | int | Epoch time |
| `after` | int | Epoch time |
| `cameras` | str | , separated list of cameras |
### `GET /review/activity/audio`
Get the audio activity for camera(s) during a specified time period.
| param | Type | Description |
| --------- | ---- | --------------------------- |
| `before` | int | Epoch time |
| `after` | int | Epoch time |
| `cameras` | str | , separated list of cameras |
## Timeline
### `GET /api/timeline`
Timeline of key moments for one or more events from the database. Accepts the following query string parameters:
| param | Type | Description |
| ----------- | ---- | ----------------------------------- |
| `camera` | str | Name of camera |
| `source_id` | str | ID of tracked object |
| `limit` | int | Limit the number of events returned |


@ -47,7 +47,7 @@ that card.
## Configuration
When configuring the integration, you will be asked for the `URL` of your Frigate instance which needs to be pointed at the internal unauthenticated port (`5000`) for your instance. This may look like `http://<host>:5000/`.
When configuring the integration, you will be asked for the `URL` of your Frigate instance which can be pointed at the internal unauthenticated port (`5000`) or the authenticated port (`8971`) for your instance. This may look like `http://<host>:5000/`.
### Docker Compose Examples
@ -55,7 +55,7 @@ If you are running Home Assistant Core and Frigate with Docker Compose on the sa
#### Home Assistant running with host networking
It is not recommended to run Frigate in host networking mode. In this example, you would use `http://172.17.0.1:5000` when configuring the integration.
It is not recommended to run Frigate in host networking mode. In this example, you would use `http://172.17.0.1:5000` or `http://172.17.0.1:8971` when configuring the integration.
```yaml
services:
@ -75,7 +75,7 @@ services:
#### Home Assistant _not_ running with host networking or in a separate compose file
In this example, you would use `http://frigate:5000` when configuring the integration. There is no need to map the port for the Frigate container.
In this example, it is recommended to connect to the authenticated port, for example, `http://frigate:8971` when configuring the integration. There is no need to map the port for the Frigate container.
```yaml
services:
@ -97,20 +97,21 @@ services:
If you are using HassOS with the addon, the URL should be one of the following depending on which addon version you are using. Note that if you are using the Proxy Addon, you do NOT point the integration at the proxy URL. Just enter the URL used to access Frigate directly from your network.
| Addon Version | URL |
| ------------------------------ | -------------------------------------- |
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
| Addon Version | URL |
| ------------------------------ | ----------------------------------------- |
| Frigate NVR | `http://ccab4aaf-frigate:5000` |
| Frigate NVR (Full Access) | `http://ccab4aaf-frigate-fa:5000` |
| Frigate NVR Beta | `http://ccab4aaf-frigate-beta:5000` |
| Frigate NVR Beta (Full Access) | `http://ccab4aaf-frigate-fa-beta:5000` |
| Frigate NVR HailoRT Beta | `http://ccab4aaf-frigate-hailo-beta:5000` |
### Frigate running on a separate machine
If you run Frigate on a separate device within your local network, Home Assistant will need access to port 5000.
If you run Frigate on a separate device within your local network, Home Assistant will need access to port 8971.
#### Local network
Use `http://<frigate_device_ip>:5000` as the URL for the integration. If you want to protect access to port 5000, you can use firewall rules to limit access to the device running Home Assistant.
Use `http://<frigate_device_ip>:8971` as the URL for the integration so that authentication is required.
```yaml
services:
@ -118,7 +119,7 @@ services:
image: ghcr.io/blakeblackshear/frigate:stable
...
ports:
- "5000:5000"
- "8971:8971"
...
```
@ -148,19 +149,19 @@ Home Assistant > Configuration > Integrations > Frigate > Options
## Entities Provided
| Platform | Description |
| --------------- | --------------------------------------------------------------------------------- |
| `camera` | Live camera stream (requires RTSP). |
| `image` | Image of the latest detected object for each camera. |
| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. |
| `switch` | Switch entities to toggle detection, recordings and snapshots. |
| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. |
| Platform | Description |
| --------------- | ------------------------------------------------------------------------------- |
| `camera` | Live camera stream (requires RTSP). |
| `image` | Image of the latest detected object for each camera. |
| `sensor` | States to monitor Frigate performance, object counts for all zones and cameras. |
| `switch` | Switch entities to toggle detection, recordings and snapshots. |
| `binary_sensor` | A "motion" binary sensor entity per camera/zone/object. |
## Media Browser Support
The integration provides:
- Browsing event recordings with thumbnails
- Browsing tracked object recordings with thumbnails
- Browsing snapshots
- Browsing recordings by month, day, camera, time
@ -183,24 +184,42 @@ For clips to be castable to media devices, audio is required and may need to be
Many people do not want to expose Frigate to the web, so the integration creates some public API endpoints that can be used for notifications.
To load a thumbnail for an event:
To load a thumbnail for a tracked object:
```
https://HA_URL/api/frigate/notifications/<event-id>/thumbnail.jpg
```
To load a snapshot for an event:
To load a snapshot for a tracked object:
```
https://HA_URL/api/frigate/notifications/<event-id>/snapshot.jpg
```
To load a video clip of an event:
To load a video clip of a tracked object using an Android device:
```
https://HA_URL/api/frigate/notifications/<event-id>/clip.mp4
```
To load a video clip of a tracked object using an iOS device:
```
https://HA_URL/api/frigate/notifications/<event-id>/master.m3u8
```
To load a preview gif of a tracked object:
```
https://HA_URL/api/frigate/notifications/<event-id>/event_preview.gif
```
To load a preview gif of a review item:
```
https://HA_URL/api/frigate/notifications/<review-id>/review_preview.gif
```
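These endpoints are plain HTTP GETs, so any client can fetch them. A minimal Python sketch (the Home Assistant URL and event id below are placeholders) that downloads a snapshot for use in a custom notification:

```python
import requests

HA_URL = "https://homeassistant.example.com"  # placeholder Home Assistant URL
event_id = "1607123955.475377-mxklsc"         # placeholder tracked object id

resp = requests.get(f"{HA_URL}/api/frigate/notifications/{event_id}/snapshot.jpg", timeout=10)
resp.raise_for_status()

with open("snapshot.jpg", "wb") as f:
    f.write(resp.content)
```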
<a name="streams"></a>
## RTSP stream
@ -215,7 +234,7 @@ For advanced usecases, this behavior can be changed with the [RTSP URL
template](#options) option. When set, this string will override the default stream
address that is derived from the default behavior described above. This option supports
[jinja2 templates](https://jinja.palletsprojects.com/) and has the `camera` dict
variables from [Frigate API](api.md)
variables from [Frigate API](../integrations/api)
available for the template. Note that no Home Assistant state is available to the
template, only the camera dict from Frigate.
@ -282,3 +301,7 @@ which server they are referring to.
#### If I am detecting multiple objects, how do I assign the correct `binary_sensor` to the camera in HomeKit?
The [HomeKit integration](https://www.home-assistant.io/integrations/homekit/) randomly links one of the binary sensors (motion sensor entities) grouped with the camera device in Home Assistant. You can specify a `linked_motion_sensor` in the Home Assistant [HomeKit configuration](https://www.home-assistant.io/integrations/homekit/#linked_motion_sensor) for each camera.
#### I have set up automations based on the occupancy sensors. Sometimes the automation runs because the sensors are turned on, but when I look at Frigate I can't find the object that triggered the sensor. Is this a bug?
No. The occupancy sensors have fewer checks in place because they are often used for things like turning the lights on where latency needs to be as low as possible. So false positives can sometimes trigger these sensors. If you want false positive filtering, you should use an mqtt sensor on the `frigate/events` or `frigate/reviews` topic.


@ -19,7 +19,7 @@ Causes Frigate to exit. Docker should be configured to automatically restart the
### `frigate/events`
Message published for each changed event. The first message is published when the tracked object is no longer marked as a false_positive. When Frigate finds a better snapshot of the tracked object or when a zone change occurs, it will publish a message with the same id. When the event ends, a final message is published with `end_time` set.
Message published for each changed tracked object. The first message is published when the tracked object is no longer marked as a false_positive. When Frigate finds a better snapshot of the tracked object or when a zone change occurs, it will publish a message with the same id. When the tracked object ends, a final message is published with `end_time` set.
```json
{
@ -45,6 +45,7 @@ Message published for each changed event. The first message is published when th
"thumbnail": null,
"has_snapshot": false,
"has_clip": false,
"active": true, // convenience attribute, this is strictly opposite of "stationary"
"stationary": false, // whether or not the object is considered stationary
"motionless_count": 0, // number of frames the object has been motionless
"position_changes": 2, // number of times the object has moved from a stationary position
@ -74,6 +75,7 @@ Message published for each changed event. The first message is published when th
"thumbnail": null,
"has_snapshot": false,
"has_clip": false,
"active": true, // convenience attribute, this is strictly opposite of "stationary"
"stationary": false, // whether or not the object is considered stationary
"motionless_count": 0, // number of frames the object has been motionless
"position_changes": 2, // number of times the object has changed position
@ -92,30 +94,40 @@ Message published for each changed event. The first message is published when th
}
```
### `frigate/tracked_object_update`
Message published for updates to tracked object metadata, for example when GenAI runs and returns a tracked object description.
```json
{
"type": "description",
"id": "1607123955.475377-mxklsc",
"description": "The car is a red sedan moving away from the camera."
}
```
### `frigate/reviews`
Message published for each changed review item. The first message is published when the `detection` or `alert` is initiated. When additional objects are detected or when a zone change occurs, it will publish an `update` message with the same id. When the review activity has ended, a final `end` message is published.
```json
{
"type": "update", // new, update, end
"type": "update", // new, update, end
"before": {
"id": "1718987129.308396-fqk5ka", // review_id
"id": "1718987129.308396-fqk5ka", // review_id
"camera": "front_cam",
"start_time": 1718987129.308396,
"end_time": null,
"severity": "detection",
"thumb_path": "/media/frigate/clips/review/thumb-front_cam-1718987129.308396-fqk5ka.webp",
"data": {
"detections": [ // list of event IDs
"detections": [
// list of event IDs
"1718987128.947436-g92ztx",
"1718987148.879516-d7oq7r",
"1718987126.934663-q5ywpt"
],
"objects": [
"person",
"car"
],
"objects": ["person", "car"],
"sub_labels": [],
"zones": [],
"audio": []
@ -134,14 +146,9 @@ Message published for each changed review item. The first message is published w
"1718987148.879516-d7oq7r",
"1718987126.934663-q5ywpt"
],
"objects": [
"person",
"car"
],
"objects": ["person", "car"],
"sub_labels": ["Bob"],
"zones": [
"front_yard"
],
"zones": ["front_yard"],
"audio": []
}
}
@ -152,6 +159,18 @@ Message published for each changed review item. The first message is published w
Same data available at `/api/stats` published at a configurable interval.
### `frigate/camera_activity`
Returns data about each camera, its current features, and whether it is detecting motion, objects, etc. Can be triggered by publishing to `frigate/onConnect`.
### `frigate/notifications/set`
Topic to turn notifications on and off. Expected values are `ON` and `OFF`.
### `frigate/notifications/state`
Topic with current state of notifications. Published values are `ON` and `OFF`.
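As a quick sketch (the broker address is a placeholder and an open, unauthenticated broker is assumed), notifications can be toggled from Python with the `paho-mqtt` helper:

```python
import paho.mqtt.publish as publish

# Turn Frigate notifications on; expected payloads are the strings "ON" and "OFF".
publish.single("frigate/notifications/set", "ON", hostname="localhost")
```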
## Frigate Camera Topics
### `frigate/<camera_name>/<object_name>`
@ -159,11 +178,23 @@ Same data available at `/api/stats` published at a configurable interval.
Publishes the count of objects for the camera for use as a sensor in Home Assistant.
`all` can be used as the object_name for the count of all objects for the camera.
### `frigate/<camera_name>/<object_name>/active`
Publishes the count of active objects for the camera for use as a sensor in Home
Assistant. `all` can be used as the object_name for the count of all active objects
for the camera.
### `frigate/<zone_name>/<object_name>`
Publishes the count of objects for the zone for use as a sensor in Home Assistant.
`all` can be used as the object_name for the count of all objects for the zone.
### `frigate/<zone_name>/<object_name>/active`
Publishes the count of active objects for the zone for use as a sensor in Home
Assistant. `all` can be used as the object_name for the count of all active objects
for the zone.
### `frigate/<camera_name>/<object_name>/snapshot`
Publishes a jpeg encoded frame of the detected object type. When the object is no longer detected, the highest confidence image is published or the original image


@ -19,7 +19,7 @@ Once logged in, you can generate an API key for Frigate in Settings.
### Set your API key
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the Frigate+ page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
In Frigate, you can use an environment variable or a docker secret named `PLUS_API_KEY` to enable the `Frigate+` buttons on the Explore page. Home Assistant Addon users can set it under Settings > Addons > Frigate NVR > Configuration > Options (be sure to toggle the "Show unused optional configuration options" switch).
:::warning
@ -29,7 +29,9 @@ You cannot use the `environment_vars` section of your Frigate configuration file
## Submit examples
Once your API key is configured, you can submit examples directly from the Frigate+ page.
Once your API key is configured, you can submit examples directly from the Explore page in Frigate. From the More Filters menu, select "Has a Snapshot - Yes" and "Submitted to Frigate+ - No", and press Apply at the bottom of the pane. Then, click on a thumbnail and select the Snapshot tab.
You can use your keyboard's left and right arrow keys to quickly navigate between the tracked object snapshots.
:::note
@ -37,8 +39,6 @@ Snapshots must be enabled to be able to submit examples to Frigate+
:::
![Send To Plus](/img/plus/send-to-plus.jpg)
![Submit To Plus](/img/plus/submit-to-plus.jpg)
### Annotate and verify


@ -32,7 +32,7 @@ Currently, Frigate+ models support CPU (`cpu`), Google Coral (`edgetpu`), OpenVi
:::warning
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15, which is still under development.
Using Frigate+ models with `onnx` and `rocm` is only available with Frigate 0.15 and later.
:::
@ -60,7 +60,7 @@ Other object types available in the default Frigate model are not available. Add
### Label attributes
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, and delivery logos such as `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate events. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
Frigate has special handling for some labels when using Frigate+ models. `face`, `license_plate`, and delivery logos such as `amazon`, `ups`, and `fedex` are considered attribute labels which are not tracked like regular objects and do not generate review items directly. In addition, the `threshold` filter will have no effect on these labels. You should adjust the `min_score` and other filter values as needed.
In order to have Frigate start using these attribute labels, you will need to add them to the list of objects to track:


@ -17,7 +17,11 @@ ffmpeg:
record: preset-record-generic-audio-aac
```
### I can't view events or recordings in the Web UI.
### How can I get sound in live view?
Audio is only supported for live view when go2rtc is configured; see [the live docs](../configuration/live.md) for more information.
### I can't view recordings in the Web UI.
Ensure your cameras send h264 encoded video, or [transcode them](/configuration/restream.md).
@ -98,3 +102,11 @@ docker run -d \
-p 8555:8555/udp \
ghcr.io/blakeblackshear/frigate:stable
```
### My RTSP stream works fine in VLC, but it does not work when I put the same URL in my Frigate config. Is this a bug?
No. Frigate uses the TCP protocol to connect to your camera's RTSP URL. VLC automatically switches between UDP and TCP depending on network conditions and stream availability. So a stream that works in VLC but not in Frigate is likely due to VLC selecting UDP as the transfer protocol.
TCP ensures that all data packets arrive in the correct order. This is crucial for video recording, decoding, and stream processing, which is why Frigate enforces a TCP connection. UDP is faster but less reliable, as it does not guarantee packet delivery or order, and VLC does not have the same requirements as Frigate.
You can still configure Frigate to use UDP by using ffmpeg input args or the preset `preset-rtsp-udp`. See the [ffmpeg presets](/configuration/ffmpeg_presets) documentation.


@ -3,7 +3,15 @@ id: recordings
title: Troubleshooting Recordings
---
### WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
## I have Frigate configured for motion recording only, but it still seems to be recording even with no motion. Why?
You'll want to:
- Make sure your camera's timestamp is masked out with a motion mask. Even if there is no motion occurring in your scene, your motion settings may be sensitive enough to count your timestamp as motion.
- If you have audio detection enabled, keep in mind that audio that is heard above `min_volume` is considered motion.
- [Tune your motion detection settings](/configuration/motion_detection) either by editing your config file or by using the UI's Motion Tuner.
## I see the message: WARNING : Unable to keep up with recording segments in cache for camera. Keeping the 5 most recent segments out of 6 and discarding the rest...
This error can be caused by a number of different issues. The first step in troubleshooting is to enable debug logging for recording. This will enable logging showing how long it takes for recordings to be moved from RAM cache to the disk.
@ -40,6 +48,7 @@ On linux, some helpful tools/commands in diagnosing would be:
On modern Linux kernels, the system will utilize some swap if enabled. Setting vm.swappiness=1 no longer means that the kernel will only swap in order to avoid OOM. To prevent any swapping inside a container, set the memory and memory+swap allocations to the same value and disable swapping by setting the following docker/podman run parameters:
**Compose example**
```yaml
version: "3.9"
services:
@ -54,6 +63,7 @@ services:
```
**Run command example**
```
--memory=<MAXRAM> --memory-swap=<MAXSWAP> --memory-swappiness=0
```


@ -1,102 +0,0 @@
const path = require("path");
module.exports = {
title: "Frigate",
tagline: "NVR With Realtime Object Detection for IP Cameras",
url: "https://docs.frigate.video",
baseUrl: "/",
onBrokenLinks: "throw",
onBrokenMarkdownLinks: "warn",
favicon: "img/favicon.ico",
organizationName: "blakeblackshear",
projectName: "frigate",
themes: ["@docusaurus/theme-mermaid"],
markdown: {
mermaid: true,
},
themeConfig: {
algolia: {
appId: "WIURGBNBPY",
apiKey: "d02cc0a6a61178b25da550212925226b",
indexName: "frigate",
},
docs: {
sidebar: {
hideable: true,
},
},
prism: {
additionalLanguages: ["bash", "json"],
},
navbar: {
title: "Frigate",
logo: {
alt: "Frigate",
src: "img/logo.svg",
srcDark: "img/logo-dark.svg",
},
items: [
{
to: "/",
activeBasePath: "docs",
label: "Docs",
position: "left",
},
{
href: "https://frigate.video",
label: "Website",
position: "right",
},
{
href: "http://demo.frigate.video",
label: "Demo",
position: "right",
},
{
href: "https://github.com/blakeblackshear/frigate",
label: "GitHub",
position: "right",
},
],
},
footer: {
style: "dark",
links: [
{
title: "Community",
items: [
{
label: "GitHub",
href: "https://github.com/blakeblackshear/frigate",
},
{
label: "Discussions",
href: "https://github.com/blakeblackshear/frigate/discussions",
},
],
},
],
copyright: `Copyright © ${new Date().getFullYear()} Blake Blackshear`,
},
},
plugins: [path.resolve(__dirname, "plugins", "raw-loader")],
presets: [
[
"@docusaurus/preset-classic",
{
docs: {
routeBasePath: "/",
sidebarPath: require.resolve("./sidebars.js"),
// Please change this to your repo.
editUrl:
"https://github.com/blakeblackshear/frigate/edit/master/docs/",
sidebarCollapsible: false,
},
theme: {
customCss: require.resolve("./src/css/custom.css"),
},
},
],
],
};

docs/docusaurus.config.ts Normal file

@ -0,0 +1,158 @@
import type * as Preset from '@docusaurus/preset-classic';
import * as path from 'node:path';
import type { Config, PluginConfig } from '@docusaurus/types';
import type * as OpenApiPlugin from 'docusaurus-plugin-openapi-docs';
const config: Config = {
title: 'Frigate',
tagline: 'NVR With Realtime Object Detection for IP Cameras',
url: 'https://docs.frigate.video',
baseUrl: '/',
onBrokenLinks: 'throw',
onBrokenMarkdownLinks: 'warn',
favicon: 'img/favicon.ico',
organizationName: 'blakeblackshear',
projectName: 'frigate',
themes: ['@docusaurus/theme-mermaid', 'docusaurus-theme-openapi-docs'],
markdown: {
mermaid: true,
},
themeConfig: {
algolia: {
appId: 'WIURGBNBPY',
apiKey: 'd02cc0a6a61178b25da550212925226b',
indexName: 'frigate',
},
docs: {
sidebar: {
hideable: true,
},
},
prism: {
additionalLanguages: ['bash', 'json'],
},
languageTabs: [
{
highlight: 'python',
language: 'python',
logoClass: 'python',
},
{
highlight: 'javascript',
language: 'nodejs',
logoClass: 'nodejs',
},
{
highlight: 'javascript',
language: 'javascript',
logoClass: 'javascript',
},
{
highlight: 'bash',
language: 'curl',
logoClass: 'curl',
},
{
highlight: "rust",
language: "rust",
logoClass: "rust",
},
],
navbar: {
title: 'Frigate',
logo: {
alt: 'Frigate',
src: 'img/logo.svg',
srcDark: 'img/logo-dark.svg',
},
items: [
{
to: '/',
activeBasePath: 'docs',
label: 'Docs',
position: 'left',
},
{
href: 'https://frigate.video',
label: 'Website',
position: 'right',
},
{
href: 'http://demo.frigate.video',
label: 'Demo',
position: 'right',
},
{
href: 'https://github.com/blakeblackshear/frigate',
label: 'GitHub',
position: 'right',
},
],
},
footer: {
style: 'dark',
links: [
{
title: 'Community',
items: [
{
label: 'GitHub',
href: 'https://github.com/blakeblackshear/frigate',
},
{
label: 'Discussions',
href: 'https://github.com/blakeblackshear/frigate/discussions',
},
],
},
],
copyright: `Copyright © ${new Date().getFullYear()} Blake Blackshear`,
},
},
plugins: [
path.resolve(__dirname, 'plugins', 'raw-loader'),
[
'docusaurus-plugin-openapi-docs',
{
id: 'openapi',
docsPluginId: 'classic', // configured for preset-classic
config: {
frigateApi: {
specPath: 'static/frigate-api.yaml',
outputDir: 'docs/integrations/api',
sidebarOptions: {
groupPathsBy: 'tag',
categoryLinkSource: 'tag',
sidebarCollapsible: true,
sidebarCollapsed: true,
},
showSchemas: true,
} satisfies OpenApiPlugin.Options,
},
},
]
] as PluginConfig[],
presets: [
[
'classic',
{
docs: {
routeBasePath: '/',
sidebarPath: './sidebars.ts',
// Please change this to your repo.
editUrl: 'https://github.com/blakeblackshear/frigate/edit/master/docs/',
sidebarCollapsible: false,
docItemComponent: '@theme/ApiItem', // Derived from docusaurus-theme-openapi
},
theme: {
customCss: './src/css/custom.css',
},
} satisfies Preset.Options,
],
],
};
export default async function createConfig() {
return config;
}

docs/package-lock.json generated

File diff suppressed because it is too large.


@ -4,25 +4,31 @@
"private": true,
"scripts": {
"docusaurus": "docusaurus",
"start": "docusaurus start --host 0.0.0.0",
"build": "docusaurus build",
"start": "npm run regen-docs && docusaurus start --host 0.0.0.0",
"build": "npm run regen-docs && docusaurus build",
"swizzle": "docusaurus swizzle",
"deploy": "docusaurus deploy",
"clear": "docusaurus clear",
"gen-api-docs": "docusaurus gen-api-docs all",
"clear-api-docs": "docusaurus clean-api-docs all",
"regen-docs": "npm run clear-api-docs && npm run gen-api-docs",
"serve": "docusaurus serve --host 0.0.0.0",
"write-translations": "docusaurus write-translations",
"write-heading-ids": "docusaurus write-heading-ids"
},
"dependencies": {
"@docusaurus/core": "^3.4.0",
"@docusaurus/preset-classic": "^3.4.0",
"@docusaurus/theme-mermaid": "^3.4.0",
"@mdx-js/react": "^3.0.0",
"clsx": "^2.0.0",
"prism-react-renderer": "^2.1.0",
"@docusaurus/core": "^3.6.3",
"@docusaurus/preset-classic": "^3.6.3",
"@docusaurus/theme-mermaid": "^3.6.3",
"@docusaurus/plugin-content-docs": "^3.6.3",
"@mdx-js/react": "^3.1.0",
"clsx": "^2.1.1",
"docusaurus-plugin-openapi-docs": "^4.3.1",
"docusaurus-theme-openapi-docs": "^4.3.1",
"prism-react-renderer": "^2.4.1",
"raw-loader": "^4.0.2",
"react": "^18.2.0",
"react-dom": "^18.2.0"
"react": "^18.3.1",
"react-dom": "^18.3.1"
},
"browserslist": {
"production": [
@ -39,7 +45,7 @@
"devDependencies": {
"@docusaurus/module-type-aliases": "^3.4.0",
"@docusaurus/types": "^3.4.0",
"@types/react": "^18.2.79"
"@types/react": "^18.3.7"
},
"engines": {
"node": ">=18.0"


@ -1,83 +0,0 @@
module.exports = {
docs: {
Frigate: [
"frigate/index",
"frigate/hardware",
"frigate/installation",
"frigate/camera_setup",
"frigate/video_pipeline",
"frigate/glossary",
],
Guides: [
"guides/getting_started",
"guides/configuring_go2rtc",
"guides/ha_notifications",
"guides/ha_network_storage",
"guides/reverse_proxy",
],
Configuration: {
"Configuration Files": [
"configuration/index",
"configuration/reference",
{
type: "link",
label: "Go2RTC Configuration Reference",
href: "https://github.com/AlexxIT/go2rtc/tree/v1.9.4#configuration",
},
],
Detectors: [
"configuration/object_detectors",
"configuration/audio_detectors",
],
Cameras: [
"configuration/cameras",
"configuration/review",
"configuration/record",
"configuration/snapshots",
"configuration/motion_detection",
"configuration/birdseye",
"configuration/live",
"configuration/restream",
"configuration/autotracking",
"configuration/camera_specific",
],
Objects: [
"configuration/object_filters",
"configuration/masks",
"configuration/zones",
"configuration/objects",
"configuration/stationary_objects",
],
"Extra Configuration": [
"configuration/authentication",
"configuration/hardware_acceleration",
"configuration/ffmpeg_presets",
"configuration/pwa",
"configuration/tls",
"configuration/advanced",
],
},
Integrations: [
"integrations/plus",
"integrations/home-assistant",
"integrations/api",
"integrations/mqtt",
"integrations/third_party_extensions",
],
"Frigate+": [
"plus/index",
"plus/first_model",
"plus/improving_model",
"plus/faq",
],
Troubleshooting: [
"troubleshooting/faqs",
"troubleshooting/recordings",
"troubleshooting/edgetpu",
],
Development: [
"development/contributing",
"development/contributing-boards",
],
},
};

docs/sidebars.ts Normal file

@ -0,0 +1,105 @@
import type { SidebarsConfig, } from '@docusaurus/plugin-content-docs';
import { PropSidebarItemLink } from '@docusaurus/plugin-content-docs';
import frigateHttpApiSidebar from './docs/integrations/api/sidebar';
const sidebars: SidebarsConfig = {
docs: {
Frigate: [
'frigate/index',
'frigate/hardware',
'frigate/installation',
'frigate/camera_setup',
'frigate/video_pipeline',
'frigate/glossary',
],
Guides: [
'guides/getting_started',
'guides/configuring_go2rtc',
'guides/ha_notifications',
'guides/ha_network_storage',
'guides/reverse_proxy',
],
Configuration: {
'Configuration Files': [
'configuration/index',
'configuration/reference',
{
type: 'link',
label: 'Go2RTC Configuration Reference',
href: 'https://github.com/AlexxIT/go2rtc/tree/v1.9.2#configuration',
} as PropSidebarItemLink,
],
Detectors: [
'configuration/object_detectors',
'configuration/audio_detectors',
],
'Semantic Search': [
'configuration/semantic_search',
'configuration/genai',
],
Cameras: [
'configuration/cameras',
'configuration/review',
'configuration/record',
'configuration/snapshots',
'configuration/motion_detection',
'configuration/birdseye',
'configuration/live',
'configuration/restream',
'configuration/autotracking',
'configuration/camera_specific',
],
Objects: [
'configuration/object_filters',
'configuration/masks',
'configuration/zones',
'configuration/objects',
'configuration/stationary_objects',
],
'Extra Configuration': [
'configuration/authentication',
'configuration/notifications',
'configuration/hardware_acceleration',
'configuration/ffmpeg_presets',
"configuration/pwa",
'configuration/tls',
'configuration/advanced',
],
},
Integrations: [
'integrations/plus',
'integrations/home-assistant',
// This is the HTTP API generated by OpenAPI
{
type: 'category',
label: 'HTTP API',
link: {
type: 'generated-index',
title: 'Frigate HTTP API',
description: 'HTTP API',
slug: '/integrations/api/frigate-http-api',
},
items: frigateHttpApiSidebar,
},
'integrations/mqtt',
'integrations/third_party_extensions',
],
'Frigate+': [
'plus/index',
'plus/first_model',
'plus/improving_model',
'plus/faq',
],
Troubleshooting: [
'troubleshooting/faqs',
'troubleshooting/recordings',
'troubleshooting/edgetpu',
],
Development: [
'development/contributing',
'development/contributing-boards',
],
},
};
export default sidebars;


@ -23,3 +23,214 @@
margin: 0 calc(-1 * var(--ifm-pre-padding));
padding: 0 var(--ifm-pre-padding);
}
/**
Custom CSS for the OpenAPI specification. Based on the openapi demo: https://github.com/PaloAltoNetworks/docusaurus-openapi-docs/tree/main/demo
*/
/* Sidebar Method labels */
.api-method > .menu__link,
.schema > .menu__link {
align-items: center;
justify-content: start;
}
.api-method > .menu__link::before,
.schema > .menu__link::before {
width: 55px;
height: 20px;
font-size: 12px;
line-height: 20px;
text-transform: uppercase;
font-weight: 600;
border-radius: 0.25rem;
border: 1px solid;
margin-right: var(--ifm-spacing-horizontal);
text-align: center;
flex-shrink: 0;
border-color: transparent;
color: white;
}
.get > .menu__link::before {
content: "get";
background-color: var(--ifm-color-primary);
}
.post > .menu__link::before {
content: "post";
background-color: var(--ifm-color-success);
}
.delete > .menu__link::before {
content: "del";
background-color: var(--openapi-code-red);
}
.put > .menu__link::before {
content: "put";
background-color: var(--openapi-code-blue);
}
.patch > .menu__link::before {
content: "patch";
background-color: var(--openapi-code-orange);
}
.head > .menu__link::before {
content: "head";
background-color: var(--ifm-color-secondary-darkest);
}
.event > .menu__link::before {
content: "event";
background-color: var(--ifm-color-secondary-darkest);
}
.schema > .menu__link::before {
content: "schema";
background-color: var(--ifm-color-secondary-darkest);
}
.menu__list-item--deprecated > .menu__link,
.menu__list-item--deprecated > .menu__link:hover {
text-decoration: line-through;
}
/* Sidebar Method labels High Contrast */
.api-method-contrast > .menu__link,
.schema-contrast > .menu__link {
align-items: center;
justify-content: start;
}
.api-method-contrast > .menu__link::before,
.schema-contrast > .menu__link::before {
width: 55px;
height: 20px;
font-size: 12px;
line-height: 20px;
text-transform: uppercase;
font-weight: 600;
border-radius: 0.25rem;
border: 1px solid;
border-inline-start-width: 5px;
margin-right: var(--ifm-spacing-horizontal);
text-align: center;
flex-shrink: 0;
}
.get-contrast > .menu__link::before {
content: "get";
background-color: var(--ifm-color-info-contrast-background);
color: var(--ifm-color-info-contrast-foreground);
border-color: var(--ifm-color-info-dark);
}
.post-contrast > .menu__link::before {
content: "post";
background-color: var(--ifm-color-success-contrast-background);
color: var(--ifm-color-success-contrast-foreground);
border-color: var(--ifm-color-success-dark);
}
.delete-contrast > .menu__link::before {
content: "del";
background-color: var(--ifm-color-danger-contrast-background);
color: var(--ifm-color-danger-contrast-foreground);
border-color: var(--ifm-color-danger-dark);
}
.put-contrast > .menu__link::before {
content: "put";
background-color: var(--ifm-color-warning-contrast-background);
color: var(--ifm-color-warning-contrast-foreground);
border-color: var(--ifm-color-warning-dark);
}
.patch-contrast > .menu__link::before {
content: "patch";
background-color: var(--ifm-color-success-contrast-background);
color: var(--ifm-color-success-contrast-foreground);
border-color: var(--ifm-color-success-dark);
}
.head-contrast > .menu__link::before {
content: "head";
background-color: var(--ifm-color-secondary-contrast-background);
color: var(--ifm-color-secondary-contrast-foreground);
border-color: var(--ifm-color-secondary-dark);
}
.event-contrast > .menu__link::before {
content: "event";
background-color: var(--ifm-color-secondary-contrast-background);
color: var(--ifm-color-secondary-contrast-foreground);
border-color: var(--ifm-color-secondary-dark);
}
.schema-contrast > .menu__link::before {
content: "schema";
background-color: var(--ifm-color-secondary-contrast-background);
color: var(--ifm-color-secondary-contrast-foreground);
border-color: var(--ifm-color-secondary-dark);
}
/* Simple */
.api-method-simple > .menu__link {
align-items: center;
justify-content: start;
}
.api-method-simple > .menu__link::before {
width: 55px;
height: 20px;
font-size: 12px;
line-height: 20px;
text-transform: uppercase;
font-weight: 600;
border-radius: 0.25rem;
align-content: start;
margin-right: var(--ifm-spacing-horizontal);
text-align: right;
flex-shrink: 0;
border-color: transparent;
}
.get-simple > .menu__link::before {
content: "get";
color: var(--ifm-color-info);
}
.post-simple > .menu__link::before {
content: "post";
color: var(--ifm-color-success);
}
.delete-simple > .menu__link::before {
content: "del";
color: var(--ifm-color-danger);
}
.put-simple > .menu__link::before {
content: "put";
color: var(--ifm-color-warning);
}
.patch-simple > .menu__link::before {
content: "patch";
color: var(--ifm-color-warning);
}
.head-simple > .menu__link::before {
content: "head";
color: var(--ifm-color-secondary-contrast-foreground);
}
.event-simple > .menu__link::before {
content: "event";
color: var(--ifm-color-secondary-contrast-foreground);
}
.schema-simple > .menu__link::before {
content: "schema";
color: var(--ifm-color-secondary-contrast-foreground);
}

docs/static/frigate-api.yaml vendored Normal file

File diff suppressed because it is too large.


@ -1,17 +1,64 @@
import argparse
import faulthandler
import signal
import sys
import threading
from flask import cli
from pydantic import ValidationError
from frigate.app import FrigateApp
from frigate.config import FrigateConfig
from frigate.log import setup_logging
faulthandler.enable()
threading.current_thread().name = "frigate"
def main() -> None:
faulthandler.enable()
# Setup the logging thread
setup_logging()
threading.current_thread().name = "frigate"
# Make sure we exit cleanly on SIGTERM.
signal.signal(signal.SIGTERM, lambda sig, frame: sys.exit())
# Parse the cli arguments.
parser = argparse.ArgumentParser(
prog="Frigate",
description="An NVR with realtime local object detection for IP cameras.",
)
parser.add_argument("--validate-config", action="store_true")
args = parser.parse_args()
# Load the configuration.
try:
config = FrigateConfig.load(install=True)
except ValidationError as e:
print("*************************************************************")
print("*************************************************************")
print("*** Your config file is not valid! ***")
print("*** Please check the docs at ***")
print("*** https://docs.frigate.video/configuration/ ***")
print("*************************************************************")
print("*************************************************************")
print("*** Config Validation Errors ***")
print("*************************************************************")
for error in e.errors():
location = ".".join(str(item) for item in error["loc"])
print(f"{location}: {error['msg']}")
print("*************************************************************")
print("*** End Config Validation Errors ***")
print("*************************************************************")
sys.exit(1)
if args.validate_config:
print("*************************************************************")
print("*** Your config file is valid. ***")
print("*************************************************************")
sys.exit(0)
# Run the main application.
FrigateApp(config).start()
cli.show_server_banner = lambda *x: None
if __name__ == "__main__":
frigate_app = FrigateApp()
frigate_app.start()
main()

Some files were not shown because too many files have changed in this diff.