diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 7c91f4f6f..0c460cfad 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -37,42 +37,57 @@ "onAutoForward": "silent" } }, - "extensions": [ - "ms-python.vscode-pylance", - "ms-python.python", - "visualstudioexptteam.vscodeintellicode", - "mhutchie.git-graph", - "ms-azuretools.vscode-docker", - "streetsidesoftware.code-spell-checker", - "esbenp.prettier-vscode", - "dbaeumer.vscode-eslint", - "mikestead.dotenv", - "csstools.postcss", - "blanu.vscode-styled-jsx", - "bradlc.vscode-tailwindcss" - ], - "settings": { - "remote.autoForwardPorts": false, - "python.linting.pylintEnabled": true, - "python.linting.enabled": true, - "python.formatting.provider": "black", - "python.languageServer": "Pylance", - "editor.formatOnPaste": false, - "editor.formatOnSave": true, - "editor.formatOnType": true, - "python.testing.pytestEnabled": false, - "python.testing.unittestEnabled": true, - "python.testing.unittestArgs": ["-v", "-s", "./frigate/test"], - "files.trimTrailingWhitespace": true, - "eslint.workingDirectories": ["./web"], - "[json][jsonc]": { - "editor.defaultFormatter": "esbenp.prettier-vscode" - }, - "[jsx][js][tsx][ts]": { - "editor.codeActionsOnSave": ["source.addMissingImports", "source.fixAll"], - "editor.tabSize": 2 - }, - "cSpell.ignoreWords": ["rtmp"], - "cSpell.words": ["preact"] + "customizations": { + "vscode": { + "extensions": [ + "ms-python.python", + "ms-python.vscode-pylance", + "visualstudioexptteam.vscodeintellicode", + "mhutchie.git-graph", + "ms-azuretools.vscode-docker", + "streetsidesoftware.code-spell-checker", + "esbenp.prettier-vscode", + "dbaeumer.vscode-eslint", + "mikestead.dotenv", + "csstools.postcss", + "blanu.vscode-styled-jsx", + "bradlc.vscode-tailwindcss", + "charliermarsh.ruff" + ], + "settings": { + "remote.autoForwardPorts": false, + "python.formatting.provider": "none", + "python.languageServer": "Pylance", + "editor.formatOnPaste": false, + "editor.formatOnSave": true, + "editor.formatOnType": true, + "python.testing.pytestEnabled": false, + "python.testing.unittestEnabled": true, + "python.testing.unittestArgs": ["-v", "-s", "./frigate/test"], + "files.trimTrailingWhitespace": true, + "eslint.workingDirectories": ["./web"], + "isort.args": ["--settings-path=./pyproject.toml"], + "[python]": { + "editor.defaultFormatter": "charliermarsh.ruff", + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.fixAll": true, + "source.organizeImports": true + } + }, + "[json][jsonc]": { + "editor.defaultFormatter": "esbenp.prettier-vscode" + }, + "[jsx][js][tsx][ts]": { + "editor.codeActionsOnSave": [ + "source.addMissingImports", + "source.fixAll" + ], + "editor.tabSize": 2 + }, + "cSpell.ignoreWords": ["rtmp"], + "cSpell.words": ["preact", "astype", "hwaccel", "mqtt"] + } + } } } diff --git a/.devcontainer/post_create.sh b/.devcontainer/post_create.sh index 9dd1249c6..1a1832f3b 100755 --- a/.devcontainer/post_create.sh +++ b/.devcontainer/post_create.sh @@ -2,12 +2,23 @@ set -euxo pipefail +# Cleanup the old github host key +sed -i -e '/AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31\/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi\/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==/d' ~/.ssh/known_hosts +# Add 
new github host key +curl -L https://api.github.com/meta | jq -r '.ssh_keys | .[]' | \ + sed -e 's/^/github.com /' >> ~/.ssh/known_hosts + # Frigate normal container runs as root, so it have permission to create # the folders. But the devcontainer runs as the host user, so we need to # create the folders and give the host user permission to write to them. sudo mkdir -p /media/frigate sudo chown -R "$(id -u):$(id -g)" /media/frigate +# When started as a service, LIBAVFORMAT_VERSION_MAJOR is defined in the +# s6 service file. For dev, where frigate is started from an interactive +# shell, we define it in .bashrc instead. +echo 'export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po "libavformat\W+\K\d+")' >> $HOME/.bashrc + make version cd web diff --git a/.github/actions/setup/action.yml b/.github/actions/setup/action.yml new file mode 100644 index 000000000..c96102edb --- /dev/null +++ b/.github/actions/setup/action.yml @@ -0,0 +1,39 @@ +name: 'Setup' +description: 'Set up QEMU and Buildx' +inputs: + GITHUB_TOKEN: + required: true +outputs: + image-name: + value: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ steps.create-short-sha.outputs.SHORT_SHA }} + cache-name: + value: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:cache +runs: + using: "composite" + steps: + - name: Remove unnecessary files + run: | + sudo rm -rf /usr/share/dotnet + sudo rm -rf /usr/local/lib/android + sudo rm -rf /opt/ghc + shell: bash + - id: lowercaseRepo + uses: ASzc/change-string-case-action@v5 + with: + string: ${{ github.repository }} + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + - name: Log in to the Container registry + uses: docker/login-action@465a07811f14bebb1938fbed4728c6a1ff8901fc + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ inputs.GITHUB_TOKEN }} + - name: Create version file + run: make version + shell: bash + - id: create-short-sha + run: echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT + shell: bash diff --git a/.github/dependabot.yml b/.github/dependabot.yml index d424c6bf8..79e8b2881 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -13,7 +13,13 @@ updates: open-pull-requests-limit: 10 target-branch: dev - package-ecosystem: "pip" - directory: "/" + directory: "/docker/main" + schedule: + interval: daily + open-pull-requests-limit: 10 + target-branch: dev + - package-ecosystem: "pip" + directory: "/docker/tensorrt" schedule: interval: daily open-pull-requests-limit: 10 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8a06ef0e6..c6fad8817 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,6 +1,7 @@ name: CI on: + workflow_dispatch: push: branches: - dev @@ -15,53 +16,154 @@ env: PYTHON_VERSION: 3.9 jobs: - multi_arch_build: + amd64_build: runs-on: ubuntu-latest - name: Image Build + name: AMD64 Build + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push amd64 standard build + uses: docker/build-push-action@v5 + with: + context: . 
+ file: docker/main/Dockerfile + push: true + platforms: linux/amd64 + target: frigate + tags: ${{ steps.setup.outputs.image-name }}-amd64 + cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 + - name: Build and push TensorRT (x86 GPU) + uses: docker/bake-action@v4 + with: + push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max + arm64_build: + runs-on: ubuntu-latest + name: ARM Build + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push arm64 standard build + uses: docker/build-push-action@v5 + with: + context: . + file: docker/main/Dockerfile + push: true + platforms: linux/arm64 + target: frigate + tags: | + ${{ steps.setup.outputs.image-name }}-standard-arm64 + cache-from: type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64 + - name: Build and push RPi build + uses: docker/bake-action@v4 + with: + push: true + targets: rpi + files: docker/rpi/rpi.hcl + set: | + rpi.tags=${{ steps.setup.outputs.image-name }}-rpi + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max + - name: Build and push RockChip build + uses: docker/bake-action@v3 + with: + push: true + targets: rk + files: docker/rockchip/rk.hcl + set: | + rk.tags=${{ steps.setup.outputs.image-name }}-rk + *.cache-from=type=gha + jetson_jp4_build: + runs-on: ubuntu-latest + name: Jetson Jetpack 4 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push TensorRT (Jetson, Jetpack 4) + env: + ARCH: arm64 + BASE_IMAGE: timongentzsch/l4t-ubuntu20-opencv:latest + SLIM_BASE: timongentzsch/l4t-ubuntu20-opencv:latest + TRT_BASE: timongentzsch/l4t-ubuntu20-opencv:latest + uses: docker/bake-action@v4 + with: + push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp4 + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp4,mode=max + jetson_jp5_build: + runs-on: ubuntu-latest + name: Jetson Jetpack 5 + steps: + - name: Check out code + uses: actions/checkout@v4 + - name: Set up QEMU and Buildx + id: setup + uses: ./.github/actions/setup + with: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Build and push TensorRT (Jetson, Jetpack 5) + env: + ARCH: arm64 + BASE_IMAGE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime + SLIM_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime + TRT_BASE: nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime + uses: docker/bake-action@v4 + with: + push: true + targets: tensorrt + files: docker/tensorrt/trt.hcl + set: | + tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt-jp5 + *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5 + *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-jp5,mode=max + # The majority of users running arm64 are rpi users, so the rpi + # build should be the primary arm64 image + 
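The job below exists because each architecture is now pushed under its own suffixed tag (`-amd64`, `-rpi`, ...); a manifest list under the unsuffixed tag is what lets `docker pull` resolve the correct image per platform. A minimal sketch of what the manifest step amounts to, using a hypothetical build tag (the workflow itself delegates this to int128/docker-manifest-create-action):

```bash
# Hypothetical build tag; the real one is <repo>:<ref_name>-<short_sha> from the setup action.
IMAGE="ghcr.io/blakeblackshear/frigate:dev-0123abc"

# Merge the per-arch images pushed above into a single multi-arch manifest list.
docker buildx imagetools create --tag "$IMAGE" "$IMAGE-amd64" "$IMAGE-rpi"

# Verify that both the linux/amd64 and linux/arm64 entries are present.
docker buildx imagetools inspect "$IMAGE"
```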
assemble_default_build: + runs-on: ubuntu-latest + name: Assemble and push default build + needs: + - amd64_build + - arm64_build steps: - - name: Remove unnecessary files - run: | - sudo rm -rf /usr/share/dotnet - sudo rm -rf /usr/local/lib/android - sudo rm -rf /opt/ghc - id: lowercaseRepo - uses: ASzc/change-string-case-action@v5 + uses: ASzc/change-string-case-action@v6 with: string: ${{ github.repository }} - - name: Check out code - uses: actions/checkout@v3 - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - name: Log in to the Container registry - uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: Create version file - run: make version - name: Create short sha run: echo "SHORT_SHA=${GITHUB_SHA::7}" >> $GITHUB_ENV - - name: Build and push - uses: docker/build-push-action@v3 + - uses: int128/docker-manifest-create-action@v1 with: - context: . - push: true - platforms: linux/amd64,linux/arm64,linux/arm/v7 - target: frigate - tags: | - ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }} - cache-from: type=gha - cache-to: type=gha,mode=max - - name: Build and push TensorRT - uses: docker/build-push-action@v3 - with: - context: . - push: true - platforms: linux/amd64 - target: frigate-tensorrt - tags: | - ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-tensorrt - cache-from: type=gha + tags: ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }} + suffixes: | + -amd64 + -rpi diff --git a/.github/workflows/dependabot-auto-merge.yaml b/.github/workflows/dependabot-auto-merge.yaml index 873350876..a3eecb1d5 100644 --- a/.github/workflows/dependabot-auto-merge.yaml +++ b/.github/workflows/dependabot-auto-merge.yaml @@ -16,7 +16,9 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Enable auto-merge for Dependabot PRs if: steps.metadata.outputs.dependency-type == 'direct:development' && (steps.metadata.outputs.update-type == 'version-update:semver-minor' || steps.metadata.outputs.update-type == 'version-update:semver-patch') - run: gh pr merge --auto --squash "$PR_URL" + run: | + gh pr review --approve "$PR_URL" + gh pr merge --auto --squash "$PR_URL" env: PR_URL: ${{ github.event.pull_request.html_url }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pull_request.yml b/.github/workflows/pull_request.yml index 528d1e20e..b86d9b658 100644 --- a/.github/workflows/pull_request.yml +++ b/.github/workflows/pull_request.yml @@ -15,7 +15,7 @@ jobs: env: DOCKER_BUILDKIT: "1" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -34,7 +34,7 @@ jobs: name: Web - Lint runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -48,7 +48,7 @@ jobs: name: Web - Test runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -63,25 +63,28 @@ jobs: name: Python Checks steps: - name: Check out the repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up Python ${{ env.DEFAULT_PYTHON }} - uses: 
actions/setup-python@v4.5.0 + uses: actions/setup-python@v5.0.0 with: python-version: ${{ env.DEFAULT_PYTHON }} - name: Install requirements run: | - pip install pip - pip install -r requirements-dev.txt - - name: Lint + python3 -m pip install -U pip + python3 -m pip install -r docker/main/requirements-dev.txt + - name: Check formatting run: | - python3 -m black frigate --check + ruff format --check --diff frigate migrations docker *.py + - name: Check lint + run: | + ruff check frigate migrations docker *.py python_tests: runs-on: ubuntu-latest name: Python Tests steps: - name: Check out code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - uses: actions/setup-node@master with: node-version: 16.x @@ -91,9 +94,9 @@ jobs: run: npm run build working-directory: ./web - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@v3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: Build run: make - name: Run mypy diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 000000000..3eb9785d9 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,37 @@ +name: On release + +on: + workflow_dispatch: + release: + types: [published] + +jobs: + release: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + - id: lowercaseRepo + uses: ASzc/change-string-case-action@v6 + with: + string: ${{ github.repository }} + - name: Log in to the Container registry + uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + - name: Create tag variables + run: | + BRANCH=$([[ "${{ github.ref_name }}" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]] && echo "master" || echo "dev") + echo "BASE=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}" >> $GITHUB_ENV + echo "BUILD_TAG=${BRANCH}-${GITHUB_SHA::7}" >> $GITHUB_ENV + echo "CLEAN_VERSION=$(echo ${GITHUB_REF##*/} | tr '[:upper:]' '[:lower:]' | sed 's/^[v]//')" >> $GITHUB_ENV + - name: Tag and push the main image + run: | + VERSION_TAG=${BASE}:${CLEAN_VERSION} + PULL_TAG=${BASE}:${BUILD_TAG} + docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG} docker://${VERSION_TAG} + for variant in standard-arm64 tensorrt tensorrt-jp4 tensorrt-jp5 rk; do + docker run --rm -v $HOME/.docker/config.json:/config.json quay.io/skopeo/stable:latest copy --authfile /config.json --multi-arch all docker://${PULL_TAG}-${variant} docker://${VERSION_TAG}-${variant} + done diff --git a/.gitignore b/.gitignore index 200107ae5..33ec9ee24 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,8 @@ debug .vscode/* !.vscode/launch.json -config/config.yml +config/* +!config/*.example models *.mp4 *.ts diff --git a/CODEOWNERS b/CODEOWNERS new file mode 100644 index 000000000..48b26a359 --- /dev/null +++ b/CODEOWNERS @@ -0,0 +1,6 @@ +# Community-supported boards +/docker/tensorrt/ @madsciencetist @NateMeyer +/docker/tensorrt/*arm64* @madsciencetist +/docker/tensorrt/*jetson* @madsciencetist + +/docker/rockchip/ @MarcA711 diff --git a/Makefile b/Makefile index 7ec0c71aa..2cd831670 100644 --- a/Makefile +++ b/Makefile @@ -1,39 +1,39 @@ default_target: local COMMIT_HASH := $(shell git log -1 --pretty=format:"%h"|tail -1) -VERSION = 0.12.1 +VERSION = 0.13.0 IMAGE_REPO ?= ghcr.io/blakeblackshear/frigate +GITHUB_REF_NAME ?= $(shell git rev-parse 
--abbrev-ref HEAD) CURRENT_UID := $(shell id -u) CURRENT_GID := $(shell id -g) +BOARDS= #Initialized empty + +include docker/*/*.mk + +build-boards: $(BOARDS:%=build-%) + +push-boards: $(BOARDS:%=push-%) version: echo 'VERSION = "$(VERSION)-$(COMMIT_HASH)"' > frigate/version.py local: version - docker buildx build --target=frigate --tag frigate:latest --load . - -local-trt: version - docker buildx build --target=frigate-tensorrt --tag frigate:latest-tensorrt --load . + docker buildx build --target=frigate --tag frigate:latest --load --file docker/main/Dockerfile . amd64: - docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . - docker buildx build --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH)-tensorrt . + docker buildx build --platform linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . arm64: - docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . + docker buildx build --platform linux/arm64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . -armv7: - docker buildx build --platform linux/arm/v7 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . +build: version amd64 arm64 + docker buildx build --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) --file docker/main/Dockerfile . -build: version amd64 arm64 armv7 - docker buildx build --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):$(VERSION)-$(COMMIT_HASH) . - -push: build - docker buildx build --push --platform linux/arm/v7,linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) . - docker buildx build --push --platform linux/amd64 --target=frigate-tensorrt --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt . +push: push-boards + docker buildx build --push --platform linux/arm64/v8,linux/amd64 --target=frigate --tag $(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH) --file docker/main/Dockerfile . 
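With `BOARDS` starting empty and `include docker/*/*.mk` pulling in one makefile per board directory, each board registers itself and the `build-boards`/`push-boards` pattern rules fan out automatically. A sketch of the resulting usage, assuming each board module appends its name to `BOARDS` (e.g. a hypothetical `docker/rpi/rpi.mk` adding `rpi`):

```bash
# Every entry in BOARDS yields a build-<board> and a push-<board> target:
make build-boards   # expands to build-rpi, build-rockchip, ... for each registered board
make push-boards    # same fan-out for the push-% targets
make push           # push-boards runs first as a prerequisite, then the main multi-arch image is pushed
```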
run: local - docker run --rm --publish=5000:5000 --volume=${PWD}/config/config.yml:/config/config.yml frigate:latest + docker run --rm --publish=5000:5000 --volume=${PWD}/config:/config frigate:latest run_tests: local docker run --rm --workdir=/opt/frigate --entrypoint= frigate:latest python3 -u -m unittest diff --git a/audio-labelmap.txt b/audio-labelmap.txt new file mode 100644 index 000000000..4a38b5f63 --- /dev/null +++ b/audio-labelmap.txt @@ -0,0 +1,521 @@ +speech +speech +speech +speech +babbling +speech +yell +bellow +whoop +yell +yell +yell +whispering +laughter +laughter +laughter +snicker +laughter +laughter +crying +crying +crying +yell +sigh +singing +choir +sodeling +chant +mantra +child_singing +synthetic_singing +rapping +humming +groan +grunt +whistling +breathing +wheeze +snoring +gasp +pant +snort +cough +throat_clearing +sneeze +sniff +run +shuffle +footsteps +chewing +biting +gargling +stomach_rumble +burping +hiccup +fart +hands +finger_snapping +clapping +heartbeat +heart_murmur +cheering +applause +chatter +crowd +speech +children_playing +animal +pets +dog +bark +yip +howl +bow-wow +growling +whimper_dog +cat +purr +meow +hiss +caterwaul +livestock +horse +clip-clop +neigh +cattle +moo +cowbell +pig +oink +goat +bleat +sheep +fowl +chicken +cluck +cock-a-doodle-doo +turkey +gobble +duck +quack +goose +honk +wild_animals +roaring_cats +roar +bird +chird +chirp +squawk +pigeon +coo +crow +caw +owl +hoot +flapping_wings +dogs +rats +mouse +patter +insect +cricket +mosquito +fly +buzz +buzz +frog +croak +snake +rattle +whale_vocalization +music +musical_instrument +plucked_string_instrument +guitar +electric_guitar +bass_guitar +acoustic_guitar +steel_guitar +tapping +strum +banjo +sitar +mandolin +zither +ukulele +keyboard +piano +electric_piano +organ +electronic_organ +hammond_organ +synthesizer +sampler +harpsichord +percussion +drum_kit +drum_machine +drum +snare_drum +rimshot +drum_roll +bass_drum +timpani +tabla +cymbal +hi-hat +wood_block +tambourine +rattle +maraca +gong +tubular_bells +mallet_percussion +marimba +glockenspiel +vibraphone +steelpan +orchestra +brass_instrument +french_horn +trumpet +trombone +bowed_string_instrument +string_section +violin +pizzicato +cello +double_bass +wind_instrument +flute +saxophone +clarinet +harp +bell +church_bell +jingle_bell +bicycle_bell +tuning_fork +chime +wind_chime +change_ringing +harmonica +accordion +bagpipes +didgeridoo +shofar +theremin +singing_bowl +scratching +pop_music +hip_hop_music +beatboxing +rock_music +heavy_metal +punk_rock +grunge +progressive_rock +rock_and_roll +psychedelic_rock +rhythm_and_blues +soul_music +reggae +country +swing_music +bluegrass +funk +folk_music +middle_eastern_music +jazz +disco +classical_music +opera +electronic_music +house_music +techno +dubstep +drum_and_bass +electronica +electronic_dance_music +ambient_music +trance_music +music_of_latin_america +salsa_music +flamenco +blues +music_for_children +new-age_music +vocal_music +a_capella +music_of_africa +afrobeat +christian_music +gospel_music +music_of_asia +carnatic_music +music_of_bollywood +ska +traditional_music +independent_music +song +background_music +theme_music +jingle +soundtrack_music +lullaby +video_game_music +christmas_music +dance_music +wedding_music +happy_music +sad_music +tender_music +exciting_music +angry_music +scary_music +wind +rustling_leaves +wind_noise +thunderstorm +thunder +water +rain +raindrop +rain_on_surface +stream +waterfall +ocean +waves +steam +gurgling +fire +crackle +vehicle 
+boat +sailboat +rowboat +motorboat +ship +motor_vehicle +car +honk +toot +car_alarm +power_windows +skidding +tire_squeal +car_passing_by +race_car +truck +air_brake +air_horn +reversing_beeps +ice_cream_truck +bus +emergency_vehicle +police_car +ambulance +fire_engine +motorcycle +traffic_noise +rail_transport +train +train_whistle +train_horn +railroad_car +train_wheels_squealing +subway +aircraft +aircraft_engine +jet_engine +propeller +helicopter +fixed-wing_aircraft +bicycle +skateboard +engine +light_engine +dental_drill's_drill +lawn_mower +chainsaw +medium_engine +heavy_engine +engine_knocking +engine_starting +idling +accelerating +door +doorbell +ding-dong +sliding_door +slam +knock +tap +squeak +cupboard_open_or_close +drawer_open_or_close +dishes +cutlery +chopping +frying +microwave_oven +blender +water_tap +sink +bathtub +hair_dryer +toilet_flush +toothbrush +electric_toothbrush +vacuum_cleaner +zipper +keys_jangling +coin +scissors +electric_shaver +shuffling_cards +typing +typewriter +computer_keyboard +writing +alarm +telephone +telephone_bell_ringing +ringtone +telephone_dialing +dial_tone +busy_signal +alarm_clock +siren +civil_defense_siren +buzzer +smoke_detector +fire_alarm +foghorn +whistle +steam_whistle +mechanisms +ratchet +clock +tick +tick-tock +gears +pulleys +sewing_machine +mechanical_fan +air_conditioning +cash_register +printer +camera +single-lens_reflex_camera +tools +hammer +jackhammer +sawing +filing +sanding +power_tool +drill +explosion +gunshot +machine_gun +fusillade +artillery_fire +cap_gun +fireworks +firecracker +burst +eruption +boom +wood +chop +splinter +crack +glass +chink +shatter +liquid +splash +slosh +squish +drip +pour +trickle +gush +fill +spray +pump +stir +boiling +sonar +arrow +whoosh +thump +thunk +electronic_tuner +effects_unit +chorus_effect +basketball_bounce +bang +slap +whack +smash +breaking +bouncing +whip +flap +scratch +scrape +rub +roll +crushing +crumpling +tearing +beep +ping +ding +clang +squeal +creak +rustle +whir +clatter +sizzle +clicking +clickety-clack +rumble +plop +jingle +hum +zing +boing +crunch +silence +sine_wave +harmonic +chirp_tone +sound_effect +pulse +inside +inside +inside +outside +outside +reverberation +echo +noise +environmental_noise +static +mains_hum +distortion +sidetone +cacophony +white_noise +pink_noise +throbbing +vibration +television +radio +field_recording diff --git a/benchmark.py b/benchmark.py index 3d0cacd87..8ba22d093 100755 --- a/benchmark.py +++ b/benchmark.py @@ -1,11 +1,11 @@ -import os -from statistics import mean -import multiprocessing as mp -import numpy as np import datetime +import multiprocessing as mp +from statistics import mean + +import numpy as np + from frigate.config import DetectorTypeEnum from frigate.object_detection import ( - LocalObjectDetector, ObjectDetectProcess, RemoteObjectDetector, load_labels, @@ -53,7 +53,7 @@ def start(id, num_detections, detection_queue, event): frame_times = [] for x in range(0, num_detections): start_frame = datetime.datetime.now().timestamp() - detections = object_detector.detect(my_frame) + object_detector.detect(my_frame) frame_times.append(datetime.datetime.now().timestamp() - start_frame) duration = datetime.datetime.now().timestamp() - start diff --git a/benchmark_motion.py b/benchmark_motion.py new file mode 100644 index 000000000..431398f98 --- /dev/null +++ b/benchmark_motion.py @@ -0,0 +1,118 @@ +import datetime +import multiprocessing as mp +import os + +import cv2 +import numpy as np + +from frigate.config import 
MotionConfig +from frigate.motion.improved_motion import ImprovedMotionDetector +from frigate.util import create_mask + +# get info on the video +# cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4") +# cap = cv2.VideoCapture("debug/motion_test_clips/rain_1.mp4") +cap = cv2.VideoCapture("debug/motion_test_clips/lawn_mower_night_1.mp4") +# cap = cv2.VideoCapture("airport.mp4") +width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) +height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) +fps = cap.get(cv2.CAP_PROP_FPS) +frame_shape = (height, width, 3) +# Nick back: +# "1280,0,1280,316,1170,216,1146,126,1016,127,979,82,839,0", +# "310,350,300,402,224,405,241,354", +# "378,0,375,26,0,23,0,0", +# Front door: +# "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0", +# "336,833,438,1024,346,1093,103,1052,24,814", +# Back +# "1855,0,1851,100,1289,96,1105,161,1045,119,890,121,890,0", +# "505,95,506,138,388,153,384,114", +# "689,72,689,122,549,134,547,89", +# "261,134,264,176,169,195,167,158", +# "145,159,146,202,70,220,65,183", + +mask = create_mask( + (height, width), + [ + "1080,0,1080,339,1010,280,1020,169,777,163,452,170,318,299,191,365,186,417,139,470,108,516,40,530,0,514,0,0", + "336,833,438,1024,346,1093,103,1052,24,814", + ], +) + +# create the motion config +motion_config_1 = MotionConfig() +motion_config_1.mask = np.zeros((height, width), np.uint8) +motion_config_1.mask[:] = mask +# motion_config_1.improve_contrast = 1 +motion_config_1.frame_height = 150 +# motion_config_1.frame_alpha = 0.02 +# motion_config_1.threshold = 30 +# motion_config_1.contour_area = 10 + +motion_config_2 = MotionConfig() +motion_config_2.mask = np.zeros((height, width), np.uint8) +motion_config_2.mask[:] = mask +# motion_config_2.improve_contrast = 1 +motion_config_2.frame_height = 150 +# motion_config_2.frame_alpha = 0.01 +motion_config_2.threshold = 20 +# motion_config.contour_area = 10 + +save_images = True + +improved_motion_detector_1 = ImprovedMotionDetector( + frame_shape=frame_shape, + config=motion_config_1, + fps=fps, + improve_contrast=mp.Value("i", motion_config_1.improve_contrast), + threshold=mp.Value("i", motion_config_1.threshold), + contour_area=mp.Value("i", motion_config_1.contour_area), + name="default", +) +improved_motion_detector_1.save_images = save_images + +improved_motion_detector_2 = ImprovedMotionDetector( + frame_shape=frame_shape, + config=motion_config_2, + fps=fps, + improve_contrast=mp.Value("i", motion_config_2.improve_contrast), + threshold=mp.Value("i", motion_config_2.threshold), + contour_area=mp.Value("i", motion_config_2.contour_area), + name="compare", +) +improved_motion_detector_2.save_images = save_images + +# read and process frames +ret, frame = cap.read() +frame_counter = 1 +while ret: + yuv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420) + + start_frame = datetime.datetime.now().timestamp() + improved_motion_detector_1.detect(yuv_frame) + + start_frame = datetime.datetime.now().timestamp() + improved_motion_detector_2.detect(yuv_frame) + + default_frame = f"debug/frames/default-{frame_counter}.jpg" + compare_frame = f"debug/frames/compare-{frame_counter}.jpg" + if os.path.exists(default_frame) and os.path.exists(compare_frame): + images = [ + cv2.imread(default_frame), + cv2.imread(compare_frame), + ] + + cv2.imwrite( + f"debug/frames/all-{frame_counter}.jpg", + cv2.vconcat(images) + if frame_shape[0] > frame_shape[1] + else cv2.hconcat(images), + ) + os.unlink(default_frame) + 
os.unlink(compare_frame) + frame_counter += 1 + + ret, frame = cap.read() + +cap.release() diff --git a/docker-compose.yml b/docker-compose.yml index e050840ab..a4d349194 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -11,15 +11,19 @@ services: shm_size: "256mb" build: context: . + dockerfile: docker/main/Dockerfile # Use target devcontainer-trt for TensorRT dev target: devcontainer - deploy: - resources: - reservations: - devices: - - driver: nvidia - count: 1 - capabilities: [gpu] + ## Uncomment this block for nvidia gpu support + # deploy: + # resources: + # reservations: + # devices: + # - driver: nvidia + # count: 1 + # capabilities: [gpu] + environment: + YOLO_MODELS: yolov7-320 devices: - /dev/bus/usb:/dev/bus/usb # - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware @@ -27,10 +31,8 @@ services: - .:/workspace/frigate:cached - ./web/dist:/opt/frigate/web:cached - /etc/localtime:/etc/localtime:ro - - ./config/config.yml:/config/config.yml:ro + - ./config:/config - ./debug:/media/frigate - # Create the trt-models folder using the documented method of generating TRT models - # - ./debug/trt-models:/trt-models - /dev/bus/usb:/dev/bus/usb mqtt: container_name: mqtt diff --git a/Dockerfile b/docker/main/Dockerfile similarity index 66% rename from Dockerfile rename to docker/main/Dockerfile index 204fc7c81..e35eac191 100644 --- a/Dockerfile +++ b/docker/main/Dockerfile @@ -1,13 +1,16 @@ -# syntax=docker/dockerfile:1.2 +# syntax=docker/dockerfile:1.6 # https://askubuntu.com/questions/972516/debian-frontend-environment-variable ARG DEBIAN_FRONTEND=noninteractive -FROM debian:11 AS base +ARG BASE_IMAGE=debian:11 +ARG SLIM_BASE=debian:11-slim -FROM --platform=linux/amd64 debian:11 AS base_amd64 +FROM ${BASE_IMAGE} AS base -FROM debian:11-slim AS slim-base +FROM --platform=${BUILDPLATFORM} debian:11 AS base_host + +FROM ${SLIM_BASE} AS slim-base FROM slim-base AS wget ARG DEBIAN_FRONTEND @@ -18,17 +21,19 @@ WORKDIR /rootfs FROM base AS nginx ARG DEBIAN_FRONTEND +ENV CCACHE_DIR /root/.ccache +ENV CCACHE_MAXSIZE 2G # bind /var/cache/apt to tmpfs to speed up nginx build RUN --mount=type=tmpfs,target=/tmp --mount=type=tmpfs,target=/var/cache/apt \ - --mount=type=bind,source=docker/build_nginx.sh,target=/deps/build_nginx.sh \ + --mount=type=bind,source=docker/main/build_nginx.sh,target=/deps/build_nginx.sh \ + --mount=type=cache,target=/root/.ccache \ /deps/build_nginx.sh -FROM wget AS go2rtc +FROM scratch AS go2rtc ARG TARGETARCH WORKDIR /rootfs/usr/local/go2rtc/bin -RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.2.0/go2rtc_linux_${TARGETARCH}" \ - && chmod +x go2rtc +ADD --link --chmod=755 "https://github.com/AlexxIT/go2rtc/releases/download/v1.8.4/go2rtc_linux_${TARGETARCH}" go2rtc #### @@ -40,11 +45,11 @@ RUN wget -qO go2rtc "https://github.com/AlexxIT/go2rtc/releases/download/v1.2.0/ # #### # Download and Convert OpenVino model -FROM base_amd64 AS ov-converter +FROM base_host AS ov-converter ARG DEBIAN_FRONTEND # Install OpenVino Runtime and Dev library -COPY requirements-ov.txt /requirements-ov.txt +COPY docker/main/requirements-ov.txt /requirements-ov.txt RUN apt-get -qq update \ && apt-get -qq install -y wget python3 python3-distutils \ && wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ @@ -61,25 +66,27 @@ RUN mkdir /models \ FROM wget as libusb-build ARG TARGETARCH ARG DEBIAN_FRONTEND +ENV CCACHE_DIR /root/.ccache +ENV CCACHE_MAXSIZE 2G # Build libUSB without udev. 
Needed for Openvino NCS2 support WORKDIR /opt -RUN apt-get update && apt-get install -y unzip build-essential automake libtool -RUN wget -q https://github.com/libusb/libusb/archive/v1.0.25.zip -O v1.0.25.zip && \ - unzip v1.0.25.zip && cd libusb-1.0.25 && \ +RUN apt-get update && apt-get install -y unzip build-essential automake libtool ccache pkg-config +RUN --mount=type=cache,target=/root/.ccache wget -q https://github.com/libusb/libusb/archive/v1.0.26.zip -O v1.0.26.zip && \ + unzip v1.0.26.zip && cd libusb-1.0.26 && \ ./bootstrap.sh && \ - ./configure --disable-udev --enable-shared && \ + ./configure CC='ccache gcc' CCX='ccache g++' --disable-udev --enable-shared && \ make -j $(nproc --all) RUN apt-get update && \ apt-get install -y --no-install-recommends libusb-1.0-0-dev && \ rm -rf /var/lib/apt/lists/* -WORKDIR /opt/libusb-1.0.25/libusb +WORKDIR /opt/libusb-1.0.26/libusb RUN /bin/mkdir -p '/usr/local/lib' && \ /bin/bash ../libtool --mode=install /usr/bin/install -c libusb-1.0.la '/usr/local/lib' && \ /bin/mkdir -p '/usr/local/include/libusb-1.0' && \ /usr/bin/install -c -m 644 libusb.h '/usr/local/include/libusb-1.0' && \ /bin/mkdir -p '/usr/local/lib/pkgconfig' && \ - cd /opt/libusb-1.0.25/ && \ + cd /opt/libusb-1.0.26/ && \ /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \ ldconfig @@ -93,12 +100,14 @@ COPY labelmap.txt . COPY --from=ov-converter /models/public/ssdlite_mobilenet_v2/FP16 openvino-model RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \ sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt - +# Get Audio Model and labels +RUN wget -qO cpu_audio_model.tflite https://tfhub.dev/google/lite-model/yamnet/classification/tflite/1?lite-format=tflite +COPY audio-labelmap.txt . 
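The models stage above pairs the YAMNet audio classifier with `audio-labelmap.txt`, and the mapping is positional: each line corresponds to one YAMNet class index, with repeated labels (the several `speech` rows, `yell`, `buzz`, ...) collapsing related AudioSet classes into a single Frigate label. A quick sanity check under that assumption:

```bash
# YAMNet emits 521 class scores, so the labelmap must be exactly 521 lines long.
wc -l < audio-labelmap.txt                           # expect 521

# Show the most heavily collapsed labels (speech, laughter, yell, ...).
sort audio-labelmap.txt | uniq -c | sort -rn | head
```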
FROM wget AS s6-overlay ARG TARGETARCH -RUN --mount=type=bind,source=docker/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \ +RUN --mount=type=bind,source=docker/main/install_s6_overlay.sh,target=/deps/install_s6_overlay.sh \ /deps/install_s6_overlay.sh @@ -112,13 +121,15 @@ RUN apt-get -qq update \ apt-transport-https \ gnupg \ wget \ - && apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 9165938D90FDDD2E \ - && echo "deb http://raspbian.raspberrypi.org/raspbian/ bullseye main contrib non-free rpi" | tee /etc/apt/sources.list.d/raspi.list \ + # the key fingerprint can be obtained from https://ftp-master.debian.org/keys.html + && wget -qO- "https://keyserver.ubuntu.com/pks/lookup?op=get&search=0xA4285295FC7B1A81600062A9605C66F00D6C9793" | \ + gpg --dearmor > /usr/share/keyrings/debian-archive-bullseye-stable.gpg \ + && echo "deb [signed-by=/usr/share/keyrings/debian-archive-bullseye-stable.gpg] http://deb.debian.org/debian bullseye main contrib non-free" | \ + tee /etc/apt/sources.list.d/debian-bullseye-nonfree.list \ && apt-get -qq update \ && apt-get -qq install -y \ - python3 \ - python3-dev \ - wget \ + python3.9 \ + python3.9-dev \ # opencv dependencies build-essential cmake git pkg-config libgtk-3-dev \ libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \ @@ -130,28 +141,17 @@ RUN apt-get -qq update \ gcc gfortran libopenblas-dev liblapack-dev && \ rm -rf /var/lib/apt/lists/* +# Ensure python3 defaults to python3.9 +RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 + RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ && python3 get-pip.py "pip" -RUN if [ "${TARGETARCH}" = "arm" ]; \ - then echo "[global]" > /etc/pip.conf \ - && echo "extra-index-url=https://www.piwheels.org/simple" >> /etc/pip.conf; \ - fi +COPY docker/main/requirements.txt /requirements.txt +RUN pip3 install -r /requirements.txt -COPY requirements.txt /requirements.txt -RUN pip3 install -r requirements.txt - -COPY requirements-wheels.txt /requirements-wheels.txt -RUN pip3 wheel --wheel-dir=/wheels -r requirements-wheels.txt - -# Make this a separate target so it can be built/cached optionally -FROM wheels as trt-wheels -ARG DEBIAN_FRONTEND -ARG TARGETARCH - -# Add TensorRT wheels to another folder -COPY requirements-tensorrt.txt /requirements-tensorrt.txt -RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r requirements-tensorrt.txt +COPY docker/main/requirements-wheels.txt /requirements-wheels.txt +RUN pip3 wheel --wheel-dir=/wheels -r /requirements-wheels.txt # Collect deps in a single layer @@ -161,7 +161,7 @@ COPY --from=go2rtc /rootfs/ / COPY --from=libusb-build /usr/local/lib /usr/local/lib COPY --from=s6-overlay /rootfs/ / COPY --from=models /rootfs/ / -COPY docker/rootfs/ / +COPY docker/main/rootfs/ / # Frigate deps (ffmpeg, python, nginx, go2rtc, s6-overlay, etc) @@ -179,10 +179,11 @@ ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility" ENV PATH="/usr/lib/btbn-ffmpeg/bin:/usr/local/go2rtc/bin:/usr/local/nginx/sbin:${PATH}" # Install dependencies -RUN --mount=type=bind,source=docker/install_deps.sh,target=/deps/install_deps.sh \ +RUN --mount=type=bind,source=docker/main/install_deps.sh,target=/deps/install_deps.sh \ /deps/install_deps.sh RUN --mount=type=bind,from=wheels,source=/wheels,target=/deps/wheels \ + python3 -m pip install --upgrade pip && \ pip3 install -U /deps/wheels/*.whl COPY --from=deps-rootfs / / @@ -200,24 +201,27 @@ ENV S6_LOGGING_SCRIPT="T 1 n0 s10000000 T" ENTRYPOINT ["/init"] CMD [] +HEALTHCHECK 
--start-period=120s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ + CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 + # Frigate deps with Node.js and NPM for devcontainer FROM deps AS devcontainer # Do not start the actual Frigate service on devcontainer as it will be started by VSCode # But start a fake service for simulating the logs -COPY docker/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run +COPY docker/main/fake_frigate_run /etc/s6-overlay/s6-rc.d/frigate/run # Create symbolic link to the frigate source code, as go2rtc's create_config.sh uses it RUN mkdir -p /opt/frigate \ && ln -svf /workspace/frigate/frigate /opt/frigate/frigate -# Install Node 16 -RUN apt-get update \ - && apt-get install wget -y \ - && wget -qO- https://deb.nodesource.com/setup_16.x | bash - \ - && apt-get install -y nodejs \ +# Install Node 20 +RUN curl -SLO https://deb.nodesource.com/nsolid_setup_deb.sh && \ + chmod 500 nsolid_setup_deb.sh && \ + ./nsolid_setup_deb.sh 20 && \ + apt-get install nodejs -y \ && rm -rf /var/lib/apt/lists/* \ - && npm install -g npm@9 + && npm install -g npm@10 WORKDIR /workspace/frigate @@ -225,15 +229,15 @@ RUN apt-get update \ && apt-get install make -y \ && rm -rf /var/lib/apt/lists/* -RUN --mount=type=bind,source=./requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \ +RUN --mount=type=bind,source=./docker/main/requirements-dev.txt,target=/workspace/frigate/requirements-dev.txt \ pip3 install -r requirements-dev.txt CMD ["sleep", "infinity"] # Frigate web build -# force this to run on amd64 because QEMU is painfully slow -FROM --platform=linux/amd64 node:16 AS web-build +# This should be architecture agnostic, so speed up the build on multiarch by not using QEMU. +FROM --platform=$BUILDPLATFORM node:16 AS web-build WORKDIR /work COPY web/package.json web/package-lock.json ./ @@ -257,16 +261,3 @@ FROM deps AS frigate WORKDIR /opt/frigate/ COPY --from=rootfs / / - -# Frigate w/ TensorRT Support as separate image -FROM frigate AS frigate-tensorrt -RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ - pip3 install -U /deps/trt-wheels/*.whl && \ - ln -s libnvrtc.so.11.2 /usr/local/lib/python3.9/dist-packages/nvidia/cuda_nvrtc/lib/libnvrtc.so && \ - ldconfig - -# Dev Container w/ TRT -FROM devcontainer AS devcontainer-trt - -RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ - pip3 install -U /deps/trt-wheels/*.whl diff --git a/docker/build_nginx.sh b/docker/main/build_nginx.sh similarity index 91% rename from docker/build_nginx.sh rename to docker/main/build_nginx.sh index fd1432f32..fd604c122 100755 --- a/docker/build_nginx.sh +++ b/docker/main/build_nginx.sh @@ -2,10 +2,10 @@ set -euxo pipefail -NGINX_VERSION="1.22.1" -VOD_MODULE_VERSION="1.30" -SECURE_TOKEN_MODULE_VERSION="1.4" -RTMP_MODULE_VERSION="1.2.1" +NGINX_VERSION="1.25.3" +VOD_MODULE_VERSION="1.31" +SECURE_TOKEN_MODULE_VERSION="1.5" +RTMP_MODULE_VERSION="1.2.2" cp /etc/apt/sources.list /etc/apt/sources.list.d/sources-src.list sed -i 's|deb http|deb-src http|g' /etc/apt/sources.list.d/sources-src.list @@ -15,6 +15,10 @@ apt-get -yqq build-dep nginx apt-get -yqq install --no-install-recommends ca-certificates wget update-ca-certificates -f +apt install -y ccache + +export PATH="/usr/lib/ccache:$PATH" + mkdir /tmp/nginx wget -nv https://nginx.org/download/nginx-${NGINX_VERSION}.tar.gz tar -zxf nginx-${NGINX_VERSION}.tar.gz -C /tmp/nginx --strip-components=1 @@ -62,5 +66,5 @@ cd /tmp/nginx 
--add-module=../nginx-rtmp-module \ --with-cc-opt="-O3 -Wno-error=implicit-fallthrough" -make -j$(nproc) && make install +make CC="ccache gcc" -j$(nproc) && make install rm -rf /usr/local/nginx/html /usr/local/nginx/conf/*.default diff --git a/docker/fake_frigate_run b/docker/main/fake_frigate_run similarity index 100% rename from docker/fake_frigate_run rename to docker/main/fake_frigate_run diff --git a/docker/install_deps.sh b/docker/main/install_deps.sh similarity index 50% rename from docker/install_deps.sh rename to docker/main/install_deps.sh index f84087b9d..43fff479b 100755 --- a/docker/install_deps.sh +++ b/docker/main/install_deps.sh @@ -10,9 +10,14 @@ apt-get -qq install --no-install-recommends -y \ wget \ procps vainfo \ unzip locales tzdata libxml2 xz-utils \ + python3.9 \ python3-pip \ curl \ - jq + jq \ + nethogs + +# ensure python3 defaults to python3.9 +update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 mkdir -p -m 600 /root/.gnupg @@ -22,8 +27,10 @@ curl -fsSLo - https://packages.cloud.google.com/apt/doc/apt-key.gpg | \ echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | tee /etc/apt/sources.list.d/coral-edgetpu.list echo "libedgetpu1-max libedgetpu/accepted-eula select true" | debconf-set-selections -# enable non-free repo -sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list +# enable non-free repo in Debian +if grep -q "Debian" /etc/issue; then + sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list +fi # coral drivers apt-get -qq update @@ -38,37 +45,26 @@ if [[ "${TARGETARCH}" == "amd64" ]]; then rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay fi -# ffmpeg -> arm32 -if [[ "${TARGETARCH}" == "arm" ]]; then - # add raspberry pi repo - gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 9165938D90FDDD2E - echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] http://raspbian.raspberrypi.org/raspbian/ bullseye main contrib non-free rpi" | tee /etc/apt/sources.list.d/raspi.list - apt-get -qq update - apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg -fi - # ffmpeg -> arm64 if [[ "${TARGETARCH}" == "arm64" ]]; then - # add raspberry pi repo - gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E - echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list - apt-get -qq update - apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg + mkdir -p /usr/lib/btbn-ffmpeg + wget -qO btbn-ffmpeg.tar.xz "https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2022-07-31-12-37/ffmpeg-n5.1-2-g915ef932a3-linuxarm64-gpl-5.1.tar.xz" + tar -xf btbn-ffmpeg.tar.xz -C /usr/lib/btbn-ffmpeg --strip-components 1 + rm -rf btbn-ffmpeg.tar.xz /usr/lib/btbn-ffmpeg/doc /usr/lib/btbn-ffmpeg/bin/ffplay fi # arch specific packages if [[ "${TARGETARCH}" == "amd64" ]]; then - # Use debian testing repo only for hwaccel packages - echo 'deb http://deb.debian.org/debian testing main non-free' >/etc/apt/sources.list.d/debian-testing.list + # use debian bookworm for hwaccel packages + echo 'deb https://deb.debian.org/debian bookworm main contrib non-free' >/etc/apt/sources.list.d/debian-bookworm.list apt-get -qq update - # intel-opencl-icd specifically for GPU support in OpenVino apt-get -qq install 
--no-install-recommends --no-install-suggests -y \ intel-opencl-icd \ - mesa-va-drivers libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 radeontop intel-gpu-tools + mesa-va-drivers radeontop libva-drm2 intel-media-va-driver-non-free i965-va-driver libmfx1 intel-gpu-tools # something about this dependency requires it to be installed in a separate call rather than in the line above apt-get -qq install --no-install-recommends --no-install-suggests -y \ i965-va-driver-shaders - rm -f /etc/apt/sources.list.d/debian-testing.list + rm -f /etc/apt/sources.list.d/debian-bookworm.list fi if [[ "${TARGETARCH}" == "arm64" ]]; then @@ -76,17 +72,13 @@ if [[ "${TARGETARCH}" == "arm64" ]]; then libva-drm2 mesa-va-drivers fi -# not sure why 32bit arm requires all these -if [[ "${TARGETARCH}" == "arm" ]]; then - apt-get -qq install --no-install-recommends --no-install-suggests -y \ - libgtk-3-dev \ - libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \ - libxvidcore-dev libx264-dev libjpeg-dev libpng-dev libtiff-dev \ - gfortran openexr libatlas-base-dev libtbb-dev libdc1394-22-dev libopenexr-dev \ - libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev -fi - -apt-get purge gnupg apt-transport-https wget xz-utils -y +apt-get purge gnupg apt-transport-https xz-utils -y apt-get clean autoclean -y apt-get autoremove --purge -y rm -rf /var/lib/apt/lists/* + +# Install yq, for frigate-prepare and go2rtc echo source +curl -fsSL \ + "https://github.com/mikefarah/yq/releases/download/v4.33.3/yq_linux_$(dpkg --print-architecture)" \ + --output /usr/local/bin/yq +chmod +x /usr/local/bin/yq diff --git a/docker/install_s6_overlay.sh b/docker/main/install_s6_overlay.sh similarity index 85% rename from docker/install_s6_overlay.sh rename to docker/main/install_s6_overlay.sh index 9a849cb65..75acba774 100755 --- a/docker/install_s6_overlay.sh +++ b/docker/main/install_s6_overlay.sh @@ -2,12 +2,10 @@ set -euxo pipefail -s6_version="3.1.4.1" +s6_version="3.1.5.0" if [[ "${TARGETARCH}" == "amd64" ]]; then s6_arch="x86_64" -elif [[ "${TARGETARCH}" == "arm" ]]; then - s6_arch="armhf" elif [[ "${TARGETARCH}" == "arm64" ]]; then s6_arch="aarch64" fi diff --git a/docker/main/requirements-dev.txt b/docker/main/requirements-dev.txt new file mode 100644 index 000000000..af3ee5763 --- /dev/null +++ b/docker/main/requirements-dev.txt @@ -0,0 +1 @@ +ruff diff --git a/docker/main/requirements-ov.txt b/docker/main/requirements-ov.txt new file mode 100644 index 000000000..20e5a29c1 --- /dev/null +++ b/docker/main/requirements-ov.txt @@ -0,0 +1,5 @@ +numpy +# Openvino Library - Custom built with MYRIAD support +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' +openvino-dev[tensorflow2] @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino_dev-2022.3.1-1-py3-none-any.whl diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt new file mode 100644 index 000000000..f4167744e --- /dev/null +++ b/docker/main/requirements-wheels.txt @@ -0,0 +1,29 @@ +click == 8.1.* +Flask == 2.3.* +imutils == 0.5.* +matplotlib == 3.7.* +mypy == 1.6.1 +numpy == 1.23.* +onvif_zeep == 0.2.12 +opencv-python-headless == 4.7.0.* +paho-mqtt == 1.6.* +peewee == 3.17.* 
+peewee_migrate == 1.12.* +psutil == 5.9.* +pydantic == 1.10.* +git+https://github.com/fbcotter/py3nvml#egg=py3nvml +PyYAML == 6.0.* +pytz == 2023.3.post1 +ruamel.yaml == 0.18.* +tzlocal == 5.2 +types-PyYAML == 6.0.* +requests == 2.31.* +types-requests == 2.31.* +scipy == 1.11.* +norfair == 2.2.* +setproctitle == 1.3.* +ws4py == 0.5.* +unidecode == 1.3.* +# Openvino Library - Custom built with MYRIAD support +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64' +openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64' diff --git a/docker/main/requirements.txt b/docker/main/requirements.txt new file mode 100644 index 000000000..90780e2b4 --- /dev/null +++ b/docker/main/requirements.txt @@ -0,0 +1,2 @@ +scikit-build == 0.17.* +nvidia-pyindex diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/consumer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/dependencies.d/log-prepare diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/pipeline-name diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate-log/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/go2rtc diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/finish diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/producer-for diff --git a/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run 
b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run new file mode 100755 index 000000000..f2cc40fcf --- /dev/null +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/run @@ -0,0 +1,55 @@ +#!/command/with-contenv bash +# shellcheck shell=bash +# Start the Frigate service + +set -o errexit -o nounset -o pipefail + +# Logs should be sent to stdout so that s6 can collect them + +# Tell S6-Overlay not to restart this service +s6-svc -O . + +function migrate_db_path() { + # Find config file in yaml or yml, but prefer yaml + local config_file="${CONFIG_FILE:-"/config/config.yml"}" + local config_file_yaml="${config_file//.yml/.yaml}" + if [[ -f "${config_file_yaml}" ]]; then + config_file="${config_file_yaml}" + elif [[ ! -f "${config_file}" ]]; then + echo "[ERROR] Frigate config file not found" + return 1 + fi + unset config_file_yaml + + # Use yq to check if database.path is set + local user_db_path + user_db_path=$(yq eval '.database.path' "${config_file}") + + if [[ "${user_db_path}" == "null" ]]; then + local previous_db_path="/media/frigate/frigate.db" + local new_db_dir="/config" + if [[ -f "${previous_db_path}" ]]; then + if mountpoint --quiet "${new_db_dir}"; then + # /config is a mount point, move the db + echo "[INFO] Moving db from '${previous_db_path}' to the '${new_db_dir}' dir..." + # Move all files that starts with frigate.db to the new directory + mv -vf "${previous_db_path}"* "${new_db_dir}" + else + echo "[ERROR] Trying to migrate the db path from '${previous_db_path}' to the '${new_db_dir}' dir, but '${new_db_dir}' is not a mountpoint, please mount the '${new_db_dir}' dir" + return 1 + fi + fi + fi +} + +echo "[INFO] Preparing Frigate..." +migrate_db_path +export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') + +echo "[INFO] Starting Frigate..." 
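+# Note: yq prints the literal string "null" when a key is absent, so
+# migrate_db_path above only relocates the database when the user has not
+# pinned a custom path, e.g.:
+#   yq eval '.database.path' /config/config.yml   # -> "null" or the configured path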
+ +cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" + +# Replace the bash process with the Frigate process, redirecting stderr to stdout +exec 2>&1 +exec python3 -u -m frigate diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/frigate/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/dependencies.d/go2rtc diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/finish diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/producer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-healthcheck/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/consumer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/dependencies.d/log-prepare diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name 
b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/pipeline-name diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc-log/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/dependencies.d/base diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/finish diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/producer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run similarity index 83% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run index 85c8f9526..851d78799 100755 --- a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run +++ b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/run @@ -43,8 +43,15 @@ function get_ip_and_port_from_supervisor() { export FRIGATE_GO2RTC_WEBRTC_CANDIDATE_INTERNAL="${ip_address}:${webrtc_port}" } +export LIBAVFORMAT_VERSION_MAJOR=$(ffmpeg -version | grep -Po 'libavformat\W+\K\d+') + +if [[ -f "/dev/shm/go2rtc.yaml" ]]; then + echo "[INFO] Removing stale config from last run..." + rm /dev/shm/go2rtc.yaml +fi + if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then - echo "[INFO] Preparing go2rtc config..." + echo "[INFO] Preparing new go2rtc config..." if [[ -n "${SUPERVISOR_TOKEN:-}" ]]; then # Running as a Home Assistant add-on, infer the IP address and port @@ -52,6 +59,8 @@ if [[ ! -f "/dev/shm/go2rtc.yaml" ]]; then fi python3 /usr/local/go2rtc/create_config.py +else + echo "[WARNING] Unable to remove existing go2rtc config. Changes made to your frigate config file may not be recognized. Please remove the /dev/shm/go2rtc.yaml from your docker host manually." 
fi readonly config_path="/config" diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/go2rtc/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/dependencies.d/base diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/log-prepare/up diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/consumer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/dependencies.d/log-prepare diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/pipeline-name diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx-log/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate similarity index 100% rename from 
docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/dependencies.d/frigate diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/finish diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/producer-for diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/run b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/run rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/run diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/timeout-kill diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/type b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/type similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/nginx/type rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/nginx/type diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/frigate-pipeline diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/go2rtc-pipeline diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline b/docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline similarity index 100% rename from docker/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline rename to docker/main/rootfs/etc/s6-overlay/s6-rc.d/user/contents.d/nginx-pipeline diff --git a/docker/rootfs/usr/local/go2rtc/create_config.py b/docker/main/rootfs/usr/local/go2rtc/create_config.py similarity index 56% rename from docker/rootfs/usr/local/go2rtc/create_config.py rename to docker/main/rootfs/usr/local/go2rtc/create_config.py index d201eb381..51d75f0e0 100644 --- a/docker/rootfs/usr/local/go2rtc/create_config.py +++ b/docker/main/rootfs/usr/local/go2rtc/create_config.py @@ -3,16 +3,28 @@ import json import os import sys +from pathlib import Path + import yaml sys.path.insert(0, "/opt/frigate") -from frigate.const import BIRDSEYE_PIPE, BTBN_PATH -from frigate.ffmpeg_presets import parse_preset_hardware_acceleration_encode +from frigate.const import BIRDSEYE_PIPE # noqa: E402 +from frigate.ffmpeg_presets import ( # noqa: E402 + parse_preset_hardware_acceleration_encode, +) sys.path.remove("/opt/frigate") FRIGATE_ENV_VARS = {k: v for k, v in os.environ.items() if k.startswith("FRIGATE_")} +# read docker secret files as env vars too +if 
os.path.isdir("/run/secrets"): + for secret_file in os.listdir("/run/secrets"): + if secret_file.startswith("FRIGATE_"): + FRIGATE_ENV_VARS[secret_file] = Path( + os.path.join("/run/secrets", secret_file) + ).read_text() + config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") # Check if we can use .yaml instead of .yml @@ -36,13 +48,25 @@ if go2rtc_config.get("api") is None: elif go2rtc_config["api"].get("origin") is None: go2rtc_config["api"]["origin"] = "*" +# Need to set default location for HA config +if go2rtc_config.get("hass") is None: + go2rtc_config["hass"] = {"config": "/config"} + # we want to ensure that logs are easy to read if go2rtc_config.get("log") is None: go2rtc_config["log"] = {"format": "text"} elif go2rtc_config["log"].get("format") is None: go2rtc_config["log"]["format"] = "text" -if not go2rtc_config.get("webrtc", {}).get("candidates", []): +# ensure there is a default webrtc config +if not go2rtc_config.get("webrtc"): + go2rtc_config["webrtc"] = {} + +# go2rtc should listen on 8555 tcp & udp by default +if not go2rtc_config["webrtc"].get("listen"): + go2rtc_config["webrtc"]["listen"] = ":8555" + +if not go2rtc_config["webrtc"].get("candidates", []): default_candidates = [] # use internal candidate if it was discovered when running through the add-on internal_candidate = os.environ.get( @@ -64,11 +88,22 @@ else: # as source for frigate and the integration supports HLS playback if go2rtc_config.get("rtsp") is None: go2rtc_config["rtsp"] = {"default_query": "mp4"} -elif go2rtc_config["rtsp"].get("default_query") is None: - go2rtc_config["rtsp"]["default_query"] = "mp4" +else: + if go2rtc_config["rtsp"].get("default_query") is None: + go2rtc_config["rtsp"]["default_query"] = "mp4" + + if go2rtc_config["rtsp"].get("username") is not None: + go2rtc_config["rtsp"]["username"] = go2rtc_config["rtsp"]["username"].format( + **FRIGATE_ENV_VARS + ) + + if go2rtc_config["rtsp"].get("password") is not None: + go2rtc_config["rtsp"]["password"] = go2rtc_config["rtsp"]["password"].format( + **FRIGATE_ENV_VARS + ) # need to replace ffmpeg command when using ffmpeg4 -if not os.path.exists(BTBN_PATH): +if int(os.environ["LIBAVFORMAT_VERSION_MAJOR"]) < 59: if go2rtc_config.get("ffmpeg") is None: go2rtc_config["ffmpeg"] = { "rtsp": "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" @@ -78,16 +113,43 @@ if not os.path.exists(BTBN_PATH): "rtsp" ] = "-fflags nobuffer -flags low_delay -stimeout 5000000 -user_agent go2rtc/ffmpeg -rtsp_transport tcp -i {input}" +# add hardware acceleration presets for rockchip devices +# may be removed if frigate uses a go2rtc version that includes these presets +if go2rtc_config.get("ffmpeg") is None: + go2rtc_config["ffmpeg"] = { + "h264/rk": "-c:v h264_rkmpp_encoder -g 50 -bf 0", + "h265/rk": "-c:v hevc_rkmpp_encoder -g 50 -bf 0", + } +else: + if go2rtc_config["ffmpeg"].get("h264/rk") is None: + go2rtc_config["ffmpeg"]["h264/rk"] = "-c:v h264_rkmpp_encoder -g 50 -bf 0" + + if go2rtc_config["ffmpeg"].get("h265/rk") is None: + go2rtc_config["ffmpeg"]["h265/rk"] = "-c:v hevc_rkmpp_encoder -g 50 -bf 0" + for name in go2rtc_config.get("streams", {}): stream = go2rtc_config["streams"][name] if isinstance(stream, str): - go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format( - **FRIGATE_ENV_VARS - ) + try: + go2rtc_config["streams"][name] = go2rtc_config["streams"][name].format( + **FRIGATE_ENV_VARS + ) + except KeyError as e: + print( + "[ERROR] Invalid substitution found, 
see https://docs.frigate.video/configuration/restream#advanced-restream-configurations for more info." + ) + sys.exit(e) + elif isinstance(stream, list): for i, stream in enumerate(stream): - go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS) + try: + go2rtc_config["streams"][name][i] = stream.format(**FRIGATE_ENV_VARS) + except KeyError as e: + print( + "[ERROR] Invalid substitution found, see https://docs.frigate.video/configuration/restream#advanced-restream-configurations for more info." + ) + sys.exit(e) # add birdseye restream stream if enabled if config.get("birdseye", {}).get("restream", False): diff --git a/docker/rootfs/usr/local/nginx/conf/nginx.conf b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf similarity index 60% rename from docker/rootfs/usr/local/nginx/conf/nginx.conf rename to docker/main/rootfs/usr/local/nginx/conf/nginx.conf index eff884c14..46706a92f 100644 --- a/docker/rootfs/usr/local/nginx/conf/nginx.conf +++ b/docker/main/rootfs/usr/local/nginx/conf/nginx.conf @@ -32,6 +32,13 @@ http { gzip_proxied no-cache no-store private expired auth; gzip_vary on; + proxy_cache_path /dev/shm/nginx_cache levels=1:2 keys_zone=api_cache:10m max_size=10m inactive=1m use_temp_path=off; + + map $sent_http_content_type $should_not_cache { + 'application/json' 0; + default 1; + } + upstream frigate_api { server 127.0.0.1:5001; keepalive 1024; @@ -93,10 +100,6 @@ http { secure_token $args; secure_token_types application/vnd.apple.mpegurl; - add_header Access-Control-Allow-Headers '*'; - add_header Access-Control-Expose-Headers 'Server,range,Content-Length,Content-Range'; - add_header Access-Control-Allow-Methods 'GET, HEAD, OPTIONS'; - add_header Access-Control-Allow-Origin '*'; add_header Cache-Control "no-store"; expires off; } @@ -104,16 +107,6 @@ http { location /stream/ { add_header Cache-Control "no-store"; expires off; - add_header 'Access-Control-Allow-Origin' "$http_origin" always; - add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Expose-Headers' 'Content-Length'; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' "$http_origin"; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } types { application/dash+xml mpd; @@ -126,16 +119,6 @@ http { } location /clips/ { - add_header 'Access-Control-Allow-Origin' "$http_origin" always; - add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Expose-Headers' 'Content-Length'; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' "$http_origin"; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; - } types { video/mp4 mp4; @@ -152,17 +135,16 @@ http { } location /recordings/ { - add_header 'Access-Control-Allow-Origin' "$http_origin" always; - add_header 'Access-Control-Allow-Credentials' 'true'; - add_header 'Access-Control-Expose-Headers' 'Content-Length'; - if ($request_method = 'OPTIONS') { - add_header 'Access-Control-Allow-Origin' "$http_origin"; - add_header 'Access-Control-Max-Age' 1728000; - add_header 'Content-Type' 'text/plain charset=UTF-8'; - add_header 'Content-Length' 0; - return 204; + types { + video/mp4 mp4; } + autoindex on; + autoindex_format json; + root /media/frigate; + } + + location /exports/ { types { video/mp4 mp4; } @@ -174,58 +156,97 @@ http { location /ws { proxy_pass 
http://mqtt_ws/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + include proxy.conf; } location /live/jsmpeg/ { proxy_pass http://jsmpeg/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + include proxy.conf; } - location /live/mse/ { - proxy_pass http://go2rtc/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + # frigate lovelace card uses this path + location /live/mse/api/ws { + limit_except GET { + deny all; + } + proxy_pass http://go2rtc/api/ws; + include proxy.conf; } - location /live/webrtc/ { - proxy_pass http://go2rtc/; - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "Upgrade"; - proxy_set_header Host $host; + location /live/webrtc/api/ws { + limit_except GET { + deny all; + } + proxy_pass http://go2rtc/api/ws; + include proxy.conf; + } + + # pass through go2rtc player + location /live/webrtc/webrtc.html { + limit_except GET { + deny all; + } + proxy_pass http://go2rtc/webrtc.html; + include proxy.conf; + } + + # frontend uses this to fetch the version + location /api/go2rtc/api { + limit_except GET { + deny all; + } + proxy_pass http://go2rtc/api; + include proxy.conf; + } + + # integration uses this to add webrtc candidate + location /api/go2rtc/webrtc { + limit_except POST { + deny all; + } + proxy_pass http://go2rtc/api/webrtc; + include proxy.conf; } location ~* /api/.*\.(jpg|jpeg|png)$ { - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS'; rewrite ^/api/(.*)$ $1 break; proxy_pass http://frigate_api; - proxy_pass_request_headers on; - proxy_set_header Host $host; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; + include proxy.conf; } location /api/ { add_header Cache-Control "no-store"; expires off; - - add_header 'Access-Control-Allow-Origin' '*'; - add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS'; proxy_pass http://frigate_api/; - proxy_pass_request_headers on; - proxy_set_header Host $host; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; + include proxy.conf; + + proxy_cache api_cache; + proxy_cache_lock on; + proxy_cache_use_stale updating; + proxy_cache_valid 200 5s; + proxy_cache_bypass $http_x_cache_bypass; + proxy_no_cache $should_not_cache; + add_header X-Cache-Status $upstream_cache_status; + + location /api/vod/ { + proxy_pass http://frigate_api/vod/; + include proxy.conf; + proxy_cache off; + } + + location /api/stats { + access_log off; + rewrite ^/api/(.*)$ $1 break; + proxy_pass http://frigate_api; + include proxy.conf; + } + + location /api/version { + access_log off; + rewrite ^/api/(.*)$ $1 break; + proxy_pass http://frigate_api; + include proxy.conf; + } } location / { @@ -268,4 +289,4 @@ rtmp { meta copy; } } -} \ No newline at end of file +} diff --git a/docker/main/rootfs/usr/local/nginx/conf/proxy.conf b/docker/main/rootfs/usr/local/nginx/conf/proxy.conf new file mode 100644 index 000000000..442c78718 --- /dev/null +++ b/docker/main/rootfs/usr/local/nginx/conf/proxy.conf @@ -0,0 +1,4 @@ +proxy_http_version 1.1; +proxy_set_header Upgrade $http_upgrade; +proxy_set_header Connection "Upgrade"; +proxy_set_header Host 
$host; \ No newline at end of file diff --git a/docker/rockchip/Dockerfile b/docker/rockchip/Dockerfile new file mode 100644 index 000000000..b27e4f223 --- /dev/null +++ b/docker/rockchip/Dockerfile @@ -0,0 +1,32 @@ +# syntax=docker/dockerfile:1.6 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +FROM wheels as rk-wheels +COPY docker/main/requirements-wheels.txt /requirements-wheels.txt +COPY docker/rockchip/requirements-wheels-rk.txt /requirements-wheels-rk.txt +RUN sed -i "/https:\/\//d" /requirements-wheels.txt +RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requirements-wheels-rk.txt + +FROM deps AS rk-deps +ARG TARGETARCH + +RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \ + pip3 install -U /deps/rk-wheels/*.whl + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / + +ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so /usr/lib/ +ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so /usr/lib/ + +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3562/yolov8n-320x320-rk3562.rknn /models/rknn/ +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3566/yolov8n-320x320-rk3566.rknn /models/rknn/ +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3568/yolov8n-320x320-rk3568.rknn /models/rknn/ +ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8n-320x320-rk3588.rknn /models/rknn/ + +RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg +RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe +ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffmpeg /usr/lib/btbn-ffmpeg/bin/ +ADD --chmod=111 https://github.com/MarcA711/Rockchip-FFmpeg-Builds/releases/download/6.0-1/ffprobe /usr/lib/btbn-ffmpeg/bin/ diff --git a/docker/rockchip/requirements-wheels-rk.txt b/docker/rockchip/requirements-wheels-rk.txt new file mode 100644 index 000000000..9a3fe5c77 --- /dev/null +++ b/docker/rockchip/requirements-wheels-rk.txt @@ -0,0 +1,2 @@ +hide-warnings == 0.17 +rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v1.5.2/rknn_toolkit_lite2-1.5.2-cp39-cp39-linux_aarch64.whl \ No newline at end of file diff --git a/docker/rockchip/rk.hcl b/docker/rockchip/rk.hcl new file mode 100644 index 000000000..513fefa25 --- /dev/null +++ b/docker/rockchip/rk.hcl @@ -0,0 +1,34 @@ +target wget { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "wget" +} + +target wheels { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "wheels" +} + +target deps { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "deps" +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "rootfs" +} + +target rk { + dockerfile = "docker/rockchip/Dockerfile" + contexts = { + wget = "target:wget", + wheels = "target:wheels", + deps = "target:deps", + rootfs = "target:rootfs" + } + platforms = ["linux/arm64"] +} \ No newline at end of file diff --git a/docker/rockchip/rk.mk b/docker/rockchip/rk.mk new file mode 100644 index 000000000..0d9bde16a --- /dev/null +++ b/docker/rockchip/rk.mk @@ -0,0 +1,10 @@ +BOARDS += rk + +local-rk: version + docker buildx bake --load --file=docker/rockchip/rk.hcl --set rk.tags=frigate:latest-rk rk + +build-rk: version + docker buildx bake --file=docker/rockchip/rk.hcl --set 
rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk + +push-rk: build-rk + docker buildx bake --push --file=docker/rockchip/rk.hcl --set rk.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rk rk \ No newline at end of file diff --git a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run b/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run deleted file mode 100755 index 562081fc5..000000000 --- a/docker/rootfs/etc/s6-overlay/s6-rc.d/frigate/run +++ /dev/null @@ -1,18 +0,0 @@ -#!/command/with-contenv bash -# shellcheck shell=bash -# Start the Frigate service - -set -o errexit -o nounset -o pipefail - -# Logs should be sent to stdout so that s6 can collect them - -# Tell S6-Overlay not to restart this service -s6-svc -O . - -echo "[INFO] Starting Frigate..." - -cd /opt/frigate || echo "[ERROR] Failed to change working directory to /opt/frigate" - -# Replace the bash process with the Frigate process, redirecting stderr to stdout -exec 2>&1 -exec python3 -u -m frigate diff --git a/docker/rpi/Dockerfile b/docker/rpi/Dockerfile new file mode 100644 index 000000000..581ca7ff8 --- /dev/null +++ b/docker/rpi/Dockerfile @@ -0,0 +1,16 @@ +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +FROM deps AS rpi-deps +ARG TARGETARCH + +RUN rm -rf /usr/lib/btbn-ffmpeg/ + +# Install dependencies +RUN --mount=type=bind,source=docker/rpi/install_deps.sh,target=/deps/install_deps.sh \ + /deps/install_deps.sh + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / diff --git a/docker/rpi/install_deps.sh b/docker/rpi/install_deps.sh new file mode 100755 index 000000000..9716623ca --- /dev/null +++ b/docker/rpi/install_deps.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -euxo pipefail + +apt-get -qq update + +apt-get -qq install --no-install-recommends -y \ + apt-transport-https \ + gnupg \ + wget \ + procps vainfo \ + unzip locales tzdata libxml2 xz-utils \ + python3-pip \ + curl \ + jq \ + nethogs + +mkdir -p -m 600 /root/.gnupg + +# enable non-free repo +sed -i -e's/ main/ main contrib non-free/g' /etc/apt/sources.list + +# ffmpeg -> arm64 +if [[ "${TARGETARCH}" == "arm64" ]]; then + # add raspberry pi repo + gpg --no-default-keyring --keyring /usr/share/keyrings/raspbian.gpg --keyserver keyserver.ubuntu.com --recv-keys 82B129927FA3303E + echo "deb [signed-by=/usr/share/keyrings/raspbian.gpg] https://archive.raspberrypi.org/debian/ bullseye main" | tee /etc/apt/sources.list.d/raspi.list + apt-get -qq update + apt-get -qq install --no-install-recommends --no-install-suggests -y ffmpeg +fi diff --git a/docker/rpi/rpi.hcl b/docker/rpi/rpi.hcl new file mode 100644 index 000000000..66f97c16d --- /dev/null +++ b/docker/rpi/rpi.hcl @@ -0,0 +1,20 @@ +target deps { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "deps" +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/arm64"] + target = "rootfs" +} + +target rpi { + dockerfile = "docker/rpi/Dockerfile" + contexts = { + deps = "target:deps", + rootfs = "target:rootfs" + } + platforms = ["linux/arm64"] +} \ No newline at end of file diff --git a/docker/rpi/rpi.mk b/docker/rpi/rpi.mk new file mode 100644 index 000000000..c1282b011 --- /dev/null +++ b/docker/rpi/rpi.mk @@ -0,0 +1,10 @@ +BOARDS += rpi + +local-rpi: version + docker buildx bake --load --file=docker/rpi/rpi.hcl --set rpi.tags=frigate:latest-rpi rpi + +build-rpi: version + docker buildx bake --file=docker/rpi/rpi.hcl --set 
rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi + +push-rpi: build-rpi + docker buildx bake --push --file=docker/rpi/rpi.hcl --set rpi.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-rpi rpi \ No newline at end of file diff --git a/docker/tensorrt/Dockerfile.amd64 b/docker/tensorrt/Dockerfile.amd64 new file mode 100644 index 000000000..075726eda --- /dev/null +++ b/docker/tensorrt/Dockerfile.amd64 @@ -0,0 +1,32 @@ +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +# Make this a separate target so it can be built/cached optionally +FROM wheels as trt-wheels +ARG DEBIAN_FRONTEND +ARG TARGETARCH + +# Add TensorRT wheels to another folder +COPY docker/tensorrt/requirements-amd64.txt /requirements-tensorrt.txt +RUN mkdir -p /trt-wheels && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt + +FROM tensorrt-base AS frigate-tensorrt +ENV TRT_VER=8.5.3 +RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ + pip3 install -U /deps/trt-wheels/*.whl && \ + ldconfig + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / + +# Dev Container w/ TRT +FROM devcontainer AS devcontainer-trt + +COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so +COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos +COPY docker/tensorrt/detector/rootfs/ / +COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so +RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ + pip3 install -U /deps/trt-wheels/*.whl diff --git a/docker/tensorrt/Dockerfile.arm64 b/docker/tensorrt/Dockerfile.arm64 new file mode 100644 index 000000000..70184bf9b --- /dev/null +++ b/docker/tensorrt/Dockerfile.arm64 @@ -0,0 +1,79 @@ +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +ARG BASE_IMAGE +FROM ${BASE_IMAGE} AS build-wheels +ARG DEBIAN_FRONTEND + +# Use a separate container to build wheels to prevent build dependencies in final image +RUN apt-get -qq update \ + && apt-get -qq install -y --no-install-recommends \ + python3.9 python3.9-dev \ + wget build-essential cmake git \ + && rm -rf /var/lib/apt/lists/* + +# Ensure python3 defaults to python3.9 +RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 + +RUN wget -q https://bootstrap.pypa.io/get-pip.py -O get-pip.py \ + && python3 get-pip.py "pip" + + +FROM build-wheels AS trt-wheels +ARG DEBIAN_FRONTEND +ARG TARGETARCH + +# python-tensorrt build deps are 3.4 GB! +RUN apt-get update \ + && apt-get install -y ccache cuda-cudart-dev-* cuda-nvcc-* libnvonnxparsers-dev libnvparsers-dev libnvinfer-plugin-dev \ + && ([ -e /usr/local/cuda ] || ln -s /usr/local/cuda-* /usr/local/cuda) \ + && rm -rf /var/lib/apt/lists/*; + +# Determine version of tensorrt already installed in base image, e.g. 
"Version: 8.4.1-1+cuda11.4" +RUN NVINFER_VER=$(dpkg -s libnvinfer8 | grep -Po "Version: \K.*") \ + && echo $NVINFER_VER | grep -Po "^\d+\.\d+\.\d+" > /etc/TENSORRT_VER + +RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,target=/deps/build_python_tensorrt.sh \ + --mount=type=cache,target=/root/.ccache \ + export PATH="/usr/lib/ccache:$PATH" CCACHE_DIR=/root/.ccache CCACHE_MAXSIZE=2G \ + && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh + +COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt +RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt + +FROM build-wheels AS trt-model-wheels +ARG DEBIAN_FRONTEND + +RUN apt-get update \ + && apt-get install -y protobuf-compiler libprotobuf-dev \ + && rm -rf /var/lib/apt/lists/* +RUN --mount=type=bind,source=docker/tensorrt/requirements-models-arm64.txt,target=/requirements-tensorrt-models.txt \ + pip3 wheel --wheel-dir=/trt-model-wheels -r /requirements-tensorrt-models.txt + +FROM wget AS jetson-ffmpeg +ARG DEBIAN_FRONTEND +ENV CCACHE_DIR /root/.ccache +ENV CCACHE_MAXSIZE 2G +RUN --mount=type=bind,source=docker/tensorrt/build_jetson_ffmpeg.sh,target=/deps/build_jetson_ffmpeg.sh \ + --mount=type=cache,target=/root/.ccache \ + /deps/build_jetson_ffmpeg.sh + +# Frigate w/ TensorRT for NVIDIA Jetson platforms +FROM tensorrt-base AS frigate-tensorrt +RUN apt-get update \ + && apt-get install -y python-is-python3 libprotobuf17 \ + && rm -rf /var/lib/apt/lists/* + +RUN rm -rf /usr/lib/btbn-ffmpeg/ +COPY --from=jetson-ffmpeg /rootfs / + +COPY --from=trt-wheels /etc/TENSORRT_VER /etc/TENSORRT_VER +RUN --mount=type=bind,from=trt-wheels,source=/trt-wheels,target=/deps/trt-wheels \ + --mount=type=bind,from=trt-model-wheels,source=/trt-model-wheels,target=/deps/trt-model-wheels \ + pip3 install -U /deps/trt-wheels/*.whl /deps/trt-model-wheels/*.whl \ + && ldconfig + +WORKDIR /opt/frigate/ +COPY --from=rootfs / / diff --git a/docker/tensorrt/Dockerfile.base b/docker/tensorrt/Dockerfile.base new file mode 100644 index 000000000..b0015016d --- /dev/null +++ b/docker/tensorrt/Dockerfile.base @@ -0,0 +1,29 @@ +# syntax=docker/dockerfile:1.6 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +ARG TRT_BASE=nvcr.io/nvidia/tensorrt:23.03-py3 + +# Build TensorRT-specific library +FROM ${TRT_BASE} AS trt-deps + +RUN apt-get update \ + && apt-get install -y git build-essential cuda-nvcc-* cuda-nvtx-* libnvinfer-dev libnvinfer-plugin-dev libnvparsers-dev libnvonnxparsers-dev \ + && rm -rf /var/lib/apt/lists/* +RUN --mount=type=bind,source=docker/tensorrt/detector/tensorrt_libyolo.sh,target=/tensorrt_libyolo.sh \ + /tensorrt_libyolo.sh + +# Frigate w/ TensorRT Support as separate image +FROM deps AS tensorrt-base + +#Disable S6 Global timeout +ENV S6_CMD_WAIT_FOR_SERVICES_MAXTIME=0 + +COPY --from=trt-deps /usr/local/lib/libyolo_layer.so /usr/local/lib/libyolo_layer.so +COPY --from=trt-deps /usr/local/src/tensorrt_demos /usr/local/src/tensorrt_demos +COPY docker/tensorrt/detector/rootfs/ / +ENV YOLO_MODELS="yolov7-320" + +HEALTHCHECK --start-period=600s --start-interval=5s --interval=15s --timeout=5s --retries=3 \ + CMD curl --fail --silent --show-error http://127.0.0.1:5000/api/version || exit 1 diff --git a/docker/tensorrt/build_jetson_ffmpeg.sh b/docker/tensorrt/build_jetson_ffmpeg.sh new file mode 100755 index 000000000..f4e55c2bb --- /dev/null +++ b/docker/tensorrt/build_jetson_ffmpeg.sh @@ -0,0 +1,59 @@ +#!/bin/bash + 
+# For jetson platforms, build ffmpeg with custom patches. NVIDIA supplies a deb +# with accelerated decoding, but it doesn't have accelerated scaling or encoding + +set -euxo pipefail + +INSTALL_PREFIX=/rootfs/usr/local + +apt-get -qq update +apt-get -qq install -y --no-install-recommends build-essential ccache clang cmake pkg-config +apt-get -qq install -y --no-install-recommends libx264-dev libx265-dev + +pushd /tmp + +# Install libnvmpi to enable nvmpi decoders (h264_nvmpi, hevc_nvmpi) +if [ -e /usr/local/cuda-10.2 ]; then + # assume Jetpack 4.X + wget -q https://developer.nvidia.com/embedded/L4T/r32_Release_v5.0/T186/Jetson_Multimedia_API_R32.5.0_aarch64.tbz2 -O jetson_multimedia_api.tbz2 +else + # assume Jetpack 5.X + wget -q https://developer.nvidia.com/downloads/embedded/l4t/r35_release_v3.1/release/jetson_multimedia_api_r35.3.1_aarch64.tbz2 -O jetson_multimedia_api.tbz2 +fi +tar xaf jetson_multimedia_api.tbz2 -C / && rm jetson_multimedia_api.tbz2 + +wget -q https://github.com/AndBobsYourUncle/jetson-ffmpeg/archive/9c17b09.zip -O jetson-ffmpeg.zip +unzip jetson-ffmpeg.zip && rm jetson-ffmpeg.zip && mv jetson-ffmpeg-* jetson-ffmpeg && cd jetson-ffmpeg +LD_LIBRARY_PATH=$(pwd)/stubs:$LD_LIBRARY_PATH # tegra multimedia libs aren't available in image, so use stubs for ffmpeg build +mkdir build +cd build +cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=$INSTALL_PREFIX +make -j$(nproc) +make install +cd ../../ + +# Install nv-codec-headers to enable ffnvcodec filters (scale_cuda) +wget -q https://github.com/FFmpeg/nv-codec-headers/archive/refs/heads/master.zip +unzip master.zip && rm master.zip && cd nv-codec-headers-master +make PREFIX=$INSTALL_PREFIX install +cd ../ && rm -rf nv-codec-headers-master + +# Build ffmpeg with nvmpi patch +wget -q https://ffmpeg.org/releases/ffmpeg-6.0.tar.xz +tar xaf ffmpeg-*.tar.xz && rm ffmpeg-*.tar.xz && cd ffmpeg-* +patch -p1 < ../jetson-ffmpeg/ffmpeg_patches/ffmpeg6.0_nvmpi.patch +export PKG_CONFIG_PATH=$INSTALL_PREFIX/lib/pkgconfig +# enable Jetson codecs but disable dGPU codecs +./configure --cc='ccache gcc' --cxx='ccache g++' \ + --enable-shared --disable-static --prefix=$INSTALL_PREFIX \ + --enable-gpl --enable-libx264 --enable-libx265 \ + --enable-nvmpi --enable-ffnvcodec --enable-cuda-llvm \ + --disable-cuvid --disable-nvenc --disable-nvdec \ + || { cat ffbuild/config.log && false; } +make -j$(nproc) +make install +cd ../ + +rm -rf /var/lib/apt/lists/* +popd diff --git a/docker/tensorrt/detector/build_python_tensorrt.sh b/docker/tensorrt/detector/build_python_tensorrt.sh new file mode 100755 index 000000000..21b6ae268 --- /dev/null +++ b/docker/tensorrt/detector/build_python_tensorrt.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -euxo pipefail + +mkdir -p /trt-wheels + +if [[ "${TARGETARCH}" == "arm64" ]]; then + + # NVIDIA supplies python-tensorrt for python3.8, but frigate uses python3.9, + # so we must build python-tensorrt ourselves. 
+ + # Get python-tensorrt source + mkdir /workspace + cd /workspace + git clone -b ${TENSORRT_VER} https://github.com/NVIDIA/TensorRT.git --depth=1 + + # Collect dependencies + EXT_PATH=/workspace/external && mkdir -p $EXT_PATH + pip3 install pybind11 && ln -s /usr/local/lib/python3.9/dist-packages/pybind11 $EXT_PATH/pybind11 + ln -s /usr/include/python3.9 $EXT_PATH/python3.9 + ln -s /usr/include/aarch64-linux-gnu/NvOnnxParser.h /workspace/TensorRT/parsers/onnx/ + + # Build wheel + cd /workspace/TensorRT/python + EXT_PATH=$EXT_PATH PYTHON_MAJOR_VERSION=3 PYTHON_MINOR_VERSION=9 TARGET_ARCHITECTURE=aarch64 /bin/bash ./build.sh + mv build/dist/*.whl /trt-wheels/ + +fi diff --git a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf b/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf similarity index 94% rename from docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf rename to docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf index d4248d047..fe16ed9c5 100644 --- a/docker/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf +++ b/docker/tensorrt/detector/rootfs/etc/ld.so.conf.d/cuda_tensorrt.conf @@ -1,3 +1,4 @@ +/usr/local/lib /usr/local/lib/python3.9/dist-packages/nvidia/cudnn/lib /usr/local/lib/python3.9/dist-packages/nvidia/cuda_runtime/lib /usr/local/lib/python3.9/dist-packages/nvidia/cublas/lib diff --git a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/frigate/dependencies.d/trt-model-prepare new file mode 100644 index 000000000..e69de29bb diff --git a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/dependencies.d/base new file mode 100644 index 000000000..e69de29bb diff --git a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run new file mode 100755 index 000000000..c39c7a0aa --- /dev/null +++ b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/run @@ -0,0 +1,109 @@ +#!/command/with-contenv bash +# shellcheck shell=bash +# Generate models for the TensorRT detector + +# One or more comma-separated models may be specified via the YOLO_MODELS env. +# Append "-dla" to the model name to generate a DLA model with GPU fallback; +# otherwise a GPU-only model will be generated. + +set -o errexit -o nounset -o pipefail + +MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache/tensorrt"} +TRT_VER=${TRT_VER:-$(cat /etc/TENSORRT_VER)} +OUTPUT_FOLDER="${MODEL_CACHE_DIR}/${TRT_VER}" + +# Create output folder +mkdir -p ${OUTPUT_FOLDER} + +FIRST_MODEL=true +MODEL_DOWNLOAD="" +MODEL_CONVERT="" + +for model in ${YOLO_MODELS//,/ } +do + # Remove old link in case path/version changed + rm -f ${MODEL_CACHE_DIR}/${model}.trt + + if [[ ! -f ${OUTPUT_FOLDER}/${model}.trt ]]; then + if [[ ${FIRST_MODEL} = true ]]; then + MODEL_DOWNLOAD="${model%-dla}"; + MODEL_CONVERT="${model}" + FIRST_MODEL=false; + else + MODEL_DOWNLOAD+=",${model%-dla}"; + MODEL_CONVERT+=",${model}"; + fi + else + ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt + fi +done + +if [[ -z ${MODEL_CONVERT} ]]; then + echo "No models to convert." + exit 0 +fi + +# Setup ENV to select GPU for conversion +if [ ! -z ${TRT_MODEL_PREP_DEVICE+x} ]; then + if [ ! 
-z ${CUDA_VISIBLE_DEVICES+x} ]; then + PREVIOUS_CVD="$CUDA_VISIBLE_DEVICES" + unset CUDA_VISIBLE_DEVICES + fi + export CUDA_VISIBLE_DEVICES="$TRT_MODEL_PREP_DEVICE" +fi + +# On Jetpack 4.6, the nvidia container runtime will mount several host nvidia libraries into the +# container which should not be present in the image - if they are, TRT model generation will +# fail or produce invalid models. Thus we must request the user to install them on the host in +# order to run libyolo here. +# On Jetpack 5.0, these libraries are not mounted by the runtime and are supplied by the image. +if [[ "$(arch)" == "aarch64" ]]; then + if [[ ! -e /usr/lib/aarch64-linux-gnu/tegra ]]; then + echo "ERROR: Container must be launched with nvidia runtime" + exit 1 + elif [[ ! -e /usr/lib/aarch64-linux-gnu/libnvinfer.so.8 || + ! -e /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.8 || + ! -e /usr/lib/aarch64-linux-gnu/libnvparsers.so.8 || + ! -e /usr/lib/aarch64-linux-gnu/libnvonnxparser.so.8 ]]; then + echo "ERROR: Please run the following on the HOST:" + echo " sudo apt install libnvinfer8 libnvinfer-plugin8 libnvparsers8 libnvonnxparsers8 nvidia-container" + exit 1 + fi +fi + +echo "Generating the following TRT Models: ${MODEL_CONVERT}" + +# Build trt engine +cd /usr/local/src/tensorrt_demos/yolo + +echo "Downloading yolo weights" +./download_yolo.sh $MODEL_DOWNLOAD 2> /dev/null + +for model in ${MODEL_CONVERT//,/ } +do + python3 yolo_to_onnx.py -m ${model%-dla} > /dev/null + + echo -e "\nGenerating ${model}.trt. This may take a few minutes.\n"; start=$(date +%s) + if [[ $model == *-dla ]]; then + cmd="python3 onnx_to_tensorrt.py -m ${model%-dla} --dla_core 0" + else + cmd="python3 onnx_to_tensorrt.py -m ${model}" + fi + $cmd > /tmp/onnx_to_tensorrt.log || { cat /tmp/onnx_to_tensorrt.log && continue; } + + mv ${model%-dla}.trt ${OUTPUT_FOLDER}/${model}.trt; + ln -s ${OUTPUT_FOLDER}/${model}.trt ${MODEL_CACHE_DIR}/${model}.trt + echo "Generated ${model}.trt in $(($(date +%s)-start)) seconds" +done + +# Restore ENV after conversion +if [ ! -z ${TRT_MODEL_PREP_DEVICE+x} ]; then + unset CUDA_VISIBLE_DEVICES + if [ ! -z ${PREVIOUS_CVD+x} ]; then + export CUDA_VISIBLE_DEVICES="$PREVIOUS_CVD" + fi +fi + +# Print which models exist in output folder +echo "Available tensorrt models:" +cd ${OUTPUT_FOLDER} && ls *.trt; diff --git a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type new file mode 100644 index 000000000..bdd22a185 --- /dev/null +++ b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/type @@ -0,0 +1 @@ +oneshot diff --git a/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up new file mode 100644 index 000000000..b9de40ad0 --- /dev/null +++ b/docker/tensorrt/detector/rootfs/etc/s6-overlay/s6-rc.d/trt-model-prepare/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/trt-model-prepare/run diff --git a/docker/tensorrt/detector/tensorrt_libyolo.sh b/docker/tensorrt/detector/tensorrt_libyolo.sh new file mode 100755 index 000000000..91b9340a9 --- /dev/null +++ b/docker/tensorrt/detector/tensorrt_libyolo.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -euxo pipefail + +SCRIPT_DIR="/usr/local/src/tensorrt_demos" + +# Clone tensorrt_demos repo +git clone --depth 1 https://github.com/NateMeyer/tensorrt_demos.git -b conditional_download + +# Build libyolo +if [ ! 
-e /usr/local/cuda ]; then + ln -s /usr/local/cuda-* /usr/local/cuda +fi +cd ./tensorrt_demos/plugins && make all -j$(nproc) +cp libyolo_layer.so /usr/local/lib/libyolo_layer.so + +# Store yolo scripts for later conversion +cd ../ +mkdir -p ${SCRIPT_DIR}/plugins +cp plugins/libyolo_layer.so ${SCRIPT_DIR}/plugins/libyolo_layer.so +cp -a yolo ${SCRIPT_DIR}/ diff --git a/docker/tensorrt/requirements-amd64.txt b/docker/tensorrt/requirements-amd64.txt new file mode 100644 index 000000000..214202e43 --- /dev/null +++ b/docker/tensorrt/requirements-amd64.txt @@ -0,0 +1,12 @@ +# NVidia TensorRT Support (amd64 only) +--extra-index-url 'https://pypi.nvidia.com' +numpy < 1.24; platform_machine == 'x86_64' +tensorrt == 8.5.3.*; platform_machine == 'x86_64' +cuda-python == 11.8; platform_machine == 'x86_64' +cython == 0.29.*; platform_machine == 'x86_64' +nvidia-cuda-runtime-cu12 == 12.1.*; platform_machine == 'x86_64' +nvidia-cuda-runtime-cu11 == 11.8.*; platform_machine == 'x86_64' +nvidia-cublas-cu11 == 11.11.3.6; platform_machine == 'x86_64' +nvidia-cudnn-cu11 == 8.6.0.*; platform_machine == 'x86_64' +onnx==1.14.0; platform_machine == 'x86_64' +protobuf==3.20.3; platform_machine == 'x86_64' \ No newline at end of file diff --git a/docker/tensorrt/requirements-arm64.txt b/docker/tensorrt/requirements-arm64.txt new file mode 100644 index 000000000..9b12dac33 --- /dev/null +++ b/docker/tensorrt/requirements-arm64.txt @@ -0,0 +1 @@ +cuda-python == 11.7; platform_machine == 'aarch64' diff --git a/docker/tensorrt/requirements-models-arm64.txt b/docker/tensorrt/requirements-models-arm64.txt new file mode 100644 index 000000000..3490a7897 --- /dev/null +++ b/docker/tensorrt/requirements-models-arm64.txt @@ -0,0 +1,3 @@ +onnx == 1.14.0; platform_machine == 'aarch64' +protobuf == 3.20.3; platform_machine == 'aarch64' +numpy == 1.23.*; platform_machine == 'aarch64' # required by python-tensorrt 8.2.1 (Jetpack 4.6) diff --git a/docker/tensorrt/trt.hcl b/docker/tensorrt/trt.hcl new file mode 100644 index 000000000..56e294100 --- /dev/null +++ b/docker/tensorrt/trt.hcl @@ -0,0 +1,94 @@ +variable "ARCH" { + default = "amd64" +} +variable "BASE_IMAGE" { + default = null +} +variable "SLIM_BASE" { + default = null +} +variable "TRT_BASE" { + default = null +} + +target "_build_args" { + args = { + BASE_IMAGE = BASE_IMAGE, + SLIM_BASE = SLIM_BASE, + TRT_BASE = TRT_BASE + } + platforms = ["linux/${ARCH}"] +} + +target wget { + dockerfile = "docker/main/Dockerfile" + target = "wget" + inherits = ["_build_args"] +} + +target deps { + dockerfile = "docker/main/Dockerfile" + target = "deps" + inherits = ["_build_args"] +} + +target rootfs { + dockerfile = "docker/main/Dockerfile" + target = "rootfs" + inherits = ["_build_args"] +} + +target wheels { + dockerfile = "docker/main/Dockerfile" + target = "wheels" + inherits = ["_build_args"] +} + +target devcontainer { + dockerfile = "docker/main/Dockerfile" + platforms = ["linux/amd64"] + target = "devcontainer" +} + +target "trt-deps" { + dockerfile = "docker/tensorrt/Dockerfile.base" + context = "." + contexts = { + deps = "target:deps", + } + inherits = ["_build_args"] +} + +target "tensorrt-base" { + dockerfile = "docker/tensorrt/Dockerfile.base" + context = "." + contexts = { + deps = "target:deps", + } + inherits = ["_build_args"] +} + +target "tensorrt" { + dockerfile = "docker/tensorrt/Dockerfile.${ARCH}" + context = "." 
+ contexts = { + wget = "target:wget", + tensorrt-base = "target:tensorrt-base", + rootfs = "target:rootfs" + wheels = "target:wheels" + } + target = "frigate-tensorrt" + inherits = ["_build_args"] +} + +target "devcontainer-trt" { + dockerfile = "docker/tensorrt/Dockerfile.amd64" + context = "." + contexts = { + wheels = "target:wheels", + trt-deps = "target:trt-deps", + devcontainer = "target:devcontainer" + } + platforms = ["linux/amd64"] + target = "devcontainer-trt" +} diff --git a/docker/tensorrt/trt.mk b/docker/tensorrt/trt.mk new file mode 100644 index 000000000..0e01c1402 --- /dev/null +++ b/docker/tensorrt/trt.mk @@ -0,0 +1,26 @@ +BOARDS += trt + +JETPACK4_BASE ?= timongentzsch/l4t-ubuntu20-opencv:latest # L4T 32.7.1 JetPack 4.6.1 +JETPACK5_BASE ?= nvcr.io/nvidia/l4t-tensorrt:r8.5.2-runtime # L4T 35.3.1 JetPack 5.1.1 +X86_DGPU_ARGS := ARCH=amd64 +JETPACK4_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK4_BASE) SLIM_BASE=$(JETPACK4_BASE) TRT_BASE=$(JETPACK4_BASE) +JETPACK5_ARGS := ARCH=arm64 BASE_IMAGE=$(JETPACK5_BASE) SLIM_BASE=$(JETPACK5_BASE) TRT_BASE=$(JETPACK5_BASE) + +local-trt: version + $(X86_DGPU_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt tensorrt + +local-trt-jp4: version + $(JETPACK4_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp4 tensorrt + +local-trt-jp5: version + $(JETPACK5_ARGS) docker buildx bake --load --file=docker/tensorrt/trt.hcl --set tensorrt.tags=frigate:latest-tensorrt-jp5 tensorrt + +build-trt: + $(X86_DGPU_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt + $(JETPACK4_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt + $(JETPACK5_ARGS) docker buildx bake --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt + +push-trt: build-trt + $(X86_DGPU_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt tensorrt + $(JETPACK4_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp4 tensorrt + $(JETPACK5_ARGS) docker buildx bake --push --file=docker/tensorrt/trt.hcl --set tensorrt.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-tensorrt-jp5 tensorrt diff --git a/docker/tensorrt_models.sh b/docker/tensorrt_models.sh deleted file mode 100755 index 957e817d6..000000000 --- a/docker/tensorrt_models.sh +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -set -euxo pipefail - -CUDA_HOME=/usr/local/cuda -LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64 -OUTPUT_FOLDER=/tensorrt_models -echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}" - -# Create output folder -mkdir -p ${OUTPUT_FOLDER} - -# Install packages -pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3 - -# Clone tensorrt_demos repo -git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos - -# Build libyolo -cd /tensorrt_demos/plugins && make all -cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so - -# Download yolo weights -cd /tensorrt_demos/yolo && ./download_yolo.sh - -# Build trt engine -cd /tensorrt_demos/yolo - -for model in 
${YOLO_MODELS//,/ }
-do
-    python3 yolo_to_onnx.py -m ${model}
-    python3 onnx_to_tensorrt.py -m ${model}
-    cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
-done
diff --git a/docs/README.md b/docs/README.md
index eb435c5b6..bd4aded51 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -2,4 +2,4 @@
 
 This website is built using [Docusaurus 2](https://v2.docusaurus.io/), a modern static website generator.
 
-For installation and contributing instructions, please follow the [Contributing Docs](https://blakeblackshear.github.io/frigate/contributing).
+For installation and contributing instructions, please follow the [Contributing Docs](https://docs.frigate.video/development/contributing).
diff --git a/docs/docs/configuration/advanced.md b/docs/docs/configuration/advanced.md
index 8404c3cf0..50cd5ff79 100644
--- a/docs/docs/configuration/advanced.md
+++ b/docs/docs/configuration/advanced.md
@@ -24,7 +24,6 @@ Examples of available modules are:
 - `frigate.app`
 - `frigate.mqtt`
 - `frigate.object_detection`
-- `frigate.zeroconf`
 - `detector.<detector_name>`
 - `watchdog.<camera_name>`
 - `ffmpeg.<camera_name>.<sorted_roles>` NOTE: All FFmpeg logs are sent as `error` level.
@@ -42,7 +41,7 @@ environment_vars:
 
 ### `database`
 
-Event and recording information is managed in a sqlite database at `/media/frigate/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
+Event and recording information is managed in a sqlite database at `/config/frigate.db`. If that database is deleted, recordings will be orphaned and will need to be cleaned up manually. They also won't show up in the Media Browser within Home Assistant.
 
 If you are storing your database on a network share (SMB, NFS, etc), you may get a `database is locked` error message on startup. You can customize the location of the database in the config if necessary.
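+
+As a minimal sketch of relocating the database, the option is `database.path` (the same key the s6 startup script checks with yq before migrating the db); the path shown here is illustrative:
+
+```yaml
+database:
+  path: /path/to/frigate.db
+```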
@@ -97,6 +96,16 @@ model:
 
 Note that if you rename objects in the labelmap, you will also need to update your `objects -> track` list as well.
 
+:::caution
+
+Some labels have special handling, and modifying them can disable related functionality.
+
+`person` objects are associated with `face` and `amazon`.
+
+`car` objects are associated with `license_plate`, `ups`, `fedex`, and `amazon`.
+
+:::
+
 ## Custom ffmpeg build
 
 Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exists some hardware setups which have incompatibilities with the included build. In this case, a docker volume mapping can be used to overwrite the included ffmpeg build with an ffmpeg build that works for your specific hardware setup.
@@ -111,7 +120,7 @@ NOTE: The folder that is mapped from the host needs to be the folder that contai
 
 ## Custom go2rtc version
 
-Frigate currently includes go2rtc v1.2.0, there may be certain cases where you want to run a different version of go2rtc.
+Frigate currently includes go2rtc v1.8.4. There may be certain cases where you want to run a different version of go2rtc.
 
 To do this:
 
@@ -119,3 +128,34 @@ To do this:
 2. Rename the build to `go2rtc`.
 3. Give `go2rtc` execute permission.
 4. Restart Frigate and the custom version will be used, you can verify by checking go2rtc logs.
+
+## Validating your config.yaml file updates
+
+When Frigate starts up, it checks whether your config file is valid, and if it is not, the process exits. To minimize interruptions when updating your config, you have three options: edit the config via the WebUI, which has built-in validation; use the config API; or validate on the command line using the Frigate docker container.
+
+### Via API
+
+Frigate can accept a new configuration file as JSON at the `/config/save` endpoint. When updating the config this way, Frigate will validate the config before saving it, and return a `400` if the config is not valid.
+
+```bash
+curl -X POST http://frigate_host:5000/config/save -d @config.json
+```
+
+If you'd like, you can use your YAML config directly by using [`yq`](https://github.com/mikefarah/yq) to convert it to JSON:
+
+```bash
+yq r -j config.yml | curl -X POST http://frigate_host:5000/config/save -d @-
+```
+
+### Via Command Line
+
+You can also validate your config at the command line by using the docker container itself. In CI/CD, you can leverage the return code to determine whether your config is valid: Frigate will return `1` if the config is invalid, or `0` if it's valid.
+
+```bash
+docker run \
+  -v $(pwd)/config.yml:/config/config.yml \
+  --entrypoint python3 \
+  ghcr.io/blakeblackshear/frigate:stable \
+  -u -m frigate \
+  --validate_config
+```
diff --git a/docs/docs/configuration/audio_detectors.md b/docs/docs/configuration/audio_detectors.md
new file mode 100644
index 000000000..b783daa69
--- /dev/null
+++ b/docs/docs/configuration/audio_detectors.md
@@ -0,0 +1,74 @@
+---
+id: audio_detectors
+title: Audio Detectors
+---
+
+Frigate provides a built-in audio detector which runs on the CPU. Compared to object detection in images, audio detection is a relatively lightweight operation, so the only option is to run detection on the CPU.
+
+## Configuration
+
+Audio events work by detecting a type of audio and creating an event; the event will end once that type of audio has not been heard for the configured amount of time. Audio events save a snapshot at the beginning of the event as well as recordings throughout the event. The recordings are retained using the configured recording retention.
+
+### Enabling Audio Events
+
+Audio events can be enabled for all cameras or only for specific cameras.
+
+```yaml
+
+audio: # <- enable audio events for all cameras
+  enabled: True
+
+cameras:
+  front_camera:
+    ffmpeg:
+      ...
+    audio:
+      enabled: True # <- enable audio events for the front_camera
+```
+
+If you are using multiple streams, you must set the `audio` role on the stream that is going to be used for audio detection. This can be any stream, but the stream must have audio included.
+
+:::note
+
+The ffmpeg process for capturing audio will be a separate connection to the camera, in addition to the other roles assigned to the camera. For this reason, it is recommended that the go2rtc restream is used for this purpose. See [the restream docs](/configuration/restream.md) for more information.
+
+:::
+
+```yaml
+cameras:
+  front_camera:
+    ffmpeg:
+      inputs:
+        - path: rtsp://.../main_stream
+          roles:
+            - record
+        - path: rtsp://.../sub_stream # <- this stream must have audio enabled
+          roles:
+            - audio
+            - detect
+```
+
+### Configuring Minimum Volume
+
+The audio detector uses volume levels in the same way that motion in a camera feed is used for object detection. This means that, to reduce resource usage, Frigate will not run audio detection unless the audio volume is above the configured level. Audio levels can vary widely between camera models, so it is important to run tests to see what volume levels your cameras produce. MQTT Explorer can be used on the audio topic to see what volume level is being detected.
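+
+As a sketch, the threshold can be raised or lowered with the `min_volume` option (referenced in the tip below); the value shown here is illustrative, not a recommended default:
+
+```yaml
+audio:
+  enabled: True
+  min_volume: 500
+```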
+
+:::tip
+
+Volume is considered motion for recordings. This means that when `record -> retain -> mode` is set to `motion`, any recording segment in which the audio volume is above `min_volume` will be kept for that camera.
+
+:::
+
+### Configuring Audio Events
+
+The included audio model has over [500 different types](https://github.com/blakeblackshear/frigate/blob/dev/audio-labelmap.txt) of audio that can be detected, many of which are not practical. By default `bark`, `fire_alarm`, `scream`, `speech`, and `yell` are enabled, but these can be customized.
+
+```yaml
+audio:
+  enabled: True
+  listen:
+    - bark
+    - fire_alarm
+    - scream
+    - speech
+    - yell
+```
diff --git a/docs/docs/configuration/autotracking.md b/docs/docs/configuration/autotracking.md
new file mode 100644
index 000000000..31048db2e
--- /dev/null
+++ b/docs/docs/configuration/autotracking.md
@@ -0,0 +1,166 @@
+---
+id: autotracking
+title: Camera Autotracking
+---
+
+An ONVIF-capable PTZ (pan-tilt-zoom) camera that supports relative movement within the field of view (FOV) can be configured to automatically track moving objects and keep them in the center of the frame.
+
+![Autotracking example with zooming](/img/frigate-autotracking-example.gif)
+
+## Autotracking behavior
+
+Once Frigate determines that an object is not a false positive and has entered one of the required zones, the autotracker will move the PTZ camera to keep the object centered in the frame until the object moves out of the frame, the PTZ reaches the limit of its movement range, or Frigate loses track of it.
+
+Upon loss of tracking, Frigate will scan the region of the lost object for `timeout` seconds. If an object of the same type is found in that region, Frigate will autotrack that new object.
+
+When tracking has ended, Frigate will return to the camera firmware's PTZ preset specified by the `return_preset` configuration entry.
+
+## Checking ONVIF camera support
+
+Frigate autotracking functions with PTZ cameras capable of relative movement within the field of view (as specified in the [ONVIF spec](https://www.onvif.org/specs/srv/ptz/ONVIF-PTZ-Service-Spec-v1712.pdf) as `RelativePanTiltTranslationSpace` having a `TranslationSpaceFov` entry).
+
+Many cheaper or older PTZs may not support this standard. Frigate will report an error message in the log and disable autotracking if your PTZ is unsupported.
+
+Alternatively, you can download and run [this simple Python script](https://gist.github.com/hawkeye217/152a1d4ba80760dac95d46e143d37112), replacing the details on line 4 with your camera's IP address, ONVIF port, username, and password to check your camera.
+
+A growing list of cameras and brands that have been reported by users to work with Frigate's autotracking can be found [here](cameras.md).
+
+## Configuration
+
+First, set up a PTZ preset in your camera's firmware and give it a name. If you're unsure how to do this, consult the documentation for your camera manufacturer's firmware. Some tutorials for common brands: [Amcrest](https://www.youtube.com/watch?v=lJlE9-krmrM), [Reolink](https://www.youtube.com/watch?v=VAnxHUY5i5w), [Dahua](https://www.youtube.com/watch?v=7sNbc5U-k54).
+
+Edit your Frigate configuration file and enter the ONVIF parameters for your camera.
Specify the object types to track, a required zone the object must enter to begin autotracking, and the camera preset name you configured in your camera's firmware to return to when tracking has ended. Optionally, specify a delay in seconds before Frigate returns the camera to the preset. + +An [ONVIF connection](cameras.md) is required for autotracking to function. Also, a [motion mask](masks.md) over your camera's timestamp and any overlay text is recommended to ensure they are completely excluded from scene change calculations when the camera is moving. + +Note that `autotracking` is disabled by default but can be enabled in the configuration or by MQTT. + +```yaml +cameras: + ptzcamera: + ... + onvif: + # Required: host of the camera being connected to. + host: 0.0.0.0 + # Optional: ONVIF port for device (default: shown below). + port: 8000 + # Optional: username for login. + # NOTE: Some devices require admin to access ONVIF. + user: admin + # Optional: password for login. + password: admin + # Optional: PTZ camera object autotracking. Keeps a moving object in + # the center of the frame by automatically moving the PTZ camera. + autotracking: + # Optional: enable/disable object autotracking. (default: shown below) + enabled: False + # Optional: calibrate the camera on startup (default: shown below) + # A calibration will move the PTZ in increments and measure the time it takes to move. + # The results are used to help estimate the position of tracked objects after a camera move. + # Frigate will update your config file automatically after a calibration with + # a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False. + calibrate_on_startup: False + # Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below) + # Available options are: disabled, absolute, and relative + # disabled - don't zoom in/out on autotracked objects, use pan/tilt only + # absolute - use absolute zooming (supported by most PTZ capable cameras) + # relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements) + zooming: disabled + # Optional: A value to change the behavior of zooming on autotracked objects. (default: shown below) + # A lower value will keep more of the scene in view around a tracked object. + # A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly. + # The value should be between 0.1 and 0.75 + zoom_factor: 0.3 + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Required: Begin automatically tracking an object when it enters any of the listed zones. + required_zones: + - zone_name + # Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below) + return_preset: home + # Optional: Seconds to delay before returning to preset. (default: shown below) + timeout: 10 + # Optional: Values generated automatically by a camera calibration. Do not modify these manually. (default: shown below) + movement_weights: [] +``` + +## Calibration + +PTZ motors operate at different speeds. Performing a calibration will direct Frigate to measure this speed over a variety of movements and use those measurements to better predict the amount of movement necessary to keep autotracked objects in the center of the frame. + +Calibration is optional, but will greatly assist Frigate in autotracking objects that move across the camera's field of view more quickly. 
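+
+For reference, a minimal sketch of just the options involved in a calibration run (the camera name and ONVIF details are placeholders; the full option list is shown above):
+
+```yaml
+cameras:
+  ptzcamera:
+    onvif:
+      host: 0.0.0.0
+      port: 8000
+      user: admin
+      password: admin
+      autotracking:
+        enabled: True
+        calibrate_on_startup: True # <- set back to False after calibration completes
+        return_preset: home
+```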
+ +To begin calibration, set the `calibrate_on_startup` for your camera to `True` and restart Frigate. Frigate will then make a series of small and large movements with your camera. Don't move the PTZ manually while calibration is in progress. Once complete, camera motion will stop and your config file will be automatically updated with a `movement_weights` parameter to be used in movement calculations. You should not modify this parameter manually. + +After calibration has ended, your PTZ will be moved to the preset specified by `return_preset`. + +:::note + +Frigate's web UI and all other cameras will be unresponsive while calibration is in progress. This is expected and normal to avoid excessive network traffic or CPU usage during calibration. Calibration for most PTZs will take about two minutes. The Frigate log will show calibration progress and any errors. + +::: + +At this point, Frigate will be running and will continue to refine and update the `movement_weights` parameter in your config automatically as the PTZ moves during autotracking and more measurements are obtained. + +Before restarting Frigate, you should set `calibrate_on_startup` in your config file to `False`, otherwise your refined `movement_weights` will be overwritten and calibration will occur when starting again. + +You can recalibrate at any time by removing the `movement_weights` parameter, setting `calibrate_on_startup` to `True`, and then restarting Frigate. You may need to recalibrate or remove `movement_weights` from your config altogether if autotracking is erratic. If you change your `return_preset` in any way or if you change your camera's detect `fps` value, a recalibration is also recommended. + +If you initially calibrate with zooming disabled and then enable zooming at a later point, you should also recalibrate. + +## Best practices and considerations + +Every PTZ camera is different, so autotracking may not perform ideally in every situation. This experimental feature was initially developed using an EmpireTech/Dahua SD1A404XB-GNR. + +The object tracker in Frigate estimates the motion of the PTZ so that tracked objects are preserved when the camera moves. In most cases 5 fps is sufficient, but if you plan to track faster moving objects, you may want to increase this slightly. Higher frame rates (> 10fps) will only slow down Frigate and the motion estimator and may lead to dropped frames, especially if you are using experimental zooming. + +A fast [detector](object_detectors.md) is recommended. CPU detectors will not perform well or won't work at all. You can watch Frigate's debug viewer for your camera to see a thicker colored box around the object currently being autotracked. + +![Autotracking Debug View](/img/autotracking-debug.gif) + +A full-frame zone in `required_zones` is not recommended, especially if you've calibrated your camera and there are `movement_weights` defined in the configuration file. Frigate will continue to autotrack an object that has entered one of the `required_zones`, even if it moves outside of that zone. + +Some users have found it helpful to adjust the zone `inertia` value. See the [configuration reference](index.md). + +## Zooming + +Zooming is a very experimental feature and may use significantly more CPU when tracking objects than panning/tilting only. + +Absolute zooming makes zoom movements separate from pan/tilt movements. Most PTZ cameras will support absolute zooming. 
Absolute zooming was developed to be very conservative to work best with a variety of cameras and scenes. Absolute zooming usually will not occur until an object has stopped moving or is moving very slowly.
+
+Relative zooming attempts to make a zoom movement concurrently with any pan/tilt movements. It was tested to work with some Dahua and Amcrest PTZs, but the ONVIF specification indicates that there is no assumption about how the generic zoom range is mapped to magnification, field of view, or other physical zoom dimension when using relative zooming. So if relative zooming behavior is erratic or just doesn't work, try absolute zooming.
+
+You can optionally adjust the `zoom_factor` for your camera in your configuration file. Lower values will leave more space from the scene around the tracked object, while higher values will cause your camera to zoom in more on the object. However, keep in mind that Frigate needs a fair amount of pixels and scene details outside of the bounding box of the tracked object to estimate the motion of your camera. If the object is taking up too much of the frame, Frigate will not be able to track the motion of the camera and your object will be lost.
+
+The range of this option is from 0.1 to 0.75. The default value of 0.3 is conservative and should be sufficient for most users. Because every PTZ and scene is different, you should experiment to determine what works best for you.
+
+## Usage applications
+
+In security and surveillance, it's common to use "spotter" cameras in combination with your PTZ. When your fixed spotter camera detects an object, you could use an automation platform like Home Assistant to move the PTZ to a specific preset so that Frigate can begin automatically tracking the object. For example: a residence may have fixed cameras on the east and west side of the property, capturing views up and down a street. When the spotter camera on the west side detects a person, a Home Assistant automation could move the PTZ to a camera preset aimed toward the west. When the object enters the specified zone, Frigate's autotracker could then continue to track the person as it moves out of view of any of the fixed cameras.
+
+## Troubleshooting and FAQ
+
+### The autotracker loses track of my object. Why?
+
+There are many reasons this could be the case. If you are using experimental zooming, your `zoom_factor` value might be too high, the object might be traveling too quickly, the scene might be too dark, there are not enough details in the scene (for example, a PTZ looking down on a driveway or other monotone background without a sufficient number of hard edges or corners), or the scene is otherwise less than optimal for Frigate to maintain tracking.
+
+Your camera's shutter speed may also be set too low, so that blurring occurs with motion. Check your camera's firmware to see if you can increase the shutter speed.
+
+Watching Frigate's debug view can help to determine a possible cause. The autotracked object will have a thicker colored box around it.
+
+### I'm seeing an error in the logs that my camera "is still in ONVIF 'MOVING' status." What does this mean?
+
+There are two possible known reasons for this (and perhaps others yet unknown): a slow PTZ motor or buggy camera firmware. Frigate uses an ONVIF parameter provided by the camera, `MoveStatus`, to determine when the PTZ's motor is moving or idle. According to some users, Hikvision PTZs (even with the latest firmware) are not updating this value after PTZ movement.
Unfortunately there is no workaround for this bug in Hikvision firmware, so autotracking will not function correctly and should be disabled in your config. This may also be the case with other non-Hikvision cameras utilizing Hikvision firmware.
+
+### I tried calibrating my camera, but the logs show that it is stuck at 0% and Frigate is not starting up.
+
+This often has the same cause as above: the `MoveStatus` ONVIF parameter is not changing due to a bug in your camera's firmware. Also, see the note above: Frigate's web UI and all other cameras will be unresponsive while calibration is in progress. This is expected and normal. But if you don't see log entries every few seconds for calibration progress, your camera is not compatible with autotracking.
+
+### I'm seeing this error in the logs: "Autotracker: motion estimator couldn't get transformations". What does this mean?
+
+To maintain object tracking during PTZ moves, Frigate tracks the motion of your camera based on the details of the frame. If you are seeing this message, it could mean that your `zoom_factor` may be set too high, the scene around your detected object does not have enough details (like hard edges or color variations), or your camera's shutter speed is too slow and motion blur is occurring. Try reducing `zoom_factor`, finding a way to alter the scene around your object, or changing your camera's shutter speed.
+
+### Calibration seems to have completed, but the camera is not actually moving to track my object. Why?
+
+Some cameras have firmware that reports that FOV RelativeMove, the ONVIF command that Frigate uses for autotracking, is supported. However, if the camera does not pan or tilt when an object comes into the required zone, your camera's firmware does not actually support FOV RelativeMove. One such camera is the Uniview IPC672LR-AX4DUPK. It actually moves its zoom motor instead of panning and tilting and does not follow the ONVIF standard whatsoever.
diff --git a/docs/docs/configuration/birdseye.md b/docs/docs/configuration/birdseye.md
index 0b54a5f5f..6471bf4e3 100644
--- a/docs/docs/configuration/birdseye.md
+++ b/docs/docs/configuration/birdseye.md
@@ -33,3 +33,25 @@ cameras:
     birdseye:
       enabled: False
 ```
+
+### Sorting cameras in the Birdseye view
+
+It is possible to override the order of cameras that are being shown in the Birdseye view.
+The order needs to be set at the camera level.
+
+```yaml
+# Include all cameras by default in Birdseye view
+birdseye:
+  enabled: True
+  mode: continuous
+
+cameras:
+  front:
+    birdseye:
+      order: 1
+  back:
+    birdseye:
+      order: 2
+```
+
+*Note*: Cameras are sorted by default using their name to ensure a constant view inside Birdseye.
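+
+Since `enabled` can also be set per camera (as shown earlier on this page), ordering can be combined with disabling to pin some cameras while hiding others from Birdseye entirely. A minimal sketch with hypothetical camera names:
+
+```yaml
+cameras:
+  front:
+    birdseye:
+      order: 1
+  garage:
+    birdseye:
+      enabled: False # <- garage never appears in the Birdseye view
+```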
diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md
index 2b355d4ac..96299c7c4 100644
--- a/docs/docs/configuration/camera_specific.md
+++ b/docs/docs/configuration/camera_specific.md
@@ -80,8 +80,8 @@ cameras:
     rtmp:
       enabled: False # <-- RTMP should be disabled if your stream is not H264
     detect:
-      width: # <---- update for your camera's resolution
-      height: # <---- update for your camera's resolution
+      width: # <- optional, by default Frigate tries to automatically detect resolution
+      height: # <- optional, by default Frigate tries to automatically detect resolution
 ```
 
 ### Blue Iris RTSP Cameras
@@ -105,26 +105,65 @@ If available, recommended settings are:
 
 According to [this discussion](https://github.com/blakeblackshear/frigate/issues/3235#issuecomment-1135876973), the http video streams seem to be the most reliable for Reolink.
 
+Cameras connected via a Reolink NVR can be connected with the http stream; use `channel[0..15]` in the stream URL for the additional channels.
+The main stream can also be set up via RTSP, but this isn't always reliable on all hardware versions. The example configuration below works with the oldest hardware version of the RLN16-410 device with multiple types of cameras.
+
+:::caution
+
+The below configuration only works for Reolink cameras with a stream resolution of 5MP or lower; 8MP+ cameras need to use RTSP, as http-flv is not supported in this case.
+
+:::
+
 ```yaml
 go2rtc:
   streams:
-    your_reolink_camera:
+    your_reolink_camera:
       - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus"
-    your_reolink_camera_sub:
+    your_reolink_camera_sub:
       - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password"
+    your_reolink_camera_via_nvr:
+      - "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_main.bcs&user=username&password=password" # channel numbers are 0-15
+      - "ffmpeg:your_reolink_camera_via_nvr#audio=aac"
+    your_reolink_camera_via_nvr_sub:
+      - "ffmpeg:http://reolink_nvr_ip/flv?port=1935&app=bcs&stream=channel3_ext.bcs&user=username&password=password"
 
 cameras:
-  reolink:
+  your_reolink_camera:
     ffmpeg:
       inputs:
-        - path: rtsp://127.0.0.1:8554/your_reolink_camera?video=copy&audio=aac
+        - path: rtsp://127.0.0.1:8554/your_reolink_camera
          input_args: preset-rtsp-restream
          roles:
            - record
-        - path: rtsp://127.0.0.1:8554/your_reolink_camera_sub?video=copy
+        - path: rtsp://127.0.0.1:8554/your_reolink_camera_sub
          input_args: preset-rtsp-restream
          roles:
            - detect
+  reolink_via_nvr:
+    ffmpeg:
+      inputs:
+        - path: rtsp://127.0.0.1:8554/your_reolink_camera_via_nvr?video=copy&audio=aac
+          input_args: preset-rtsp-restream
+          roles:
+            - record
+        - path: rtsp://127.0.0.1:8554/your_reolink_camera_via_nvr_sub?video=copy
+          input_args: preset-rtsp-restream
+          roles:
+            - detect
+```
+
+#### Reolink Doorbell
+
+The Reolink doorbell supports two-way audio via go2rtc and other applications. It is important that the http-flv stream is still used for stability; a secondary RTSP stream can be added that will be used for the two-way audio only.
+ +```yaml +go2rtc: + streams: + your_reolink_doorbell: + - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=username&password=password#video=copy#audio=copy#audio=opus" + - rtsp://reolink_ip/Preview_01_sub + your_reolink_doorbell_sub: + - "ffmpeg:http://reolink_ip/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=username&password=password" ``` ### Unifi Protect Cameras @@ -140,7 +179,7 @@ go2rtc: - rtspx://192.168.1.1:7441/abcdefghijk ``` -[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-rtsp) +[See the go2rtc docs for more information](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-rtsp) In the Unifi 2.0 update Unifi Protect Cameras had a change in audio sample rate which causes issues for ffmpeg. The input rate needs to be set for record and rtmp if used directly with unifi protect. @@ -150,3 +189,7 @@ ffmpeg: record: preset-record-ubiquiti rtmp: preset-rtmp-ubiquiti # recommend using go2rtc instead ``` + +### TP-Link VIGI Cameras + +TP-Link VIGI cameras need some adjustments to the main stream settings on the camera itself to avoid issues. The stream needs to be configured as `H264` with `Smart Coding` set to `off`. Without these settings you may have problems when trying to watch recorded events. For example Firefox will stop playback after a few seconds and show the following error message: `The media playback was aborted due to a corruption problem or because the media used features your browser did not support.`. diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index d8fefed8f..a95ffae86 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -1,6 +1,6 @@ --- id: cameras -title: Cameras +title: Camera Configuration --- ## Setting Up Camera Inputs @@ -11,11 +11,12 @@ A camera is enabled by default but can be temporarily disabled by using `enabled Each role can only be assigned to one input per camera. The options for roles are as follows: -| Role | Description | -| ---------- | ---------------------------------------------------------------------------------------- | -| `detect` | Main feed for object detection | -| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) | -| `rtmp` | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) | +| Role | Description | +| -------- | ---------------------------------------------------------------------------------------- | +| `detect` | Main feed for object detection. [docs](object_detectors.md) | +| `record` | Saves segments of the video feed based on configuration settings. [docs](record.md) | +| `audio` | Feed for audio based detection. [docs](audio_detectors.md) | +| `rtmp` | Deprecated: Broadcast as an RTMP feed for other services to consume. [docs](restream.md) | ```yaml mqtt: @@ -33,8 +34,8 @@ cameras: roles: - record detect: - width: 1280 - height: 720 + width: 1280 # <- optional, by default Frigate tries to automatically detect resolution + height: 720 # <- optional, by default Frigate tries to automatically detect resolution ``` Additional cameras are simply added to the config under the `cameras` entry. @@ -48,3 +49,50 @@ cameras: ``` For camera model specific settings check the [camera specific](camera_specific.md) infos. + +## Setting up camera PTZ controls + +:::caution + +Not every PTZ supports ONVIF, which is the standard protocol Frigate uses to communicate with your camera. 
Check the [official list of ONVIF conformant products](https://www.onvif.org/conformant-products/), your camera documentation, or camera manufacturer's website to ensure your PTZ supports ONVIF. Also, ensure your camera is running the latest firmware. + +::: + +Add the onvif section to your camera in your configuration file: + +```yaml +cameras: + back: + ffmpeg: ... + onvif: + host: 10.0.10.10 + port: 8000 + user: admin + password: password +``` + +If the ONVIF connection is successful, PTZ controls will be available in the camera's WebUI. + +An ONVIF-capable camera that supports relative movement within the field of view (FOV) can also be configured to automatically track moving objects and keep them in the center of the frame. For autotracking setup, see the [autotracking](autotracking.md) docs. + +## ONVIF PTZ camera recommendations + +This list of working and non-working PTZ cameras is based on user feedback. + +| Brand or specific camera | PTZ Controls | Autotracking | Notes | +| ------------------------ | :----------: | :----------: | ----------------------------------------------------------------------------------------------------------------------------------------------- | +| Amcrest | ✅ | ✅ | ⛔️ Generally, Amcrest should work, but some older models (like the common IP2M-841) don't support autotracking | +| Amcrest ASH21 | ❌ | ❌ | No ONVIF support | +| Ctronics PTZ | ✅ | ❌ | | +| Dahua | ✅ | ✅ | | +| Foscam R5 | ✅ | ❌ | | +| Hikvision | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others | +| Reolink 511WA | ✅ | ❌ | Zoom only | +| Reolink E1 Pro | ✅ | ❌ | | +| Reolink E1 Zoom | ✅ | ❌ | | +| Reolink RLC-823A 16x | ✅ | ❌ | | +| Sunba 405-D20X | ✅ | ❌ | | +| Tapo C200 | ✅ | ❌ | Incomplete ONVIF support | +| Tapo C210 | ❌ | ❌ | Incomplete ONVIF support | +| Uniview IPC672LR-AX4DUPK | ✅ | ❌ | Firmware says FOV relative movement is supported, but camera doesn't actually move when sending ONVIF commands | +| Vikylin PTZ-2804X-I2 | ❌ | ❌ | Incomplete ONVIF support | diff --git a/docs/docs/configuration/ffmpeg_presets.md b/docs/docs/configuration/ffmpeg_presets.md index f346c92a0..e39d1f164 100644 --- a/docs/docs/configuration/ffmpeg_presets.md +++ b/docs/docs/configuration/ffmpeg_presets.md @@ -11,16 +11,20 @@ It is highly recommended to use hwaccel presets in the config. These presets not See [the hwaccel docs](/configuration/hardware_acceleration.md) for more info on how to setup hwaccel for your GPU / iGPU. 
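+
+Presets are applied via `ffmpeg -> hwaccel_args`, either globally or per camera. A minimal sketch (`preset-vaapi` is just an example; pick the preset for your hardware from the table below):
+
+```yaml
+ffmpeg:
+  hwaccel_args: preset-vaapi
+```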
-| Preset | Usage | Other Notes | -| --------------------- | ---------------------------- | ----------------------------------------------------- | -| preset-rpi-32-h264 | 32 bit Rpi with h264 stream | | -| preset-rpi-64-h264 | 64 bit Rpi with h264 stream | | -| preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen | -| preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead | -| preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead | -| preset-nvidia-h264 | Nvidia GPU with h264 stream | | -| preset-nvidia-h265 | Nvidia GPU with h265 stream | | -| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 | +| Preset | Usage | Other Notes | +| --------------------- | ------------------------------ | ----------------------------------------------------- | +| preset-rpi-64-h264 | 64 bit Rpi with h264 stream | | +| preset-rpi-64-h265 | 64 bit Rpi with h265 stream | | +| preset-vaapi | Intel & AMD VAAPI | Check hwaccel docs to ensure correct driver is chosen | +| preset-intel-qsv-h264 | Intel QSV with h264 stream | If issues occur recommend using vaapi preset instead | +| preset-intel-qsv-h265 | Intel QSV with h265 stream | If issues occur recommend using vaapi preset instead | +| preset-nvidia-h264 | Nvidia GPU with h264 stream | | +| preset-nvidia-h265 | Nvidia GPU with h265 stream | | +| preset-nvidia-mjpeg | Nvidia GPU with mjpeg stream | Recommend restreaming mjpeg and using nvidia-h264 | +| preset-jetson-h264 | Nvidia Jetson with h264 stream | | +| preset-jetson-h265 | Nvidia Jetson with h265 stream | | +| preset-rk-h264 | Rockchip MPP with h264 stream | Use image with *-rk suffix and privileged mode | +| preset-rk-h265 | Rockchip MPP with h265 stream | Use image with *-rk suffix and privileged mode | ### Input Args Presets diff --git a/docs/docs/configuration/hardware_acceleration.md b/docs/docs/configuration/hardware_acceleration.md index bb50d9cfe..ad9d27211 100644 --- a/docs/docs/configuration/hardware_acceleration.md +++ b/docs/docs/configuration/hardware_acceleration.md @@ -3,16 +3,25 @@ id: hardware_acceleration title: Hardware Acceleration --- +# Hardware Acceleration + It is recommended to update your configuration to enable hardware accelerated decoding in ffmpeg. Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro -### Raspberry Pi 3/4 +# Officially Supported + +## Raspberry Pi 3/4 Ensure you increase the allocated RAM for your GPU to at least 128 (raspi-config > Performance Options > GPU Memory). **NOTICE**: If you are using the addon, you may need to turn off `Protection mode` for hardware acceleration. ```yaml +# if you want to decode a h264 stream ffmpeg: hwaccel_args: preset-rpi-64-h264 + +# if you want to decode a h265 (hevc) stream +ffmpeg: + hwaccel_args: preset-rpi-64-h265 ``` :::note @@ -21,17 +30,17 @@ If running Frigate in docker, you either need to run in priviliged mode or be su ```yaml docker run -d \ - --name frigate \ - ... - --device /dev/video10 \ - ghcr.io/blakeblackshear/frigate:stable +--name frigate \ +... 
+--device /dev/video10 \ +ghcr.io/blakeblackshear/frigate:stable ``` ::: -### Intel-based CPUs +## Intel-based CPUs -#### Via VAAPI +### Via VAAPI VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. VAAPI is recommended for all generations of Intel-based CPUs if QSV does not work. @@ -40,39 +49,42 @@ ffmpeg: hwaccel_args: preset-vaapi ``` -**NOTICE**: With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the frigate.yml for HA OS users](advanced.md#environment_vars). +:::note -#### Via Quicksync (>=10th Generation only) +With some of the processors, like the J4125, the default driver `iHD` doesn't seem to work correctly for hardware acceleration. You may need to change the driver to `i965` by adding the following environment variable `LIBVA_DRIVER_NAME=i965` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). + +::: + +### Via Quicksync (>=10th Generation only) QSV must be set specifically based on the video encoding of the stream. -##### H.264 streams +#### H.264 streams ```yaml ffmpeg: hwaccel_args: preset-intel-qsv-h264 ``` -##### H.265 streams +#### H.265 streams ```yaml ffmpeg: hwaccel_args: preset-intel-qsv-h265 ``` -#### Configuring Intel GPU Stats in Docker +### Configuring Intel GPU Stats in Docker -Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. Three possible changes can be made: +Additional configuration is needed for the Docker container to be able to access the `intel_gpu_top` command for GPU stats. There are two options: 1. Run the container as privileged. -2. Adding the `CAP_PERFMON` capability. -3. Setting the `perf_event_paranoid` low enough to allow access to the performance event system. +2. Add the `CAP_PERFMON` capability (note: you might need to set the `perf_event_paranoid` low enough to allow access to the performance event system.) -##### Run as privileged +#### Run as privileged This method works, but it gives more permissions to the container than are actually needed. -###### Docker Compose - Privileged +##### Docker Compose - Privileged ```yaml services: @@ -82,7 +94,7 @@ services: privileged: true ``` -###### Docker Run CLI - Privileged +##### Docker Run CLI - Privileged ```bash docker run -d \ @@ -92,11 +104,11 @@ docker run -d \ ghcr.io/blakeblackshear/frigate:stable ``` -##### CAP_PERFMON +#### CAP_PERFMON Only recent versions of Docker support the `CAP_PERFMON` capability. You can test to see if yours supports it by running: `docker run --cap-add=CAP_PERFMON hello-world` -###### Docker Compose - CAP_PERFMON +##### Docker Compose - CAP_PERFMON ```yaml services: @@ -107,7 +119,7 @@ services: - CAP_PERFMON ``` -###### Docker Run CLI - CAP_PERFMON +##### Docker Run CLI - CAP_PERFMON ```bash docker run -d \ @@ -117,26 +129,30 @@ docker run -d \ ghcr.io/blakeblackshear/frigate:stable ``` -##### perf_event_paranoid +#### perf_event_paranoid _Note: This setting must be changed for the entire system._ For more information on the various values across different distributions, see https://askubuntu.com/questions/1400874/what-does-perf-paranoia-level-four-do. 
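+
+Before changing anything, you can check what your system is currently set to (a read-only check):
+
+```bash
+cat /proc/sys/kernel/perf_event_paranoid
+```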
-Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=1 >> /etc/sysctl.d/local.conf'` +Depending on your OS and kernel configuration, you may need to change the `/proc/sys/kernel/perf_event_paranoid` kernel tunable. You can test the change by running `sudo sh -c 'echo 2 >/proc/sys/kernel/perf_event_paranoid'` which will persist until a reboot. Make it permanent by running `sudo sh -c 'echo kernel.perf_event_paranoid=2 >> /etc/sysctl.d/local.conf'` -### AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver +## AMD/ATI GPUs (Radeon HD 2000 and newer GPUs) via libva-mesa-driver VAAPI supports automatic profile selection so it will work automatically with both H.264 and H.265 streams. -**Note:** You also need to set `LIBVA_DRIVER_NAME=radeonsi` as an environment variable on the container. +:::note + +You need to change the driver to `radeonsi` by adding the following environment variable `LIBVA_DRIVER_NAME=radeonsi` to your docker-compose file or [in the `frigate.yaml` for HA OS users](advanced.md#environment_vars). + +::: ```yaml ffmpeg: hwaccel_args: preset-vaapi ``` -### NVIDIA GPUs +## NVIDIA GPUs While older GPUs may work, it is recommended to use modern, supported GPUs. NVIDIA provides a [matrix of supported GPUs and features](https://developer.nvidia.com/video-encode-and-decode-gpu-support-matrix-new). If your card is on the list and supports CUVID/NVDEC, it will most likely work with Frigate for decoding. However, you must also use [a driver version that will work with FFmpeg](https://github.com/FFmpeg/nv-codec-headers/blob/master/README). Older driver versions may be missing symbols and fail to work, and older cards are not supported by newer driver versions. The only way around this is to [provide your own FFmpeg](/configuration/advanced#custom-ffmpeg-build) that will work with your driver version, but this is unsupported and may not work well if at all. @@ -144,11 +160,11 @@ A more complete list of cards and their compatible drivers is available in the [ If your distribution does not offer NVIDIA driver packages, you can [download them here](https://www.nvidia.com/en-us/drivers/unix/). -#### Configuring Nvidia GPUs in Docker +### Configuring Nvidia GPUs in Docker Additional configuration is needed for the Docker container to be able to access the NVIDIA GPU. The supported method for this is to install the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker) and specify the GPU to Docker. How you do this depends on how Docker is being run: -##### Docker Compose - Nvidia GPU +#### Docker Compose - Nvidia GPU ```yaml services: @@ -165,7 +181,7 @@ services: capabilities: [gpu] ``` -##### Docker Run CLI - Nvidia GPU +#### Docker Run CLI - Nvidia GPU ```bash docker run -d \ @@ -175,7 +191,7 @@ docker run -d \ ghcr.io/blakeblackshear/frigate:stable ``` -#### Setup Decoder +### Setup Decoder The decoder you need to pass in the `hwaccel_args` will depend on the input video. @@ -242,3 +258,133 @@ processes: If you do not see these processes, check the `docker logs` for the container and look for decoding errors. 
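+
+As an additional check, `nvidia-smi dmon` (assuming the NVIDIA drivers and container toolkit are installed as described above) prints live per-GPU utilization, where a non-zero `dec` column confirms the hardware decoder is in use:
+
+```bash
+nvidia-smi dmon -s u
+```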
These instructions were originally based on the [Jellyfin documentation](https://jellyfin.org/docs/general/administration/hardware-acceleration.html#nvidia-hardware-acceleration-on-docker-linux).
+
+# Community Supported
+
+## NVIDIA Jetson (Orin AGX, Orin NX, Orin Nano\*, Xavier AGX, Xavier NX, TX2, TX1, Nano)
+
+A separate set of docker images is available that is based on Jetpack/L4T. They come with an `ffmpeg` build
+with codecs that use the Jetson's dedicated media engine. If your Jetson host is running Jetpack 4.6, use the
+`frigate-tensorrt-jp4` image, or if your Jetson host is running Jetpack 5.0+, use the `frigate-tensorrt-jp5`
+image. Note that the Orin Nano has no video encoder, so Frigate will use software encoding on this platform,
+but the image will still allow hardware decoding and tensorrt object detection.
+
+You will need to use the image with the nvidia container runtime:
+
+### Docker Run CLI - Jetson
+
+```bash
+docker run -d \
+  ...
+  --runtime nvidia \
+  ghcr.io/blakeblackshear/frigate-tensorrt-jp5
+```
+
+### Docker Compose - Jetson
+
+```yaml
+version: '2.4'
+services:
+  frigate:
+    ...
+    image: ghcr.io/blakeblackshear/frigate-tensorrt-jp5
+    runtime: nvidia # Add this
+```
+
+:::note
+
+The `runtime:` tag is not supported on older versions of docker-compose. If you run into this, you can instead use the nvidia runtime system-wide by adding `"default-runtime": "nvidia"` to `/etc/docker/daemon.json`:
+
+```
+{
+    "runtimes": {
+        "nvidia": {
+            "path": "nvidia-container-runtime",
+            "runtimeArgs": []
+        }
+    },
+    "default-runtime": "nvidia"
+}
+```
+
+:::
+
+### Setup Decoder
+
+The decoder you need to pass in the `hwaccel_args` will depend on the input video.
+
+A list of supported codecs follows (you can run `ffmpeg -decoders | grep nvmpi` in the container to see which ones your device supports):
+
+```
+ V..... h264_nvmpi           h264 (nvmpi) (codec h264)
+ V..... hevc_nvmpi           hevc (nvmpi) (codec hevc)
+ V..... mpeg2_nvmpi          mpeg2 (nvmpi) (codec mpeg2video)
+ V..... mpeg4_nvmpi          mpeg4 (nvmpi) (codec mpeg4)
+ V..... vp8_nvmpi            vp8 (nvmpi) (codec vp8)
+ V..... vp9_nvmpi            vp9 (nvmpi) (codec vp9)
+```
+
+For example, for H264 video, you'll select `preset-jetson-h264`.
+
+```yaml
+ffmpeg:
+  hwaccel_args: preset-jetson-h264
+```
+
+If everything is working correctly, you should see a significant reduction in ffmpeg CPU load and power consumption.
+Verify that hardware decoding is working by running `jtop` (`sudo pip3 install -U jetson-stats`), which should show
+that NVDEC/NVDEC1 are in use.
+
+## Rockchip platform
+
+Hardware accelerated video decoding and encoding are supported on all Rockchip SoCs.
+
+### Setup
+
+Use a Frigate docker image with the `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file.
+
+### Configuration
+
+Add one of the following ffmpeg presets to your `config.yaml` to enable hardware acceleration:
+
+```yaml
+# if you try to decode a h264 encoded stream
+ffmpeg:
+  hwaccel_args: preset-rk-h264
+
+# if you try to decode a h265 (hevc) encoded stream
+ffmpeg:
+  hwaccel_args: preset-rk-h265
+```
+
+:::note
+
+Make sure that your SoC supports hardware acceleration for your input stream. For example, if your camera streams with h265 encoding and a 4k resolution, your SoC must be able to decode and encode h265 at a 4k resolution or higher. If you are unsure whether your SoC meets the requirements, take a look at the datasheet.
+ +::: + +### go2rtc presets for hardware accelerated transcoding + +If your input stream is to be transcoded using hardware acceleration, there are these presets for go2rtc: `h264/rk` and `h265/rk`. You can use them this way: + +``` +go2rtc: + streams: + Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264/rk + Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265/rk +``` + +:::warning + +The go2rtc docs may suggest the following configuration: + +``` +go2rtc: + streams: + Cam_h264: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h264#hardware=rk + Cam_h265: ffmpeg:rtsp://username:password@192.168.1.123/av_stream/ch0#video=h265#hardware=rk +``` + +However, this does not currently work. + +::: diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md index 9686e8240..53993af67 100644 --- a/docs/docs/configuration/index.md +++ b/docs/docs/configuration/index.md @@ -1,49 +1,35 @@ --- id: index -title: Configuration File +title: Frigate Configuration --- -For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yml` or `frigate.yaml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored. +For Home Assistant Addon installations, the config file needs to be in the root of your Home Assistant config directory (same location as `configuration.yaml`). It can be named `frigate.yaml` or `frigate.yml`, but if both files exist `frigate.yaml` will be preferred and `frigate.yml` will be ignored. For all other installation types, the config file should be mapped to `/config/config.yml` inside the container. -It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md): +It is recommended to start with a minimal configuration and add to it as described in [this guide](../guides/getting_started.md) and use the built in configuration editor in Frigate's UI which supports validation. ```yaml mqtt: - host: mqtt.server.com + enabled: False + cameras: - back: + dummy_camera: # <--- this will be changed to your actual camera later + enabled: False ffmpeg: inputs: - - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + - path: rtsp://127.0.0.1:554/rtsp roles: - detect - detect: - width: 1280 - height: 720 ``` -### VSCode Configuration Schema +## VSCode Configuration Schema VSCode (and VSCode addon) supports the JSON schemas which will automatically validate the config. This can be added by adding `# yaml-language-server: $schema=http://frigate_host:5000/api/config/schema.json` to the top of the config file. `frigate_host` being the IP address of Frigate or `ccab4aaf-frigate` if running in the addon. -### Full configuration reference: +## Environment Variable Substitution -:::caution - -It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions. 
- -::: - -**Note:** The following values will be replaced at runtime by using environment variables - -- `{FRIGATE_MQTT_USER}` -- `{FRIGATE_MQTT_PASSWORD}` -- `{FRIGATE_RTSP_USER}` -- `{FRIGATE_RTSP_PASSWORD}` - -for example: +Frigate supports the use of environment variables starting with `FRIGATE_` **only** where specifically indicated in the [reference config](./reference.md). For example, the following values can be replaced at runtime by using environment variables: ```yaml mqtt: @@ -56,482 +42,195 @@ mqtt: ``` ```yaml -mqtt: - # Optional: Enable mqtt server (default: shown below) - enabled: True - # Required: host name - host: mqtt.server.com - # Optional: port (default: shown below) - port: 1883 - # Optional: topic prefix (default: shown below) - # NOTE: must be unique if you are running multiple instances - topic_prefix: frigate - # Optional: client id (default: shown below) - # NOTE: must be unique if you are running multiple instances - client_id: frigate - # Optional: user - # NOTE: MQTT user can be specified with an environment variables that must begin with 'FRIGATE_'. - # e.g. user: '{FRIGATE_MQTT_USER}' - user: mqtt_user - # Optional: password - # NOTE: MQTT password can be specified with an environment variables that must begin with 'FRIGATE_'. - # e.g. password: '{FRIGATE_MQTT_PASSWORD}' - password: password - # Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None) - tls_ca_certs: /path/to/ca.crt - # Optional: tls_client_cert and tls_client key in order to use self-signed client - # certificates (default: None) - # NOTE: certificate must not be password-protected - # do not set user and password when using a client certificate - tls_client_cert: /path/to/client.crt - tls_client_key: /path/to/client.key - # Optional: tls_insecure (true/false) for enabling TLS verification of - # the server hostname in the server certificate (default: None) - tls_insecure: false - # Optional: interval in seconds for publishing stats (default: shown below) - stats_interval: 60 +onvif: + host: 10.0.10.10 + port: 8000 + user: "{FRIGATE_RTSP_USER}" + password: "{FRIGATE_RTSP_PASSWORD}" +``` -# Optional: Detectors configuration. Defaults to a single CPU detector -detectors: - # Required: name of the detector - detector_name: - # Required: type of the detector - # Frigate provided types include 'cpu', 'edgetpu', and 'openvino' (default: shown below) - # Additional detector types can also be plugged in. - # Detectors may require additional configuration. - # Refer to the Detectors configuration page for more information. - type: cpu - -# Optional: Database configuration -database: - # The path to store the SQLite DB (default: shown below) - path: /media/frigate/frigate.db - -# Optional: model modifications -model: - # Optional: path to the model (default: automatic based on detector) - path: /edgetpu_model.tflite - # Optional: path to the labelmap (default: shown below) - labelmap_path: /labelmap.txt - # Required: Object detection model input width (default: shown below) - width: 320 - # Required: Object detection model input height (default: shown below) - height: 320 - # Optional: Object detection model input colorspace - # Valid values are rgb, bgr, or yuv. 
(default: shown below) - input_pixel_format: rgb - # Optional: Object detection model input tensor format - # Valid values are nhwc or nchw (default: shown below) - input_tensor: nhwc - # Optional: Object detection model type, currently only used with the OpenVINO detector - # Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below) - model_type: ssd - # Optional: Label name modifications. These are merged into the standard labelmap. - labelmap: - 2: vehicle - -# Optional: logger verbosity settings -logger: - # Optional: Default log verbosity (default: shown below) - default: info - # Optional: Component specific logger overrides - logs: - frigate.event: debug - -# Optional: set environment variables -environment_vars: - EXAMPLE_VAR: value - -# Optional: birdseye configuration -# NOTE: Can (enabled, mode) be overridden at the camera level -birdseye: - # Optional: Enable birdseye view (default: shown below) - enabled: True - # Optional: Restream birdseye via RTSP (default: shown below) - # NOTE: Enabling this will set birdseye to run 24/7 which may increase CPU usage somewhat. - restream: False - # Optional: Width of the output resolution (default: shown below) - width: 1280 - # Optional: Height of the output resolution (default: shown below) - height: 720 - # Optional: Encoding quality of the mpeg1 feed (default: shown below) - # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. - quality: 8 - # Optional: Mode of the view. Available options are: objects, motion, and continuous - # objects - cameras are included if they have had a tracked object within the last 30 seconds - # motion - cameras are included if motion was detected in the last 30 seconds - # continuous - all cameras are included always - mode: objects - -# Optional: ffmpeg configuration -# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets -ffmpeg: - # Optional: global ffmpeg args (default: shown below) - global_args: -hide_banner -loglevel warning -threads 2 - # Optional: global hwaccel args (default: shown below) - # NOTE: See hardware acceleration docs for your specific device - hwaccel_args: [] - # Optional: global input args (default: shown below) - input_args: preset-rtsp-generic - # Optional: global output args - output_args: - # Optional: output args for detect streams (default: shown below) - detect: -threads 2 -f rawvideo -pix_fmt yuv420p - # Optional: output args for record streams (default: shown below) - record: preset-record-generic - # Optional: output args for rtmp streams (default: shown below) - rtmp: preset-rtmp-generic - -# Optional: Detect configuration -# NOTE: Can be overridden at the camera level -detect: - # Optional: width of the frame for the input with the detect role (default: shown below) - width: 1280 - # Optional: height of the frame for the input with the detect role (default: shown below) - height: 720 - # Optional: desired fps for your camera for the input with the detect role (default: shown below) - # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. - fps: 5 - # Optional: enables detection for the camera (default: True) - enabled: True - # Optional: Number of frames without a detection before Frigate considers an object to be gone. 
(default: 5x the frame rate) - max_disappeared: 25 - # Optional: Configuration for stationary object tracking - stationary: - # Optional: Frequency for confirming stationary objects (default: shown below) - # When set to 0, object detection will not confirm stationary objects until movement is detected. - # If set to 10, object detection will run to confirm the object still exists on every 10th frame. - interval: 0 - # Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s) - threshold: 50 - # Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever) - # This can help with false positives for objects that should only be stationary for a limited amount of time. - # It can also be used to disable stationary object tracking. For example, you may want to set a value for person, but leave - # car at the default. - # WARNING: Setting these values overrides default behavior and disables stationary object tracking. - # There are very few situations where you would want it disabled. It is NOT recommended to - # copy these values from the example config into your config unless you know they are needed. - max_frames: - # Optional: Default for all object types (default: not set, track forever) - default: 3000 - # Optional: Object specific values - objects: - person: 1000 - -# Optional: Object configuration -# NOTE: Can be overridden at the camera level -objects: - # Optional: list of objects to track from labelmap.txt (default: shown below) - track: - - person - # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object. - # NOTE: This mask is COMBINED with the object type specific mask below - mask: 0,0,1000,0,1000,200,0,200 - # Optional: filters to reduce false positives for specific object types - filters: - person: - # Optional: minimum width*height of the bounding box for the detected object (default: 0) - min_area: 5000 - # Optional: maximum width*height of the bounding box for the detected object (default: 24000000) - max_area: 100000 - # Optional: minimum width/height of the bounding box for the detected object (default: 0) - min_ratio: 0.5 - # Optional: maximum width/height of the bounding box for the detected object (default: 24000000) - max_ratio: 2.0 - # Optional: minimum score for the object to initiate tracking (default: shown below) - min_score: 0.5 - # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below) - threshold: 0.7 - # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) - # Checks based on the bottom center of the bounding box of the object - mask: 0,0,1000,0,1000,200,0,200 - -# Optional: Motion configuration -# NOTE: Can be overridden at the camera level -motion: - # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) - # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. - # The value should be between 1 and 255. - threshold: 25 - # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: 30) - # Increasing this value will prevent smaller areas of motion from being detected. 
Decreasing will - # make motion detection more sensitive to smaller moving objects. - # As a rule of thumb: - # - 15 - high sensitivity - # - 30 - medium sensitivity - # - 50 - low sensitivity - contour_area: 30 - # Optional: Alpha value passed to cv2.accumulateWeighted when averaging the motion delta across multiple frames (default: shown below) - # Higher values mean the current frame impacts the delta a lot, and a single raindrop may register as motion. - # Too low and a fast moving person wont be detected as motion. - delta_alpha: 0.2 - # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below) - # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster. - # Low values will cause things like moving shadows to be detected as motion for longer. - # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ - frame_alpha: 0.2 - # Optional: Height of the resized motion frame (default: 50) - # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense - # of higher CPU usage. Lower values result in less CPU, but small changes may not register as motion. - frame_height: 50 - # Optional: motion mask - # NOTE: see docs for more detailed info on creating masks - mask: 0,900,1080,900,1080,1920,0,1920 - # Optional: improve contrast (default: shown below) - # Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive - # for daytime. - improve_contrast: False - # Optional: Delay when updating camera motion through MQTT from ON -> OFF (default: shown below). - mqtt_off_delay: 30 - -# Optional: Record configuration -# NOTE: Can be overridden at the camera level -record: - # Optional: Enable recording (default: shown below) - # WARNING: If recording is disabled in the config, turning it on via - # the UI or MQTT later will have no effect. - enabled: False - # Optional: Number of minutes to wait between cleanup runs (default: shown below) - # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize i/o - expire_interval: 60 - # Optional: Retention settings for recording - retain: - # Optional: Number of days to retain recordings regardless of events (default: shown below) - # NOTE: This should be set to 0 and retention should be defined in events section below - # if you only want to retain recordings of events. - days: 0 - # Optional: Mode for retention. Available options are: all, motion, and active_objects - # all - save all recording segments regardless of activity - # motion - save all recordings segments with any detected motion - # active_objects - save all recording segments with active/moving objects - # NOTE: this mode only applies when the days setting above is greater than 0 - mode: all - # Optional: Event recording settings - events: - # Optional: Number of seconds before the event to include (default: shown below) - pre_capture: 5 - # Optional: Number of seconds after the event to include (default: shown below) - post_capture: 5 - # Optional: Objects to save recordings for. 
(default: all tracked objects) - objects: - - person - # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Retention settings for recordings of events - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Mode for retention. (default: shown below) - # all - save all recording segments for events regardless of activity - # motion - save all recordings segments for events with any detected motion - # active_objects - save all recording segments for event with active/moving objects - # - # NOTE: If the retain mode for the camera is more restrictive than the mode configured - # here, the segments will already be gone by the time this mode is applied. - # For example, if the camera retain mode is "motion", the segments without motion are - # never stored, so setting the mode to "all" here won't bring them back. - mode: motion - # Optional: Per object retention days - objects: - person: 15 - -# Optional: Configuration for the jpg snapshots written to the clips directory for each event -# NOTE: Can be overridden at the camera level -snapshots: - # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below) - enabled: False - # Optional: save a clean PNG copy of the snapshot image (default: shown below) - clean_copy: True - # Optional: print a timestamp on the snapshots (default: shown below) - timestamp: False - # Optional: draw bounding box on the snapshots (default: shown below) - bounding_box: False - # Optional: crop the snapshot (default: shown below) - crop: False - # Optional: height to resize the snapshot to (default: original size) - height: 175 - # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - # Optional: Camera override for retention settings (default: global values) - retain: - # Required: Default retention days (default: shown below) - default: 10 - # Optional: Per object retention days - objects: - person: 15 - -# Optional: RTMP configuration -# NOTE: RTMP is deprecated in favor of restream -# NOTE: Can be overridden at the camera level -rtmp: - # Optional: Enable the RTMP stream (default: False) - enabled: False - -# Optional: Restream configuration -# Uses https://github.com/AlexxIT/go2rtc (v1.2.0) +```yaml go2rtc: + rtsp: + username: "{FRIGATE_GO2RTC_RTSP_USERNAME}" + password: "{FRIGATE_GO2RTC_RTSP_PASSWORD}" +``` -# Optional: jsmpeg stream configuration for WebUI -live: - # Optional: Set the name of the stream that should be used for live view - # in frigate WebUI. (default: name of camera) - stream_name: camera_name - # Optional: Set the height of the jsmpeg stream. (default: 720) - # This must be less than or equal to the height of the detect stream. Lower resolutions - # reduce bandwidth required for viewing the jsmpeg stream. Width is computed to match known aspect ratio. - height: 720 - # Optional: Set the encode quality of the jsmpeg stream (default: shown below) - # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. 
- quality: 8 +## Common configuration examples -# Optional: in-feed timestamp style configuration -# NOTE: Can be overridden at the camera level -timestamp_style: - # Optional: Position of the timestamp (default: shown below) - # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) - position: "tl" - # Optional: Format specifier conform to the Python package "datetime" (default: shown below) - # Additional Examples: - # german: "%d.%m.%Y %H:%M:%S" - format: "%m/%d/%Y %H:%M:%S" - # Optional: Color of font - color: - # All Required when color is specified (default: shown below) - red: 255 - green: 255 - blue: 255 - # Optional: Line thickness of font (default: shown below) - thickness: 2 - # Optional: Effect of lettering (default: shown below) - # None (No effect), - # "solid" (solid background in inverse color of font) - # "shadow" (shadow for font) - effect: None +Here are some common starter configuration examples. Refer to the [reference config](./reference.md) for detailed information about all the config values. + +### Raspberry Pi Home Assistant Addon with USB Coral + +- Single camera with 720p, 5fps stream for detect +- MQTT connected to the Home Assistant Mosquitto addon +- Hardware acceleration for decoding video +- USB Coral detector +- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not +- Continue to keep all video if it was during any event for 30 days +- Save snapshots for 30 days +- Motion mask for the camera timestamp + +```yaml +mqtt: + host: core-mosquitto + user: mqtt-user + password: xxxxxxxxxx + +ffmpeg: + hwaccel_args: preset-rpi-64-h264 + +detectors: + coral: + type: edgetpu + device: usb + +record: + enabled: True + retain: + days: 7 + mode: motion + events: + retain: + default: 30 + mode: motion + +snapshots: + enabled: True + retain: + default: 30 -# Required cameras: - # Required: name of the camera - back: - # Optional: Enable/Disable the camera (default: shown below). - # If disabled: config is used but no live stream and no capture etc. - # Events/Recordings are still viewable. - enabled: True - # Required: ffmpeg settings for the camera + name_of_your_camera: + detect: + width: 1280 + height: 720 + fps: 5 ffmpeg: - # Required: A list of input streams for the camera. See documentation for more information. inputs: - # Required: the path to the stream - # NOTE: path may include environment variables, which must begin with 'FRIGATE_' and be referenced in {} - - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 - # Required: list of roles for this stream. valid values are: detect,record,rtmp - # NOTICE: In addition to assigning the record and rtmp roles, - # they must also be enabled in the camera config. + - path: rtsp://10.0.10.10:554/rtsp roles: - detect - - record - - rtmp - # Optional: stream specific global args (default: inherit) - # global_args: - # Optional: stream specific hwaccel args (default: inherit) - # hwaccel_args: - # Optional: stream specific input args (default: inherit) - # input_args: - # Optional: camera specific global args (default: inherit) - # global_args: - # Optional: camera specific hwaccel args (default: inherit) - # hwaccel_args: - # Optional: camera specific input args (default: inherit) - # input_args: - # Optional: camera specific output args (default: inherit) - # output_args: - - # Optional: timeout for highest scoring image before allowing it - # to be replaced by a newer image.
(default: shown below) - best_image_timeout: 60 - - # Optional: zones for this camera - zones: - # Required: name of the zone - # NOTE: This must be different than any camera names, but can match with another zone on another - # camera. - front_steps: - # Required: List of x,y coordinates to define the polygon of the zone. - # NOTE: Presence in a zone is evaluated only based on the bottom center of the objects bounding box. - coordinates: 545,1077,747,939,788,805 - # Optional: List of objects that can trigger this zone (default: all tracked objects) - objects: - - person - # Optional: Zone level object filters. - # NOTE: The global and camera filters are applied upstream. - filters: - person: - min_area: 5000 - max_area: 100000 - threshold: 0.7 - - # Optional: Configuration for the jpg snapshots published via MQTT - mqtt: - # Optional: Enable publishing snapshot via mqtt for camera (default: shown below) - # NOTE: Only applies to publishing image data to MQTT via 'frigate///snapshot'. - # All other messages will still be published. - enabled: True - # Optional: print a timestamp on the snapshots (default: shown below) - timestamp: True - # Optional: draw bounding box on the snapshots (default: shown below) - bounding_box: True - # Optional: crop the snapshot (default: shown below) - crop: True - # Optional: height to resize the snapshot to (default: shown below) - height: 270 - # Optional: jpeg encode quality (default: shown below) - quality: 70 - # Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones) - required_zones: [] - - # Optional: Configuration for how camera is handled in the GUI. - ui: - # Optional: Adjust sort order of cameras in the UI. Larger numbers come later (default: shown below) - # By default the cameras are sorted alphabetically. - order: 0 - # Optional: Whether or not to show the camera in the Frigate UI (default: shown below) - dashboard: True - -# Optional -ui: - # Optional: Set the default live mode for cameras in the UI (default: shown below) - live_mode: mse - # Optional: Set a timezone to use in the UI (default: use browser local time) - # timezone: America/Denver - # Optional: Use an experimental recordings / camera view UI (default: shown below) - use_experimental: False - # Optional: Set the time format used. - # Options are browser, 12hour, or 24hour (default: shown below) - time_format: browser - # Optional: Set the date style for a specified length. - # Options are: full, long, medium, short - # Examples: - # short: 2/11/23 - # medium: Feb 11, 2023 - # full: Saturday, February 11, 2023 - # (default: shown below). - date_style: short - # Optional: Set the time style for a specified length. - # Options are: full, long, medium, short - # Examples: - # short: 8:14 PM - # medium: 8:15:22 PM - # full: 8:15:22 PM Mountain Standard Time - # (default: shown below). 
- time_style: medium - # Optional: Ability to manually override the date / time styling to use strftime format - # https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html - # possible values are shown above (default: not set) - strftime_fmt: "%Y/%m/%d %H:%M" - -# Optional: Telemetry configuration -telemetry: - # Optional: Enable the latest version outbound check (default: shown below) - # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions - version_check: True + motion: + mask: + - 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432 +``` + +### Standalone Intel Mini PC with USB Coral + +- Single camera with 720p, 5fps stream for detect +- MQTT disabled (not integrated with Home Assistant) +- VAAPI hardware acceleration for decoding video +- USB Coral detector +- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not +- Continue to keep all video if it was during any event for 30 days +- Save snapshots for 30 days +- Motion mask for the camera timestamp + +```yaml +mqtt: + enabled: False + +ffmpeg: + hwaccel_args: preset-vaapi + +detectors: + coral: + type: edgetpu + device: usb + +record: + enabled: True + retain: + days: 7 + mode: motion + events: + retain: + default: 30 + mode: motion + +snapshots: + enabled: True + retain: + default: 30 + +cameras: + name_of_your_camera: + detect: + width: 1280 + height: 720 + fps: 5 + ffmpeg: + inputs: + - path: rtsp://10.0.10.10:554/rtsp + roles: + - detect + motion: + mask: + - 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432 +``` + +### Home Assistant integrated Intel Mini PC with OpenVINO + +- Single camera with 720p, 5fps stream for detect +- MQTT connected to the same MQTT server as Home Assistant +- VAAPI hardware acceleration for decoding video +- OpenVINO detector +- Save all video with any detectable motion for 7 days regardless of whether any objects were detected or not +- Continue to keep all video if it was during any event for 30 days +- Save snapshots for 30 days +- Motion mask for the camera timestamp + +```yaml +mqtt: + host: 192.168.X.X # <---- same mqtt broker that Home Assistant uses + user: mqtt-user + password: xxxxxxxxxx + +ffmpeg: + hwaccel_args: preset-vaapi + +detectors: + ov: + type: openvino + device: AUTO + model: + path: /openvino-model/ssdlite_mobilenet_v2.xml + +model: + width: 300 + height: 300 + input_tensor: nhwc + input_pixel_format: bgr + labelmap_path: /openvino-model/coco_91cl_bkgr.txt + +record: + enabled: True + retain: + days: 7 + mode: motion + events: + retain: + default: 30 + mode: motion + +snapshots: + enabled: True + retain: + default: 30 + +cameras: + name_of_your_camera: + detect: + width: 1280 + height: 720 + fps: 5 + ffmpeg: + inputs: + - path: rtsp://10.0.10.10:554/rtsp + roles: + - detect + motion: + mask: + - 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432 +``` diff --git a/docs/docs/configuration/live.md b/docs/docs/configuration/live.md index b2eff6129..003e7599c 100644 --- a/docs/docs/configuration/live.md +++ b/docs/docs/configuration/live.md @@ -9,11 +9,11 @@ Frigate has different live view options, some of which require the bundled `go2r Live view options can be selected while viewing the live stream.
The options are: -| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations | -| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | -------------------------------------------- | -| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none | -| mse | low | native | native | yes (depends on audio codec) | yes | not supported on iOS, Firefox is h.264 only | -| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 | +| Source | Latency | Frame Rate | Resolution | Audio | Requires go2rtc | Other Limitations | +| ------ | ------- | ------------------------------------- | -------------- | ---------------------------- | --------------- | ------------------------------------------------ | +| jsmpeg | low | same as `detect -> fps`, capped at 10 | same as detect | no | no | none | +| mse | low | native | native | yes (depends on audio codec) | yes | iPhone requires iOS 17.1+, Firefox is h.264 only | +| webrtc | lowest | native | native | yes (depends on audio codec) | yes | requires extra config, doesn't support h.265 | ### Audio Support @@ -37,12 +37,12 @@ There may be some cameras that you would prefer to use the sub stream for live v ```yaml go2rtc: streams: - rtsp_cam: + test_cam: - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio. - - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus - rtsp_cam_sub: + - "ffmpeg:test_cam#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc + test_cam_sub: - rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio. - - "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus + - "ffmpeg:test_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus for webrtc cameras: test_cam: @@ -59,7 +59,7 @@ cameras: roles: - detect live: - stream_name: rtsp_cam_sub + stream_name: test_cam_sub ``` ### WebRTC extra configuration: @@ -78,7 +78,7 @@ WebRTC works by creating a TCP or UDP connection on port `8555`. However, it req - 192.168.1.10:8555 - stun:8555 ``` - + - For access through Tailscale, the Frigate system's Tailscale IP must be added as a WebRTC candidate. Tailscale IPs all start with `100.`, and are reserved within the `100.0.0.0/8` CIDR block. :::tip @@ -104,6 +104,7 @@ If you are having difficulties getting WebRTC to work and you are running Frigat If not running in host mode, port 8555 will need to be mapped for the container: docker-compose.yml + ```yaml services: frigate: @@ -115,4 +116,4 @@ services: ::: -See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#module-webrtc) for more information about this. +See [go2rtc WebRTC docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.3#module-webrtc) for more information about this. diff --git a/docs/docs/configuration/masks.md b/docs/docs/configuration/masks.md index 321b909cb..ae64e7e5f 100644 --- a/docs/docs/configuration/masks.md +++ b/docs/docs/configuration/masks.md @@ -3,11 +3,19 @@ id: masks title: Masks --- -There are two types of masks available: +## Motion masks -**Motion masks**: Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the debug feed with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. 
Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. Over masking will make it more difficult for objects to be tracked. To see this effect, create a mask, and then watch the video feed with `Motion Boxes` enabled again. +Motion masks are used to prevent unwanted types of motion from triggering detection. Try watching the debug feed with `Motion Boxes` enabled to see what may be regularly detected as motion. For example, you want to mask out your timestamp, the sky, rooftops, etc. Keep in mind that this mask only prevents motion from being detected and does not prevent objects from being detected if object detection was started due to motion in unmasked areas. Motion is also used during object tracking to refine the object detection area in the next frame. Over-masking will make it more difficult for objects to be tracked. To see this effect, create a mask, and then watch the video feed with `Motion Boxes` enabled again. -**Object filter masks**: Object filter masks are used to filter out false positives for a given object type based on location. These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell Frigate that anything in your yard is a false positive. +## Object filter masks + +Object filter masks are used to filter out false positives for a given object type based on location. These should be used to filter any areas where it is not possible for an object of that type to be. The bottom center of the detected object's bounding box is evaluated against the mask. If it is in a masked area, it is assumed to be a false positive. For example, you may want to mask out rooftops, walls, the sky, treetops for people. For cars, masking locations other than the street or your driveway will tell Frigate that anything in your yard is a false positive. + +Object filter masks can be used to filter out stubborn false positives in fixed locations. For example, the base of this tree may be frequently detected as a person. The following image shows an example of an object filter mask (shaded red area) over the location where the bottom center is typically located to filter out person detections in a precise location. + +![object mask](/img/bottom-center-mask.jpg) + +## Using the mask creator To create a poly mask: diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md new file mode 100644 index 000000000..f3d1d7692 --- /dev/null +++ b/docs/docs/configuration/motion_detection.md @@ -0,0 +1,103 @@ +--- +id: motion_detection +title: Motion Detection +--- + +# Tuning Motion Detection + +Frigate uses motion detection as a first-line check to see if there is anything happening in the frame worth checking with object detection. + +Once motion is detected, it tries to group nearby areas of motion together in hopes of identifying a rectangle in the image that will capture the area worth inspecting. These are the red "motion boxes" you see in the debug viewer.
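+The knobs tuned in this guide all live under the `motion` section of the config. As a quick orientation, here is a minimal sketch of those options, using the default values from the reference config later in this document (not values to copy blindly):
+
+```yaml
+motion:
+  # luminance change required for a pixel to count as motion (tuned first)
+  threshold: 30
+  # minimum size of a changed-pixel group that counts as motion (tuned second)
+  contour_area: 10
+  # fraction of the image changing at once that forces a recalibration
+  lightning_threshold: 0.8
+```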
+ +## The Goal + +The default motion settings should work well for the majority of cameras; however, there are cases where tuning motion detection can lead to better results. Each camera has its own environment with different variables that affect motion, which means that the same motion settings will not fit all of your cameras. + +Before tuning motion, it is important to understand the goal. In an optimal configuration, motion from people and cars would be detected, but not grass moving, lighting changes, timestamps, etc. If your motion detection is too sensitive, you will experience higher CPU loads and greater false positives from the increased rate of object detection. If it is not sensitive enough, you will miss events. + +## Create Motion Masks + +First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs](/configuration/masks.md). + +## Prepare For Testing + +The easiest way to tune motion detection is to do it live: have one window / screen open with the Frigate debug view and motion boxes enabled, and another window / screen open for configuring the motion settings. It is recommended to use Home Assistant or MQTT as they offer live configuration of some motion settings, meaning that Frigate does not need to be restarted when values are changed. + +In Home Assistant the `Improve Contrast`, `Contour Area`, and `Threshold` configuration entities are disabled by default but can easily be enabled and used to tune live; otherwise, MQTT can be used. + +## Tuning Motion Detection During The Day + +Now that things are set up, find a time to tune that represents normal circumstances. For example, if you tune your motion on a day that is sunny and windy, you may find later that the motion settings are not sensitive enough on a cloudy and still day. + +:::note + +Remember that motion detection is just used to determine when object detection should be used. You should aim to have motion detection sensitive enough that you won't miss events from objects you want to detect with object detection. The goal is to prevent object detection from running constantly for every small pixel change in the image. Windy days are still going to result in lots of motion being detected. + +::: + +### Threshold + +The threshold value dictates how much of a change in a pixel's luminance is required to be considered motion. + +```yaml +# default threshold value +motion: + # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) + # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. + # The value should be between 1 and 255. + threshold: 30 +``` + +Lower values mean motion detection is more sensitive to changes in color, making it more likely, for example, to detect motion when a brown dog blends in with a brown fence or a person wearing a red shirt blends in with a red car.
If the threshold is too low, however, things like grass blowing in the wind, shadows, etc. may be detected as motion. + +Watching the motion boxes in the debug view, increase the threshold until you only see motion that is visible to the eye. Once this is done, it is important to test and ensure that desired motion is still detected. + +### Contour Area + +```yaml +# default contour_area value +motion: + # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) + # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will + # make motion detection more sensitive to smaller moving objects. + # As a rule of thumb: + # - 10 - high sensitivity + # - 30 - medium sensitivity + # - 50 - low sensitivity + contour_area: 10 +``` + +Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive, meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. Higher values are less sensitive, meaning these things won't be detected as motion, but with the risk that desired motion won't be detected until closer to the camera. + +Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest object you'd expect Frigate to detect. + +### Improve Contrast + +At this point, if motion is working as desired, there is no reason to continue with tuning for the day. If you were unable to find a balance between desired and undesired motion being detected, you can try disabling improve contrast and going back to the threshold and contour area steps. + +## Tuning Motion Detection During The Night + +Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case, then the preferred settings can be written to the config file and left alone. + +However, if the preferred day settings do not work well at night, it is recommended to use Home Assistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection. + +## Tuning For Large Changes In Motion + +```yaml +# default lightning_threshold: +motion: + # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection + # needs to recalibrate. (default: shown below) + # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion. + # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching + # a doorbell camera. + lightning_threshold: 0.8 +``` + +:::tip + +Some cameras, like doorbell cameras, may miss detections when someone walks directly in front of the camera and the `lightning_threshold` causes motion detection to be recalibrated. In this case, it may be desirable to increase the `lightning_threshold` to ensure these events are not missed. + +::: + +Large changes in motion like PTZ moves and camera switches between Color and IR mode should result in no motion detection. This is done via the `lightning_threshold` configuration.
It is defined as the percentage of the image used to detect lightning or other substantial changes where motion detection needs to recalibrate. Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching a doorbell camera. diff --git a/docs/docs/configuration/detectors.md b/docs/docs/configuration/object_detectors.md similarity index 50% rename from docs/docs/configuration/detectors.md rename to docs/docs/configuration/object_detectors.md index fa2fde345..89734efb9 100644 --- a/docs/docs/configuration/detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -1,9 +1,11 @@ --- -id: detectors -title: Detectors +id: object_detectors +title: Object Detectors --- -Frigate provides the following builtin detector types: `cpu`, `edgetpu`, `openvino`, and `tensorrt`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. +# Officially Supported Detectors + +Frigate provides the following built-in detector types: `cpu`, `edgetpu`, `openvino`, `tensorrt`, and `rknn`. By default, Frigate will use a single CPU detector. Other detectors may require additional configuration as described below. When using multiple detectors they will run in dedicated processes, but pull from a common queue of detection requests from across all cameras. ## CPU Detector (not recommended) @@ -35,6 +37,12 @@ The EdgeTPU device can be specified using the `"device"` attribute according to A TensorFlow Lite model is provided in the container at `/edgetpu_model.tflite` and is used by this detector type by default. To provide your own model, bind mount the file into the container and provide the path with `model.path`. +:::tip + +See [common Edge-TPU troubleshooting steps](/troubleshooting/edgetpu) if the EdgeTPU is not detected. + +::: + ### Single USB Coral ```yaml @@ -172,9 +180,7 @@ NVidia GPUs may be used for object detection using the TensorRT libraries. Due t ### Minimum Hardware Support -The TensorRT detector uses the 11.x series of CUDA libraries which have minor version compatibility. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the NVIDIA GPU Compute Capability table linked below. - -> **TODO:** NVidia claims support on compute 3.5 and 3.7, but marks it as deprecated. This would have some, but not all, Kepler GPUs as possibly working. This needs testing before making any claims of support. +The TensorRT detector uses the 12.x series of CUDA libraries, which have minor version compatibility. The minimum driver version on the host system must be `>=530`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the NVIDIA GPU Compute Capability table linked below. To use the TensorRT detector, make sure your host system has the [nvidia-container-runtime](https://docs.docker.com/config/containers/resource_constraints/#access-an-nvidia-gpu) installed to pass through the GPU to the container and the host system has a compatible driver installed for your GPU.
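+For example, a minimal `docker-compose.yml` sketch for passing a single GPU through to the container might look like the following (this assumes the nvidia-container-runtime is already installed; the Hardware Acceleration docs describe the supported methods in full):
+
+```yml
+frigate:
+  deploy:
+    resources:
+      reservations:
+        devices:
+          # reserve one NVIDIA GPU for this container
+          - driver: nvidia
+            count: 1
+            capabilities: [gpu]
+```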
@@ -190,22 +196,17 @@ There are improved capabilities in newer GPU architectures that TensorRT can ben ### Generate Models -The model used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is provided that will build several common models. +Models used for TensorRT must be preprocessed on the same hardware platform that they will run on. This means that each user must run additional setup to generate a model file for the TensorRT library. A script is included that will build several common models. -To generate model files, create a new folder to save the models, download the script, and launch a docker container that will run the script. +The Frigate image will generate model files during startup if the specified model is not found. Processed models are stored in the `/config/model_cache` folder. Typically the `/config` path is mapped to a directory on the host already and the `model_cache` does not need to be mapped separately unless the user wants to store it in a different location on the host. -```bash -mkdir trt-models -wget https://github.com/blakeblackshear/frigate/raw/master/docker/tensorrt_models.sh -chmod +x tensorrt_models.sh -docker run --gpus=all --rm -it -v `pwd`/trt-models:/tensorrt_models -v `pwd`/tensorrt_models.sh:/tensorrt_models.sh nvcr.io/nvidia/tensorrt:22.07-py3 /tensorrt_models.sh -``` +By default, the `yolov7-320` model will be generated, but this can be overridden by specifying the `YOLO_MODELS` environment variable in Docker. One or more models may be listed in a comma-separated format, and each one will be generated. To select no model generation, set the variable to an empty string, `YOLO_MODELS=""`. Models will only be generated if the corresponding `{model}.trt` file is not present in the `model_cache` folder, so you can force a model to be regenerated by deleting it from your Frigate data folder. -The `trt-models` folder can then be mapped into your Frigate container as `trt-models` and the models referenced from the config. +If you have a Jetson device with DLAs (Xavier or Orin), you can generate a model that will run on the DLA by appending `-dla` to your model name, e.g. specify `YOLO_MODELS=yolov7-320-dla`. The model will run on DLA0 (Frigate does not currently support DLA1). DLA-incompatible layers will fall back to running on the GPU. -If your GPU does not support FP16 operations, you can pass the environment variable `-e USE_FP16=False` to the `docker run` command to disable it. +If your GPU does not support FP16 operations, you can pass the environment variable `USE_FP16=False` to disable it. -Specific models can be selected by passing an environment variable to the `docker run` command. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below. +Specific models can be selected by passing an environment variable to the `docker run` command or in your `docker-compose.yml` file. Use the form `-e YOLO_MODELS=yolov4-416,yolov4-tiny-416` to select one or more model names. The models available are shown below.
``` yolov3-288 yolov3-416 yolov3-608 @@ -235,11 +236,28 @@ yolov7x-640 yolov7x-320 +An example `docker-compose.yml` fragment that converts the `yolov4-608` and `yolov7x-640` models for a Pascal card would look something like this: + +```yml +frigate: + environment: + - YOLO_MODELS=yolov4-608,yolov7x-640 + - USE_FP16=false +``` + +If you have multiple GPUs passed through to Frigate, you can specify which one to use for the model conversion. The conversion script will use the first visible GPU; however, in systems with mixed GPU models you may not want to use the default index for object detection. Add the `TRT_MODEL_PREP_DEVICE` environment variable to select a specific GPU. + +```yml +frigate: + environment: + - TRT_MODEL_PREP_DEVICE=0 # Optionally, select which GPU is used for model optimization +``` + ### Configuration Parameters The TensorRT detector can be selected by specifying `tensorrt` as the model type. The GPU will need to be passed through to the docker container using the same methods described in the [Hardware Acceleration](hardware_acceleration.md#nvidia-gpu) section. If you pass through multiple GPUs, you can select which GPU is used for a detector with the `device` configuration parameter. The `device` parameter is an integer value of the GPU index, as shown by `nvidia-smi` within the container. -The TensorRT detector uses `.trt` model files that are located in `/trt-models/` by default. These model file path and dimensions used will depend on which model you have generated. +The TensorRT detector uses `.trt` model files that are located in `/config/model_cache/tensorrt` by default. The model path and dimensions used will depend on which model you have generated. ```yaml detectors: @@ -248,9 +266,134 @@ detectors: device: 0 #This is the default, select the first GPU model: - path: /trt-models/yolov7-tiny-416.trt + path: /config/model_cache/tensorrt/yolov7-320.trt input_tensor: nchw input_pixel_format: rgb - width: 416 - height: 416 + width: 320 + height: 320 ``` + +## Deepstack / CodeProject.AI Server Detector + +The Deepstack / CodeProject.AI Server detector for Frigate allows you to integrate Deepstack and CodeProject.AI object detection capabilities into Frigate. CodeProject.AI and DeepStack are open-source AI platforms that can be run on various devices such as the Raspberry Pi, Nvidia Jetson, and other compatible hardware. It is important to note that the integration is performed over the network, so the inference times may not be as fast as native Frigate detectors, but it still provides an efficient and reliable solution for object detection and tracking. + +### Setup + +To get started with CodeProject.AI, visit their [official website](https://www.codeproject.com/Articles/5322557/CodeProject-AI-Server-AI-the-easy-way) to follow the instructions to download and install the AI server on your preferred device. Detailed setup instructions for CodeProject.AI are outside the scope of the Frigate documentation. + +To integrate CodeProject.AI into Frigate, you'll need to make the following changes to your Frigate configuration file: + +```yaml +detectors: + deepstack: + api_url: http://<your_codeproject_ai_server_ip>:<port>/v1/vision/detection + type: deepstack + api_timeout: 0.1 # seconds +``` + +Replace `<your_codeproject_ai_server_ip>` and `<port>` with the IP address and port of your CodeProject.AI server. + +To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI.
Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly. + +# Community Supported Detectors + +## Rockchip RKNN-Toolkit-Lite2 + +This detector is only available if one of the following Rockchip SoCs is used: + +- RK3588/RK3588S +- RK3568 +- RK3566 +- RK3562 + +These SoCs come with an NPU that will significantly speed up detection. + +### Setup + +Use a Frigate Docker image with the `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file. + +### Configuration + +This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for one). Lines that are required to use the detector are labeled as required; all other lines are optional. + +```yaml +detectors: # required + rknn: # required + type: rknn # required + # core mask for npu + core_mask: 0 + +model: # required + # name of yolov8 model or path to your own .rknn model file + # possible values are: + # - default-yolov8n + # - default-yolov8s + # - default-yolov8m + # - default-yolov8l + # - default-yolov8x + # - /config/model_cache/rknn/your_custom_model.rknn + path: default-yolov8n + # width and height of detection frames + width: 320 + height: 320 + # pixel format of detection frame + # default value is rgb but yolov8 models usually use the bgr format + input_pixel_format: bgr # required + # shape of detection frame + input_tensor: nhwc +``` + +Explanation of the rknn-specific options: + +- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing, only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, where the last digit corresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce inference times, especially when using bigger models (see section below). Examples: + - `core_mask: 0b000` or just `core_mask: 0` lets the NPU decide which cores should be used. Default and recommended value. + - `core_mask: 0b001` use only core0. + - `core_mask: 0b011` use core0 and core1. + - `core_mask: 0b110` use core1 and core2. **This does not work**, since core0 is disabled. + +### Choosing a model + +There are 5 default yolov8 models that differ in size and therefore in how heavily they load the NPU. In ascending order, with the top one being the smallest and least computationally intensive model: + +| Model | Size in MB | +| ------- | ---------- | +| yolov8n | 9 | +| yolov8s | 25 | +| yolov8m | 54 | +| yolov8l | 90 | +| yolov8x | 136 | + +:::tip + +You can get the load of your NPU with the following command: + +```bash +$ cat /sys/kernel/debug/rknpu/load +>> NPU load: Core0: 0%, Core1: 0%, Core2: 0%, +``` + +::: + +- By default, the rknn detector uses the yolov8n model (`model: path: default-yolov8n`). This model comes with the image, so no further steps than those mentioned above are necessary. +- If you want to use a more precise model, you can pass `default-yolov8s`, `default-yolov8m`, `default-yolov8l` or `default-yolov8x` as the `model: path:` option. + - If the model does not exist, it will be automatically downloaded to `/config/model_cache/rknn`.
+ - If your server has no internet connection, you can download the model from [this GitHub repository](https://github.com/MarcA711/rknn-models/releases) using another device and place it in the `config/model_cache/rknn` directory on your system. +- Finally, you can also provide your own model. Note that only yolov8 models are currently supported. Moreover, you will need to convert your model to the rknn format using `rknn-toolkit2` on an x86 machine. Afterwards, you can place your `.rknn` model file in the `config/model_cache/rknn` directory on your system. Then you need to pass the path to your model using the `path` option of your `model` block like this: + +```yaml +model: + path: /config/model_cache/rknn/my-rknn-model.rknn +``` + +:::tip + +When you have a multicore NPU, you can enable all cores to reduce inference times. You should consider activating all cores if you use a larger model like yolov8l. If your NPU has 3 cores (like RK3588/S SoCs), you can enable all 3 cores using: + +```yaml +detectors: + rknn: + type: rknn + core_mask: 0b111 +``` + +::: diff --git a/docs/docs/configuration/object_filters.md b/docs/docs/configuration/object_filters.md new file mode 100644 index 000000000..5ffb205d0 --- /dev/null +++ b/docs/docs/configuration/object_filters.md @@ -0,0 +1,59 @@ +--- +id: object_filters +title: Filters +--- + +There are several types of object filters that can be used to reduce false positive rates. + +## Object Scores + +For object filters in your configuration, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85: + +| Frame | Current Score | Score History | Computed Score | Detected Object | +| ----- | ------------- | --------------------------------- | -------------- | --------------- | +| 1 | 0.7 | 0.0, 0.0, 0.7 | 0.0 | No | +| 2 | 0.55 | 0.0, 0.7, 0.0 | 0.0 | No | +| 3 | 0.85 | 0.7, 0.0, 0.85 | 0.7 | No | +| 4 | 0.90 | 0.7, 0.85, 0.95, 0.90 | 0.875 | Yes | +| 5 | 0.88 | 0.7, 0.85, 0.95, 0.90, 0.88 | 0.88 | Yes | +| 6 | 0.95 | 0.7, 0.85, 0.95, 0.90, 0.88, 0.95 | 0.89 | Yes | + +In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example. + +<!-- show image of snapshot vs event with differing scores --> + +### Minimum Score + +Any detection below `min_score` will be immediately thrown out and never tracked because it is considered a false positive. If `min_score` is too low, then false positives may be detected and tracked, which can confuse the object tracker and may lead to wasted resources. If `min_score` is too high, then lower-scoring true positives like objects that are further away or partially occluded may be thrown out, which can also confuse the tracker and cause valid events to be lost or disjointed. + +### Threshold + +`threshold` is used to determine that the object is a true positive. Once an object is detected with a score >= `threshold`, the object is considered a true positive. If `threshold` is too low, then some higher-scoring false positives may create an event. If `threshold` is too high, then true positive events may be missed due to the object never scoring high enough.
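+As a concrete sketch, the `min_score` and `threshold` values used in the worked example above would be set per object type like this (the numbers are the example's, not recommendations):
+
+```yaml
+objects:
+  filters:
+    person:
+      # single detections below this score are ignored outright
+      min_score: 0.6
+      # computed (median) score required to mark a true positive
+      threshold: 0.85
+```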
+ +## Object Shape + +False positives can also be reduced by filtering a detection based on its shape. + +### Object Area + +`min_area` and `max_area` filter on the area of an object's bounding box in pixels and can be used to reduce false positives that are outside the range of expected sizes. For example, when a leaf is detected as a dog or when a large tree is detected as a person, these can be reduced by adding a `min_area` / `max_area` filter. The recordings timeline can be used to determine the area of the bounding box in that frame by selecting a timeline item and then mousing over or tapping the red box. + +### Object Proportions + +`min_ratio` and `max_ratio` values are compared against a given detected object's width/height ratio (in pixels). If the ratio is outside this range, the object will be ignored as a false positive. This allows objects that are proportionally too short-and-wide (higher ratio) or too tall-and-narrow (smaller ratio) to be ignored. + +:::info + +Conceptually, a ratio of 1 is a square, 0.5 is a "tall skinny" box, and 2 is a "wide flat" box. If `min_ratio` is 1.0, any object that is taller than it is wide will be ignored. Similarly, if `max_ratio` is 1.0, then any object that is wider than it is tall will be ignored. + +::: + +## Other Tools + +### Zones + +[Required zones](/configuration/zones.md) can be a great tool to reduce false positives that may be detected in the sky or other areas that are not of interest. The required zones will only create events for objects that enter the zone. + +### Object Masks + +[Object Filter Masks](/configuration/masks) are a last resort but can be useful when false positives are in roughly the same place but cannot be filtered due to their size or shape. diff --git a/docs/docs/configuration/objects.mdx b/docs/docs/configuration/objects.mdx index c15907339..81e74e2fe 100644 --- a/docs/docs/configuration/objects.mdx +++ b/docs/docs/configuration/objects.mdx @@ -1,15 +1,16 @@ --- id: objects -title: Objects +title: Available Objects --- import labels from "../../../labelmap.txt"; Frigate includes the object models listed below from the Google Coral test data. -Please note: - - `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused. - - `person` is the only tracked object by default. See the [full configuration reference](index.md#full-configuration-reference) for an example of expanding the list of tracked objects. +Please note: + +- `car` is listed twice because `truck` has been renamed to `car` by default. These object types are frequently confused. +- `person` is the only tracked object by default. See the [full configuration reference](index.md#full-configuration-reference) for an example of expanding the list of tracked objects; a minimal sketch also follows below.
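+For instance, a minimal sketch of expanding the tracked object list (each name must match a label below):
+
+```yaml
+objects:
+  # track people, cars, and dogs instead of just the default person
+  track:
+    - person
+    - car
+    - dog
+```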
    {labels.split("\n").map((label) => ( diff --git a/docs/docs/configuration/record.md b/docs/docs/configuration/record.md index f22f37d02..1cf1df559 100644 --- a/docs/docs/configuration/record.md +++ b/docs/docs/configuration/record.md @@ -3,17 +3,90 @@ id: record title: Recording --- -Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH//MM.SS.mp4`. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed. +Recordings can be enabled and are stored at `/media/frigate/recordings`. The folder structure for the recordings is `YYYY-MM-DD/HH//MM.SS.mp4` in **UTC time**. These recordings are written directly from your camera stream without re-encoding. Each camera supports a configurable retention policy in the config. Frigate chooses the largest matching retention value between the recording retention and the event retention when determining if a recording should be removed. New recording segments are written from the camera stream to cache; they are only moved to disk if they match the configured recording retention policy. H265 recordings can be viewed in Chrome 108+, Edge and Safari only. All other browsers require recordings to be encoded with H264. +## Common recording configurations + +### Most conservative: Ensure all video is saved + +For users deploying Frigate in environments where it is important to have contiguous video stored even if there was no detectable motion, the following config will store all video for 3 days. After 3 days, only video containing motion and overlapping with events will be retained until 30 days have passed. + +```yaml +record: + enabled: True + retain: + days: 3 + mode: all + events: + retain: + default: 30 + mode: motion +``` + +### Reduced storage: Only saving video when motion is detected + +In order to reduce storage requirements, you can adjust your config to only retain video where motion was detected. + +```yaml +record: + enabled: True + retain: + days: 3 + mode: motion + events: + retain: + default: 30 + mode: motion +``` + +### Minimum: Events only + +If you only want to retain video that occurs during an event, this config will discard video unless an event is ongoing. + +```yaml +record: + enabled: True + retain: + days: 0 + mode: all + events: + retain: + default: 30 + mode: motion +``` + ## Will Frigate delete old recordings if my storage runs out? As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted. -## What if I don't want 24/7 recordings? +## Configuring Recording Retention + +Frigate supports both continuous and event-based recordings with separate retention modes and retention periods. + +:::tip + +Retention configs support decimals, meaning they can be configured to retain `0.5` days, for example. + +::: + +### Continuous Recording + +The number of days of continuous recordings to retain can be set via the following config. By default, continuous recording is disabled.
+ +```yaml +record: + enabled: True + retain: + days: 1 # <- number of days to keep continuous recordings +``` + +Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean). + +### Event Recording If you only used clips in previous versions with recordings disabled, you can use the following config to get the same behavior. This is also the default behavior when recordings are enabled. @@ -22,34 +95,31 @@ record: enabled: True events: retain: - default: 10 + default: 10 # <- number of days to keep event recordings ``` This configuration will retain recording segments that overlap with events and have active tracked objects for 10 days. Because multiple events can reference the same recording segments, this avoids storing duplicate footage for overlapping events and reduces overall storage needs. -When `retain -> days` is set to `0`, segments will be deleted from the cache if no events are in progress. - -## Can I have "24/7" recordings, but only at certain times? - -Using Frigate UI, HomeAssistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. - **WARNING**: Recordings still must be enabled in the config. If a camera has recordings disabled in the config, enabling via the methods listed above will have no effect. ## What do the different retain modes mean? -Frigate saves from the stream with the `record` role in 10 second segments. These options determine which recording segments are kept for 24/7 recording (but can also affect events). +Frigate saves from the stream with the `record` role in 10-second segments. These options determine which recording segments are kept for continuous recording (but can also affect events). + +Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of continuous recording. -Let's say you have Frigate configured so that your doorbell camera would retain the last **2** days of 24/7 recording. - - With the `all` option all 48 hours of those two days would be kept and viewable. - With the `motion` option the only parts of those 48 hours that would be kept are the segments in which Frigate detected motion. This is the middle ground option that won't keep all 48 hours, but will likely keep all segments of interest along with the potential for some extra segments. - With the `active_objects` option the only segments that would be kept are those where there was a true positive object that was not considered stationary. The same options are available with events. Let's consider a scenario where you drive up and park in your driveway, go inside, then come back out 4 hours later. + - With the `all` option all segments for the duration of the event would be saved for the event. This event would have 4 hours of footage. - With the `motion` option all segments for the duration of the event with motion would be saved. This means any segment where a car drove by in the street, a person walked by, lighting changed, etc. would be saved. - With the `active_objects` option it would only keep segments where the object was active. In this case the only segments that would be saved would be the ones where the car was driving up, you going inside, you coming outside, and the car driving away. Essentially reducing the 4 hours to a minute or two of event footage.
A configuration example of the above retain modes where all `motion` segments are stored for 7 days and `active_objects` are stored for 14 days would be as follows: + ```yaml record: enabled: True @@ -61,11 +131,13 @@ record: default: 14 mode: active_objects ``` + The above configuration example can be added globally or on a per-camera basis. ### Object Specific Retention You can also set a specific retention length for an object type. The configuration example below builds on the one above but also specifies that recordings of dogs only need to be kept for 2 days and recordings of cars should be kept for 7 days. + ```yaml record: enabled: True @@ -80,3 +152,26 @@ record: dog: 2 car: 7 ``` + +## Can I have "continuous" recordings, but only at certain times? + +Using the Frigate UI, Home Assistant, or MQTT, cameras can be automated to only record in certain situations or at certain times. + +## How do I export recordings? + +The export page in the Frigate WebUI allows for exporting real-time clips with a designated start and stop time as well as exporting a time-lapse for a designated start and stop time. These exports can take a while, so it is important to leave the file until it is no longer in progress. + +## Syncing Recordings With Disk + +In some cases recording files may be deleted but Frigate will not know this has happened. Recordings sync can be enabled, which will tell Frigate to check the file system and delete any DB entries for files that don't exist. + +```yaml +record: + sync_recordings: True +``` + +:::warning + +The sync operation uses considerable CPU resources and in most cases is not needed; only enable it when necessary. + +::: diff --git a/docs/docs/configuration/reference.md b/docs/docs/configuration/reference.md new file mode 100644 index 000000000..d500060a7 --- /dev/null +++ b/docs/docs/configuration/reference.md @@ -0,0 +1,633 @@ +--- +id: reference +title: Full Reference Config +--- + +### Full configuration reference: + +:::caution + +It is not recommended to copy this full configuration file. Only specify values that are different from the defaults. Configuration options and default values may change in future versions. + +::: + +```yaml +mqtt: + # Optional: Enable mqtt server (default: shown below) + enabled: True + # Required: host name + host: mqtt.server.com + # Optional: port (default: shown below) + port: 1883 + # Optional: topic prefix (default: shown below) + # NOTE: must be unique if you are running multiple instances + topic_prefix: frigate + # Optional: client id (default: shown below) + # NOTE: must be unique if you are running multiple instances + client_id: frigate + # Optional: user + # NOTE: MQTT user can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'. + # e.g. user: '{FRIGATE_MQTT_USER}' + user: mqtt_user + # Optional: password + # NOTE: MQTT password can be specified with an environment variable or docker secrets that must begin with 'FRIGATE_'. + # e.g.
password: '{FRIGATE_MQTT_PASSWORD}' + password: password + # Optional: tls_ca_certs for enabling TLS using self-signed certs (default: None) + tls_ca_certs: /path/to/ca.crt + # Optional: tls_client_cert and tls_client key in order to use self-signed client + # certificates (default: None) + # NOTE: certificate must not be password-protected + # do not set user and password when using a client certificate + tls_client_cert: /path/to/client.crt + tls_client_key: /path/to/client.key + # Optional: tls_insecure (true/false) for enabling TLS verification of + # the server hostname in the server certificate (default: None) + tls_insecure: false + # Optional: interval in seconds for publishing stats (default: shown below) + stats_interval: 60 + +# Optional: Detectors configuration. Defaults to a single CPU detector +detectors: + # Required: name of the detector + detector_name: + # Required: type of the detector + # Frigate provided types include 'cpu', 'edgetpu', 'openvino' and 'tensorrt' (default: shown below) + # Additional detector types can also be plugged in. + # Detectors may require additional configuration. + # Refer to the Detectors configuration page for more information. + type: cpu + +# Optional: Database configuration +database: + # The path to store the SQLite DB (default: shown below) + path: /config/frigate.db + +# Optional: model modifications +model: + # Optional: path to the model (default: automatic based on detector) + path: /edgetpu_model.tflite + # Optional: path to the labelmap (default: shown below) + labelmap_path: /labelmap.txt + # Required: Object detection model input width (default: shown below) + width: 320 + # Required: Object detection model input height (default: shown below) + height: 320 + # Optional: Object detection model input colorspace + # Valid values are rgb, bgr, or yuv. (default: shown below) + input_pixel_format: rgb + # Optional: Object detection model input tensor format + # Valid values are nhwc or nchw (default: shown below) + input_tensor: nhwc + # Optional: Object detection model type, currently only used with the OpenVINO detector + # Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below) + model_type: ssd + # Optional: Label name modifications. These are merged into the standard labelmap. + labelmap: + 2: vehicle + +# Optional: Audio Events Configuration +# NOTE: Can be overridden at the camera level +audio: + # Optional: Enable audio events (default: shown below) + enabled: False + # Optional: Configure the amount of seconds without detected audio to end the event (default: shown below) + max_not_heard: 30 + # Optional: Configure the min rms volume required to run audio detection (default: shown below) + # As a rule of thumb: + # - 200 - high sensitivity + # - 500 - medium sensitivity + # - 1000 - low sensitivity + min_volume: 500 + # Optional: Types of audio to listen for (default: shown below) + listen: + - bark + - fire_alarm + - scream + - speech + - yell + # Optional: Filters to configure detection. + filters: + # Label that matches label in listen config. 
+ speech: + # Minimum score that triggers an audio event (default: shown below) + threshold: 0.8 + +# Optional: logger verbosity settings +logger: + # Optional: Default log verbosity (default: shown below) + default: info + # Optional: Component specific logger overrides + logs: + frigate.event: debug + +# Optional: set environment variables +environment_vars: + EXAMPLE_VAR: value + +# Optional: birdseye configuration +# NOTE: Can (enabled, mode) be overridden at the camera level +birdseye: + # Optional: Enable birdseye view (default: shown below) + enabled: True + # Optional: Restream birdseye via RTSP (default: shown below) + # NOTE: Enabling this will set birdseye to run 24/7 which may increase CPU usage somewhat. + restream: False + # Optional: Width of the output resolution (default: shown below) + width: 1280 + # Optional: Height of the output resolution (default: shown below) + height: 720 + # Optional: Encoding quality of the mpeg1 feed (default: shown below) + # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. + quality: 8 + # Optional: Mode of the view. Available options are: objects, motion, and continuous + # objects - cameras are included if they have had a tracked object within the last 30 seconds + # motion - cameras are included if motion was detected in the last 30 seconds + # continuous - all cameras are included always + mode: objects + +# Optional: ffmpeg configuration +# More information about presets at https://docs.frigate.video/configuration/ffmpeg_presets +ffmpeg: + # Optional: global ffmpeg args (default: shown below) + global_args: -hide_banner -loglevel warning -threads 2 + # Optional: global hwaccel args (default: shown below) + # NOTE: See hardware acceleration docs for your specific device + hwaccel_args: [] + # Optional: global input args (default: shown below) + input_args: preset-rtsp-generic + # Optional: global output args + output_args: + # Optional: output args for detect streams (default: shown below) + detect: -threads 2 -f rawvideo -pix_fmt yuv420p + # Optional: output args for record streams (default: shown below) + record: preset-record-generic + # Optional: output args for rtmp streams (default: shown below) + rtmp: preset-rtmp-generic + # Optional: Time in seconds to wait before ffmpeg retries connecting to the camera. (default: shown below) + # If set too low, frigate will retry a connection to the camera's stream too frequently, using up the limited streams some cameras can allow at once + # If set too high, then if a ffmpeg crash or camera stream timeout occurs, you could potentially lose up to a maximum of retry_interval second(s) of footage + # NOTE: this can be a useful setting for Wireless / Battery cameras to reduce how much footage is potentially lost during a connection timeout. + retry_interval: 10 + +# Optional: Detect configuration +# NOTE: Can be overridden at the camera level +detect: + # Optional: width of the frame for the input with the detect role (default: use native stream resolution) + width: 1280 + # Optional: height of the frame for the input with the detect role (default: use native stream resolution) + height: 720 + # Optional: desired fps for your camera for the input with the detect role (default: shown below) + # NOTE: Recommended value of 5. Ideally, try and reduce your FPS on the camera. 
+ fps: 5 + # Optional: enables detection for the camera (default: True) + enabled: True + # Optional: Number of consecutive detection hits required for an object to be initialized in the tracker. (default: 1/2 the frame rate) + min_initialized: 2 + # Optional: Number of frames without a detection before Frigate considers an object to be gone. (default: 5x the frame rate) + max_disappeared: 25 + # Optional: Configuration for stationary object tracking + stationary: + # Optional: Frequency for confirming stationary objects (default: same as threshold) + # When set to 1, object detection will run to confirm the object still exists on every frame. + # If set to 10, object detection will run to confirm the object still exists on every 10th frame. + interval: 50 + # Optional: Number of frames without a position change for an object to be considered stationary (default: 10x the frame rate or 10s) + threshold: 50 + # Optional: Define a maximum number of frames for tracking a stationary object (default: not set, track forever) + # This can help with false positives for objects that should only be stationary for a limited amount of time. + # It can also be used to disable stationary object tracking. For example, you may want to set a value for person, but leave + # car at the default. + # WARNING: Setting these values overrides default behavior and disables stationary object tracking. + # There are very few situations where you would want it disabled. It is NOT recommended to + # copy these values from the example config into your config unless you know they are needed. + max_frames: + # Optional: Default for all object types (default: not set, track forever) + default: 3000 + # Optional: Object specific values + objects: + person: 1000 + # Optional: Milliseconds to offset detect annotations by (default: shown below). + # There can often be latency between a recording and the detect process, + # especially when using separate streams for detect and record. + # Use this setting to make the timeline bounding boxes more closely align + # with the recording. The value can be positive or negative. + # TIP: Imagine there is an event clip with a person walking from left to right. + # If the event timeline bounding box is consistently to the left of the person, + # then the value should be decreased. Similarly, if a person is walking from + # left to right and the bounding box is consistently ahead of the person, + # then the value should be increased. + # TIP: This offset is dynamic so you can change the value and it will update existing + # events, which makes it easy to tune. + # WARNING: Fast-moving objects will likely not have the bounding box align. + annotation_offset: 0 + +# Optional: Object configuration +# NOTE: Can be overridden at the camera level objects: + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Optional: mask to prevent all object types from being detected in certain areas (default: no mask) + # Checks based on the bottom center of the bounding box of the object.
+ # NOTE: This mask is COMBINED with the object type specific mask below + mask: 0,0,1000,0,1000,200,0,200 + # Optional: filters to reduce false positives for specific object types + filters: + person: + # Optional: minimum width*height of the bounding box for the detected object (default: 0) + min_area: 5000 + # Optional: maximum width*height of the bounding box for the detected object (default: 24000000) + max_area: 100000 + # Optional: minimum width/height of the bounding box for the detected object (default: 0) + min_ratio: 0.5 + # Optional: maximum width/height of the bounding box for the detected object (default: 24000000) + max_ratio: 2.0 + # Optional: minimum score for the object to initiate tracking (default: shown below) + min_score: 0.5 + # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below) + threshold: 0.7 + # Optional: mask to prevent this object type from being detected in certain areas (default: no mask) + # Checks based on the bottom center of the bounding box of the object + mask: 0,0,1000,0,1000,200,0,200 + +# Optional: Motion configuration +# NOTE: Can be overridden at the camera level motion: + # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below) + # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive. + # The value should be between 1 and 255. + threshold: 30 + # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection + # needs to recalibrate. (default: shown below) + # Increasing this value will make motion detection more likely to consider lightning or IR mode changes as valid motion. + # Decreasing this value will make motion detection more likely to ignore large amounts of motion such as a person approaching + # a doorbell camera. + lightning_threshold: 0.8 + # Optional: Minimum size in pixels in the resized motion image that counts as motion (default: shown below) + # Increasing this value will prevent smaller areas of motion from being detected. Decreasing it will + # make motion detection more sensitive to smaller moving objects. + # As a rule of thumb: + # - 10 - high sensitivity + # - 30 - medium sensitivity + # - 50 - low sensitivity + contour_area: 10 + # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below) + # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster. + # Low values will cause things like moving shadows to be detected as motion for longer. + # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/ + frame_alpha: 0.01 + # Optional: Height of the resized motion frame (default: 100) + # Higher values will result in more granular motion detection at the expense of higher CPU usage. + # Lower values result in less CPU, but small changes may not register as motion. + frame_height: 100 + # Optional: motion mask + # NOTE: see docs for more detailed info on creating masks + mask: 0,900,1080,900,1080,1920,0,1920 + # Optional: improve contrast (default: shown below) + # Enables dynamic contrast improvement. This should help improve night detections at the cost of making motion detection more sensitive + # for daytime.
+ improve_contrast: True + # Optional: Delay when updating camera motion through MQTT from ON -> OFF (default: shown below). + mqtt_off_delay: 30 + +# Optional: Record configuration +# NOTE: Can be overridden at the camera level record: + # Optional: Enable recording (default: shown below) + # WARNING: If recording is disabled in the config, turning it on via + # the UI or MQTT later will have no effect. + enabled: False + # Optional: Number of minutes to wait between cleanup runs (default: shown below) + # This can be used to reduce the frequency of deleting recording segments from disk if you want to minimize I/O + expire_interval: 60 + # Optional: Sync recordings with disk on startup and once a day (default: shown below). + sync_recordings: False + # Optional: Retention settings for recording + retain: + # Optional: Number of days to retain recordings regardless of events (default: shown below) + # NOTE: This should be set to 0 and retention should be defined in events section below + # if you only want to retain recordings of events. + days: 0 + # Optional: Mode for retention. Available options are: all, motion, and active_objects + # all - save all recording segments regardless of activity + # motion - save all recording segments with any detected motion + # active_objects - save all recording segments with active/moving objects + # NOTE: this mode only applies when the days setting above is greater than 0 + mode: all + # Optional: Recording Export Settings + export: + # Optional: Timelapse Output Args (default: shown below). + # NOTE: The default args are set to fit 24 hours of recording into 1 hour playback. + # See https://stackoverflow.com/a/58268695 for more info on how these args work. + # As an example: if you wanted to go from 24 hours to 30 minutes that would be going + # from 86400 seconds to 1800 seconds which would be 1800 / 86400 ≈ 0.02. + # The -r (framerate) dictates how smooth the output video is. + # So the args would be -vf setpts=0.02*PTS -r 30 in that case. + timelapse_args: "-vf setpts=0.04*PTS -r 30" + # Optional: Event recording settings + events: + # Optional: Number of seconds before the event to include (default: shown below) + pre_capture: 5 + # Optional: Number of seconds after the event to include (default: shown below) + post_capture: 5 + # Optional: Objects to save recordings for. (default: all tracked objects) + objects: + - person + # Optional: Restrict recordings to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Retention settings for recordings of events + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Mode for retention. (default: shown below) + # all - save all recording segments for events regardless of activity + # motion - save all recording segments for events with any detected motion + # active_objects - save all recording segments for events with active/moving objects + # + # NOTE: If the retain mode for the camera is more restrictive than the mode configured + # here, the segments will already be gone by the time this mode is applied. + # For example, if the camera retain mode is "motion", the segments without motion are + # never stored, so setting the mode to "all" here won't bring them back.
+ mode: motion + # Optional: Per object retention days + objects: + person: 15 + +# Optional: Configuration for the jpg snapshots written to the clips directory for each event +# NOTE: Can be overridden at the camera level snapshots: + # Optional: Enable writing jpg snapshot to /media/frigate/clips (default: shown below) + enabled: False + # Optional: save a clean PNG copy of the snapshot image (default: shown below) + clean_copy: True + # Optional: print a timestamp on the snapshots (default: shown below) + timestamp: False + # Optional: draw bounding box on the snapshots (default: shown below) + bounding_box: True + # Optional: crop the snapshot (default: shown below) + crop: False + # Optional: height to resize the snapshot to (default: original size) + height: 175 + # Optional: Restrict snapshots to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + # Optional: Camera override for retention settings (default: global values) + retain: + # Required: Default retention days (default: shown below) + default: 10 + # Optional: Per object retention days + objects: + person: 15 + # Optional: quality of the encoded jpeg, 0-100 (default: shown below) + quality: 70 + +# Optional: RTMP configuration +# NOTE: RTMP is deprecated in favor of restream +# NOTE: Can be overridden at the camera level rtmp: + # Optional: Enable the RTMP stream (default: False) + enabled: False + +# Optional: Restream configuration +# Uses https://github.com/AlexxIT/go2rtc (v1.8.4) go2rtc: + +# Optional: jsmpeg stream configuration for WebUI live: + # Optional: Set the name of the stream that should be used for live view + # in the Frigate WebUI. (default: name of camera) + stream_name: camera_name + # Optional: Set the height of the jsmpeg stream. (default: 720) + # This must be less than or equal to the height of the detect stream. Lower resolutions + # reduce bandwidth required for viewing the jsmpeg stream. Width is computed to match known aspect ratio. + height: 720 + # Optional: Set the encode quality of the jsmpeg stream (default: shown below) + # 1 is the highest quality, and 31 is the lowest. Lower quality feeds utilize less CPU resources. + quality: 8 + +# Optional: in-feed timestamp style configuration +# NOTE: Can be overridden at the camera level timestamp_style: + # Optional: Position of the timestamp (default: shown below) + # "tl" (top left), "tr" (top right), "bl" (bottom left), "br" (bottom right) + position: "tl" + # Optional: Format specifier conforming to the Python package "datetime" (default: shown below) + # Additional Examples: + # german: "%d.%m.%Y %H:%M:%S" + format: "%m/%d/%Y %H:%M:%S" + # Optional: Color of font + color: + # All Required when color is specified (default: shown below) + red: 255 + green: 255 + blue: 255 + # Optional: Line thickness of font (default: shown below) + thickness: 2 + # Optional: Effect of lettering (default: shown below) + # None (No effect), + # "solid" (solid background in inverse color of font) + # "shadow" (shadow for font) + effect: None + +# Required cameras: + # Required: name of the camera + back: + # Optional: Enable/Disable the camera (default: shown below). + # If disabled: config is used but no live stream and no capture etc. + # Events/Recordings are still viewable. + enabled: True + # Required: ffmpeg settings for the camera + ffmpeg: + # Required: A list of input streams for the camera. See documentation for more information.
+ inputs: + # Required: the path to the stream + # NOTE: path may include environment variables or docker secrets, which must begin with 'FRIGATE_' and be referenced in {} + - path: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2 + # Required: list of roles for this stream. valid values are: audio, detect, record, rtmp + # NOTICE: In addition to assigning the audio, record, and rtmp roles, + # they must also be enabled in the camera config. + roles: + - audio + - detect + - record + - rtmp + # Optional: stream specific global args (default: inherit) + # global_args: + # Optional: stream specific hwaccel args (default: inherit) + # hwaccel_args: + # Optional: stream specific input args (default: inherit) + # input_args: + # Optional: camera specific global args (default: inherit) + # global_args: + # Optional: camera specific hwaccel args (default: inherit) + # hwaccel_args: + # Optional: camera specific input args (default: inherit) + # input_args: + # Optional: camera specific output args (default: inherit) + # output_args: + + # Optional: timeout for highest scoring image before allowing it + # to be replaced by a newer image. (default: shown below) + best_image_timeout: 60 + + # Optional: URL to visit the camera web UI directly from the system page. Might not be available on every camera. + webui_url: "" + + # Optional: zones for this camera + zones: + # Required: name of the zone + # NOTE: This must be different than any camera names, but can match with another zone on another + # camera. + front_steps: + # Required: List of x,y coordinates to define the polygon of the zone. + # NOTE: Presence in a zone is evaluated only based on the bottom center of the object's bounding box. + coordinates: 545,1077,747,939,788,805 + # Optional: Number of consecutive frames required for object to be considered present in the zone (default: shown below). + inertia: 3 + # Optional: List of objects that can trigger this zone (default: all tracked objects) + objects: + - person + # Optional: Zone level object filters. + # NOTE: The global and camera filters are applied upstream. + filters: + person: + min_area: 5000 + max_area: 100000 + threshold: 0.7 + + # Optional: Configuration for the jpg snapshots published via MQTT + mqtt: + # Optional: Enable publishing snapshot via mqtt for camera (default: shown below) + # NOTE: Only applies to publishing image data to MQTT via 'frigate/<camera_name>/<object_name>/snapshot'. + # All other messages will still be published. + enabled: True + # Optional: print a timestamp on the snapshots (default: shown below) + timestamp: True + # Optional: draw bounding box on the snapshots (default: shown below) + bounding_box: True + # Optional: crop the snapshot (default: shown below) + crop: True + # Optional: height to resize the snapshot to (default: shown below) + height: 270 + # Optional: jpeg encode quality (default: shown below) + quality: 70 + # Optional: Restrict mqtt messages to objects that entered any of the listed zones (default: no required zones) + required_zones: [] + + # Optional: Configuration for how camera is handled in the GUI. + ui: + # Optional: Adjust sort order of cameras in the UI. Larger numbers come later (default: shown below) + # By default the cameras are sorted alphabetically. + order: 0 + # Optional: Whether or not to show the camera in the Frigate UI (default: shown below) + dashboard: True + + # Optional: connect to ONVIF camera + # to enable PTZ controls. + onvif: + # Required: host of the camera being connected to.
+ host: 0.0.0.0 + # Optional: ONVIF port for device (default: shown below). + port: 8000 + # Optional: username for login. + # NOTE: Some devices require admin to access ONVIF. + user: admin + # Optional: password for login. + password: admin + # Optional: PTZ camera object autotracking. Keeps a moving object in + # the center of the frame by automatically moving the PTZ camera. + autotracking: + # Optional: enable/disable object autotracking. (default: shown below) + enabled: False + # Optional: calibrate the camera on startup (default: shown below) + # A calibration will move the PTZ in increments and measure the time it takes to move. + # The results are used to help estimate the position of tracked objects after a camera move. + # Frigate will update your config file automatically after a calibration with + # a "movement_weights" entry for the camera. You should then set calibrate_on_startup to False. + calibrate_on_startup: False + # Optional: the mode to use for zooming in/out on objects during autotracking. (default: shown below) + # Available options are: disabled, absolute, and relative + # disabled - don't zoom in/out on autotracked objects, use pan/tilt only + # absolute - use absolute zooming (supported by most PTZ capable cameras) + # relative - use relative zooming (not supported on all PTZs, but makes concurrent pan/tilt/zoom movements) + zooming: disabled + # Optional: A value to change the behavior of zooming on autotracked objects. (default: shown below) + # A lower value will keep more of the scene in view around a tracked object. + # A higher value will zoom in more on a tracked object, but Frigate may lose tracking more quickly. + # The value should be between 0.1 and 0.75 + zoom_factor: 0.3 + # Optional: list of objects to track from labelmap.txt (default: shown below) + track: + - person + # Required: Begin automatically tracking an object when it enters any of the listed zones. + required_zones: + - zone_name + # Required: Name of ONVIF preset in camera's firmware to return to when tracking is over. (default: shown below) + return_preset: home + # Optional: Seconds to delay before returning to preset. (default: shown below) + timeout: 10 + # Optional: Values generated automatically by a camera calibration. Do not modify these manually. (default: shown below) + movement_weights: [] + + # Optional: Configuration for how to sort the cameras in the Birdseye view. + birdseye: + # Optional: Adjust sort order of cameras in the Birdseye view. Larger numbers come later (default: shown below) + # By default the cameras are sorted alphabetically. + order: 0 + +# Optional +ui: + # Optional: Set the default live mode for cameras in the UI (default: shown below) + live_mode: mse + # Optional: Set a timezone to use in the UI (default: use browser local time) + # timezone: America/Denver + # Optional: Use an experimental recordings / camera view UI (default: shown below) + use_experimental: False + # Optional: Set the time format used. + # Options are browser, 12hour, or 24hour (default: shown below) + time_format: browser + # Optional: Set the date style for a specified length. + # Options are: full, long, medium, short + # Examples: + # short: 2/11/23 + # medium: Feb 11, 2023 + # full: Saturday, February 11, 2023 + # (default: shown below). + date_style: short + # Optional: Set the time style for a specified length. 
+ # Options are: full, long, medium, short + # Examples: + # short: 8:14 PM + # medium: 8:15:22 PM + # full: 8:15:22 PM Mountain Standard Time + # (default: shown below). + time_style: medium + # Optional: Ability to manually override the date / time styling to use strftime format + # https://www.gnu.org/software/libc/manual/html_node/Formatting-Calendar-Time.html + # possible values are shown above (default: not set) + strftime_fmt: "%Y/%m/%d %H:%M" + +# Optional: Telemetry configuration telemetry: + # Optional: Enabled network interfaces for bandwidth stats monitoring (default: empty list, let nethogs search all) + network_interfaces: + - eth + - enp + - eno + - ens + - wl + - lo + # Optional: Configure system stats + stats: + # Enable AMD GPU stats (default: shown below) + amd_gpu_stats: True + # Enable Intel GPU stats (default: shown below) + intel_gpu_stats: True + # Enable network bandwidth stats monitoring for camera ffmpeg processes, go2rtc, and object detectors. (default: shown below) + # NOTE: The container must either be privileged or have cap_net_admin, cap_net_raw capabilities enabled. + network_bandwidth: False + # Optional: Enable the latest version outbound check (default: shown below) + # NOTE: If you use the HomeAssistant integration, disabling this will prevent it from reporting new versions + version_check: True +``` diff --git a/docs/docs/configuration/restream.md b/docs/docs/configuration/restream.md index e7db71634..74baf365e 100644 --- a/docs/docs/configuration/restream.md +++ b/docs/docs/configuration/restream.md @@ -7,17 +7,22 @@ title: Restream Frigate can restream your video feed as an RTSP feed for other applications such as Home Assistant to utilize it at `rtsp://<frigate_host>:8554/<camera_name>`. Port 8554 must be open. [This allows you to use a video feed for detection in Frigate and Home Assistant live view at the same time without having to make two separate connections to the camera](#reduce-connections-to-camera). The video feed is copied from the original video feed directly to avoid re-encoding. This feed does not include any annotation by Frigate. -Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.2.0) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted at the `go2rtc` in the config, see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#configuration) for more advanced configurations and features. +Frigate uses [go2rtc](https://github.com/AlexxIT/go2rtc/tree/v1.8.4) to provide its restream and MSE/WebRTC capabilities. The go2rtc config is hosted under the `go2rtc` section of the config; see [go2rtc docs](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#configuration) for more advanced configurations and features. :::note -You can access the go2rtc webUI at `http://frigate_ip:5000/live/webrtc` which can be helpful to debug as well as provide useful information about your camera streams. +You can access the go2rtc stream info at `http://frigate_ip:5000/api/go2rtc/streams` which can be helpful to debug as well as provide useful information about your camera streams. ::: ### Birdseye Restream -Birdseye RTSP restream can be enabled at `birdseye -> restream` and accessed at `rtsp://<frigate_host>:8554/birdseye`. Enabling the restream will cause birdseye to run 24/7 which may increase CPU usage somewhat. +Birdseye RTSP restream can be accessed at `rtsp://<frigate_host>:8554/birdseye`. Enabling the birdseye restream will cause birdseye to run 24/7 which may increase CPU usage somewhat.
+ +```yaml +birdseye: + restream: true +``` ### Securing Restream With Authentication @@ -28,15 +33,14 @@ go2rtc: rtsp: username: "admin" password: "pass" - streams: - ... + streams: ... ``` **NOTE:** This does not apply to localhost requests; there is no need to provide credentials when using the restream as a source for Frigate cameras. ## RTMP (Deprecated) -In previous Frigate versions RTMP was used for re-streaming. RTMP has disadvantages however including being incompatible with H.265, high bitrates, and certain audio codecs. RTMP is deprecated and it is recommended to move to the new restream role. +In previous Frigate versions RTMP was used for re-streaming. RTMP has disadvantages, however, including being incompatible with H.265, high bitrates, and certain audio codecs. RTMP is deprecated and it is recommended to use the built-in go2rtc config for restreaming. ## Reduce Connections To Camera @@ -49,34 +53,36 @@ One connection is made to the camera for the restream; `detect` and `record` connect to the restream. ```yaml go2rtc: streams: - rtsp_cam: # <- for RTSP streams + name_your_rtsp_cam: # <- for RTSP streams - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio - - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) - http_cam: # <- for other streams + - "ffmpeg:name_your_rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) + name_your_http_cam: # <- for other streams - http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password # <- stream which supports video & aac audio - - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) + - "ffmpeg:name_your_http_cam#audio=opus" # <- copy of the stream which transcodes audio to the missing codec (usually will be opus) cameras: - rtsp_cam: + name_your_rtsp_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_rtsp_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record - detect - http_cam: + - audio # <- only necessary if audio detection is enabled + name_your_http_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_http_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record - detect + - audio # <- only necessary if audio detection is enabled ``` ### With Sub Stream @@ -86,51 +92,53 @@ Two connections are made to the camera. One for the sub stream, one for the rest ```yaml go2rtc: streams: - rtsp_cam: + name_your_rtsp_cam: - rtsp://192.168.1.5:554/live0 # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus - rtsp_cam_sub: + - "ffmpeg:name_your_rtsp_cam#audio=opus" # <- copy of the stream which transcodes audio to opus + name_your_rtsp_cam_sub: + - rtsp://192.168.1.5:554/substream # <- stream which supports video & aac audio.
This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus - http_cam: + - "ffmpeg:name_your_rtsp_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus + name_your_http_cam: - http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_main.bcs&user=user&password=password # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:http_cam#audio=opus" # <- copy of the stream which transcodes audio to opus - http_cam_sub: + - "ffmpeg:name_your_http_cam#audio=opus" # <- copy of the stream which transcodes audio to opus + name_your_http_cam_sub: - http://192.168.50.155/flv?port=1935&app=bcs&stream=channel0_ext.bcs&user=user&password=password # <- stream which supports video & aac audio. This is only supported for rtsp streams, http must use ffmpeg - - "ffmpeg:http_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus + - "ffmpeg:name_your_http_cam_sub#audio=opus" # <- copy of the stream which transcodes audio to opus cameras: - rtsp_cam: + name_your_rtsp_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/rtsp_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_rtsp_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record - - path: rtsp://127.0.0.1:8554/rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream + - path: rtsp://127.0.0.1:8554/name_your_rtsp_cam_sub # <--- the name here must match the name of the camera_sub in restream input_args: preset-rtsp-restream roles: + - audio # <- only necessary if audio detection is enabled - detect - http_cam: + name_your_http_cam: ffmpeg: output_args: record: preset-record-generic-audio-copy inputs: - - path: rtsp://127.0.0.1:8554/http_cam # <--- the name here must match the name of the camera in restream + - path: rtsp://127.0.0.1:8554/name_your_http_cam # <--- the name here must match the name of the camera in restream input_args: preset-rtsp-restream roles: - record - - path: rtsp://127.0.0.1:8554/http_cam_sub # <--- the name here must match the name of the camera_sub in restream + - path: rtsp://127.0.0.1:8554/name_your_http_cam_sub # <--- the name here must match the name of the camera_sub in restream input_args: preset-rtsp-restream roles: + - audio # <- only necessary if audio detection is enabled - detect ``` ## Advanced Restream Configurations -The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: +The [exec](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-exec) source in go2rtc can be used for custom ffmpeg commands. An example is below: NOTE: The output will need to be passed with two curly braces `{{output}}` diff --git a/docs/docs/configuration/stationary_objects.md b/docs/docs/configuration/stationary_objects.md index deeee9ffd..464a48c24 100644 --- a/docs/docs/configuration/stationary_objects.md +++ b/docs/docs/configuration/stationary_objects.md @@ -1,6 +1,6 @@ # Stationary Objects -An object is considered stationary when it is being tracked and has been in a very similar position for a certain number of frames. 
This number is defined in the configuration under `detect -> stationary -> threshold`, and is 10x the frame rate (or 10 seconds) by default. Once an object is considered stationary, it will remain stationary until motion occurs near the object at which point object detection will start running again. If the object changes location, it will be considered active. +An object is considered stationary when it is being tracked and has been in a very similar position for a certain number of frames. This number is defined in the configuration under `detect -> stationary -> threshold`, and is 10x the frame rate (or 10 seconds) by default. Once an object is considered stationary, it will remain stationary until motion occurs within the object, at which point object detection will start running again. If the object changes location, it will be considered active. ## Why does it matter if an object is stationary? @@ -13,16 +13,44 @@ The default config is: ```yaml detect: stationary: - interval: 0 + interval: 50 threshold: 50 ``` -`interval` is defined as the frequency for running detection on stationary objects. This means that by default once an object is considered stationary, detection will not be run on it until motion is detected. With `interval > 0`, every nth frames detection will be run to make sure the object is still there. +`interval` is defined as the frequency for running detection on stationary objects. This means that by default once an object is considered stationary, detection will not be run on it until motion is detected or until the interval (every 50th frame by default). With `interval >= 1`, detection will be run on every nth frame to make sure the object is still there. NOTE: There is no way to disable stationary object tracking with this value. `threshold` is the number of frames an object needs to remain relatively still before it is considered stationary. -## Avoiding stationary objects -In some cases, like a driveway, you may prefer to only have an event when a car is coming & going vs a constant event of it stationary in the driveway. [This docs sections](../guides/stationary_objects.md) explains how to approach that scenario. +## Handling stationary objects +In some cases, like a driveway, you may prefer to only have an event when a car is coming & going vs a constant event of it sitting stationary in the driveway. You can reference [this guide](../guides/parked_cars.md) for recommended approaches. + +## Why does Frigate track stationary objects? + +Frigate didn't always track stationary objects. In fact, it didn't even track objects at all initially. + +Let's look at an example use case: I want to record any cars that enter my driveway. + +One might simply think "Why not just run object detection any time there is motion around the driveway area and notify if the bounding box is in that zone?" + +With that approach, what video is related to the car that entered the driveway? Did it come from the left or right? Was it parked across the street for an hour before turning into the driveway? One approach is to just record 24/7 or for motion (on any changed pixels) and not attempt to do that at all. This is what most other NVRs do. Just don't even try to identify a start and end for that object since it's hard and you will be wrong some portion of the time. + +Couldn't you just look at when motion stopped and started? Motion for a video feed is nothing more than looking for pixels that are different than they were in previous frames.
If the car entered the driveway while someone was mowing the grass, how would you know which motion was for the car and which was for the person when they mow along the driveway or street? What if another car was driving the other direction on the street? Or what if it's a windy day and the bush by your mailbox is blowing around? + +In order to do it more accurately, you need to identify objects and track them with a unique id. In each subsequent frame, everything has moved a little and you need to determine which bounding boxes go with each object from the previous frame. + +Tracking objects across frames is a challenging problem. Especially if you want to do it in real time. There are entire competitions for research algorithms to see which of them can do it the most accurately. Zero of them are accurate 100% of the time. Even the ones that can't do it in realtime. There is always an error rate in the algorithm. + +Now consider that the car is driving down a street that has other cars parked along it. It will drive behind some of these cars and in front of others. There may even be a car driving the opposite direction. + +Let's assume for now that we are NOT already tracking two parked cars on the street or the car parked in the driveway, i.e., there is no stationary object tracking. + +As the car you are tracking approaches an area with 2 cars parked, the headlights reflect off the parked cars and the car parked in your driveway. The pixel values are different in that area, so there is motion detected. Object detection runs and identifies the remaining 3 cars. In the previous frame, you had a single bounding box from the car you are tracking. Now you have 4. The original object, the 2 cars on the street and the one in your driveway. + +Now you have to determine which of the bounding boxes in this frame should be matched to the tracking id from the previous frame where you only had one. Remember, you have never seen these additional 3 cars before, so you know nothing about them. On top of that, the bounding box for the car you are tracking has now moved to a new location, so which of the 4 belongs to the car you were originally tracking? The algorithms here are fairly good. They use a Kalman filter to predict the next location of an object using the historical bounding boxes and the bounding box closest to the predicted location is linked. It's right sometimes, but the error rate is going to be high when there are 4 possible bounding boxes. + +Now let's assume that those other 3 cars were already being tracked as stationary objects, so the car driving down the street is a new 4th car. The object tracker knows we have had 3 cars and we now have 4. As the new car approaches the parked cars, the bounding boxes for all 4 cars are predicted based on the previous frames. The predicted boxes for the parked cars are pretty much a 100% overlap with the bounding boxes in the new frame. The parked cars are slam-dunk matches to the tracking ids they had before and the only one left is the remaining bounding box which gets assigned to the new car. This results in a much lower error rate. Not perfect, but better. + +The most difficult scenario that causes IDs to be assigned incorrectly is when an object completely occludes another object. When a car drives in front of another car and it's no longer visible, a bounding box disappears and it's a bit of a toss-up when assigning the id since it's difficult to know which one is in front of the other.
This happens for cars passing in front of other cars fairly often. It's something that we want to improve in the future. diff --git a/docs/docs/configuration/zones.md b/docs/docs/configuration/zones.md index f8e463605..40297b048 100644 --- a/docs/docs/configuration/zones.md +++ b/docs/docs/configuration/zones.md @@ -5,6 +5,9 @@ title: Zones Zones allow you to define a specific area of the frame and apply additional filters for object types so you can determine whether or not an object is within a particular area. Presence in a zone is evaluated based on the bottom center of the bounding box for the object. It does not matter how much of the bounding box overlaps with the zone. +For example, the cat in this image is currently in Zone 1, but **not** Zone 2. +![bottom center](/img/bottom-center.jpg) + Zones cannot have the same name as a camera. If desired, a single zone can include multiple cameras if you have multiple cameras covering the same area by configuring zones with the same name for each camera. During testing, enable the Zones option for the debug feed so you can adjust as needed. The zone line will increase in thickness when any object enters the zone. @@ -56,3 +59,27 @@ camera: ``` Only car objects can trigger the `front_yard_street` zone and only person can trigger the `entire_yard`. You will get events for person objects that enter anywhere in the yard, and events for cars only if they enter the street. + +### Zone Inertia + +Sometimes an object's bounding box may be slightly incorrect and the bottom center of the bounding box is inside the zone while the object is not actually in the zone. Zone inertia helps guard against this by requiring an object's bounding box to be within the zone for multiple consecutive frames. This value can be configured: + +```yaml +camera: + zones: + front_yard: + inertia: 3 + objects: + - person +``` + +There may also be cases where you expect an object to quickly enter and exit a zone, like when a car is pulling into the driveway, and you may want to have the object be considered present in the zone immediately: + +```yaml +camera: + zones: + driveway_entrance: + inertia: 1 + objects: + - car +``` diff --git a/docs/docs/development/contributing-boards.md b/docs/docs/development/contributing-boards.md new file mode 100644 index 000000000..49a65722d --- /dev/null +++ b/docs/docs/development/contributing-boards.md @@ -0,0 +1,94 @@ +--- +id: contributing-boards +title: Community Supported Boards +--- + +## About Community Supported Boards + +There are many SBCs (single-board computers) that have a passionate community behind them, Jetson Nano for example. These SBCs often have dedicated hardware that can greatly accelerate Frigate's AI and video workloads, but this hardware requires very specific frameworks for interfacing with it. + +This means it would be very difficult for Frigate's maintainers to support these different boards, especially given the relatively low userbase. + +The community supported boards framework allows a user in the community to be the codeowner to add support for an SBC or other detector by providing the code, maintenance, and user support. + +## Getting Started + +1. Follow the steps from [the main contributing docs](/development/contributing.md). +2. Create a new build type under `docker/`. +3. Get the build working as expected; all board-specific changes should be done inside the board-specific Dockerfile. + +## Required Structure + +Each board will have different build requirements, run on different architectures, etc.
However, there is a set of files that all boards will need. + +### Bake File .hcl + +The `board.hcl` file is what allows the community board build to be built using the main build as a cache. This enables a clean base and quicker build times. For more information on the format and options available in the Bake file, [see the official Buildx Bake docs](https://docs.docker.com/build/bake/reference/). + +### Board Make File + +The `board.mk` file is what allows automated and configurable Make targets to be included in the main Make file. Below is the general format for this file: + +```Makefile +BOARDS += board # Replace `board` with the board suffix ex: rpi + +local-rpi: version + docker buildx bake --load --file=docker/board/board.hcl --set board.tags=frigate:latest-board bake-target # Replace `board` with the board suffix ex: rpi. Bake target is the target in the board.hcl file ex: board + +build-rpi: version + docker buildx bake --file=docker/board/board.hcl --set board.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-board bake-target # Replace `board` with the board suffix ex: rpi. Bake target is the target in the board.hcl file ex: board + +push-rpi: build-rpi + docker buildx bake --push --file=docker/board/board.hcl --set board.tags=$(IMAGE_REPO):${GITHUB_REF_NAME}-$(COMMIT_HASH)-board bake-target # Replace `board` with the board suffix ex: rpi. Bake target is the target in the board.hcl file ex: board +``` + +### Dockerfile + +The `Dockerfile` is what orchestrates the build. This will vary greatly depending on the board, but some parts are required for things to work. Below are the required parts of the Dockerfile: + +```Dockerfile +# syntax=docker/dockerfile:1.4 + +# https://askubuntu.com/questions/972516/debian-frontend-environment-variable +ARG DEBIAN_FRONTEND=noninteractive + +# All board-specific work should be done with `deps` as the base +FROM deps AS board-deps + +# do stuff specific +# to the board + +# set workdir +WORKDIR /opt/frigate/ + +# copies base files from the main frigate build +COPY --from=rootfs / / +``` + +## Other Required Changes + +### CI/CD + +The images for each board will be built for each Frigate release. This is done in the `.github/workflows/ci.yml` file. The board build workflow will need to be added here. + +```yml + - name: Build and push board build + uses: docker/bake-action@v3 + with: + push: true + targets: board # this is the target in the board.hcl file + files: docker/board/board.hcl # this should be updated with the actual board type + # the tags should be updated with the actual board types as well + # the community board builds should never push to cache, but they can pull from cache + set: | + board.tags=ghcr.io/${{ steps.lowercaseRepo.outputs.lowercase }}:${{ github.ref_name }}-${{ env.SHORT_SHA }}-board + *.cache-from=type=gha +``` + +### Code Owner File + +The `CODEOWNERS` file should be updated to include `docker/board` along with `@user` for each user that is a code owner of this board, as shown in the example at the end of this page. + +## Docs + +At a minimum, the `installation`, `object_detectors`, `hardware_acceleration`, and `ffmpeg-presets` docs should be updated (if applicable) to reflect the configuration of this community board.
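+For reference, here is the `CODEOWNERS` example referred to above. It is a hypothetical entry, assuming the board lives at `docker/board` and is maintained by `@user`; both names are placeholders to be replaced with the real directory and GitHub usernames:
+
+```
+# Hypothetical community board entry - use the real path and usernames
+/docker/board/ @user
+```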
diff --git a/docs/docs/development/contributing.md b/docs/docs/development/contributing.md index 0955af56a..bc08afbc9 100644 --- a/docs/docs/development/contributing.md +++ b/docs/docs/development/contributing.md @@ -1,6 +1,6 @@ --- id: contributing -title: Contributing +title: Contributing To The Main Code Base --- ## Getting the source @@ -68,10 +68,6 @@ cameras: input_args: -re -stream_loop -1 -fflags +genpts roles: - detect - detect: - height: 1080 - width: 1920 - fps: 5 ``` These input args tell ffmpeg to read the mp4 file in an infinite loop. You can use any valid ffmpeg input here. @@ -99,18 +95,24 @@ The following commands are used inside the container to ensure hardware accelera **Raspberry Pi (64bit)** -This should show <50% CPU in top, and ~80% CPU without `-c:v h264_v4l2m2m`. +This should show less than 50% CPU in top, and ~80% CPU without `-c:v h264_v4l2m2m`. ```shell ffmpeg -c:v h264_v4l2m2m -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null ``` -**NVIDIA** +**NVIDIA GPU** ```shell ffmpeg -c:v h264_cuvid -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null ``` +**NVIDIA Jetson** + +```shell +ffmpeg -c:v h264_nvmpi -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/incoming/720p60.mp4 -f rawvideo -pix_fmt yuv420p pipe: > /dev/null +``` + **VAAPI** ```shell @@ -129,7 +131,7 @@ ffmpeg -c:v h264_qsv -re -stream_loop -1 -i https://streams.videolan.org/ffmpeg/ - [Frigate source code](#frigate-core-web-and-docs) - All [core](#core) prerequisites _or_ another running Frigate instance locally available -- Node.js 16 +- Node.js 20 ### Making changes #### 2. Install dependencies ```shell cd web && npm install ``` #### 3. Run the development server ```shell cd web && npm run dev ``` -#### 3a. Run the development server against a non-local instance - -To run the development server against a non-local instance, you will need to modify the API_HOST default return in `web/src/env.js`. - #### 4. Making changes The Web UI is built using [Vite](https://vitejs.dev/), [Preact](https://preactjs.com), and [Tailwind CSS](https://tailwindcss.com). @@ -185,7 +183,7 @@ npm run test ### Prerequisites - [Frigate source code](#frigate-core-web-and-docs) -- Node.js 16 +- Node.js 20 ### Making changes #### 1. Installation ```shell npm install ``` #### 2. Local Development ```shell npm run start ``` This command starts a local development server and opens up a browser window. Most changes are reflected live without having to restart the server. -The docs are built using [Docusaurus v2](https://v2.docusaurus.io). Please refer to the Docusaurus docs for more information on how to modify Frigate's documentation. +The docs are built using [Docusaurus v3](https://docusaurus.io). Please refer to the Docusaurus docs for more information on how to modify Frigate's documentation. #### 3. Build (optional) diff --git a/docs/docs/frigate/glossary.md b/docs/docs/frigate/glossary.md new file mode 100644 index 000000000..5e31f4485 --- /dev/null +++ b/docs/docs/frigate/glossary.md @@ -0,0 +1,58 @@ +--- +id: glossary +title: Glossary +--- + +The glossary explains terms commonly used in Frigate's documentation. + +## Bounding Box + +A box returned from the object detection model that outlines an object in the frame. These have multiple colors depending on object type in the debug live view. + +## Event + +The time period starting when a tracked object entered the frame and ending when it left the frame, including any time that the object remained still.
Events are saved when the object is considered a [true positive](#threshold) and meets the requirements for a snapshot or recording to be saved. + +## False Positive + +An incorrect detection of an object type. For example, a dog being detected as a person, a chair being detected as a dog, etc. A person being detected in an area you want to ignore is not a false positive. + +## Mask + +There are two types of masks in Frigate. [See the mask docs for more info](/configuration/masks) + +### Motion Mask + +Motion masks prevent detection of [motion](#motion) in masked areas from triggering Frigate to run object detection, but do not prevent objects from being detected if object detection runs due to motion in nearby areas. For example: camera timestamps, skies, the tops of trees, etc. + +### Object Mask + +Object filter masks drop any bounding boxes where the bottom center (overlap doesn't matter) is in the masked area. It forces them to be considered a [false positive](#false-positive) so that they are ignored. + +## Min Score + +The lowest score that an object can be detected with during tracking; any detection with a lower score will be assumed to be a false positive. + +## Motion + +When pixels in the current camera frame are different than previous frames. When many nearby pixels are different in the current frame they are grouped together and indicated with a red motion box in the live debug view. [See the motion detection docs for more info](/configuration/motion_detection) + +## Region + +A portion of the camera frame that is sent to object detection; regions can be sent due to motion, active objects, or occasionally for stationary objects. These are represented by green boxes in the debug live view. + +## Snapshot Score + +The score shown in a snapshot is the score of that object at that specific moment in time. + +## Threshold + +The threshold is the median score that an object must reach in order to be considered a true positive. + +## Top Score + +The top score for an object is the highest median score for that object. + +## Zone + +Zones are areas of interest; they can be used for notifications and for limiting the areas where Frigate will create an [event](#event). [See the zone docs for more info](/configuration/zones) \ No newline at end of file diff --git a/docs/docs/frigate/hardware.md b/docs/docs/frigate/hardware.md index fea92a422..e0285f408 100644 --- a/docs/docs/frigate/hardware.md +++ b/docs/docs/frigate/hardware.md @@ -9,7 +9,7 @@ Cameras that output H.264 video and AAC audio will offer the most compatibility I recommend Dahua, Hikvision, and Amcrest in that order. Dahua edges out Hikvision because they are easier to find and order, not because they are better cameras. I personally use Dahua cameras because they are easier to purchase directly. In my experience Dahua and Hikvision both have multiple streams with configurable resolutions and frame rates and rock solid streams. They also both have models with large sensors well known for excellent image quality at night. Not all the models are equal. Larger sensors are better than higher resolutions, especially at night. Amcrest is the fallback recommendation because they are rebranded Dahuas. They are rebranding the lower end models with smaller sensors or fewer configuration options. -Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-410520-possibly-others).
Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data. +Many users have reported various issues with Reolink cameras, so I do not recommend them. If you are using Reolink, I suggest the [Reolink specific configuration](../configuration/camera_specific.md#reolink-cameras). Wifi cameras are also not recommended. Their streams are less reliable and cause connection loss and/or lost video data. Here are some of the cameras I recommend: @@ -21,13 +21,12 @@ I may earn a small commission for my endorsement, recommendation, testimonial, o ## Server -My current favorite is the Minisforum GK41 because of the dual NICs that allow you to setup a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with a M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website. +My current favorite is the Beelink EQ12 because of the efficient N100 CPU and dual NICs that allow you to set up a dedicated private network for your cameras where they can be blocked from accessing the internet. There are many used workstation options on eBay that work very well. Anything with an Intel CPU and capable of running Debian should work fine. As a bonus, you may want to look for devices with an M.2 or PCIe express slot that is compatible with the Google Coral. I may earn a small commission for my endorsement, recommendation, testimonial, or link to any products or services from this website. -| Name | Coral Inference Speed | Coral Compatibility | Notes | -| ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | -| Odyssey X86 Blue J4125 (Amazon) (SeeedStudio) | 9-10ms | M.2 B+M, USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | -| Minisforum GK41 (Amazon) | 9-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | -| Intel NUC (Amazon) | 8-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts. | +| Name | Coral Inference Speed | Coral Compatibility | Notes | +| ------------------------------------------------------------------------------------------------------------- | --------------------- | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------- | +| Beelink EQ12 (Amazon) | 5-10ms | USB | Dual gigabit NICs for easy isolated camera network. Easily handles several 1080p cameras. | +| Intel NUC (Amazon) | 5-10ms | USB | Overkill for most, but great performance. Can handle many cameras at 5fps depending on typical amounts of motion. Requires extra parts.
| ## Detectors @@ -48,9 +47,9 @@ A single Coral can handle many cameras and will be sufficient for the majority o The OpenVINO detector type is able to run on: - 6th Gen Intel Platforms and newer that have an iGPU -- x86 & Arm32/64 hosts with VPU Hardware (ex: Intel NCS2) +- x86 & Arm64 hosts with VPU Hardware (ex: Intel NCS2) -More information is available [in the detector docs](/configuration/detectors#openvino-detector) +More information is available [in the detector docs](/configuration/object_detectors#openvino-detector) Inference speeds vary greatly depending on the CPU, GPU, or VPU used; some known examples are below: @@ -70,9 +69,9 @@ Inference speeds vary greatly depending on the CPU, GPU, or VPU used, some known | Intel i5 1135G7 | 10 - 15 ms | | | Intel i5 12600K | ~ 15 ms | Inference speeds on CPU were ~ 35 ms | -### TensorRT +### TensorRT - Nvidia GPU -The TensortRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 11.x series of CUDA libraries. The minimum driver version on the host system must be `>=450.80.02`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer, check the [TensorRT docs for more info](/configuration/detectors#nvidia-tensorrt-detector). +The TensorRT detector is able to run on x86 hosts that have an Nvidia GPU which supports the 12.x series of CUDA libraries. The minimum driver version on the host system must be `>=525.60.13`. Also the GPU must support a Compute Capability of `5.0` or greater. This generally correlates to a Maxwell-era GPU or newer; check the [TensorRT docs for more info](/configuration/object_detectors#nvidia-tensorrt-detector). Inference speeds will vary greatly depending on the GPU and the model used. `tiny` variants are faster than the equivalent non-tiny model; some known examples are below: @@ -87,6 +86,25 @@ Inference speeds will vary greatly depending on the GPU and the model used. | Quadro P400 2GB | 20 - 25 ms | | Quadro P2000 | ~ 12 ms | +### Community Supported + +#### Nvidia Jetson + +Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powerful Jetson Orin AGX. It will [make use of the Jetson's hardware media engine](/configuration/hardware_acceleration#nvidia-jetson-orin-agx-orin-nx-orin-nano-xavier-agx-xavier-nx-tx2-tx1-nano) when configured with the [appropriate presets](/configuration/ffmpeg_presets#hwaccel-presets), and will make use of the Jetson's GPU and DLA for object detection when configured with the [TensorRT detector](/configuration/object_detectors#nvidia-tensorrt-detector). + +Inference speed will vary depending on the YOLO model, Jetson platform and Jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time. + +#### Rockchip SoC + +Frigate supports SBCs with the following Rockchip SoCs: + +- RK3566/RK3568 +- RK3588/RK3588S +- RV1103/RV1106 +- RK3562 + +Using the yolov8n model and an Orange Pi 5 Plus with RK3588 SoC, inference speeds vary between 20 - 25 ms. A minimal example detector configuration is sketched at the end of this page. + ## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version) This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.
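+As a concrete point of reference for the detectors discussed above, a single USB Coral is typically configured with a small `detectors` block like the following minimal sketch (see the object detectors docs for the full set of options):
+
+```yaml
+detectors:
+  coral:
+    type: edgetpu
+    device: usb
+```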
diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md
index a15bd8d6c..fcdaa68ba 100644
--- a/docs/docs/frigate/installation.md
+++ b/docs/docs/frigate/installation.md
@@ -21,12 +21,12 @@ Windows is not officially supported, but some users have had success getting it

Frigate uses the following locations for read/write operations in the container. Docker volume mappings can be used to map these to any location on your host machine.

+- `/config`: Used to store the Frigate config file and sqlite database. You will also see a few files alongside the database file while Frigate is running.
- `/media/frigate/clips`: Used for snapshot storage. In the future, it will likely be renamed from `clips` to `snapshots`. The file structure here cannot be modified and isn't intended to be browsed or managed manually.
- `/media/frigate/recordings`: Internal system storage for recording segments. The file structure here cannot be modified and isn't intended to be browsed or managed manually.
-- `/media/frigate/frigate.db`: Default location for the sqlite database. You will also see several files alongside this file while Frigate is running. If moving the database location (often needed when using a network drive at `/media/frigate`), it is recommended to mount a volume with docker at `/db` and change the storage location of the database to `/db/frigate.db` in the config file.
-- `/tmp/cache`: Cache location for recording segments. Initial recordings are written here before being checked and converted to mp4 and moved to the recordings folder.
-- `/dev/shm`: It is not recommended to modify this directory or map it with docker. This is the location for raw decoded frames in shared memory and it's size is impacted by the `shm-size` calculations below.
-- `/config/config.yml`: Default location of the config file.
+- `/media/frigate/exports`: Storage for clips and timelapses that have been exported via the WebUI or API.
+- `/tmp/cache`: Cache location for recording segments. Initial recordings are written here before being checked and converted to mp4 and moved to the recordings folder. Segments generated via the `clip.mp4` endpoints are also concatenated and processed here. It is recommended to use a [`tmpfs`](https://docs.docker.com/storage/tmpfs/) mount for this.
+- `/dev/shm`: Internal cache for raw decoded frames in shared memory. It is not recommended to modify this directory or map it with docker. The minimum size is impacted by the `shm-size` calculations below.

#### Common docker compose storage configurations
@@ -38,7 +38,7 @@ services:
  frigate:
    ...
    volumes:
-      - /path/to/your/config.yml:/config/config.yml
+      - /path/to/your/config:/config
      - /path/to/your/storage:/media/frigate
      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
        target: /tmp/cache
@@ -47,36 +47,17 @@ services:
    ...
```

-Writing to a network drive with database on a local drive:
+:::caution

-```yaml
-version: "3.9"
-services:
-  frigate:
-    ...
-    volumes:
-      - /path/to/your/config.yml:/config/config.yml
-      - /path/to/network/storage:/media/frigate
-      - /path/to/local/disk:/db
-      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
-        target: /tmp/cache
-      tmpfs:
-        size: 1000000000
-    ...
-```
+Users of the Snapcraft build of Docker cannot use storage locations outside their $HOME folder.

-frigate.yml
-
-```yaml
-database:
-  path: /db/frigate.db
-```
+:::

### Calculating required shm-size

Frigate utilizes shared memory to store frames during processing.
The default `shm-size` provided by Docker is **64MB**.

-The default shm size of **64MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size.
+The default shm size of **64MB** is fine for setups with **2 cameras** detecting at **720p**. If Frigate is exiting with "Bus error" messages, it is likely because you have too many high resolution cameras and you need to specify a higher shm size, using [`--shm-size`](https://docs.docker.com/engine/reference/run/#runtime-constraints-on-resources) (or [`service.shm_size`](https://docs.docker.com/compose/compose-file/compose-file-v2/#shm_size) in docker-compose).

The Frigate container also stores logs in shm, which can take up to **30MB**, so make sure to take this into account in your math as well.
@@ -97,7 +78,6 @@ $ python -c 'print("{:.2f}MB".format(((1280 * 720 * 1.5 * 9 + 270480) / 1048576)

The shm size cannot be set per container for Home Assistant add-ons. However, this is probably not required since by default Home Assistant Supervisor allocates `/dev/shm` with half the size of your total memory. If your machine has 8GB of memory, chances are that Frigate will have access to up to 4GB without any additional configuration.

-
### Raspberry Pi 3/4

By default, the Raspberry Pi limits the amount of memory available to the GPU. In order to use ffmpeg hardware acceleration, you must increase the available memory by setting `gpu_mem` to the maximum recommended value in `config.txt` as described in the [official docs](https://www.raspberrypi.org/documentation/computers/config_txt.html#memory-options).
@@ -106,7 +86,7 @@ Additionally, the USB Coral draws a considerable amount of power. If using any o

## Docker

-Running in Docker with compose is the recommended install method:
+Running in Docker with compose is the recommended install method.

```yaml
version: "3.9"
@@ -123,7 +103,7 @@ services:
      - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
    volumes:
      - /etc/localtime:/etc/localtime:ro
-      - /path/to/your/config.yml:/config/config.yml
+      - /path/to/your/config:/config
      - /path/to/your/storage:/media/frigate
      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
        target: /tmp/cache
@@ -149,7 +129,7 @@ docker run -d \
  --device /dev/dri/renderD128 \
  --shm-size=64m \
  -v /path/to/your/storage:/media/frigate \
-  -v /path/to/your/config.yml:/config/config.yml \
+  -v /path/to/your/config:/config \
  -v /etc/localtime:/etc/localtime:ro \
  -e FRIGATE_RTSP_PASSWORD='password' \
  -p 5000:5000 \
@@ -159,6 +139,18 @@ docker run -d \
  ghcr.io/blakeblackshear/frigate:stable
```

+The official docker image tags for the current stable version are:
+
+- `stable` - Standard Frigate build for amd64 & RPi Optimized Frigate build for arm64
+- `stable-standard-arm64` - Standard Frigate build for arm64
+- `stable-tensorrt` - Frigate build specific for amd64 devices running an nvidia GPU
+
+The community supported docker image tags for the current stable version are:
+
+- `stable-tensorrt-jp5` - Frigate build optimized for nvidia Jetson devices running Jetpack 5
+- `stable-tensorrt-jp4` - Frigate build optimized for nvidia Jetson devices running Jetpack 4.6
+- `stable-rk` - Frigate build for SBCs with Rockchip SoC
+
## Home Assistant Addon

:::caution
@@ -166,6 +158,7 @@ docker run -d \

As of HomeAssistant OS 10.2 and Core 2023.6 defining separate network storage for media is supported.
There are important limitations in Home Assistant Operating System to be aware of:
+
- Separate local storage for media is not yet supported by Home Assistant
- AMD GPUs are not supported because HA OS does not include the mesa driver.
- Nvidia GPUs are not supported because addons do not support the nvidia runtime.
@@ -184,7 +177,7 @@ HassOS users can install via the addon repository.
2. Add https://github.com/blakeblackshear/frigate-hass-addons
3. Install your desired Frigate NVR Addon and navigate to it's page
4. Setup your network configuration in the `Configuration` tab
-5. (not for proxy addon) Create the file `frigate.yml` in your `config` directory with your detailed Frigate configuration
+5. (not for proxy addon) Create the file `frigate.yaml` in your `config` directory with your detailed Frigate configuration
6. Start the addon container
7. (not for proxy addon) If you are using hardware acceleration for ffmpeg, you may need to disable "Protection mode"
@@ -214,27 +207,26 @@ It is recommended to run Frigate in LXC for maximum performance. See [this discu

For details on running Frigate using ESXi, please see the instructions [here](https://williamlam.com/2023/05/frigate-nvr-with-coral-tpu-igpu-passthrough-using-esxi-on-intel-nuc.html).

+If you're running Frigate on a rack-mounted server and want to pass through the Google Coral, [read this.](https://github.com/blakeblackshear/frigate/issues/305)
+
## Synology NAS on DSM 7

These settings were tested on DSM 7.1.1-42962 Update 4

-
**General:**

The `Execute container using high privilege` option needs to be enabled in order to give the frigate container the elevated privileges it may need.

-The `Enable auto-restart` option can be enabled if you want the container to automatically restart whenever it improperly shuts down due to an error.
+The `Enable auto-restart` option can be enabled if you want the container to automatically restart whenever it improperly shuts down due to an error.

![image](https://user-images.githubusercontent.com/4516296/232586790-0b659a82-561d-4bc5-899b-0f5b39c6b11d.png)

-
**Advanced Settings:**

If you want to use the password template feature, you should add the "FRIGATE_RTSP_PASSWORD" environment variable and set it to your preferred password under advanced settings. The rest of the environment variables should be left as default for now.

![image](https://user-images.githubusercontent.com/4516296/232587163-0eb662d4-5e28-4914-852f-9db1ec4b9c3d.png)

-
**Port Settings:**

The network mode should be set to `bridge`. You need to map the default frigate container ports to your local Synology NAS ports that you want to use to access Frigate.
@@ -243,7 +235,6 @@ There may be other services running on your NAS that are using the same ports th

![image](https://user-images.githubusercontent.com/4516296/232582642-773c0e37-7ef5-4373-8ce3-41401b1626e6.png)

-
**Volume Settings:**

You need to configure 2 paths:
@@ -257,14 +248,15 @@ You need to configure 2 paths:

These instructions were tested on a QNAP with an Intel J3455 CPU and 16G RAM, running QTS 4.5.4.2117.

-QNAP has a graphic tool named Container Station to intall and manage docker containers. However, there are two limitations with Container Station that make it unsuitable to install Frigate:
+QNAP has a graphic tool named Container Station to install and manage docker containers. However, there are two limitations with Container Station that make it unsuitable to install Frigate:

1. Container Station does not incorporate GitHub Container Registry (ghcr), which hosts Frigate docker image version 0.12.0 and above.
-2. Container Station uses default 64 Mb shared memory size (shm-size), and does not have a mechanism to adjust it. Frigate requires a larger shm-size to be able to work properly with more than two high resolution cameras.
+2. Container Station uses a default 64 MB shared memory size (shm-size), and does not have a mechanism to adjust it. Frigate requires a larger shm-size to be able to work properly with more than two high resolution cameras.

-Because of above limitations, the installation has to be done from command line. Here are the steps:
+Because of the above limitations, the installation has to be done from the command line. Here are the steps:

**Preparation**
+
1. Install Container Station from QNAP App Center if it is not installed.
2. Enable ssh on your QNAP (please do an Internet search on how to do this).
3. Prepare Frigate config file, name it `config.yml`.
@@ -275,7 +267,8 @@ Because of above limitations, the installation has to be done from command line.
**Installation**

Run the following commands to install Frigate (using `stable` version as example):
-```bash
+
+```shell
# Download Frigate image
docker pull ghcr.io/blakeblackshear/frigate:stable
# Create directory to host Frigate config file on QNAP file system.
@@ -316,6 +309,4 @@ docker run \
  ghcr.io/blakeblackshear/frigate:stable
```

-Log into QNAP, open Container Station. Frigate docker container should be listed under 'Overview' and running. Visit Frigate Web UI by clicking Frigate docker, and then clicking the URL shown at the top of the detail page.
-
-
+Log into QNAP, open Container Station. Frigate docker container should be listed under 'Overview' and running. Visit Frigate Web UI by clicking Frigate docker, and then clicking the URL shown at the top of the detail page.
diff --git a/docs/docs/frigate/video_pipeline.md b/docs/docs/frigate/video_pipeline.md
new file mode 100644
index 000000000..313e27ed5
--- /dev/null
+++ b/docs/docs/frigate/video_pipeline.md
@@ -0,0 +1,67 @@
+---
+id: video_pipeline
+title: Video pipeline
+---
+
+Frigate uses a sophisticated video pipeline that starts with the camera feed and progressively applies transformations to it (e.g. decoding, motion detection, etc.).
+
+This guide provides an overview to help users understand some of the key Frigate concepts.
+
+## Overview
+
+At a high level, there are five processing steps that could be applied to a camera feed:
+
+```mermaid
+%%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%%
+
+flowchart LR
+    Feed(Feed\nacquisition) --> Decode(Video\ndecoding)
+    Decode --> Motion(Motion\ndetection)
+    Motion --> Object(Object\ndetection)
+    Feed --> Recording(Recording\nand\nvisualization)
+    Motion --> Recording
+    Object --> Recording
+```
+
+As the diagram shows, all feeds first need to be acquired. Depending on the data source, it may be as simple as using FFmpeg to connect to an RTSP source via TCP or something more involved like connecting to an Apple HomeKit camera using go2rtc. A single camera can produce a main (i.e. high resolution) and a sub (i.e. lower resolution) video feed.
+
+Typically, the sub-feed will be decoded to produce full-frame images. As part of this process, the resolution may be downscaled and an image sampling frequency may be imposed (e.g. keep 5 frames per second).
+
+These frames will then be compared over time to detect movement areas (a.k.a. motion boxes). These motion boxes are combined into motion regions and are analyzed by a machine learning model to detect known objects. Finally, the snapshot and recording retention config will decide what video clips and events should be saved.
+
+## Detailed view of the video pipeline
+
+The following diagram adds a lot more detail than the simple view explained before. The goal is to show the detailed data paths between the processing steps.
+
+```mermaid
+%%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%%
+
+flowchart TD
+    RecStore[(Recording\nstore)]
+    SnapStore[(Snapshot\nstore)]
+
+    subgraph Acquisition
+        Cam["Camera"] -->|FFmpeg supported| Stream
+        Cam -->|"Other streaming\nprotocols"| go2rtc
+        go2rtc("go2rtc") --> Stream
+        Stream[Capture main and\nsub streams] --> |detect stream|Decode(Decode and\ndownscale)
+    end
+    subgraph Motion
+        Decode --> MotionM(Apply\nmotion masks)
+        MotionM --> MotionD(Motion\ndetection)
+    end
+    subgraph Detection
+        MotionD --> |motion regions| ObjectD(Object detection)
+        Decode --> ObjectD
+        ObjectD --> ObjectFilter(Apply object filters & zones)
+        ObjectFilter --> ObjectZ(Track objects)
+    end
+    Decode --> |decoded frames|Birdseye
+    MotionD --> |motion event|Birdseye
+    ObjectZ --> |object event|Birdseye
+
+    MotionD --> |"video segments\n(retain motion)"|RecStore
+    ObjectZ --> |detection clip|RecStore
+    Stream -->|"video segments\n(retain all)"| RecStore
+    ObjectZ --> |detection snapshot|SnapStore
+```
diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md
index f4b246911..1279f9950 100644
--- a/docs/docs/guides/configuring_go2rtc.md
+++ b/docs/docs/guides/configuring_go2rtc.md
@@ -3,6 +3,8 @@ id: configuring_go2rtc
title: Configuring go2rtc
---

+# Configuring go2rtc
+
Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect directly to your cameras. However, adding go2rtc to your configuration is required for the following features:

- WebRTC or MSE for live viewing with higher resolutions and frame rates than the jsmpeg stream which is limited to the detect stream
@@ -11,7 +13,7 @@ Use of the bundled go2rtc is optional. You can still configure FFmpeg to connect

# Setup a go2rtc stream

-First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#module-streams), not just rtsp.
+First, you will want to configure go2rtc to connect to your camera stream by adding the stream you want to use for live view in your Frigate config file. If you set the stream name under go2rtc to match the name of your camera, it will automatically be mapped and you will get additional live view options for the camera. Avoid changing any other parts of your config at this step. Note that go2rtc supports [many different stream types](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#module-streams), not just rtsp.

```yaml
go2rtc:
@@ -24,7 +26,7 @@ The easiest live view to get working is MSE. After adding this to the config, re

### What if my video doesn't play?

-If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.2.0#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:
+If you are unable to see your video feed, first check the go2rtc logs in the Frigate UI under Logs in the sidebar. If go2rtc is having difficulty connecting to your camera, you should see some error messages in the log. If you do not see any errors, then the video codec of the stream may not be supported in your browser. If your camera stream is set to H265, try switching to H264. You can see more information about [video codec compatibility](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#codecs-madness) in the go2rtc documentation. If you are not able to switch your camera settings from H265 to H264 or your stream is a different format such as MJPEG, you can use go2rtc to re-encode the video using the [FFmpeg parameters](https://github.com/AlexxIT/go2rtc/tree/v1.8.4#source-ffmpeg). It supports rotating and resizing video feeds and hardware acceleration. Keep in mind that transcoding video from one format to another is a resource intensive task and you may be better off using the built-in jsmpeg view. Here is an example of a config that will re-encode the stream to H264 without hardware acceleration:

```yaml
go2rtc:
diff --git a/docs/docs/guides/false_positives.md b/docs/docs/guides/false_positives.md
deleted file mode 100644
index 6102fd63b..000000000
--- a/docs/docs/guides/false_positives.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-id: false_positives
-title: Reducing false positives
----
-
-Tune your object filters to adjust false positives: `min_area`, `max_area`, `min_ratio`, `max_ratio`, `min_score`, `threshold`.
-
-The `min_area` and `max_area` values are compared against the area (number of pixels) from a given detected object. If the area is outside this range, the object will be ignored as a false positive. This allows objects that must be too small or too large to be ignored.
-
-Similarly, the `min_ratio` and `max_ratio` values are compared against a given detected object's width/height ratio (in pixels). If the ratio is outside this range, the object will be ignored as a false positive. This allows objects that are proportionally too short-and-wide (higher ratio) or too tall-and-narrow (smaller ratio) to be ignored.
-
-For object filters in your configuration, any single detection below `min_score` will be ignored as a false positive. `threshold` is based on the median of the history of scores (padded to 3 values) for a tracked object. Consider the following frames when `min_score` is set to 0.6 and threshold is set to 0.85:
-
-| Frame | Current Score | Score History                     | Computed Score | Detected Object |
-| ----- | ------------- | --------------------------------- | -------------- | --------------- |
-| 1     | 0.7           | 0.0, 0, 0.7                       | 0.0            | No              |
-| 2     | 0.55          | 0.0, 0.7, 0.0                     | 0.0            | No              |
-| 3     | 0.85          | 0.7, 0.0, 0.85                    | 0.7            | No              |
-| 4     | 0.90          | 0.7, 0.85, 0.95, 0.90             | 0.875          | Yes             |
-| 5     | 0.88          | 0.7, 0.85, 0.95, 0.90, 0.88       | 0.88           | Yes             |
-| 6     | 0.95          | 0.7, 0.85, 0.95, 0.90, 0.88, 0.95 | 0.89           | Yes             |
-
-In frame 2, the score is below the `min_score` value, so Frigate ignores it and it becomes a 0.0. The computed score is the median of the score history (padding to at least 3 values), and only when that computed score crosses the `threshold` is the object marked as a true positive. That happens in frame 4 in the example.
-
-If you're seeing false positives from stationary objects, please see Object Masks here: https://docs.frigate.video/configuration/masks/
diff --git a/docs/docs/guides/getting_started.md b/docs/docs/guides/getting_started.md
index 04110cd1d..5975da354 100644
--- a/docs/docs/guides/getting_started.md
+++ b/docs/docs/guides/getting_started.md
@@ -3,7 +3,145 @@ id: getting_started
title: Getting started
---

-This guide walks through the steps to build a configuration file for Frigate. It assumes that you already have an environment setup as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution.
+# Getting Started
+
+## Setting up hardware
+
+This section guides you through setting up a server with Debian Bookworm and Docker. If you already have an environment with Linux and Docker installed, you can continue to [Installing Frigate](#installing-frigate) below.
+
+### Install Debian 12 (Bookworm)
+
+There are many guides on how to install Debian Server, so this will be an abbreviated guide. Connect a temporary monitor and keyboard to your device so you can install a minimal server without a desktop environment.
+
+#### Prepare installation media
+
+1. Download the small installation image from the [Debian website](https://www.debian.org/distrib/netinst)
+1. Flash the ISO to a USB device (a popular tool is [balena Etcher](https://etcher.balena.io/))
+1. Boot your device from USB
+
+#### Install and set up Debian for remote access
+
+1. Ensure your device is connected to the network so updates and software options can be installed
+1. Choose the non-graphical install option if you don't have a mouse connected, but either install method works fine
+1. You will be prompted to set the root user password and create a user with a password
+1. Install the minimum software. Fewer dependencies result in less maintenance.
+   1. Uncheck "Debian desktop environment" and "GNOME"
+   1. Check "SSH server"
+   1. Keep "standard system utilities" checked
+1. After reboot, log in as root at the command prompt to add your user to sudoers
+   1. Install sudo
+      ```bash
+      apt update && apt install -y sudo
+      ```
+   1. Add the user you created to the sudo group (change `blake` to your own user)
+      ```bash
+      usermod -aG sudo blake
+      ```
+1. Shutdown by running `poweroff`
+
+At this point, you can install the device in a permanent location. The remaining steps can be performed via SSH from another device. If you don't have an SSH client, you can install one of the options listed in the [Visual Studio Code documentation](https://code.visualstudio.com/docs/remote/troubleshooting#_installing-a-supported-ssh-client).
+
+#### Finish setup via SSH
+
+1. Connect via SSH and log in with your non-root user created during install
+1. Set up passwordless sudo so you don't have to type your password for each sudo command (change `blake` in the command below to your user)
+
+   ```bash
+   echo 'blake ALL=(ALL) NOPASSWD:ALL' | sudo tee /etc/sudoers.d/user
+   ```
+
+1. Log out and log in again to activate passwordless sudo
+1. Set up automatic security updates for the OS (optional)
+   1. Ensure everything is up to date by running
+      ```bash
+      sudo apt update && sudo apt upgrade -y
+      ```
+   1. Install unattended upgrades
+      ```bash
+      sudo apt install -y unattended-upgrades
+      echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | sudo debconf-set-selections
+      sudo dpkg-reconfigure -f noninteractive unattended-upgrades
+      ```
+
+Now you have a minimal Debian server that requires very little maintenance.
+
+### Install Docker
+
+1. Install Docker Engine (not Docker Desktop) using the [official docs](https://docs.docker.com/engine/install/debian/)
+   1. Specifically, follow the steps in the [Install using the apt repository](https://docs.docker.com/engine/install/debian/#install-using-the-repository) section
+2. Add your user to the docker group as described in the [Linux postinstall steps](https://docs.docker.com/engine/install/linux-postinstall/)
+
+## Installing Frigate
+
+This section shows how to create a minimal directory structure for a Docker installation on Debian. If you have installed Frigate as a Home Assistant addon or another way, you can continue to [Configuring Frigate](#configuring-frigate).
+
+### Setup directories
+
+Frigate requires a valid config file to start. The following directory structure is the bare minimum to get started. Once Frigate is running, you can use the built-in config editor which supports config validation.
+
+```
+.
+├── docker-compose.yml
+├── config/
+│   └── config.yml
+└── storage/
+```
+
+This will create the above structure:
+
+```bash
+mkdir storage config && touch docker-compose.yml config/config.yml
+```
+
+If you are setting up Frigate on a Linux device via SSH, you can use [nano](https://itsfoss.com/nano-editor-guide/) to edit the following files. If you prefer to edit remote files with a full editor instead of a terminal, I recommend using [Visual Studio Code](https://code.visualstudio.com/) with the [Remote SSH extension](https://code.visualstudio.com/docs/remote/ssh-tutorial).
+
+:::note
+
+This `docker-compose.yml` file is just a starter for amd64 devices. You will need to customize it for your setup as detailed in the [Installation docs](/frigate/installation#docker).
+
+:::
+
+`docker-compose.yml`
+
+```yaml
+version: "3.9"
+services:
+  frigate:
+    container_name: frigate
+    restart: unless-stopped
+    image: ghcr.io/blakeblackshear/frigate:stable
+    volumes:
+      - ./config:/config
+      - ./storage:/media/frigate
+      - type: tmpfs # Optional: 1GB of memory, reduces SSD/SD Card wear
+        target: /tmp/cache
+        tmpfs:
+          size: 1000000000
+    ports:
+      - "5000:5000"
+      - "8554:8554" # RTSP feeds
+```
+
+`config.yml`
+
+```yaml
+mqtt:
+  enabled: False
+
+cameras:
+  dummy_camera: # <--- this will be changed to your actual camera later
+    enabled: False
+    ffmpeg:
+      inputs:
+        - path: rtsp://127.0.0.1:554/rtsp
+          roles:
+            - detect
+```
+
+Now you should be able to start Frigate by running `docker compose up -d` from within the folder containing `docker-compose.yml`. Frigate should now be accessible at `server_ip:5000` and you can finish the configuration using the built-in configuration editor.
+
+## Configuring Frigate
+
+This section assumes that you already have an environment set up as described in [Installation](../frigate/installation.md). You should also configure your cameras according to the [camera setup guide](/frigate/camera_setup). Pay particular attention to the section on choosing a detect resolution.

### Step 1: Add a detect stream
@@ -15,6 +153,7 @@ mqtt:

cameras:
  name_of_your_camera: # <------ Name the camera
+    enabled: True
    ffmpeg:
      inputs:
        - path: rtsp://10.0.10.10:554/rtsp # <----- The stream you want to use for detection
@@ -22,8 +161,6 @@ cameras:
          - detect
    detect:
      enabled: False # <---- disable detection until you have a working camera feed
-      width: 1280 # <---- update for your camera's resolution
-      height: 720 # <---- update for your camera's resolution
```

### Step 2: Start Frigate
@@ -38,7 +175,21 @@ FFmpeg arguments for other types of cameras can be found [here](../configuration

Now that you have a working camera configuration, you want to setup hardware acceleration to minimize the CPU required to decode your video streams. See the [hardware acceleration](../configuration/hardware_acceleration.md) config reference for examples applicable to your hardware.

-Here is an example configuration with hardware acceleration configured for Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md):
+Here is an example configuration with hardware acceleration configured to work with most Intel processors with an integrated GPU using the [preset](../configuration/ffmpeg_presets.md):
+
+`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
+
+```yaml
+version: "3.9"
+services:
+  frigate:
+    ...
+    devices:
+      - /dev/dri/renderD128 # for intel hwaccel, needs to be updated for your hardware
+    ...
+```
+
+`config.yml`

```yaml
mqtt: ...
@@ -55,6 +206,19 @@ cameras:

By default, Frigate will use a single CPU detector. If you have a USB Coral, you will need to add a detectors section to your config.

+`docker-compose.yml` (after modifying, you will need to run `docker compose up -d` to apply changes)
+
+```yaml
+version: "3.9"
+services:
+  frigate:
+    ...
+    devices:
+      - /dev/bus/usb:/dev/bus/usb # passes the USB Coral, needs to be modified for other versions
+      - /dev/apex_0:/dev/apex_0 # passes a PCIe Coral, follow driver instructions here https://coral.ai/docs/m2/get-started/#2a-on-linux
+    ...
+```
+
```yaml
mqtt: ...
@@ -71,7 +235,7 @@ cameras:
  ...
```

-More details on available detectors can be found [here](../configuration/detectors.md).
+More details on available detectors can be found [here](../configuration/object_detectors.md).

Restart Frigate and you should start seeing detections for `person`. If you want to track other objects, they will need to be added according to the [configuration file reference](../configuration/index.md#full-configuration-reference).
@@ -105,9 +269,6 @@ cameras:
        - path: rtsp://10.0.10.10:554/rtsp
          roles:
            - detect
-    detect:
-      width: 1280
-      height: 720
    motion:
      mask:
        - 0,461,3,0,1919,0,1919,843,1699,492,1344,458,1346,336,973,317,869,375,866,432
@@ -166,9 +327,15 @@ cameras:

By default, Frigate will retain snapshots of all events for 10 days. The full set of options for snapshots can be found [here](../configuration/index.md#full-configuration-reference).

-### Step 7: Follow up guides
+### Step 7: Complete config

-Now that you have a working install, you can use the following guides for additional features:
+At this point you have a complete config with basic functionality. You can see the [full config reference](../configuration/reference.md) for a complete list of configuration options.
+
+### Follow up
+
+Now that you have a working install, you can use the following documentation for additional features:

1. [Configuring go2rtc](configuring_go2rtc.md) - Additional live view options and RTSP relay
2. [Home Assistant Integration](../integrations/home-assistant.md) - Integrate with Home Assistant
+3. [Masks](../configuration/masks.md)
+4. [Zones](../configuration/zones.md)
diff --git a/docs/docs/guides/ha_network_storage.md b/docs/docs/guides/ha_network_storage.md
index 498dd7d0c..b248cae4a 100644
--- a/docs/docs/guides/ha_network_storage.md
+++ b/docs/docs/guides/ha_network_storage.md
@@ -1,9 +1,9 @@
---
id: ha_network_storage
-title: HA Network Storage
+title: Home Assistant network storage
---

-As of HomeAsisstant Core 2023.6, Network Mounted Storage is supported for addons.
+As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons.

## Setting Up Remote Storage For Frigate
@@ -16,6 +16,7 @@ As of HomeAsisstant Core 2023.6, Network Mounted Storage is supported for addons

1. Stop the Frigate addon
2. Update your [config](configuration/index.md) so the DB is stored in the /config directory by adding:
+
```yaml
database:
  path: /config/frigate.db
diff --git a/docs/docs/guides/parked_cars.md b/docs/docs/guides/parked_cars.md
new file mode 100644
index 000000000..6416762b9
--- /dev/null
+++ b/docs/docs/guides/parked_cars.md
@@ -0,0 +1,71 @@
+---
+id: parked_cars
+title: Handling parked cars
+---
+
+:::tip
+
+This is an area targeted for improvement in future releases.
+
+:::
+
+Many people use Frigate to detect cars entering their driveway, and they often run into an issue with repeated events of parked cars and/or long running events after the car parks. This can cause Frigate to store more video than desired.
+
+:::caution
+
+It is not recommended to use motion masks to try and eliminate parked cars in your driveway. Motion masks are designed to prevent motion from triggering object detection and will not prevent objects from being detected in the area if motion is detected outside of the motion mask.
+
+:::
+
+## Repeated events of parked cars
+
+To only be notified of cars that enter your driveway from the street, you can create multiple zones that cover your driveway. For cars, you would only notify if `entered_zones` from the events MQTT topic contains the entrance zone.
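+A minimal sketch of such a notification as a Home Assistant automation, listening on Frigate's `frigate/events` MQTT topic (the zone name `zone_2` and the notify service are placeholders for your own setup, and in practice you would likely also want to de-duplicate the repeated `update` messages for the same event):
+
+```yaml
+automation:
+  - alias: "Car entered the driveway"
+    trigger:
+      - platform: mqtt
+        topic: frigate/events
+    condition:
+      # only fire for cars that have passed through the entrance zone
+      - condition: template
+        value_template: >-
+          {{ trigger.payload_json['after']['label'] == 'car'
+             and 'zone_2' in trigger.payload_json['after']['entered_zones'] }}
+    action:
+      - service: notify.mobile_app_your_phone # placeholder notify target
+        data:
+          message: "A car entered the driveway"
+```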
+
+See [this example](../configuration/zones.md#restricting-zones-to-specific-objects) from the Zones documentation to see how to restrict zones to certain object types.
+
+![Driveway Zones](/img/driveway_zones-min.png)
+
+To limit snapshots and events, you can list the zone for the entrance of your driveway under `required_zones` in your configuration file.
+
+```yaml
+camera:
+  record:
+    events:
+      required_zones:
+        - zone_2
+  zones:
+    zone_1:
+      coordinates: ... (parking area)
+    zone_2:
+      coordinates: ... (entrance to driveway)
+```
+
+This will only save events if the car entered the entrance zone at any point.
+
+## Long running events
+
+There are a few recommended approaches to avoid excessive storage use due to parked cars. These can be used in combination.
+
+### 1. Use `motion` or `active_objects` mode for event recordings
+
+Leverages [recording settings](../configuration/record.md#what-do-the-different-retain-modes-mean) to avoid excess storage use.
+
+#### Advantages of this approach
+
+For users using `motion` mode for continuous recording, this successfully avoids extra video from being stored for cars parked in view because all motion video is already being saved.
+
+#### Limitations of this approach
+
+For users that only want to record motion during events, long running events will result in all motion being stored as long as the car is in view. You can mitigate this further by using the `active_objects` mode for event recordings, but that may result in less video being retained than is desired.
+
+### 2. Use an object mask to prevent detections in the parking zone
+
+Leverages [object filter masks](../configuration/masks.md#object-filter-masks) to prevent detections of cars parked in the driveway.
+
+#### Advantages of this approach
+
+Using this approach, you will get two separate events for when a car enters the driveway, parks in the parking zone, and then later leaves the zone. Using an object mask will ensure that cars parked in the parking zone are not detected and confused with cars driving by on the street as well.
+
+#### Limitations of this approach
+
+This approach will only work for cars that park in the parking zone. Cars that park in other areas will still be tracked as long as they are in view. This will also prevent zone sensors from telling you if a car is parked in the parking zone.
diff --git a/docs/docs/guides/stationary_objects.md b/docs/docs/guides/stationary_objects.md
deleted file mode 100644
index 5d45e58c5..000000000
--- a/docs/docs/guides/stationary_objects.md
+++ /dev/null
@@ -1,43 +0,0 @@
----
-id: stationary_objects
-title: Avoiding stationary objects
----
-
-Many people use Frigate to detect cars entering their driveway, and they often run into an issue with repeated notifications or events of a parked car being repeatedly detected over the course of multiple days (for example if the car is lost at night and detected again the following morning).
-
-You can use zones to restrict events and notifications to objects that have entered specific areas.
-
-:::caution
-
-It is not recommended to use masks to try and eliminate parked cars in your driveway. Masks are designed to prevent motion from triggering object detection and/or to indicate areas that are guaranteed false positives.
-
-Frigate is designed to track objects as they move and over-masking can prevent it from knowing that an object in the current frame is the same as the previous frame. You want Frigate to detect objects everywhere and configure your events and alerts to be based on the location of the object with zones.
-
-:::
-
-:::info
-
-Once a vehicle crosses the entrance into the parking area, that event will stay `In Progress` until it is no longer seen in the frame. Frigate is designed to have an event last as long as an object is visible in the frame, an event being `In Progress` does not mean the event is being constantly recorded. You can define the recording behavior by adjusting the [recording retention settings](../configuration/record.md).
-
-:::
-
-To only be notified of cars that enter your driveway from the street, you could create multiple zones that cover your driveway. For cars, you would only notify if `entered_zones` from the events MQTT topic has more than 1 zone.
-
-See [this example](../configuration/zones.md#restricting-zones-to-specific-objects) from the Zones documentation to see how to restrict zones to certain object types.
-
-![Driveway Zones](/img/driveway_zones-min.png)
-
-To limit snapshots and events, you can list the zone for the entrance of your driveway under `required_zones` in your configuration file. Example below.
-
-```yaml
-camera:
-  record:
-    events:
-      required_zones:
-        - zone_2
-  zones:
-    zone_1:
-      coordinates: ... (parking area)
-    zone_2:
-      coordinates: ... (entrance to driveway)
-```
diff --git a/docs/docs/integrations/api.md b/docs/docs/integrations/api.md
index 51887b14b..20877bb6f 100644
--- a/docs/docs/integrations/api.md
+++ b/docs/docs/integrations/api.md
@@ -155,18 +155,35 @@ Version info

Events from the database. Accepts the following query string parameters:

-| param                | Type | Description                                   |
-| -------------------- | ---- | --------------------------------------------- |
-| `before`             | int  | Epoch time                                    |
-| `after`              | int  | Epoch time                                    |
-| `cameras`            | str  | , separated list of cameras                   |
-| `labels`             | str  | , separated list of labels                    |
-| `zones`              | str  | , separated list of zones                     |
-| `limit`              | int  | Limit the number of events returned           |
-| `has_snapshot`       | int  | Filter to events that have snapshots (0 or 1) |
-| `has_clip`           | int  | Filter to events that have clips (0 or 1)     |
-| `include_thumbnails` | int  | Include thumbnails in the response (0 or 1)   |
-| `in_progress`        | int  | Limit to events in progress (0 or 1)          |
+| param                | Type  | Description                                            |
+| -------------------- | ----- | ------------------------------------------------------ |
+| `before`             | int   | Epoch time                                             |
+| `after`              | int   | Epoch time                                             |
+| `cameras`            | str   | , separated list of cameras                            |
+| `labels`             | str   | , separated list of labels                             |
+| `zones`              | str   | , separated list of zones                              |
+| `limit`              | int   | Limit the number of events returned                    |
+| `has_snapshot`       | int   | Filter to events that have snapshots (0 or 1)          |
+| `has_clip`           | int   | Filter to events that have clips (0 or 1)              |
+| `include_thumbnails` | int   | Include thumbnails in the response (0 or 1)            |
+| `in_progress`        | int   | Limit to events in progress (0 or 1)                   |
+| `time_range`         | str   | Time range in format after,before (00:00,24:00)        |
+| `timezone`           | str   | Timezone to use for time range                         |
+| `min_score`          | float | Minimum score of the event                             |
+| `max_score`          | float | Maximum score of the event                             |
+| `is_submitted`       | int   | Filter events that are submitted to Frigate+ (0 or 1)  |
+| `min_length`         | float | Minimum length of the event                            |
+| `max_length`         | float | Maximum length of the event                            |
+
+### `GET /api/timeline`
+
+Timeline of key moments for one or more events from the database. Accepts the following query string parameters:
+
+| param       | Type | Description                         |
+| ----------- | ---- | ----------------------------------- |
+| `camera`    | str  | Name of camera                      |
+| `source_id` | str  | ID of tracked object                |
+| `limit`     | int  | Limit the number of events returned |

### `GET /api/events/summary`
@@ -188,6 +205,14 @@ Sets retain to true for the event id.

Submits the snapshot of the event to Frigate+ for labeling.

+| param                | Type | Description                        |
+| -------------------- | ---- | ---------------------------------- |
+| `include_annotation` | int  | Submit annotation to Frigate+ too. |
+
+### `PUT /api/events/<id>/false_positive`
+
+Submits the snapshot of the event to Frigate+ for labeling and adds the detection as a false positive.
+
### `DELETE /api/events/<id>/retain`

Sets retain to false for the event id (event may be deleted quickly after removing).
@@ -195,11 +220,12 @@ Sets retain to false for the event id (event may be deleted quickly after removi
### `POST /api/events/<id>/sub_label`

Set a sub label for an event. For example to update `person` -> `person's name` if they were recognized with facial recognition.
-Sub labels must be 20 characters or shorter.
+Sub labels must be 100 characters or shorter.

```json
{
-  "subLabel": "some_string"
+  "subLabel": "some_string",
+  "subLabelScore": 0.79
}
```
@@ -233,6 +259,19 @@ Accepts the following query string parameters, but they are only applied when an

Returns the snapshot image from the latest event for the given camera and label combo. Using `any` as the label will return the latest thumbnail regardless of type.

+### `GET /api/<camera_name>/recordings/<frame_time>/snapshot.png`
+
+Returns the snapshot image from the specific point in that camera's recordings.
+
+### `GET /api/<camera_name>/grid.jpg`
+
+Returns the latest camera image with the regions grid overlaid.
+
+| param        | Type  | Description                                                                                 |
+| ------------ | ----- | ------------------------------------------------------------------------------------------- |
+| `color`      | str   | The color of the grid (red,green,blue,black,white). Defaults to "green".                     |
+| `font_scale` | float | Font scale. Can be used to increase font size on high resolution cameras. Defaults to 0.5.   |
+
### `GET /clips/<camera>-<id>.jpg`

JPG snapshot for the given camera and event id.
@@ -249,6 +288,28 @@ HTTP Live Streaming Video on Demand URL for the specified event. Can be viewed i

HTTP Live Streaming Video on Demand URL for the camera with the specified time range. Can be viewed in an application like VLC.

+### `POST /api/export/<camera>/start/<start-timestamp>/end/<end-timestamp>`
+
+Export recordings from `start-timestamp` to `end-timestamp` for `camera` as a single mp4 file. These recordings will be exported to the `/media/frigate/exports` folder.
+
+It is also possible to export this recording as a timelapse.
+
+**Optional Body:**
+
+```json
+{
+  "playback": "realtime" // playback factor: realtime or timelapse_25x
+}
+```
+
+### `DELETE /api/export/<export_name>`
+
+Delete an export from disk.
+
+### `PATCH /api/export/<export_name>/<new_name>`
+
+Renames an export.
+
### `GET /api/<camera_name>/recordings/summary`

Hourly summary of recordings data for a camera.
@@ -269,3 +330,55 @@ Get ffprobe output for camera feed paths.

| param   | Type   | Description                        |
| ------- | ------ | ---------------------------------- |
| `paths` | string | `,` separated list of camera paths |
+
+### `GET /api/<camera_name>/ptz/info`
+
+Get PTZ info for the camera.
+
+### `POST /api/events//