Remove all AGPL licensed YOLO references from Frigate (#10717)

* Remove yolov8 support from Frigate

* Remove yolov8 from dev

* Remove builds

* Formatting and remove yolov5

* Fix lint

* remove models download

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
Blake Blackshear 2024-03-30 06:46:17 -04:00 committed by GitHub
parent 0223d6df60
commit 14235c42b9
16 changed files with 81 additions and 671 deletions


@@ -69,15 +69,15 @@ jobs:
rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
-- name: Build and push RockChip build
-uses: docker/bake-action@v3
-with:
-push: true
-targets: rk
-files: docker/rockchip/rk.hcl
-set: |
-rk.tags=${{ steps.setup.outputs.image-name }}-rk
-*.cache-from=type=gha
+#- name: Build and push RockChip build
+# uses: docker/bake-action@v3
+# with:
+# push: true
+# targets: rk
+# files: docker/rockchip/rk.hcl
+# set: |
+# rk.tags=${{ steps.setup.outputs.image-name }}-rk
+# *.cache-from=type=gha
jetson_jp4_build:
runs-on: ubuntu-latest
name: Jetson Jetpack 4
@@ -155,57 +155,57 @@ jobs:
tensorrt.tags=${{ steps.setup.outputs.image-name }}-tensorrt
*.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64
*.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-amd64,mode=max
-- name: AMD/ROCm general build
-env:
-AMDGPU: gfx
-HSA_OVERRIDE: 0
-uses: docker/bake-action@v3
-with:
-push: true
-targets: rocm
-files: docker/rocm/rocm.hcl
-set: |
-rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
-*.cache-from=type=gha
+#- name: AMD/ROCm general build
+# env:
+# AMDGPU: gfx
+# HSA_OVERRIDE: 0
+# uses: docker/bake-action@v3
+# with:
+# push: true
+# targets: rocm
+# files: docker/rocm/rocm.hcl
+# set: |
+# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm
+# *.cache-from=type=gha
-- name: AMD/ROCm gfx900
-env:
-AMDGPU: gfx900
-HSA_OVERRIDE: 1
-HSA_OVERRIDE_GFX_VERSION: 9.0.0
-uses: docker/bake-action@v3
-with:
-push: true
-targets: rocm
-files: docker/rocm/rocm.hcl
-set: |
-rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx900
-*.cache-from=type=gha
+#- name: AMD/ROCm gfx900
+# env:
+# AMDGPU: gfx900
+# HSA_OVERRIDE: 1
+# HSA_OVERRIDE_GFX_VERSION: 9.0.0
+# uses: docker/bake-action@v3
+# with:
+# push: true
+# targets: rocm
+# files: docker/rocm/rocm.hcl
+# set: |
+# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx900
+# *.cache-from=type=gha
-- name: AMD/ROCm gfx1030
-env:
-AMDGPU: gfx1030
-HSA_OVERRIDE: 1
-HSA_OVERRIDE_GFX_VERSION: 10.3.0
-uses: docker/bake-action@v3
-with:
-push: true
-targets: rocm
-files: docker/rocm/rocm.hcl
-set: |
-rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1030
-*.cache-from=type=gha
+#- name: AMD/ROCm gfx1030
+# env:
+# AMDGPU: gfx1030
+# HSA_OVERRIDE: 1
+# HSA_OVERRIDE_GFX_VERSION: 10.3.0
+# uses: docker/bake-action@v3
+# with:
+# push: true
+# targets: rocm
+# files: docker/rocm/rocm.hcl
+# set: |
+# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1030
+# *.cache-from=type=gha
-- name: AMD/ROCm gfx1100
-env:
-AMDGPU: gfx1100
-HSA_OVERRIDE: 1
-HSA_OVERRIDE_GFX_VERSION: 11.0.0
-uses: docker/bake-action@v3
-with:
-push: true
-targets: rocm
-files: docker/rocm/rocm.hcl
-set: |
-rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1100
-*.cache-from=type=gha
+#- name: AMD/ROCm gfx1100
+# env:
+# AMDGPU: gfx1100
+# HSA_OVERRIDE: 1
+# HSA_OVERRIDE_GFX_VERSION: 11.0.0
+# uses: docker/bake-action@v3
+# with:
+# push: true
+# targets: rocm
+# files: docker/rocm/rocm.hcl
+# set: |
+# rocm.tags=${{ steps.setup.outputs.image-name }}-rocm-gfx1100
+# *.cache-from=type=gha
# The majority of users running arm64 are rpi users, so the rpi
# build should be the primary arm64 image
assemble_default_build:


@@ -1,35 +0,0 @@
#!/command/with-contenv bash
# shellcheck shell=bash
# Download yolov8 models when DOWNLOAD_YOLOV8=1 environment variable is set
set -o errexit -o nounset -o pipefail
MODEL_CACHE_DIR=${MODEL_CACHE_DIR:-"/config/model_cache"}
DOWNLOAD_YOLOV8=${DOWNLOAD_YOLOV8:-"0"}
YOLOV8_DIR="$MODEL_CACHE_DIR/yolov8"
YOLOV8_URL=https://github.com/harakas/models/releases/download/yolov8.1-1.1/yolov8.small.models.tar.gz
YOLOV8_DIGEST=304186b299560fbacc28eac9b9ea02cc2289fe30eb2c0df30109a2529423695c
if [ "$DOWNLOAD_YOLOV8" = "1" ]; then
echo "download-models: DOWNLOAD_YOLOV8=${DOWNLOAD_YOLOV8}, running download"
if ! test -f "${YOLOV8_DIR}/model.fetched"; then
mkdir -p $YOLOV8_DIR
TMP_FILE="${YOLOV8_DIR}/download.tar.gz"
curl --no-progress-meter -L --max-filesize 500M --insecure --output $TMP_FILE "${YOLOV8_URL}"
digest=$(sha256sum $TMP_FILE | awk '{print $1}')
if [ "$digest" = "$YOLOV8_DIGEST" ]; then
echo "download-models: Extracting downloaded file"
cd $YOLOV8_DIR
tar zxf $TMP_FILE
rm $TMP_FILE
touch model.fetched
echo "download-models: Yolov8 download done, files placed into ${YOLOV8_DIR}"
else
echo "download-models: Downloaded file digest does not match: got $digest, expected $YOLOV8_DIGEST"
rm $TMP_FILE
fi
else
echo "download-models: ${YOLOV8_DIR}/model.fetched already present"
fi
fi


@@ -1 +0,0 @@
/etc/s6-overlay/s6-rc.d/download-models/run


@@ -12,8 +12,8 @@ RUN pip3 wheel --wheel-dir=/rk-wheels -c /requirements-wheels.txt -r /requiremen
FROM deps AS rk-frigate
ARG TARGETARCH
RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
pip3 install -U /deps/rk-wheels/*.whl
WORKDIR /opt/frigate/
COPY --from=rootfs / /
@@ -21,10 +21,7 @@ COPY --from=rootfs / /
ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so /usr/lib/
ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so /usr/lib/
-ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3562/yolov8n-320x320-rk3562.rknn /models/rknn/
-ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3566/yolov8n-320x320-rk3566.rknn /models/rknn/
-ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3568/yolov8n-320x320-rk3568.rknn /models/rknn/
-ADD https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8n-320x320-rk3588.rknn /models/rknn/
+# TODO removed models, other models support may need to be added back in
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe


@@ -105,58 +105,6 @@ detectors:
device: pci
```
### Yolov8 On Coral
It is possible to use the [ultralytics yolov8](https://github.com/ultralytics/ultralytics) pretrained models with the Google Coral processors.
#### Setup
You need to download yolov8 model files suitable for the EdgeTPU. Frigate can do this automatically with the `DOWNLOAD_YOLOV8={0 | 1}` environment variable either from the command line
```bash
$ docker run ... -e DOWNLOAD_YOLOV8=1 \
...
```
or when using docker compose:
```yaml
services:
frigate:
---
environment:
DOWNLOAD_YOLOV8: "1"
```
When this variable is set, frigate will fetch [yolov8.small.models.tar.gz](https://github.com/harakas/models/releases/download/yolov8.1-1.1/yolov8.small.models.tar.gz) at startup and extract it into the `/config/model_cache/yolov8/` directory.
The following files suitable for the EdgeTPU detector will be available under `/config/model_cache/yolov8/`:
- `yolov8[ns]_320x320_edgetpu.tflite` -- nano (n) and small (s) sized models that have been trained using the coco dataset (90 classes)
- `yolov8[ns]-oiv7_320x320_edgetpu.tflite` -- model files that have been trained using the google open images v7 dataset (601 classes)
- `labels.txt` and `labels-frigate.txt` -- full and aggregated labels for the coco dataset models
- `labels-oiv7.txt` and `labels-oiv7-frigate.txt` -- labels for the oiv7 dataset models
The aggregated label files contain renamed labels leaving only `person`, `vehicle`, `animal` and `bird` classes. The oiv7 trained models contain 601 classes and so are difficult to configure manually -- using aggregate labels is recommended.
Larger models (of `m` and `l` size and also at `640x640` resolution) can be found at https://github.com/harakas/models/releases/tag/yolov8.1-1.1/ but have to be installed manually.
The oiv7 models have been trained on the larger google open images v7 dataset. They also contain many more detection classes (over 600), so using the aggregate label files is recommended. The large number of classes leads to a lower baseline for detection probability values and to higher resource consumption (they are slower to evaluate).
#### Configuration
```yaml
model:
labelmap_path: /config/model_cache/yolov8/labels.txt
model_type: yolov8
detectors:
coral:
type: edgetpu
device: usb
model:
path: /config/model_cache/yolov8/yolov8n_320x320_edgetpu.tflite
```
## OpenVINO Detector
The OpenVINO detector type runs an OpenVINO IR model on AMD and Intel CPUs, Intel GPUs and Intel VPU hardware. To configure an OpenVINO detector, set the `"type"` attribute to `"openvino"`.
@@ -183,7 +131,7 @@ model:
labelmap_path: /openvino-model/coco_91cl_bkgr.txt
```
-This detector also supports some YOLO variants: YOLOX, YOLOv5, and YOLOv8 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
+This detector also supports YOLOX. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/reference.md) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
```yaml
detectors:
@@ -354,257 +302,3 @@ Replace `<your_codeproject_ai_server_ip>` and `<port>` with the IP address and p
To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly.
# Community Supported Detectors
## Rockchip RKNN-Toolkit-Lite2
This detector is only available if one of the following Rockchip SoCs is used:
- RK3588/RK3588S
- RK3568
- RK3566
- RK3562
These SoCs come with an NPU that greatly speeds up detection.
### Setup
Use a frigate docker image with `-rk` suffix and enable privileged mode by adding the `--privileged` flag to your docker run command or `privileged: true` to your `docker-compose.yml` file.
### Configuration
This `config.yml` shows all relevant options for configuring the detector and explains them. All values shown are the default values (except for one). Lines that are required to use the detector are labeled as required; all other lines are optional.
```yaml
detectors: # required
rknn: # required
type: rknn # required
# core mask for npu
core_mask: 0
model: # required
# name of yolov8 model or path to your own .rknn model file
# possible values are:
# - default-yolov8n
# - default-yolov8s
# - default-yolov8m
# - default-yolov8l
# - default-yolov8x
# - /config/model_cache/rknn/your_custom_model.rknn
path: default-yolov8n
# width and height of detection frames
width: 320
height: 320
# pixel format of detection frame
# default value is rgb but yolov8 models usually use bgr format
input_pixel_format: bgr # required
# shape of detection frame
input_tensor: nhwc
```
Explanation for rknn specific options:
- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing, only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, where the last digit corresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2, but you can use core0 and core1). Enabling more cores can reduce inference time, especially when using bigger models (see section below). Examples:
- `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value.
- `core_mask: 0b001` use only core0.
- `core_mask: 0b011` use core0 and core1.
- `core_mask: 0b110` use core1 and core2. **This does not** work, since core0 is disabled.
### Choosing a model
There are 5 default yolov8 models that differ in size and therefore place different loads on the NPU. They are listed in ascending order, with the top one being the smallest and least computationally intensive model:
| Model | Size in MB |
| ------- | ---------- |
| yolov8n | 9 |
| yolov8s | 25 |
| yolov8m | 54 |
| yolov8l | 90 |
| yolov8x | 136 |
:::tip
You can get the load of your NPU with the following command:
```bash
$ cat /sys/kernel/debug/rknpu/load
>> NPU load: Core0: 0%, Core1: 0%, Core2: 0%,
```
:::
- By default the rknn detector uses the yolov8n model (`model: path: default-yolov8n`). This model comes with the image, so no further steps than those mentioned above are necessary.
- If you want to use a more precise model, you can pass `default-yolov8s`, `default-yolov8m`, `default-yolov8l` or `default-yolov8x` as `model: path:` option.
- If the model does not exist, it will be automatically downloaded to `/config/model_cache/rknn`.
- If your server has no internet connection, you can download the model from [this Github repository](https://github.com/MarcA711/rknn-models/releases) using another device and place it in `config/model_cache/rknn` on your system (see the sketch below).
- Finally, you can also provide your own model. Note that only yolov8 models are currently supported. Moreover, you will need to convert your model to the rknn format using `rknn-toolkit2` on a x86 machine. Afterwards, you can place your `.rknn` model file in the `config/model_cache/rknn` directory on your system. Then you need to pass the path to your model using the `path` option of your `model` block like this:
```yaml
model:
path: /config/model_cache/rknn/my-rknn-model.rknn
```
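For the offline case mentioned above, a minimal sketch of fetching a prebuilt model on another machine and copying it over (an RK3588 board and the `yolov8s` variant are assumed; the asset name follows the release naming used by that repository, and the destination host/path are placeholders):
```bash
# on a machine with internet access, fetch the prebuilt .rknn model
$ wget https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-rk3588/yolov8s-320x320-rk3588.rknn
# copy it into Frigate's model cache on the target system (placeholder host and config path)
$ scp yolov8s-320x320-rk3588.rknn user@frigate-host:/path/to/config/model_cache/rknn/
```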
:::tip
When you have a multicore NPU, you can enable all cores to reduce inference times. You should consider activating all cores if you use a larger model like yolov8l. If your NPU has 3 cores (like rk3588/S SoCs), you can enable all 3 cores using:
```yaml
detectors:
rknn:
type: rknn
core_mask: 0b111
```
:::
## AMD/ROCm GPU detector
### Setup
The `rocm` detector supports running [ultralytics](https://github.com/ultralytics/ultralytics) yolov8 models on AMD GPUs and iGPUs. Use a frigate docker image with `-rocm` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rocm`.
As the ROCm software stack is quite bloated, there are also smaller versions for specific GPU chipsets:
- `ghcr.io/blakeblackshear/frigate:stable-rocm-gfx900`
- `ghcr.io/blakeblackshear/frigate:stable-rocm-gfx1030`
- `ghcr.io/blakeblackshear/frigate:stable-rocm-gfx1100`
### Docker settings for GPU access
ROCm needs access to the `/dev/kfd` and `/dev/dri` devices. If docker or frigate is not run as root, the `video` (and possibly `render` and `ssl/_ssl`) groups should also be added.
When running docker directly the following flags should be added for device access:
```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
...
```
When using docker compose:
```yaml
services:
frigate:
---
devices:
- /dev/dri
- /dev/kfd
```
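If the container is run as a non-root user, the groups mentioned above can be added explicitly. A minimal sketch using `docker run` (the `--group-add` flag is standard Docker; the group names are taken from the note above):
```bash
$ docker run --device=/dev/kfd --device=/dev/dri \
    --group-add video --group-add render \
    ...
```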
For reference on recommended settings see [running ROCm/pytorch in Docker](https://rocm.docs.amd.com/projects/install-on-linux/en/develop/how-to/3rd-party/pytorch-install.html#using-docker-with-pytorch-pre-installed).
### Docker settings for overriding the GPU chipset
Your GPU or iGPU might work just fine without any special configuration, but in many cases manual settings are needed. The AMD/ROCm software stack comes with a limited set of GPU drivers, and for newer or missing models you will have to override the chipset version to an older/generic version to get things working.
AMD/ROCm also does not "officially" support integrated GPUs. It still works with most of them just fine, but special settings are required: you have to configure the `HSA_OVERRIDE_GFX_VERSION` environment variable. See the [ROCm bug report](https://github.com/ROCm/ROCm/issues/1743) for context and examples.
For chipset specific frigate rocm builds this variable is already set automatically.
For the general rocm frigate build there is some automatic detection:
- gfx90c -> 9.0.0
- gfx1031 -> 10.3.0
- gfx1103 -> 11.0.0
If you have something else you might need to override the `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `9.0.0`, then you should configure it from command line as:
```bash
$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
...
```
When using docker compose:
```yaml
services:
frigate:
---
environment:
HSA_OVERRIDE_GFX_VERSION: "9.0.0"
```
Figuring out what version you need can be complicated as you can't tell the chipset name and driver from the AMD brand name.
- first make sure that rocm environment is running properly by running `/opt/rocm/bin/rocminfo` in the frigate container -- it should list both the CPU and the GPU with their properties
- find the chipset version you have (gfxNNN) from the output of the `rocminfo` (see below)
- use a search engine to query what `HSA_OVERRIDE_GFX_VERSION` you need for the given gfx name ("gfxNNN ROCm HSA_OVERRIDE_GFX_VERSION")
- override the `HSA_OVERRIDE_GFX_VERSION` with relevant value
- if things are not working check the frigate docker logs
#### Figuring out if AMD/ROCm is working and found your GPU
```bash
$ docker exec -it frigate /opt/rocm/bin/rocminfo
```
#### Figuring out your AMD GPU chipset version:
We unset the `HSA_OVERRIDE_GFX_VERSION` to prevent an existing override from messing up the result:
```bash
$ docker exec -it frigate /bin/bash -c '(unset HSA_OVERRIDE_GFX_VERSION && /opt/rocm/bin/rocminfo |grep gfx)'
```
### Yolov8 model download and available files
The ROCm specific frigate docker containers automatically download yolov8 files from https://github.com/harakas/models/releases/tag/yolov8.1-1.1/ at startup --
they fetch [yolov8.small.models.tar.gz](https://github.com/harakas/models/releases/download/yolov8.1-1.1/yolov8.small.models.tar.gz)
and uncompress it into the `/config/model_cache/yolov8/` directory. After that, the model files are compiled for your GPU chipset.
Both the download and compilation can take a couple of minutes, during which frigate will not be responsive. See the docker logs to follow progress.
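For example, progress can be followed with the standard Docker log command (the container name `frigate` is assumed):
```bash
$ docker logs -f frigate
```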
Automatic model download can be configured with the `DOWNLOAD_YOLOV8=1/0` environment variable either from the command line
```bash
$ docker run ... -e DOWNLOAD_YOLOV8=1 \
...
```
or when using docker compose:
```yaml
services:
frigate:
---
environment:
DOWNLOAD_YOLOV8: "1"
```
The download can also be triggered in regular frigate builds using that environment variable. The following files will be available under `/config/model_cache/yolov8/`:
- `yolov8[ns]_320x320.onnx` -- nano (n) and small (s) sized floating point model files usable by the `rocm` and `onnx` detectors that have been trained using the coco dataset (90 classes)
- `yolov8[ns]-oiv7_320x320.onnx` -- floating point model files usable by the `rocm` and `onnx` detectors that have been trained using the google open images v7 dataset (601 classes)
- `labels.txt` and `labels-frigate.txt` -- full and aggregated labels for the coco dataset models
- `labels-oiv7.txt` and `labels-oiv7-frigate.txt` -- labels for the oiv7 dataset models
The aggregated label files contain renamed labels leaving only `person`, `vehicle`, `animal` and `bird` classes. The oiv7 trained models contain 601 classes and so are difficult to configure manually -- using aggregate labels is recommended.
Larger models (of `m` and `l` size and also at `640x640` resolution) can be found at https://github.com/harakas/models/releases/tag/yolov8.1-1.1/ but have to be installed manually.
The oiv7 models have been trained on the larger google open images v7 dataset. They also contain many more detection classes (over 600), so using the aggregate label files is recommended. The large number of classes leads to a lower baseline for detection probability values and to higher resource consumption (they are slower to evaluate).
The `rocm` builds precompile the `onnx` files for your chipset into `mxr` files. If you change your hardware or GPU or have compiled the wrong versions you need to delete the cached `.mxr` files under `/config/model_cache/yolov8/`.
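A minimal sketch of clearing the cached files from inside the container (path as described above; this removes only the compiled `.mxr` files, not the downloaded `.onnx` models):
```bash
$ docker exec -it frigate /bin/bash -c 'rm -f /config/model_cache/yolov8/*.mxr'
```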
### Frigate configuration
You also need to modify the frigate configuration to specify the detector, labels and model file. Here is an example configuration running `yolov8s`:
```yaml
model:
labelmap_path: /config/model_cache/yolov8/labels.txt
model_type: yolov8
detectors:
rocm:
type: rocm
model:
path: /config/model_cache/yolov8/yolov8s_320x320.onnx
```
Other settings available for the rocm detector (an illustrative sketch follows the list):
- `conserve_cpu: True` -- run ROCm/HIP synchronization in blocking mode saving CPU (at small loss of latency and maximum throughput)
- `auto_override_gfx: True` -- enable or disable automatic gfx driver detection
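As a sketch only (the values here are assumptions, not recommendations), these options sit alongside the detector type in the config:
```yaml
detectors:
  rocm:
    type: rocm
    # run ROCm/HIP synchronization in blocking mode to save CPU
    conserve_cpu: True
    # set to False if you prefer to manage HSA_OVERRIDE_GFX_VERSION yourself
    auto_override_gfx: False
```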
### Expected performance
On an AMD Ryzen 3 5400U with integrated GPU (gfx90c) the yolov8n runs in around 9ms per image (about 110 detections per second) and 18ms (55 detections per second) for yolov8s (at 320x320 detector resolution).


@@ -80,7 +80,7 @@ model:
# Valid values are nhwc or nchw (default: shown below)
input_tensor: nhwc
# Optional: Object detection model type, currently only used with the OpenVINO detector
-# Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below)
+# Valid values are ssd, yolox (default: shown below)
model_type: ssd
# Optional: Label name modifications. These are merged into the standard labelmap.
labelmap:


@@ -95,23 +95,6 @@ Frigate supports all Jetson boards, from the inexpensive Jetson Nano to the powe
Inference speed will vary depending on the YOLO model, jetson platform and jetson nvpmodel (GPU/DLA/EMC clock speed). It is typically 20-40 ms for most models. The DLA is more efficient than the GPU, but not faster, so using the DLA will reduce power consumption but will slightly increase inference time.
#### Rockchip SoC
Frigate supports SBCs with the following Rockchip SoCs:
- RK3566/RK3568
- RK3588/RK3588S
- RV1103/RV1106
- RK3562
Using the yolov8n model on an Orange Pi 5 Plus with the RK3588 SoC, inference speeds vary between 20 and 25 ms.
#### AMD GPUs and iGPUs
With the [rocm](../configuration/object_detectors.md#amdrocm-gpu-detector) detector Frigate can take advantage of many AMD GPUs and iGPUs.
An AMD Ryzen mini PC with AMD Ryzen 3 5400U iGPU takes about 9 ms to evaluate yolov8n.
## What does Frigate use the CPU for and what does it use a detector for? (ELI5 Version)
This is taken from a [user question on reddit](https://www.reddit.com/r/homeassistant/comments/q8mgau/comment/hgqbxh5/?utm_source=share&utm_medium=web2x&context=3). Modified slightly for clarity.


@@ -30,8 +30,6 @@ class InputTensorEnum(str, Enum):
class ModelTypeEnum(str, Enum):
ssd = "ssd"
yolox = "yolox"
-yolov5 = "yolov5"
-yolov8 = "yolov8"
class ModelConfig(BaseModel):


@@ -6,7 +6,6 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
-from frigate.detectors.util import yolov8_postprocess
try:
from tflite_runtime.interpreter import Interpreter, load_delegate
@@ -58,26 +57,9 @@ class EdgeTpuTfl(DetectionApi):
self.model_type = detector_config.model.model_type
def detect_raw(self, tensor_input):
-if self.model_type == "yolov8":
-scale, zero_point = self.tensor_input_details[0]["quantization"]
-tensor_input = (
-(tensor_input - scale * zero_point * 255) * (1.0 / (scale * 255))
-).astype(self.tensor_input_details[0]["dtype"])
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
-if self.model_type == "yolov8":
-scale, zero_point = self.tensor_output_details[0]["quantization"]
-tensor_output = self.interpreter.get_tensor(
-self.tensor_output_details[0]["index"]
-)
-tensor_output = (tensor_output.astype(np.float32) - zero_point) * scale
-model_input_shape = self.tensor_input_details[0]["shape"]
-tensor_output[:, [0, 2]] *= model_input_shape[2]
-tensor_output[:, [1, 3]] *= model_input_shape[1]
-return yolov8_postprocess(model_input_shape, tensor_output)
boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]


@@ -1,4 +1,3 @@
-import glob
import logging
import numpy as np
@@ -6,7 +5,7 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
-from frigate.detectors.util import preprocess, yolov8_postprocess
+from frigate.detectors.util import preprocess
logger = logging.getLogger(__name__)
@@ -31,24 +30,6 @@ class ONNXDetector(DetectionApi):
)
raise
-assert (
-detector_config.model.model_type == "yolov8"
-), "ONNX: detector_config.model.model_type: only yolov8 supported"
-assert (
-detector_config.model.input_tensor == "nhwc"
-), "ONNX: detector_config.model.input_tensor: only nhwc supported"
-if detector_config.model.input_pixel_format != "rgb":
-logger.warn(
-"ONNX: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
-)
-assert detector_config.model.path is not None, (
-"ONNX: No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
-+ ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
-+ " and "
-+ ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
-)
path = detector_config.model.path
logger.info(f"ONNX: loading {detector_config.model.path}")
self.model = onnxruntime.InferenceSession(path)
@@ -57,9 +38,10 @@
def detect_raw(self, tensor_input):
model_input_name = self.model.get_inputs()[0].name
model_input_shape = self.model.get_inputs()[0].shape
tensor_input = preprocess(tensor_input, model_input_shape, np.float32)
+# ruff: noqa: F841
tensor_output = self.model.run(None, {model_input_name: tensor_input})[0]
-return yolov8_postprocess(model_input_shape, tensor_output)
+raise Exception(
+"No models are currently supported via onnx. See the docs for more info."
+)


@@ -131,44 +131,3 @@ class OvDetector(DetectionApi):
object_detected[6], object_detected[5], object_detected[:4]
)
return detections
elif self.ov_model_type == ModelTypeEnum.yolov8:
out_tensor = infer_request.get_output_tensor()
results = out_tensor.data[0]
output_data = np.transpose(results)
scores = np.max(output_data[:, 4:], axis=1)
if len(scores) == 0:
return np.zeros((20, 6), np.float32)
scores = np.expand_dims(scores, axis=1)
# add scores to the last column
dets = np.concatenate((output_data, scores), axis=1)
# filter out lines with scores below threshold
dets = dets[dets[:, -1] > 0.5, :]
# limit to top 20 scores, descending order
ordered = dets[dets[:, -1].argsort()[::-1]][:20]
detections = np.zeros((20, 6), np.float32)
for i, object_detected in enumerate(ordered):
detections[i] = self.process_yolo(
np.argmax(object_detected[4:-1]),
object_detected[-1],
object_detected[:4],
)
return detections
elif self.ov_model_type == ModelTypeEnum.yolov5:
out_tensor = infer_request.get_output_tensor()
output_data = out_tensor.data[0]
# filter out lines with scores below threshold
conf_mask = (output_data[:, 4] >= 0.5).squeeze()
output_data = output_data[conf_mask]
# limit to top 20 scores, descending order
ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]
detections = np.zeros((20, 6), np.float32)
for i, object_detected in enumerate(ordered):
detections[i] = self.process_yolo(
np.argmax(object_detected[5:]),
object_detected[4],
object_detected[:4],
)
return detections


@@ -1,10 +1,7 @@
import logging
import os.path
-import urllib.request
from typing import Literal
-import numpy as np
try:
from hide_warnings import hide_warnings
except: # noqa: E722
@@ -24,14 +21,6 @@ DETECTOR_KEY = "rknn"
supported_socs = ["rk3562", "rk3566", "rk3568", "rk3588"]
-yolov8_suffix = {
-"default-yolov8n": "n",
-"default-yolov8s": "s",
-"default-yolov8m": "m",
-"default-yolov8l": "l",
-"default-yolov8x": "x",
-}
class RknnDetectorConfig(BaseDetectorConfig):
type: Literal[DETECTOR_KEY]
@@ -68,35 +57,12 @@ class Rknn(DetectionApi):
elif "rk3588" in soc:
os.rename("/usr/lib/librknnrt_rk3588.so", "/usr/lib/librknnrt.so")
-self.model_path = config.model.path or "default-yolov8n"
self.core_mask = config.core_mask
self.height = config.model.height
self.width = config.model.width
-if self.model_path in yolov8_suffix:
-if self.model_path == "default-yolov8n":
-self.model_path = "/models/rknn/yolov8n-320x320-{soc}.rknn".format(
-soc=soc
-)
-else:
-model_suffix = yolov8_suffix[self.model_path]
-self.model_path = (
-"/config/model_cache/rknn/yolov8{suffix}-320x320-{soc}.rknn".format(
-suffix=model_suffix, soc=soc
-)
-)
-os.makedirs("/config/model_cache/rknn", exist_ok=True)
-if not os.path.isfile(self.model_path):
-logger.info(
-"Downloading yolov8{suffix} model.".format(suffix=model_suffix)
-)
-urllib.request.urlretrieve(
-"https://github.com/MarcA711/rknn-models/releases/download/v1.5.2-{soc}/yolov8{suffix}-320x320-{soc}.rknn".format(
-soc=soc, suffix=model_suffix
-),
-self.model_path,
-)
+if True:
+os.makedirs("/config/model_cache/rknn", exist_ok=True)
if (config.model.width != 320) or (config.model.height != 320):
logger.error(
@@ -132,60 +98,12 @@
"Error initializing rknn runtime. Do you run docker in privileged mode?"
)
-def __del__(self):
-self.rknn.release()
-def postprocess(self, results):
-"""
-Processes yolov8 output.
-Args:
-results: array with shape: (1, 84, n, 1) where n depends on yolov8 model size (for 320x320 model n=2100)
-Returns:
-detections: array with shape (20, 6) with 20 rows of (class, confidence, y_min, x_min, y_max, x_max)
-"""
-results = np.transpose(results[0, :, :, 0]) # array shape (2100, 84)
-scores = np.max(
-results[:, 4:], axis=1
-) # array shape (2100,); max confidence of each row
-# remove lines with score scores < 0.4
-filtered_arg = np.argwhere(scores > 0.4)
-results = results[filtered_arg[:, 0]]
-scores = scores[filtered_arg[:, 0]]
-num_detections = len(scores)
-if num_detections == 0:
-return np.zeros((20, 6), np.float32)
-if num_detections > 20:
-top_arg = np.argpartition(scores, -20)[-20:]
-results = results[top_arg]
-scores = scores[top_arg]
-num_detections = 20
-classes = np.argmax(results[:, 4:], axis=1)
-boxes = np.transpose(
-np.vstack(
-(
-(results[:, 1] - 0.5 * results[:, 3]) / self.height,
-(results[:, 0] - 0.5 * results[:, 2]) / self.width,
-(results[:, 1] + 0.5 * results[:, 3]) / self.height,
-(results[:, 0] + 0.5 * results[:, 2]) / self.width,
-)
-)
-)
-detections = np.zeros((20, 6), np.float32)
-detections[:num_detections, 0] = classes
-detections[:num_detections, 1] = scores
-detections[:num_detections, 2:] = boxes
-return detections
+raise Exception(
+"RKNN does not currently support any models. Please see the docs for more info."
+)
+def __del__(self):
+self.rknn.release()
@hide_warnings
def inference(self, tensor_input):


@@ -1,5 +1,4 @@
import ctypes
-import glob
import logging
import os
import subprocess
@@ -11,7 +10,7 @@ from typing_extensions import Literal
from frigate.detectors.detection_api import DetectionApi
from frigate.detectors.detector_config import BaseDetectorConfig
-from frigate.detectors.util import preprocess, yolov8_postprocess
+from frigate.detectors.util import preprocess
logger = logging.getLogger(__name__)
@@ -75,27 +74,6 @@ class ROCmDetector(DetectionApi):
logger.error("AMD/ROCm: module loading failed, missing ROCm environment?")
raise
-if detector_config.conserve_cpu:
-logger.info("AMD/ROCm: switching HIP to blocking mode to conserve CPU")
-ctypes.CDLL("/opt/rocm/lib/libamdhip64.so").hipSetDeviceFlags(4)
-assert (
-detector_config.model.model_type == "yolov8"
-), "AMD/ROCm: detector_config.model.model_type: only yolov8 supported"
-assert (
-detector_config.model.input_tensor == "nhwc"
-), "AMD/ROCm: detector_config.model.input_tensor: only nhwc supported"
-if detector_config.model.input_pixel_format != "rgb":
-logger.warn(
-"AMD/ROCm: detector_config.model.input_pixel_format: should be 'rgb' for yolov8, but '{detector_config.model.input_pixel_format}' specified!"
-)
-assert detector_config.model.path is not None, (
-"No model.path configured, please configure model.path and model.labelmap_path; some suggestions: "
-+ ", ".join(glob.glob("/config/model_cache/yolov8/*.onnx"))
-+ " and "
-+ ", ".join(glob.glob("/config/model_cache/yolov8/*_labels.txt"))
-)
path = detector_config.model.path
mxr_path = os.path.splitext(path)[0] + ".mxr"
if path.endswith(".mxr"):
@@ -136,8 +114,11 @@
detector_result = self.model.run({model_input_name: tensor_input})[0]
addr = ctypes.cast(detector_result.data_ptr(), ctypes.POINTER(ctypes.c_float))
+# ruff: noqa: F841
tensor_output = np.ctypeslib.as_array(
addr, shape=detector_result.get_shape().lens()
)
-return yolov8_postprocess(model_input_shape, tensor_output)
+raise Exception(
+"No models are currently supported for rocm. See the docs for more info."
+)


@@ -34,50 +34,3 @@ def preprocess(tensor_input, model_input_shape, model_input_element_type):
None,
swapRB=False,
)
def yolov8_postprocess(
model_input_shape,
tensor_output,
box_count=20,
score_threshold=0.5,
nms_threshold=0.5,
):
model_box_count = tensor_output.shape[2]
probs = tensor_output[0, 4:, :]
all_ids = np.argmax(probs, axis=0)
all_confidences = probs.T[np.arange(model_box_count), all_ids]
all_boxes = tensor_output[0, 0:4, :].T
mask = all_confidences > score_threshold
class_ids = all_ids[mask]
confidences = all_confidences[mask]
cx, cy, w, h = all_boxes[mask].T
if model_input_shape[3] == 3:
scale_y, scale_x = 1 / model_input_shape[1], 1 / model_input_shape[2]
else:
scale_y, scale_x = 1 / model_input_shape[2], 1 / model_input_shape[3]
detections = np.stack(
(
class_ids,
confidences,
scale_y * (cy - h / 2),
scale_x * (cx - w / 2),
scale_y * (cy + h / 2),
scale_x * (cx + w / 2),
),
axis=1,
)
if detections.shape[0] > box_count:
# if too many detections, do nms filtering to suppress overlapping boxes
boxes = np.stack((cx - w / 2, cy - h / 2, w, h), axis=1)
indexes = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold, nms_threshold)
detections = detections[indexes]
# if still too many, trim the rest by confidence
if detections.shape[0] > box_count:
detections = detections[
np.argpartition(detections[:, 1], -box_count)[-box_count:]
]
detections = detections.copy()
detections.resize((box_count, 6))
return detections