Reimplement support for rknn detector (#11365)
* initial support for rknn detector
* remove purge_model_cache option
* update rknn
* support rk3576
* fix post_process_yolonas call
* add yolonas models
* update config
* exclude yolonas from image
* remove code
parent 910c85b1c0
commit e91f3d8d9b
.github/workflows/ci.yml (vendored, 18 lines changed)
@@ -69,15 +69,15 @@ jobs:
             rpi.tags=${{ steps.setup.outputs.image-name }}-rpi
             *.cache-from=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64
             *.cache-to=type=registry,ref=${{ steps.setup.outputs.cache-name }}-arm64,mode=max
-      #- name: Build and push RockChip build
-      #  uses: docker/bake-action@v3
-      #  with:
-      #    push: true
-      #    targets: rk
-      #    files: docker/rockchip/rk.hcl
-      #    set: |
-      #      rk.tags=${{ steps.setup.outputs.image-name }}-rk
-      #      *.cache-from=type=gha
+      - name: Build and push Rockchip build
+        uses: docker/bake-action@v3
+        with:
+          push: true
+          targets: rk
+          files: docker/rockchip/rk.hcl
+          set: |
+            rk.tags=${{ steps.setup.outputs.image-name }}-rk
+            *.cache-from=type=gha
   jetson_jp4_build:
     runs-on: ubuntu-latest
     name: Jetson Jetpack 4
@@ -18,10 +18,7 @@ RUN --mount=type=bind,from=rk-wheels,source=/rk-wheels,target=/deps/rk-wheels \
 WORKDIR /opt/frigate/
 COPY --from=rootfs / /
 
-ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk356x.so /usr/lib/
-ADD https://github.com/MarcA711/rknpu2/releases/download/v1.5.2/librknnrt_rk3588.so /usr/lib/
-
-# TODO removed models, other models support may need to be added back in
-
+ADD https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/librknnrt.so /usr/lib/
+
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffmpeg
 RUN rm -rf /usr/lib/btbn-ffmpeg/bin/ffprobe
@@ -1,2 +1 @@
-hide-warnings == 0.17
-rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v1.5.2/rknn_toolkit_lite2-1.5.2-cp39-cp39-linux_aarch64.whl
+rknn-toolkit-lite2 @ https://github.com/MarcA711/rknn-toolkit2/releases/download/v2.0.0/rknn_toolkit_lite2-2.0.0b0-cp39-cp39-linux_aarch64.whl
@@ -366,7 +366,7 @@ Hardware accelerated video de-/encoding is supported on all Rockchip SoCs using
 
 ### Prerequisites
 
-Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and supports VPU. To check, enter the following commands:
+Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and rkvdec2 driver. To check, enter the following commands:
 
 ```
 $ uname -r
@@ -302,3 +302,109 @@ Replace `<your_codeproject_ai_server_ip>` and `<port>` with the IP address and p

To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly.

# Community Supported Detectors

## Rockchip platform

Hardware accelerated object detection is supported on the following SoCs:

- RK3562
- RK3566
- RK3568
- RK3576
- RK3588

This implementation uses [Rockchip's RKNN-Toolkit2](https://github.com/airockchip/rknn-toolkit2/). Currently, only [Yolo-NAS](https://github.com/Deci-AI/super-gradients/blob/master/YOLONAS.md) is supported as an object detection model.

### Prerequisites

Make sure that you use a linux distribution that comes with the rockchip BSP kernel 5.10 or 6.1 and the rknpu driver. To check, enter the following commands:

```
$ uname -r
5.10.xxx-rockchip # or 6.1.xxx; the -rockchip suffix is important
$ ls /dev/dri
by-path card0 card1 renderD128 renderD129 # should list renderD129
$ sudo cat /sys/kernel/debug/rknpu/version
RKNPU driver: v0.9.2 # or later version
```

I recommend [Joshua Riek's Ubuntu for Rockchip](https://github.com/Joshua-Riek/ubuntu-rockchip) if your board is supported.

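The detector performs the same compatibility check at startup by reading the device tree. A minimal sketch of that check, mirroring `get_soc()` in the detector code further down in this commit:

```python
# Sketch of the SoC detection the rknn detector runs at startup; path and
# parsing mirror get_soc() in the detector plugin shown later in this diff.
def read_soc() -> str:
    # /proc/device-tree/compatible holds strings like "rockchip,rk3588\x00";
    # it is only readable in the container when docker has enough privileges
    with open("/proc/device-tree/compatible") as file:
        return file.read().split(",")[-1].strip("\x00")
```
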
### Setup

Follow Frigate's default installation instructions, but use a docker image with the `-rk` suffix, for example `ghcr.io/blakeblackshear/frigate:stable-rk`.

Next, you need to grant docker permissions to access your hardware:

- During the configuration process, you should run docker in privileged mode to avoid any errors due to insufficient permissions. To do so, add `privileged: true` to your `docker-compose.yml` file or the `--privileged` flag to your docker run command.
- After everything works, you should only grant the necessary permissions to increase security. Add the lines below to your `docker-compose.yml` file or the following options to your docker run command: `--security-opt systempaths=unconfined --security-opt apparmor=unconfined --device /dev/dri:/dev/dri`:

```yaml
security_opt:
  - apparmor=unconfined
  - systempaths=unconfined
devices:
  - /dev/dri:/dev/dri
```

### Configuration

This `config.yml` shows all relevant options to configure the detector and explains them. All values shown are the default values (except for two). Lines that are required to use the detector are labeled as required; all other lines are optional.

```yaml
detectors: # required
  rknn: # required
    type: rknn # required
    # number of NPU cores to use
    # 0 means choose automatically
    # increase for better performance if you have a multicore NPU e.g. set to 3 on rk3588
    num_cores: 0

model: # required
  # name of model (will be automatically downloaded) or path to your own .rknn model file
  # possible values are:
  # - deci-fp16-yolonas_s
  # - deci-fp16-yolonas_m
  # - deci-fp16-yolonas_l
  # - /config/model_cache/your_custom_model.rknn
  path: deci-fp16-yolonas_s
  # width and height of detection frames
  width: 320
  height: 320
  # pixel format of detection frame
  # default value is rgb but yolo models usually use bgr format
  input_pixel_format: bgr # required
  # shape of detection frame
  input_tensor: nhwc
```

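These values are enforced by the detector's `check_config`, shown later in this commit: it raises if width or height differ from 320, if `input_pixel_format` is not `bgr`, or if `input_tensor` is not `nhwc`. A minimal sketch of the frame shape this configuration implies, assuming numpy and Frigate's usual uint8 frames:

```python
import numpy as np

# A dummy detection frame matching the config above: one 320x320 BGR image in
# NHWC layout, which is the input the rknn detector receives per frame.
frame = np.zeros((1, 320, 320, 3), dtype=np.uint8)
assert frame.shape == (1, 320, 320, 3)  # (n, height, width, channels)
```
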
### Choosing a model

:::warning

yolo-nas models use weights from DeciAI. These weights are subject to their license and can't be used commercially. For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html

:::

The inference time was determined on a rk3588 with 3 NPU cores.

| Model               | Size in mb | Inference time in ms |
| ------------------- | ---------- | -------------------- |
| deci-fp16-yolonas_s | 24         | 25                   |
| deci-fp16-yolonas_m | 62         | 35                   |
| deci-fp16-yolonas_l | 81         | 45                   |

:::tip

You can get the load of your NPU with the following command:

```bash
$ cat /sys/kernel/debug/rknpu/load
>> NPU load: Core0: 0%, Core1: 0%, Core2: 0%,
```

:::

- By default, the rknn detector uses the yolonas_s model (`model: path: deci-fp16-yolonas_s`). This model comes with the image, so no further steps than those mentioned above are necessary and no download happens.
- The other choices are automatically downloaded and stored in the folder `config/model_cache/rknn_cache`. After upgrading Frigate, you should remove older models to free up space.
- Finally, you can also provide your own `.rknn` model. You should not save your own models in the `rknn_cache` folder; store them directly in the `model_cache` folder or another subfolder. To convert a model to `.rknn` format, see `rknn-toolkit2` (requires an x86 machine); a conversion sketch follows below. Note that there is only post-processing for the supported models.

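A hedged sketch of such a conversion with `rknn-toolkit2`, run on an x86 machine; the ONNX file name and target platform here are illustrative assumptions, not values from this commit:

```python
# Convert an ONNX model to .rknn with rknn-toolkit2 (pip install rknn-toolkit2).
# File names and the target platform below are example assumptions.
from rknn.api import RKNN

rknn = RKNN()
rknn.config(target_platform="rk3588")  # the SoC the model will run on
if rknn.load_onnx(model="yolonas_s.onnx") != 0:
    raise RuntimeError("loading the ONNX model failed")
if rknn.build(do_quantization=False) != 0:  # keep fp16, no i8 quantization
    raise RuntimeError("building the RKNN model failed")
if rknn.export_rknn("your_custom_model.rknn") != 0:
    raise RuntimeError("exporting the .rknn file failed")
rknn.release()
```

The exported file can then be stored under `/config/model_cache/` and referenced via `model: path:` as shown in the configuration above.
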
@@ -72,7 +72,7 @@ class DetectionApi(ABC):
 
     def post_process(self, output):
         if self.detector_config.model.model_type == ModelTypeEnum.yolonas:
-            return self.yolonas(output)
+            return self.post_process_yolonas(output)
         else:
             raise ValueError(
                 f'Model type "{self.detector_config.model.model_type}" is currently not supported.'
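The rename matters because `post_process` is the dispatch point for all detectors: it selects the post-processing routine by model type. Before this fix it called `self.yolonas(...)`, which, assuming no such attribute exists on the class, raises AttributeError on every yolonas detection. A minimal sketch of the bug, with a hypothetical stand-in class:

```python
# Hypothetical reproduction of the dispatch bug fixed above.
class Dispatch:
    def post_process_yolonas(self, output):
        return output  # placeholder for the real yolonas post-processing

    def post_process(self, output):
        # pre-fix call: AttributeError, 'Dispatch' has no attribute 'yolonas'
        return self.yolonas(output)
```
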
@@ -1,118 +1,157 @@
 import logging
 import os.path
+import re
+import urllib.request
 from typing import Literal
 
-try:
-    from hide_warnings import hide_warnings
-except:  # noqa: E722
-
-    def hide_warnings(func):
-        pass
-
-
 from pydantic import Field
 
 from frigate.detectors.detection_api import DetectionApi
-from frigate.detectors.detector_config import BaseDetectorConfig
+from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
 
 logger = logging.getLogger(__name__)
 
 DETECTOR_KEY = "rknn"
 
-supported_socs = ["rk3562", "rk3566", "rk3568", "rk3588"]
+supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
+
+supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}
+
+model_chache_dir = "/config/model_cache/rknn_cache/"
 
 
 class RknnDetectorConfig(BaseDetectorConfig):
     type: Literal[DETECTOR_KEY]
-    core_mask: int = Field(default=0, ge=0, le=7, title="Core mask for NPU.")
+    num_cores: int = Field(default=0, ge=0, le=3, title="Number of NPU cores to use.")
+    purge_model_cache: bool = Field(default=True)
 
 
 class Rknn(DetectionApi):
     type_key = DETECTOR_KEY
 
     def __init__(self, config: RknnDetectorConfig):
-        # find out SoC
-        try:
-            with open("/proc/device-tree/compatible") as file:
-                soc = file.read().split(",")[-1].strip("\x00")
-        except FileNotFoundError:
-            logger.error("Make sure to run docker in privileged mode.")
-            raise Exception("Make sure to run docker in privileged mode.")
-
-        if soc not in supported_socs:
-            logger.error(
-                "Your SoC is not supported. Your SoC is: {}. Currently these SoCs are supported: {}.".format(
-                    soc, supported_socs
-                )
-            )
-            raise Exception(
-                "Your SoC is not supported. Your SoC is: {}. Currently these SoCs are supported: {}.".format(
-                    soc, supported_socs
-                )
-            )
-
-        if not os.path.isfile("/usr/lib/librknnrt.so"):
-            if "rk356" in soc:
-                os.rename("/usr/lib/librknnrt_rk356x.so", "/usr/lib/librknnrt.so")
-            elif "rk3588" in soc:
-                os.rename("/usr/lib/librknnrt_rk3588.so", "/usr/lib/librknnrt.so")
-
-        self.core_mask = config.core_mask
+        super().__init__(config)
         self.height = config.model.height
         self.width = config.model.width
+        core_mask = 2**config.num_cores - 1
+        soc = self.get_soc()
 
-        if True:
-            os.makedirs("/config/model_cache/rknn", exist_ok=True)
+        model_props = self.parse_model_input(config.model.path, soc)
 
-        if (config.model.width != 320) or (config.model.height != 320):
-            logger.error(
-                "Make sure to set the model width and height to 320 in your config.yml."
-            )
-            raise Exception(
-                "Make sure to set the model width and height to 320 in your config.yml."
-            )
+        if model_props["preset"]:
+            config.model.model_type = model_props["model_type"]
 
-        if config.model.input_pixel_format != "bgr":
-            logger.error(
-                'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
-            )
-            raise Exception(
-                'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
-            )
-
-        if config.model.input_tensor != "nhwc":
-            logger.error(
-                'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
-            )
-            raise Exception(
-                'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
-            )
+        if model_props["model_type"] == ModelTypeEnum.yolonas:
+            logger.info("""
+                You are using yolo-nas with weights from DeciAI.
+                These weights are subject to their license and can't be used commercially.
+                For more information, see: https://docs.deci.ai/super-gradients/latest/LICENSE.YOLONAS.html
+                """)
 
         from rknnlite.api import RKNNLite
 
         self.rknn = RKNNLite(verbose=False)
-        if self.rknn.load_rknn(self.model_path) != 0:
+        if self.rknn.load_rknn(model_props["path"]) != 0:
             logger.error("Error initializing rknn model.")
-        if self.rknn.init_runtime(core_mask=self.core_mask) != 0:
+        if self.rknn.init_runtime(core_mask=core_mask) != 0:
             logger.error(
                 "Error initializing rknn runtime. Do you run docker in privileged mode?"
             )
 
-        raise Exception(
-            "RKNN does not currently support any models. Please see the docs for more info."
-        )
-
     def __del__(self):
         self.rknn.release()
 
-    @hide_warnings
-    def inference(self, tensor_input):
-        return self.rknn.inference(inputs=tensor_input)
+    def get_soc(self):
+        try:
+            with open("/proc/device-tree/compatible") as file:
+                soc = file.read().split(",")[-1].strip("\x00")
+        except FileNotFoundError:
+            raise Exception("Make sure to run docker in privileged mode.")
+
+        if soc not in supported_socs:
+            raise Exception(
+                f"Your SoC is not supported. Your SoC is: {soc}. Currently these SoCs are supported: {supported_socs}."
+            )
+
+        return soc
+
+    def parse_model_input(self, model_path, soc):
+        model_props = {}
+
+        # find out if user provides his own model
+        # user provided models should be a path and contain a "/"
+        if "/" in model_path:
+            model_props["preset"] = False
+            model_props["path"] = model_path
+        else:
+            model_props["preset"] = True
+
+            """
+            Filenames follow this pattern:
+            origin-quant-basename-soc-tk_version-rev.rknn
+            origin: From where comes the model? default: upstream repo; rknn: modifications from airockchip
+            quant: i8 or fp16
+            basename: e.g. yolonas_s
+            soc: e.g. rk3588
+            tk_version: e.g. v2.0.0
+            rev: e.g. 1
+
+            Full name could be: default-fp16-yolonas_s-rk3588-v2.0.0-1.rknn
+            """
+
+            model_matched = False
+
+            for model_type, pattern in supported_models.items():
+                if re.match(pattern, model_path):
+                    model_matched = True
+                    model_props["model_type"] = model_type
+
+            if model_matched:
+                model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
+                model_props["path"] = model_chache_dir + model_props["filename"]
+
+                if not os.path.isfile(model_props["path"]):
+                    self.download_model(model_props["filename"])
+            else:
+                supported_models_str = ", ".join(
+                    model[1:-1] for model in supported_models.values()
+                )
+                raise Exception(
+                    f"Model {model_path} is unsupported. Provide your own model or choose one of the following: {supported_models_str}"
+                )
+
+        return model_props
+
+    def download_model(self, filename):
+        if not os.path.isdir(model_chache_dir):
+            os.mkdir(model_chache_dir)
+
+        urllib.request.urlretrieve(
+            f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
+            model_chache_dir + filename,
+        )
+
+    def check_config(self, config):
+        if (config.model.width != 320) or (config.model.height != 320):
+            raise Exception(
+                "Make sure to set the model width and height to 320 in your config.yml."
+            )
+
+        if config.model.input_pixel_format != "bgr":
+            raise Exception(
+                'Make sure to set the model input_pixel_format to "bgr" in your config.yml.'
+            )
+
+        if config.model.input_tensor != "nhwc":
+            raise Exception(
+                'Make sure to set the model input_tensor to "nhwc" in your config.yml.'
+            )
 
     def detect_raw(self, tensor_input):
-        output = self.inference(
+        output = self.rknn.inference(
             [
                 tensor_input,
             ]
         )
-        return self.postprocess(output[0])
+        return self.post_process(output)
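Two details in the new detector are worth spelling out. First, `num_cores` replaces the old raw `core_mask` option: the detector derives the bitmask itself via `core_mask = 2**config.num_cores - 1`, selecting the lowest n cores. A quick sketch of that mapping:

```python
# Sketch of the num_cores -> core_mask mapping used in Rknn.__init__ above
# (assumption: RKNNLite treats core_mask as one bit per NPU core).
def to_core_mask(num_cores: int) -> int:
    return 2**num_cores - 1

assert to_core_mask(0) == 0b000  # 0: let the runtime choose automatically
assert to_core_mask(1) == 0b001  # core 0 only
assert to_core_mask(3) == 0b111  # cores 0-2, e.g. all three cores of a rk3588
```

Second, preset resolution: per `parse_model_input` and `download_model` above, a preset name such as `deci-fp16-yolonas_s` on a rk3588 resolves to the cached file `/config/model_cache/rknn_cache/deci-fp16-yolonas_s-rk3588-v2.0.0-1.rknn`, downloaded on first use from the MarcA711/rknn-models release page, while any path containing `/` is treated as a user-supplied model.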