diff --git a/docker/tensorrt/Dockerfile.arm64 b/docker/tensorrt/Dockerfile.arm64
index 70184bf9b..286a0af55 100644
--- a/docker/tensorrt/Dockerfile.arm64
+++ b/docker/tensorrt/Dockerfile.arm64
@@ -10,8 +10,8 @@ ARG DEBIAN_FRONTEND
 # Use a separate container to build wheels to prevent build dependencies in final image
 RUN apt-get -qq update \
     && apt-get -qq install -y --no-install-recommends \
-    python3.9 python3.9-dev \
-    wget build-essential cmake git \
+    python3.9 python3.9-dev \
+    wget build-essential cmake git \
     && rm -rf /var/lib/apt/lists/*
 
 # Ensure python3 defaults to python3.9
@@ -41,7 +41,8 @@ RUN --mount=type=bind,source=docker/tensorrt/detector/build_python_tensorrt.sh,t
     && TENSORRT_VER=$(cat /etc/TENSORRT_VER) /deps/build_python_tensorrt.sh
 
 COPY docker/tensorrt/requirements-arm64.txt /requirements-tensorrt.txt
-RUN pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
+RUN pip3 uninstall -y onnxruntime \
+    && pip3 wheel --wheel-dir=/trt-wheels -r /requirements-tensorrt.txt
 
 FROM build-wheels AS trt-model-wheels
 ARG DEBIAN_FRONTEND
diff --git a/docker/tensorrt/requirements-arm64.txt b/docker/tensorrt/requirements-arm64.txt
index 9b12dac33..93f100dd1 100644
--- a/docker/tensorrt/requirements-arm64.txt
+++ b/docker/tensorrt/requirements-arm64.txt
@@ -1 +1,2 @@
 cuda-python == 11.7; platform_machine == 'aarch64'
+onnxruntime @ https://nvidia.box.com/shared/static/9aemm4grzbbkfaesg5l7fplgjtmswhj8.whl; platform_machine == 'aarch64'
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md
index d4cee196d..5896260f4 100644
--- a/docs/docs/configuration/object_detectors.md
+++ b/docs/docs/configuration/object_detectors.md
@@ -22,14 +22,14 @@ Frigate supports multiple different detectors that work on different types of ha
 - [ONNX](#onnx): OpenVINO will automatically be detected and used as a detector in the default Frigate image when a supported ONNX model is configured.
 
 **Nvidia**
-- [TensortRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs, using one of many default models.
-- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` Frigate image when a supported ONNX model is configured.
+- [TensorRT](#nvidia-tensorrt-detector): TensorRT can run on Nvidia GPUs and Jetson devices, using one of many default models.
+- [ONNX](#onnx): TensorRT will automatically be detected and used as a detector in the `-tensorrt` or `-tensorrt-jp(4/5)` Frigate images when a supported ONNX model is configured.
 
 **Rockchip**
 - [RKNN](#rockchip-platform): RKNN models can run on Rockchip devices with included NPUs.
 
 **For Testing**
-- [CPU Detector (not recommended for actual use](#cpu-detector-not-recommended): Use a CPU to run tflite model, this is not recommended and in most cases OpenVINO can be used in CPU mode with better results.
+- [CPU Detector (not recommended for actual use)](#cpu-detector-not-recommended): Use a CPU to run a TFLite model; this is not recommended, and in most cases OpenVINO can be used in CPU mode with better results.
 
 :::
 
diff --git a/docs/docs/configuration/semantic_search.md b/docs/docs/configuration/semantic_search.md
index 2819f2a4c..8abd761a8 100644
--- a/docs/docs/configuration/semantic_search.md
+++ b/docs/docs/configuration/semantic_search.md
@@ -68,6 +68,7 @@ If the correct build is used for your GPU and the `large` model is configured, t
 
 **Nvidia**
 - Nvidia GPUs will automatically be detected and used as a detector in the `-tensorrt` Frigate image.
+- Jetson devices will automatically be detected and used as a detector in the `-tensorrt-jp(4/5)` Frigate image.
 
 :::
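
The requirements change above pins a Jetson-specific `onnxruntime` wheel hosted by NVIDIA, and the Dockerfile uninstalls any previously installed `onnxruntime` before building wheels so that this build is the one that lands in the final image. A minimal sanity check along the following lines (an illustrative sketch, not part of this diff; it only relies on the standard `onnxruntime` API and provider names) can confirm inside the `-tensorrt-jp(4/5)` image that the installed wheel exposes a GPU-capable execution provider:

```python
# Illustrative check (not part of the PR): verify that the installed onnxruntime
# wheel was built with GPU support before pointing Frigate's ONNX detector at it.
import onnxruntime as ort

providers = ort.get_available_providers()
print("Available execution providers:", providers)

# TensorrtExecutionProvider / CUDAExecutionProvider indicate a GPU-enabled build;
# a CPU-only wheel would list only CPUExecutionProvider.
if not any(p in providers for p in ("TensorrtExecutionProvider", "CUDAExecutionProvider")):
    raise SystemExit("onnxruntime appears to be a CPU-only build")
```

If the check passes, a supported ONNX model configured in Frigate should be picked up and run through TensorRT automatically, as described in the documentation changes above.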