mirror of
https://github.com/blakeblackshear/frigate.git
synced 2025-07-30 13:48:07 +02:00
Add Mesa Teflon as a TFLite detector (#18310)
* Refactor common functions for tflite detector implementations
* Add detector using mesa teflon delegate. Non-EdgeTPU TFLite can use the standard .tflite format
* Add mesa-teflon-delegate from bookworm-backports to arm64 images
This commit is contained in:
parent
8409100623
commit
ab7b12da54
@ -31,6 +31,14 @@ unset DEBIAN_FRONTEND
|
||||
yes | dpkg -i /tmp/libedgetpu1-max.deb && export DEBIAN_FRONTEND=noninteractive
|
||||
rm /tmp/libedgetpu1-max.deb
|
||||
|
||||
# install mesa-teflon-delegate from bookworm-backports
|
||||
# Only available for arm64 at the moment
|
||||
if [[ "${TARGETARCH}" == "arm64" ]]; then
|
||||
echo "deb http://deb.debian.org/debian bookworm-backports main" | tee /etc/apt/sources.list.d/bookworm-backports.list
|
||||
apt-get -qq update
|
||||
apt-get -qq install --no-install-recommends --no-install-suggests -y mesa-teflon-delegate/bookworm-backports
|
||||
fi
|
||||
|
||||
# ffmpeg -> amd64
|
||||
if [[ "${TARGETARCH}" == "amd64" ]]; then
|
||||
mkdir -p /usr/lib/ffmpeg/5.0
|
||||
|
@ -487,7 +487,9 @@ class FrigateConfig(FrigateBaseModel):
|
||||
model_config["path"] = detector_config.model_path
|
||||
|
||||
if "path" not in model_config:
|
||||
if detector_config.type == "cpu":
|
||||
if detector_config.type == "cpu" or detector_config.type.endswith(
|
||||
"_tfl"
|
||||
):
|
||||
model_config["path"] = "/cpu_model.tflite"
|
||||
elif detector_config.type == "edgetpu":
|
||||
model_config["path"] = "/edgetpu_model.tflite"
|
||||
|
74
frigate/detectors/detector_utils.py
Normal file
74
frigate/detectors/detector_utils.py
Normal file
@ -0,0 +1,74 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
|
||||
try:
|
||||
from tflite_runtime.interpreter import Interpreter, load_delegate
|
||||
except ModuleNotFoundError:
|
||||
from tensorflow.lite.python.interpreter import Interpreter, load_delegate
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def tflite_init(self, interpreter):
    """Attach a TFLite interpreter to a detector instance and prime it.

    Allocates the interpreter's tensors and caches the input/output tensor
    details on the instance so tflite_detect_raw can index them directly.

    Args:
        self: the detector instance (any object; attributes are set on it).
        interpreter: a constructed tflite Interpreter (CPU or delegate-backed).
    """
    # Tensors must be allocated before details are useful for inference.
    interpreter.allocate_tensors()

    self.interpreter = interpreter
    self.tensor_input_details = interpreter.get_input_details()
    self.tensor_output_details = interpreter.get_output_details()
|
||||
|
||||
|
||||
def tflite_detect_raw(self, tensor_input, score_threshold=0.4, max_detections=20):
    """Run one TFLite inference and decode the standard detection outputs.

    Expects the four-output layout of the TFLite detection postprocess:
    output 0 = boxes, 1 = class ids, 2 = scores, 3 = detection count
    (boxes are copied through unchanged; typically [ymin, xmin, ymax, xmax] —
    TODO confirm against the models shipped with the detectors).

    Args:
        self: detector instance previously initialized by tflite_init.
        tensor_input: input tensor matching the model's input signature.
        score_threshold: detections below this score are dropped; because the
            model emits scores in descending order, the loop stops at the
            first one below threshold. Default 0.4 (previous hard-coded value).
        max_detections: number of rows in the returned array. Default 20
            (previous hard-coded value).

    Returns:
        np.float32 array of shape (max_detections, 6); each used row is
        [class_id, score, box[0], box[1], box[2], box[3]], remaining rows
        are zeros.
    """
    self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
    self.interpreter.invoke()

    boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
    class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
    scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
    count = int(self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0])

    detections = np.zeros((max_detections, 6), np.float32)

    for i in range(count):
        # >= guards the array bound even if max_detections changes; the
        # original coupled a literal 20 here to the literal 20 above.
        if scores[i] < score_threshold or i >= max_detections:
            break
        detections[i] = [
            class_ids[i],
            float(scores[i]),
            boxes[i][0],
            boxes[i][1],
            boxes[i][2],
            boxes[i][3],
        ]

    return detections
|
||||
|
||||
|
||||
def tflite_load_delegate_interpreter(
    delegate_library: str, detector_config, device_config
):
    """Build a TFLite Interpreter backed by an external delegate (NPU).

    Args:
        delegate_library: path to the delegate shared library to load.
        detector_config: detector config providing model.path.
        device_config: options dict passed through to load_delegate.

    Returns:
        An Interpreter wired to the loaded delegate.

    Raises:
        ValueError: re-raised from delegate/interpreter creation after
        logging a diagnostic pointing at the likely cause.
    """
    try:
        logger.info("Attempting to load NPU")
        delegate = load_delegate(delegate_library, device_config)
        logger.info("NPU found")
        return Interpreter(
            model_path=detector_config.model.path,
            experimental_delegates=[delegate],
        )
    except ValueError:
        # Distinguish "wrong model format" from "no NPU present" by the
        # model file extension before re-raising.
        _, ext = os.path.splitext(detector_config.model.path)
        if ext and ext != ".tflite":
            logger.error(
                "Incorrect model used with NPU. Only .tflite models can be used with a TFLite delegate."
            )
        else:
            logger.error(
                "No NPU was detected. If you do not have a TFLite device yet, you must configure CPU detectors."
            )
        raise
|
@ -1,12 +1,13 @@
|
||||
import logging
|
||||
|
||||
import numpy as np
|
||||
from pydantic import Field
|
||||
from typing_extensions import Literal
|
||||
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig
|
||||
|
||||
from ..detector_utils import tflite_detect_raw, tflite_init
|
||||
|
||||
try:
|
||||
from tflite_runtime.interpreter import Interpreter
|
||||
except ModuleNotFoundError:
|
||||
@ -27,39 +28,12 @@ class CpuTfl(DetectionApi):
|
||||
type_key = DETECTOR_KEY
|
||||
|
||||
def __init__(self, detector_config: CpuDetectorConfig):
    """Create the CPU TFLite interpreter and register its tensors.

    num_threads falls back to 3 when not set in the detector config.
    """
    interpreter = Interpreter(
        model_path=detector_config.model.path,
        num_threads=detector_config.num_threads or 3,
    )
    # Shared helper allocates tensors and caches input/output details,
    # replacing the duplicated per-detector initialization code.
    tflite_init(self, interpreter)
|
||||
def detect_raw(self, tensor_input):
    """Run inference and return detections as a (20, 6) float32 array.

    Delegates the invoke + SSD-postprocess decode to the shared
    tflite_detect_raw helper instead of a hand-rolled copy of the loop.
    """
    return tflite_detect_raw(self, tensor_input)
|
||||
|
38
frigate/detectors/plugins/teflon_tfl.py
Normal file
38
frigate/detectors/plugins/teflon_tfl.py
Normal file
@ -0,0 +1,38 @@
|
||||
import logging
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig
|
||||
|
||||
from ..detector_utils import (
|
||||
tflite_detect_raw,
|
||||
tflite_init,
|
||||
tflite_load_delegate_interpreter,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Use _tfl suffix to default tflite model
|
||||
DETECTOR_KEY = "teflon_tfl"
|
||||
|
||||
|
||||
class TeflonDetectorConfig(BaseDetectorConfig):
    # Pydantic discriminator field: selects this plugin when config
    # sets detector type to "teflon_tfl".
    type: Literal[DETECTOR_KEY]
|
||||
|
||||
|
||||
class TeflonTfl(DetectionApi):
    """TFLite detector accelerated by the Mesa Teflon delegate.

    Wires the shared tflite helpers to a delegate-backed interpreter;
    the delegate library ships in Debian's mesa-teflon-delegate package
    (arm64 only per the image build script).
    """

    type_key = DETECTOR_KEY

    def __init__(self, detector_config: TeflonDetectorConfig):
        # Location in Debian's mesa-teflon-delegate
        delegate_library = "/usr/lib/teflon/libteflon.so"
        # No delegate-specific device options are passed.
        device_config = {}

        # Raises (after logging) if the delegate or model cannot be loaded.
        interpreter = tflite_load_delegate_interpreter(
            delegate_library, detector_config, device_config
        )
        tflite_init(self, interpreter)

    def detect_raw(self, tensor_input):
        # Shared invoke + postprocess decode from detector_utils.
        return tflite_detect_raw(self, tensor_input)
|
Loading…
Reference in New Issue
Block a user