blakeblackshear.frigate/frigate/detectors/edgetpu_tfl.py
Nate Meyer 4383b883c0
Refactor to simplify support for additional detector types (#3656)
* Refactor EdgeTPU and CPU model handling to detector submodules.

* Fix selecting the correct detection device type from the config

* Remove detector type check when creating ObjectDetectProcess

* Fixes after rebasing to 0.11

* Add init file to detector folder

* Rename to detect_api

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>

* Add unit test for LocalObjectDetector class

* Add configuration for model inputs
Support transforming detection regions to RGB or BGR.
Support specifying the input tensor shape. The tensor arrives at the detector in the standard "BHWC" layout, but the detector can transpose it to match the model's expected shape via the model input_tensor config (see the sketch after the commit notes below).

* Add documentation for new model config parameters

* Add input tensor transpose to LocalObjectDetector

* Change the model input tensor config to use an enumeration

* Updates for model config documentation

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
2022-11-03 21:23:09 -05:00
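The input_tensor option described in the commit notes amounts to a layout transpose applied to each detection region before it is handed to the model. Below is a minimal, hypothetical sketch of that kind of transform; the layout strings and the transform_input function are illustrative only and are not Frigate's actual enumeration or API.

import numpy as np

def transform_input(tensor_input: np.ndarray, layout: str) -> np.ndarray:
    """Illustrative only: convert a region from the standard BHWC layout to the model's layout."""
    if layout == "nchw":
        # BHWC -> BCHW: move the channel axis in front of height/width
        return np.transpose(tensor_input, (0, 3, 1, 2))
    # "nhwc" (the default BHWC layout) needs no change
    return tensor_input

# Example: one 320x320 RGB region, as the frame pipeline would produce it
region = np.zeros((1, 320, 320, 3), dtype=np.uint8)
assert transform_input(region, "nchw").shape == (1, 3, 320, 320)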


import logging
import numpy as np

from frigate.detectors.detection_api import DetectionApi
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate

logger = logging.getLogger(__name__)


class EdgeTpuTfl(DetectionApi):
    def __init__(self, det_device=None, model_config=None):
        # Default to a USB Coral unless a specific device is configured.
        device_config = {"device": "usb"}
        if det_device is not None:
            device_config = {"device": det_device}

        edge_tpu_delegate = None

        try:
            logger.info(f"Attempting to load TPU as {device_config['device']}")
            edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
            logger.info("TPU found")
            self.interpreter = tflite.Interpreter(
                model_path=model_config.path or "/edgetpu_model.tflite",
                experimental_delegates=[edge_tpu_delegate],
            )
        except ValueError:
            logger.error(
                "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
            )
            raise

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()

        # SSD-style models output four tensors: boxes, class ids, scores, and count.
        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
        count = int(
            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
        )

        # Return at most 20 detections as [class_id, score, y_min, x_min, y_max, x_max] rows.
        detections = np.zeros((20, 6), np.float32)

        for i in range(count):
            # Stop at the first low-confidence result or once the output array is full.
            if scores[i] < 0.4 or i == 20:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections
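
For context, a minimal usage sketch of this class follows. It assumes a Coral accelerator is attached and an EdgeTPU-compiled SSD model exists at the default path; the model_config stand-in below is hypothetical, not Frigate's real configuration object.

from types import SimpleNamespace
import numpy as np

# Hypothetical stand-in for Frigate's model config; only .path is used by EdgeTpuTfl.
model_config = SimpleNamespace(path="/edgetpu_model.tflite")

detector = EdgeTpuTfl(det_device="usb", model_config=model_config)

# detect_raw expects a uint8 BHWC tensor matching the model's input shape, e.g. 1x320x320x3.
frame = np.zeros((1, 320, 320, 3), dtype=np.uint8)
detections = detector.detect_raw(frame)
# Each of the 20 rows is [class_id, score, y_min, x_min, y_max, x_max].
print(detections.shape)  # (20, 6)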