blakeblackshear.frigate/frigate/object_detection.py
Nate Meyer e5fe323aca
Add OpenVino Detector (#3768)
* Initial work for adding OpenVino detector. Not functional.

* Load model and submit for inference.

Successfully load the model and initialize the OpenVino engine with either CPU or GPU as the device.
Does not parse results for objects.

* Detection working with ssdlite_mobilenetv2 FP16 model

* Add OpenVino support and model to docker image

* Add documentation for OpenVino detector configuration

* Adds support for ARM32/ARM64 and the Myriad X hardware

-  Use custom-built openvino wheel for all platforms
-  Add libusb build without udev for NCS2 support

* Add documentation around Intel CPU requirements and NCS2 setup

* Print all available output tensors

* Update documentation for config parameters
2022-12-03 10:19:34 -06:00
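
The configuration work described in this commit maps onto the detector selection logic in the file below. As a rough illustration only, here is a minimal sketch of driving that selection directly from Python; the ModelConfig construction and labelmap path are assumptions (Frigate normally builds these from its YAML config, and a real config also needs an OpenVino model path for the detector to actually load):

# Hypothetical sketch: pick the OpenVino backend added by this commit.
# The model settings and labelmap path below are illustrative assumptions.
import numpy as np

from frigate.config import DetectorTypeEnum, ModelConfig
from frigate.object_detection import LocalObjectDetector

model_config = ModelConfig(width=300, height=300)  # simplified; a real config also needs a model path

detector = LocalObjectDetector(
    det_type=DetectorTypeEnum.openvino,
    det_device="CPU",  # "GPU" or "MYRIAD" for the hardware mentioned in this commit
    model_config=model_config,
    labels="/path/to/labelmap.txt",  # hypothetical labelmap path
)

# A blank NHWC uint8 frame, just to exercise the call path.
frame = np.zeros((1, model_config.height, model_config.width, 3), dtype=np.uint8)
print(detector.detect(frame, threshold=0.5))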


import datetime
import logging
import multiprocessing as mp
import os
import queue
import signal
import threading
from abc import ABC, abstractmethod

import numpy as np
from setproctitle import setproctitle

from frigate.config import DetectorTypeEnum, InputTensorEnum
from frigate.detectors.edgetpu_tfl import EdgeTpuTfl
from frigate.detectors.openvino import OvDetector
from frigate.detectors.cpu_tfl import CpuTfl
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels

logger = logging.getLogger(__name__)

class ObjectDetector(ABC):
    @abstractmethod
    def detect(self, tensor_input, threshold=0.4):
        pass


def tensor_transform(desired_shape):
    # Currently this function only supports BHWC permutations
    if desired_shape == InputTensorEnum.nhwc:
        return None
    elif desired_shape == InputTensorEnum.nchw:
        return (0, 3, 1, 2)

class LocalObjectDetector(ObjectDetector):
    def __init__(
        self,
        det_type=DetectorTypeEnum.cpu,
        det_device=None,
        model_config=None,
        num_threads=3,
        labels=None,
    ):
        self.fps = EventsPerSecond()
        if labels is None:
            self.labels = {}
        else:
            self.labels = load_labels(labels)

        if model_config:
            self.input_transform = tensor_transform(model_config.input_tensor)
        else:
            self.input_transform = None

        if det_type == DetectorTypeEnum.edgetpu:
            self.detect_api = EdgeTpuTfl(
                det_device=det_device, model_config=model_config
            )
        elif det_type == DetectorTypeEnum.openvino:
            self.detect_api = OvDetector(
                det_device=det_device, model_config=model_config
            )
        else:
            logger.warning(
                "CPU detectors are not recommended and should only be used for testing or for trial purposes."
            )
            self.detect_api = CpuTfl(model_config=model_config, num_threads=num_threads)

    def detect(self, tensor_input, threshold=0.4):
        detections = []

        raw_detections = self.detect_raw(tensor_input)

        for d in raw_detections:
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        self.fps.update()
        return detections

    def detect_raw(self, tensor_input):
        if self.input_transform:
            tensor_input = np.transpose(tensor_input, self.input_transform)
        return self.detect_api.detect_raw(tensor_input=tensor_input)

def run_detector(
    name: str,
    detection_queue: mp.Queue,
    out_events: dict[str, mp.Event],
    avg_speed,
    start,
    model_config,
    det_type,
    det_device,
    num_threads,
):
    threading.current_thread().name = f"detector:{name}"
    logger = logging.getLogger(f"detector.{name}")
    logger.info(f"Starting detection process: {os.getpid()}")
    setproctitle(f"frigate.detector.{name}")
    listen()

    stop_event = mp.Event()

    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    frame_manager = SharedMemoryFrameManager()
    object_detector = LocalObjectDetector(
        det_type=det_type,
        det_device=det_device,
        model_config=model_config,
        num_threads=num_threads,
    )

    outputs = {}
    for name in out_events.keys():
        out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
        out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
        outputs[name] = {"shm": out_shm, "np": out_np}

    while not stop_event.is_set():
        try:
            connection_id = detection_queue.get(timeout=5)
        except queue.Empty:
            continue
        input_frame = frame_manager.get(
            connection_id, (1, model_config.height, model_config.width, 3)
        )

        if input_frame is None:
            continue

        # detect and send the output
        start.value = datetime.datetime.now().timestamp()
        detections = object_detector.detect_raw(input_frame)
        duration = datetime.datetime.now().timestamp() - start.value
        outputs[connection_id]["np"][:] = detections[:]
        out_events[connection_id].set()
        start.value = 0.0

        avg_speed.value = (avg_speed.value * 9 + duration) / 10

class ObjectDetectProcess:
    def __init__(
        self,
        name,
        detection_queue,
        out_events,
        model_config,
        det_type=None,
        det_device=None,
        num_threads=3,
    ):
        self.name = name
        self.out_events = out_events
        self.detection_queue = detection_queue
        self.avg_inference_speed = mp.Value("d", 0.01)
        self.detection_start = mp.Value("d", 0.0)
        self.detect_process = None
        self.model_config = model_config
        self.det_type = det_type
        self.det_device = det_device
        self.num_threads = num_threads
        self.start_or_restart()

    def stop(self):
        self.detect_process.terminate()
        logging.info("Waiting for detection process to exit gracefully...")
        self.detect_process.join(timeout=30)
        if self.detect_process.exitcode is None:
            logging.info("Detection process didn't exit. Force killing...")
            self.detect_process.kill()
            self.detect_process.join()

    def start_or_restart(self):
        self.detection_start.value = 0.0
        if self.detect_process is not None and self.detect_process.is_alive():
            self.stop()
        self.detect_process = mp.Process(
            target=run_detector,
            name=f"detector:{self.name}",
            args=(
                self.name,
                self.detection_queue,
                self.out_events,
                self.avg_inference_speed,
                self.detection_start,
                self.model_config,
                self.det_type,
                self.det_device,
                self.num_threads,
            ),
        )
        self.detect_process.daemon = True
        self.detect_process.start()

class RemoteObjectDetector:
    def __init__(self, name, labels, detection_queue, event, model_config):
        self.labels = labels
        self.name = name
        self.fps = EventsPerSecond()
        self.detection_queue = detection_queue
        self.event = event
        self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
        self.np_shm = np.ndarray(
            (1, model_config.height, model_config.width, 3),
            dtype=np.uint8,
            buffer=self.shm.buf,
        )
        self.out_shm = mp.shared_memory.SharedMemory(
            name=f"out-{self.name}", create=False
        )
        self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)

    def detect(self, tensor_input, threshold=0.4):
        detections = []

        # copy input to shared memory
        self.np_shm[:] = tensor_input[:]
        self.event.clear()
        self.detection_queue.put(self.name)
        result = self.event.wait(timeout=10.0)

        # if it timed out, return no detections (Event.wait returns False on timeout)
        if not result:
            return detections

        for d in self.out_np_shm:
            if d[1] < threshold:
                break
            detections.append(
                (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
            )
        self.fps.update()
        return detections

    def cleanup(self):
        self.shm.unlink()
        self.out_shm.unlink()
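
For context on how the pieces above fit together: ObjectDetectProcess runs run_detector in a child process, and each RemoteObjectDetector exchanges data with it through shared-memory blocks named after the connection (the frame goes into a block with the connection's name, and results come back through the matching "out-" block). Below is a minimal sketch of that handshake, assuming the caller creates the shared-memory blocks, queue, and event itself (Frigate does this elsewhere in the application) and that the model config points at a valid OpenVino model; the names and sizes are illustrative, not Frigate's actual wiring code.

# Hypothetical wiring of the classes above. In Frigate the shared-memory blocks,
# queue, and events are created by the main application; they are created by hand
# here only to show the handshake.
import multiprocessing as mp
from multiprocessing import shared_memory

import numpy as np

from frigate.config import DetectorTypeEnum, ModelConfig
from frigate.object_detection import ObjectDetectProcess, RemoteObjectDetector

if __name__ == "__main__":
    model_config = ModelConfig(width=300, height=300)  # simplified/assumed
    camera = "front_door"  # hypothetical connection name

    detection_queue = mp.Queue()
    event = mp.Event()

    # Frame buffer (1 x H x W x 3 uint8) and result buffer (20 x 6 float32),
    # matching the shapes run_detector and RemoteObjectDetector expect.
    in_shm = shared_memory.SharedMemory(name=camera, create=True, size=300 * 300 * 3)
    out_shm = shared_memory.SharedMemory(name=f"out-{camera}", create=True, size=20 * 6 * 4)

    detect_process = ObjectDetectProcess(
        "ov",
        detection_queue,
        {camera: event},
        model_config,
        det_type=DetectorTypeEnum.openvino,
        det_device="CPU",
    )
    # Empty labelmap dict for brevity; real usage passes a parsed labelmap.
    remote = RemoteObjectDetector(camera, {}, detection_queue, event, model_config)

    frame = np.zeros((1, 300, 300, 3), dtype=np.uint8)
    print(remote.detect(frame, threshold=0.5))  # [] if nothing scores above threshold

    detect_process.stop()
    remote.cleanup()  # unlinks both shared-memory blocks
    in_shm.close()
    out_shm.close()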