diff --git a/.gitignore b/.gitignore
index a0f62b7eb..195708e2d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -16,4 +16,5 @@ web/node_modules
 web/coverage
 core
 !/web/**/*.ts
-.idea/*
\ No newline at end of file
+.idea/*
+.ipynb_checkpoints
\ No newline at end of file
diff --git a/docker/main/Dockerfile b/docker/main/Dockerfile
index e6e7bf46e..0eb4eb669 100644
--- a/docker/main/Dockerfile
+++ b/docker/main/Dockerfile
@@ -57,9 +57,11 @@ RUN apt-get -qq update \
     && pip install -r /requirements-ov.txt
 
 # Get OpenVino Model
-RUN mkdir /models \
-    && cd /models && omz_downloader --name ssdlite_mobilenet_v2 \
-    && cd /models && omz_converter --name ssdlite_mobilenet_v2 --precision FP16
+RUN --mount=type=bind,source=docker/main/build_ov_model.py,target=/build_ov_model.py \
+    mkdir /models && cd /models \
+    && wget http://download.tensorflow.org/models/object_detection/ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
+    && tar -xvf ssdlite_mobilenet_v2_coco_2018_05_09.tar.gz \
+    && python3 /build_ov_model.py
 
 
 # libUSB - No Udev
@@ -97,7 +99,8 @@ RUN wget -qO edgetpu_model.tflite https://github.com/google-coral/test_data/raw/
 RUN wget -qO cpu_model.tflite https://github.com/google-coral/test_data/raw/release-frogfish/ssdlite_mobiledet_coco_qat_postprocess.tflite
 COPY labelmap.txt .
 # Copy OpenVino model
-COPY --from=ov-converter /models/public/ssdlite_mobilenet_v2/FP16 openvino-model
+COPY --from=ov-converter /models/ssdlite_mobilenet_v2.xml openvino-model/
+COPY --from=ov-converter /models/ssdlite_mobilenet_v2.bin openvino-model/
 RUN wget -q https://github.com/openvinotoolkit/open_model_zoo/raw/master/data/dataset_classes/coco_91cl_bkgr.txt -O openvino-model/coco_91cl_bkgr.txt && \
     sed -i 's/truck/car/g' openvino-model/coco_91cl_bkgr.txt
 # Get Audio Model and labels
diff --git a/docker/main/build_ov_model.py b/docker/main/build_ov_model.py
new file mode 100644
index 000000000..9e110ad9f
--- /dev/null
+++ b/docker/main/build_ov_model.py
@@ -0,0 +1,11 @@
+import openvino as ov
+from openvino.tools import mo
+
+ov_model = mo.convert_model(
+    "/models/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb",
+    compress_to_fp16=True,
+    transformations_config="/usr/local/lib/python3.9/dist-packages/openvino/tools/mo/front/tf/ssd_v2_support.json",
+    tensorflow_object_detection_api_pipeline_config="/models/ssdlite_mobilenet_v2_coco_2018_05_09/pipeline.config",
+    reverse_input_channels=True,
+)
+ov.save_model(ov_model, "/models/ssdlite_mobilenet_v2.xml")
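(Not part of the patch: a quick way to sanity-check the IR that build_ov_model.py writes is the short standalone script below. It mirrors the shape checks the detector now performs; the /models paths, the CPU device, and the 300x300 ssdlite_mobilenet_v2 input size are assumptions.)

# check_ov_model.py - hypothetical standalone sanity check, not part of this patch
import openvino as ov

core = ov.Core()
# Assumed output location of build_ov_model.py (ov.save_model writes the .bin next to the .xml)
compiled = core.compile_model(model="/models/ssdlite_mobilenet_v2.xml", device_name="CPU")

# Same constraints OvDetector enforces for SSD models: exactly one input and one output
assert len(compiled.inputs) == 1 and len(compiled.outputs) == 1

# ssdlite_mobilenet_v2 takes a single NHWC image tensor (300x300 is an assumption)
print("input :", compiled.inputs[0].get_shape())   # expected [1,300,300,3]

# TF Object Detection API post-processing emits rows of
# [image_id, class_id, score, xmin, ymin, xmax, ymax]
print("output:", compiled.outputs[0].get_shape())  # expected [1,1,N,7]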
diff --git a/docker/main/requirements-ov.txt b/docker/main/requirements-ov.txt
index 20e5a29c1..6fd1ca55d 100644
--- a/docker/main/requirements-ov.txt
+++ b/docker/main/requirements-ov.txt
@@ -1,5 +1,3 @@
 numpy
-# Openvino Library - Custom built with MYRIAD support
-openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64'
-openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64'
-openvino-dev[tensorflow2] @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino_dev-2022.3.1-1-py3-none-any.whl
+tensorflow
+openvino-dev>=2024.0.0
\ No newline at end of file
diff --git a/docker/main/requirements-wheels.txt b/docker/main/requirements-wheels.txt
index f3d9668ab..191ebb309 100644
--- a/docker/main/requirements-wheels.txt
+++ b/docker/main/requirements-wheels.txt
@@ -30,6 +30,4 @@ setproctitle == 1.3.*
 ws4py == 0.5.*
 unidecode == 1.3.*
 onnxruntime == 1.16.*
-# Openvino Library - Custom built with MYRIAD support
-openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-manylinux_2_31_x86_64.whl; platform_machine == 'x86_64'
-openvino @ https://github.com/NateMeyer/openvino-wheels/releases/download/multi-arch_2022.3.1/openvino-2022.3.1-1-cp39-cp39-linux_aarch64.whl; platform_machine == 'aarch64'
+openvino == 2024.1.*
diff --git a/frigate/detectors/plugins/openvino.py b/frigate/detectors/plugins/openvino.py
index c259bda64..b87700d0d 100644
--- a/frigate/detectors/plugins/openvino.py
+++ b/frigate/detectors/plugins/openvino.py
@@ -1,7 +1,7 @@
 import logging
 
 import numpy as np
-import openvino.runtime as ov
+import openvino as ov
 from pydantic import Field
 from typing_extensions import Literal
 
@@ -23,28 +23,56 @@ class OvDetector(DetectionApi):
 
     def __init__(self, detector_config: OvDetectorConfig):
         self.ov_core = ov.Core()
-        self.ov_model = self.ov_core.read_model(detector_config.model.path)
         self.ov_model_type = detector_config.model.model_type
 
         self.h = detector_config.model.height
         self.w = detector_config.model.width
 
         self.interpreter = self.ov_core.compile_model(
-            model=self.ov_model, device_name=detector_config.device
+            model=detector_config.model.path, device_name=detector_config.device
         )
-        logger.info(f"Model Input Shape: {self.interpreter.input(0).shape}")
-        self.output_indexes = 0
+        self.model_invalid = False
+
+        # Ensure the SSD model has the right input and output shapes
+        if self.ov_model_type == ModelTypeEnum.ssd:
+            model_inputs = self.interpreter.inputs
+            model_outputs = self.interpreter.outputs
+
+            if len(model_inputs) != 1:
+                logger.error(
+                    f"SSD models must only have 1 input. Found {len(model_inputs)}."
+                )
+                self.model_invalid = True
+            if len(model_outputs) != 1:
+                logger.error(
+                    f"SSD models must only have 1 output. Found {len(model_outputs)}."
+                )
+                self.model_invalid = True
+
+            if model_inputs[0].get_shape() != ov.Shape([1, self.w, self.h, 3]):
+                logger.error(
+                    f"SSD model input doesn't match. Found {model_inputs[0].get_shape()}."
+                )
+                self.model_invalid = True
+
+            output_shape = model_outputs[0].get_shape()
+            if output_shape[0] != 1 or output_shape[1] != 1 or output_shape[3] != 7:
+                logger.error(f"SSD model output doesn't match. Found {output_shape}.")
+                self.model_invalid = True
 
-        while True:
-            try:
-                tensor_shape = self.interpreter.output(self.output_indexes).shape
-                logger.info(f"Model Output-{self.output_indexes} Shape: {tensor_shape}")
-                self.output_indexes += 1
-            except Exception:
-                logger.info(f"Model has {self.output_indexes} Output Tensors")
-                break
         if self.ov_model_type == ModelTypeEnum.yolox:
+            self.output_indexes = 0
+            while True:
+                try:
+                    tensor_shape = self.interpreter.output(self.output_indexes).shape
+                    logger.info(
+                        f"Model Output-{self.output_indexes} Shape: {tensor_shape}"
+                    )
+                    self.output_indexes += 1
+                except Exception:
+                    logger.info(f"Model has {self.output_indexes} Output Tensors")
+                    break
             self.num_classes = tensor_shape[2] - 5
             logger.info(f"YOLOX model has {self.num_classes} classes")
             self.set_strides_grids()
@@ -81,29 +109,32 @@ class OvDetector(DetectionApi):
     def detect_raw(self, tensor_input):
         infer_request = self.interpreter.create_infer_request()
-        infer_request.infer([tensor_input])
+        # TODO: see if we can use shared_memory=True
+        input_tensor = ov.Tensor(array=tensor_input)
+        infer_request.infer(input_tensor)
 
         if self.ov_model_type == ModelTypeEnum.ssd:
-            results = infer_request.get_output_tensor()
             detections = np.zeros((20, 6), np.float32)
-            i = 0
-            for object_detected in results.data[0, 0, :]:
-                if object_detected[0] != -1:
-                    logger.debug(object_detected)
-                if object_detected[2] < 0.1 or i == 20:
+
+            if self.model_invalid:
+                return detections
+
+            results = infer_request.get_output_tensor(0).data[0][0]
+
+            for i, (_, class_id, score, xmin, ymin, xmax, ymax) in enumerate(results):
+                if i == 20:
                     break
                 detections[i] = [
-                    object_detected[1],  # Label ID
-                    float(object_detected[2]),  # Confidence
-                    object_detected[4],  # y_min
-                    object_detected[3],  # x_min
-                    object_detected[6],  # y_max
-                    object_detected[5],  # x_max
+                    class_id,
+                    float(score),
+                    ymin,
+                    xmin,
+                    ymax,
+                    xmax,
                 ]
-                i += 1
             return detections
-        elif self.ov_model_type == ModelTypeEnum.yolox:
+
+        if self.ov_model_type == ModelTypeEnum.yolox:
             out_tensor = infer_request.get_output_tensor()
             # [x, y, h, w, box_score, class_no_1, ..., class_no_80],
             results = out_tensor.data
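(Also not part of the patch: a minimal sketch of the new SSD inference path that detect_raw() now uses, handy for exercising the converted model outside Frigate. The model path, CPU device, and the 1x300x300x3 uint8 image_tensor input are assumptions based on the TF ssdlite_mobilenet_v2 export.)

# ssd_smoke_test.py - hypothetical smoke test mirroring the new detect_raw() SSD path
import numpy as np
import openvino as ov

core = ov.Core()
compiled = core.compile_model(model="/models/ssdlite_mobilenet_v2.xml", device_name="CPU")
infer_request = compiled.create_infer_request()

# Dummy frame shaped like Frigate's tensor_input (assumed uint8 NHWC)
frame = np.zeros((1, 300, 300, 3), dtype=np.uint8)
infer_request.infer(ov.Tensor(array=frame))

# Each output row is [image_id, class_id, score, xmin, ymin, xmax, ymax]
results = infer_request.get_output_tensor(0).data[0][0]

# Repack the first 20 rows the same way OvDetector does: [class, score, ymin, xmin, ymax, xmax]
detections = np.zeros((20, 6), np.float32)
for i, (_, class_id, score, xmin, ymin, xmax, ymax) in enumerate(results):
    if i == 20:
        break
    detections[i] = [class_id, float(score), ymin, xmin, ymax, xmax]
print(detections[:5])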