YOLOv5 & YOLOv8 support for the OpenVINO Detector (#5523)
* Initial commit that adds YOLOv5 and YOLOv8 support for OpenVINO detector
* Fixed double inference bug with YOLOv5 and YOLOv8
* Modified documentation to mention YOLOv5 and YOLOv8
* Changes to pass lint checks
* Change minimum threshold to improve model performance
* Fix link
* Clean up YOLO post-processing

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
parent 2b685ac343
commit 0592c8b0e2

@@ -119,7 +119,7 @@ model:
   labelmap_path: /openvino-model/coco_91cl_bkgr.txt
 ```
 
-This detector also supports YOLOx models, and has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. Frigate does not come with `yolox_tiny` model, you will need to follow [OpenVINO documentation](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) to provide your own model to Frigate. There is currently no support for other types of YOLO models (YOLOv3, YOLOv4, etc...). Below is an example of how `yolox_tiny` and other yolox variants can be used in Frigate:
+This detector also supports some YOLO variants: YOLOX, YOLOv5, and YOLOv8 specifically. Other YOLO variants are not officially supported/tested. Frigate does not come with any yolo models preloaded, so you will need to supply your own models. This detector has been verified to work with the [yolox_tiny](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny) model from Intel's Open Model Zoo. You can follow [these instructions](https://github.com/openvinotoolkit/open_model_zoo/tree/master/models/public/yolox-tiny#download-a-model-and-convert-it-into-openvino-ir-format) to retrieve the OpenVINO-compatible `yolox_tiny` model. Make sure that the model input dimensions match the `width` and `height` parameters, and `model_type` is set accordingly. See [Full Configuration Reference](/configuration/index.md#full-configuration-reference) for a list of possible `model_type` options. Below is an example of how `yolox_tiny` can be used in Frigate:
 
 ```yaml
 detectors:
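
For orientation (the actual example in the docs continues past the `detectors:` context line shown in the hunk), below is a rough sketch of what such a configuration could look like for a YOLOv8 model converted to OpenVINO IR. The model path, the 320x320 input size, the NCHW layout, and the labelmap path are placeholder assumptions for illustration, not values taken from the docs:

```yaml
detectors:
  ov:
    type: openvino
    device: AUTO
    model:
      # Placeholder path to a user-supplied OpenVINO IR export of a YOLOv8 model
      path: /path/to/yolov8n.xml

model:
  # Must match the resolution the model was exported with (assumed 320x320 here)
  width: 320
  height: 320
  # Assumed NCHW layout for a typical YOLO export
  input_tensor: nchw
  model_type: yolov8
  # Placeholder path to a matching label file
  labelmap_path: /path/to/coco_80cl.txt
```
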
@@ -105,7 +105,7 @@ model:
   # Valid values are nhwc or nchw (default: shown below)
   input_tensor: nhwc
   # Optional: Object detection model type, currently only used with the OpenVINO detector
-  # Valid values are ssd or yolox (default: shown below)
+  # Valid values are ssd, yolox, yolov5, or yolov8 (default: shown below)
   model_type: ssd
   # Optional: Label name modifications. These are merged into the standard labelmap.
   labelmap:

@@ -26,6 +26,8 @@ class InputTensorEnum(str, Enum):
 class ModelTypeEnum(str, Enum):
     ssd = "ssd"
     yolox = "yolox"
+    yolov5 = "yolov5"
+    yolov8 = "yolov8"
 
 
 class ModelConfig(BaseModel):
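
The new enum members are consumed through the `model_type` setting on Frigate's model configuration (see the reference hunk above). As a minimal sketch of how a string from YAML gets coerced into this enum by pydantic; the standalone `ModelConfig` below is a cut-down stand-in with only the relevant field, not Frigate's real class:

```python
from enum import Enum

from pydantic import BaseModel


class ModelTypeEnum(str, Enum):
    ssd = "ssd"
    yolox = "yolox"
    yolov5 = "yolov5"
    yolov8 = "yolov8"


# Simplified stand-in for Frigate's ModelConfig: only the field relevant here.
class ModelConfig(BaseModel):
    model_type: ModelTypeEnum = ModelTypeEnum.ssd


# A plain string from the YAML config is coerced into the enum automatically.
config = ModelConfig(model_type="yolov8")
assert config.model_type == ModelTypeEnum.yolov8
```
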
@@ -67,6 +67,18 @@ class OvDetector(DetectionApi):
         self.grids = np.concatenate(grids, 1)
         self.expanded_strides = np.concatenate(expanded_strides, 1)
 
+    ## Takes in class ID, confidence score, and array of [x, y, w, h] that describes detection position,
+    ## returns an array that's easily passable back to Frigate.
+    def process_yolo(self, class_id, conf, pos):
+        return [
+            class_id,  # class ID
+            conf,  # confidence score
+            (pos[1] - (pos[3] / 2)) / self.h,  # y_min
+            (pos[0] - (pos[2] / 2)) / self.w,  # x_min
+            (pos[1] + (pos[3] / 2)) / self.h,  # y_max
+            (pos[0] + (pos[2] / 2)) / self.w,  # x_max
+        ]
+
     def detect_raw(self, tensor_input):
         infer_request = self.interpreter.create_infer_request()
         infer_request.infer([tensor_input])
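
As a quick check of the box conversion above, here is a standalone re-implementation of the same arithmetic with made-up numbers (a 320x320 input size and a sample box); the real method reads `self.w`/`self.h` instead of taking them as arguments:

```python
def process_yolo(class_id, conf, pos, frame_w, frame_h):
    # pos is [x_center, y_center, w, h] in input-resolution pixels; the result is
    # [class_id, conf, y_min, x_min, y_max, x_max] with coordinates normalized
    # to 0..1, which is the layout Frigate expects from a detector.
    return [
        class_id,
        conf,
        (pos[1] - (pos[3] / 2)) / frame_h,  # y_min
        (pos[0] - (pos[2] / 2)) / frame_w,  # x_min
        (pos[1] + (pos[3] / 2)) / frame_h,  # y_max
        (pos[0] + (pos[2] / 2)) / frame_w,  # x_max
    ]


# A 64x32 box centred at (160, 120) on a 320x320 input:
print(process_yolo(17, 0.82, [160, 120, 64, 32], 320, 320))
# [17, 0.82, 0.325, 0.4, 0.425, 0.6]
```
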
@@ -113,23 +125,50 @@ class OvDetector(DetectionApi):
             ordered = dets[dets[:, 5].argsort()[::-1]][:20]
 
             detections = np.zeros((20, 6), np.float32)
-            i = 0
-
-            for object_detected in ordered:
-                if i < 20:
-                    detections[i] = [
-                        object_detected[6],  # Label ID
-                        object_detected[5],  # Confidence
-                        (object_detected[1] - (object_detected[3] / 2))
-                        / self.h,  # y_min
-                        (object_detected[0] - (object_detected[2] / 2))
-                        / self.w,  # x_min
-                        (object_detected[1] + (object_detected[3] / 2))
-                        / self.h,  # y_max
-                        (object_detected[0] + (object_detected[2] / 2))
-                        / self.w,  # x_max
-                    ]
-                    i += 1
-                else:
-                    break
+            for i, object_detected in enumerate(ordered):
+                detections[i] = self.process_yolo(
+                    object_detected[6], object_detected[5], object_detected[:4]
+                )
+            return detections
+        elif self.ov_model_type == ModelTypeEnum.yolov8:
+            out_tensor = infer_request.get_output_tensor()
+            results = out_tensor.data[0]
+            output_data = np.transpose(results)
+            scores = np.max(output_data[:, 4:], axis=1)
+            if len(scores) == 0:
+                return np.zeros((20, 6), np.float32)
+            scores = np.expand_dims(scores, axis=1)
+            # add scores to the last column
+            dets = np.concatenate((output_data, scores), axis=1)
+            # filter out lines with scores below threshold
+            dets = dets[dets[:, -1] > 0.5, :]
+            # limit to top 20 scores, descending order
+            ordered = dets[dets[:, -1].argsort()[::-1]][:20]
+            detections = np.zeros((20, 6), np.float32)
+
+            for i, object_detected in enumerate(ordered):
+                detections[i] = self.process_yolo(
+                    np.argmax(object_detected[4:-1]),
+                    object_detected[-1],
+                    object_detected[:4],
+                )
+            return detections
+        elif self.ov_model_type == ModelTypeEnum.yolov5:
+            out_tensor = infer_request.get_output_tensor()
+            output_data = out_tensor.data[0]
+            # filter out lines with scores below threshold
+            conf_mask = (output_data[:, 4] >= 0.5).squeeze()
+            output_data = output_data[conf_mask]
+            # limit to top 20 scores, descending order
+            ordered = output_data[output_data[:, 4].argsort()[::-1]][:20]
+
+            detections = np.zeros((20, 6), np.float32)
+
+            for i, object_detected in enumerate(ordered):
+                detections[i] = self.process_yolo(
+                    np.argmax(object_detected[5:]),
+                    object_detected[4],
+                    object_detected[:4],
+                )
+            return detections
         return detections
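
The two new branches differ mainly in the raw tensor layout they expect. The YOLOv5-style path assumes one row per candidate box laid out as [x, y, w, h, objectness, class scores...], while the YOLOv8-style path assumes a [4 + num_classes, num_boxes] tensor with no objectness score, which is why it transposes first and takes the per-row class maximum as the confidence. The sketch below mirrors that logic on tiny made-up tensors (two classes, two boxes, same 0.5 cutoff and 20-box cap as the detector code above); the shapes and values are illustrative assumptions, not real model output:

```python
import numpy as np

CONF_THRESHOLD = 0.5  # same cutoff used in the detector code above
TOP_K = 20            # Frigate detectors return at most 20 boxes


def postprocess_yolov5(output_data):
    # output_data: (num_boxes, 5 + num_classes), rows of [x, y, w, h, obj_conf, class scores...]
    keep = output_data[:, 4] >= CONF_THRESHOLD
    output_data = output_data[keep]
    ordered = output_data[output_data[:, 4].argsort()[::-1]][:TOP_K]
    return [(int(np.argmax(row[5:])), float(row[4]), row[:4]) for row in ordered]


def postprocess_yolov8(results):
    # results: (4 + num_classes, num_boxes); transpose so each row is one candidate box
    output_data = np.transpose(results)
    scores = np.max(output_data[:, 4:], axis=1)
    dets = np.concatenate((output_data, scores[:, None]), axis=1)
    dets = dets[dets[:, -1] > CONF_THRESHOLD]
    ordered = dets[dets[:, -1].argsort()[::-1]][:TOP_K]
    return [(int(np.argmax(row[4:-1])), float(row[-1]), row[:4]) for row in ordered]


# Dummy tensors with made-up shapes (2 classes, 2 boxes) just to exercise the functions:
v5_out = np.array([[160, 120, 64, 32, 0.9, 0.1, 0.8],
                   [10, 10, 5, 5, 0.2, 0.6, 0.3]], dtype=np.float32)
v8_out = np.array([[160, 10], [120, 10], [64, 5], [32, 5],
                   [0.1, 0.2], [0.8, 0.3]], dtype=np.float32)
print(postprocess_yolov5(v5_out))  # one box above threshold, class 1
print(postprocess_yolov8(v8_out))  # one box above threshold, class 1
```
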