Mirror of https://github.com/blakeblackshear/frigate.git, synced 2025-08-31 13:48:19 +02:00
Made it so openvino prioritizes using GPU and NPU over CPU
This commit is contained in:
parent 13cd5ebb2e
commit 52a9bdf2b9
@@ -90,6 +90,17 @@ class DGDetector(DetectionApi):
         self.dg_model = self._zoo.load_model(
             detector_config.model.path, non_blocking_batch_predict=True
         )
+        # Openvino tends to have multidevice, and they default to CPU rather than GPU or NPU
+        types = self.dg_model.supported_device_types
+        for type in types:
+            # If openvino is supported, prioritize using gpu, then npu, then cpu
+            if "OPENVINO" in type:
+                self.dg_model.device_type = [
+                    "OPENVINO/GPU",
+                    "OPENVINO/NPU",
+                    "OPENVINO/CPU",
+                ]
+                break
         self.model_height = detector_config.model.height
         self.model_width = detector_config.model.width
         self.predict_batch = self.dg_model.predict_batch(self._queue)
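For context, here is a minimal standalone sketch of the same device-ordering rule the diff applies, pulled out of DGDetector so it can be exercised on its own. The helper name prefer_openvino_device_order and the sample input list are illustrative assumptions, not part of Frigate or the DeGirum SDK; only the "RUNTIME/DEVICE" strings and the supported_device_types / device_type attributes mirror what the diff above uses.

# Illustrative sketch (not from the commit): reproduces the prioritization rule
# that the diff assigns to dg_model.device_type.

from typing import List, Optional


def prefer_openvino_device_order(
    supported_device_types: List[str],
) -> Optional[List[str]]:
    """Return the preferred OpenVINO device order, or None if OpenVINO is absent.

    Device types are "RUNTIME/DEVICE" strings such as "OPENVINO/CPU"; if any
    OpenVINO runtime is reported, prefer GPU, then NPU, then CPU.
    """
    for device in supported_device_types:
        if "OPENVINO" in device:
            return ["OPENVINO/GPU", "OPENVINO/NPU", "OPENVINO/CPU"]
    return None


if __name__ == "__main__":
    # Hypothetical host that only reports OpenVINO CPU and GPU backends.
    reported = ["OPENVINO/CPU", "OPENVINO/GPU"]
    print(prefer_openvino_device_order(reported))
    # -> ['OPENVINO/GPU', 'OPENVINO/NPU', 'OPENVINO/CPU']

Note that the commit assigns the full ordered list to device_type rather than selecting a single entry, which presumably lets the runtime fall back to NPU or CPU when a GPU-backed OpenVINO device is not actually present on the host.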