Mirror of https://github.com/blakeblackshear/frigate.git (synced 2024-11-21 19:07:46 +01:00)
add num_threads (fixes #322)

commit 32fb76b3d1 (parent 8d52e2635a)
@@ -200,6 +200,9 @@ detectors:
     type: edgetpu
     # Optional: device name as defined here: https://coral.ai/docs/edgetpu/multiple-edgetpu/#using-the-tensorflow-lite-python-api
     device: usb
+    # Optional: num_threads value passed to the tflite.Interpreter (default: shown below)
+    # This value is only used for CPU types
+    num_threads: 3
 
 # Optional: model configuration
 model:
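For context (not part of the commit): a minimal sketch of what a detector block like the documented example parses to, assuming PyYAML is available; the detector name cpu1 and the snippet itself are made up for illustration.

# Sketch only: parse a detector block shaped like the documented example and
# read the new num_threads option. Assumes PyYAML; cpu1 is a made-up name.
import yaml

snippet = """
detectors:
  cpu1:
    type: cpu
    num_threads: 3
"""

config = yaml.safe_load(snippet)
for name, detector in config['detectors'].items():
    # per the docs hunk, num_threads only matters for CPU detectors
    print(name, detector['type'], detector.get('num_threads', 3))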
@@ -125,9 +125,9 @@ class FrigateApp():
 
         for name, detector in self.config.detectors.items():
             if detector.type == 'cpu':
-                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, tf_device='cpu')
+                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads)
             if detector.type == 'edgetpu':
-                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, tf_device=detector.device)
+                self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads)
 
     def start_detected_frames_processor(self):
         self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
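For context (not part of the commit): a trimmed, hypothetical sketch of the dispatch above. The detector type picks the device string, and num_threads is now forwarded alongside it. DetectorStub and make_detector_args are stand-ins, not Frigate code.

# Hypothetical, trimmed stand-in for the dispatch in FrigateApp above.
# DetectorStub and make_detector_args are placeholders, not Frigate's classes.
from dataclasses import dataclass

@dataclass
class DetectorStub:
    type: str = 'edgetpu'
    device: str = 'usb'
    num_threads: int = 3

def make_detector_args(name, detector):
    # CPU detectors hard-code the 'cpu' device; EdgeTPU detectors use the
    # configured device. Both now carry num_threads through to the process.
    device = 'cpu' if detector.type == 'cpu' else detector.device
    return (name, device, detector.num_threads)

print(make_detector_args('coral', DetectorStub()))            # ('coral', 'usb', 3)
print(make_detector_args('cpu1', DetectorStub(type='cpu')))   # ('cpu1', 'cpu', 3)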
@@ -15,7 +15,8 @@ DETECTORS_SCHEMA = vol.Schema(
     {
         vol.Required(str): {
             vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
-            vol.Optional('device', default='usb'): str
+            vol.Optional('device', default='usb'): str,
+            vol.Optional('num_threads', default=3): int
         }
     }
 )
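For context (not part of the commit): the schema from this hunk exercised standalone, assuming voluptuous fills in declared defaults the way this code relies on; the detector name cpu1 is arbitrary.

# Sketch: the schema from the hunk above, exercised on its own.
# Assumes voluptuous applies Optional(..., default=...) when a key is omitted.
import voluptuous as vol

DETECTORS_SCHEMA = vol.Schema(
    {
        vol.Required(str): {
            vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']),
            vol.Optional('device', default='usb'): str,
            vol.Optional('num_threads', default=3): int
        }
    }
)

# 'cpu1' is an arbitrary detector name for illustration.
validated = DETECTORS_SCHEMA({'cpu1': {'type': 'cpu'}})
print(validated)  # expected: {'cpu1': {'type': 'cpu', 'device': 'usb', 'num_threads': 3}}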
@@ -264,6 +265,7 @@ class DetectorConfig():
     def __init__(self, config):
         self._type = config['type']
         self._device = config['device']
+        self._num_threads = config['num_threads']
 
     @property
     def type(self):
@@ -273,10 +275,15 @@ class DetectorConfig():
     def device(self):
         return self._device
 
+    @property
+    def num_threads(self):
+        return self._num_threads
+
     def to_dict(self):
         return {
             'type': self.type,
-            'device': self.device
+            'device': self.device,
+            'num_threads': self.num_threads
         }
 
 class LoggerConfig():
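For context (not part of the commit): the two DetectorConfig hunks follow the existing read-only property pattern. Below is a self-contained sketch of that pattern, trimmed to the fields involved; DetectorConfigSketch is a stand-in name, not Frigate's class.

# Trimmed sketch of the pattern shown above: the validated dict is stored once
# and exposed through read-only properties and to_dict().
class DetectorConfigSketch:
    def __init__(self, config):
        self._type = config['type']
        self._device = config['device']
        self._num_threads = config['num_threads']

    @property
    def num_threads(self):
        return self._num_threads

    def to_dict(self):
        return {
            'type': self._type,
            'device': self._device,
            'num_threads': self._num_threads
        }

print(DetectorConfigSketch({'type': 'cpu', 'device': 'usb', 'num_threads': 4}).to_dict())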
@@ -43,7 +43,7 @@ class ObjectDetector(ABC):
         pass
 
 class LocalObjectDetector(ObjectDetector):
-    def __init__(self, tf_device=None, labels=None):
+    def __init__(self, tf_device=None, num_threads=3, labels=None):
         self.fps = EventsPerSecond()
         if labels is None:
             self.labels = {}
@@ -66,7 +66,7 @@ class LocalObjectDetector(ObjectDetector):
 
         if edge_tpu_delegate is None:
             self.interpreter = tflite.Interpreter(
-                model_path='/cpu_model.tflite')
+                model_path='/cpu_model.tflite', num_threads=num_threads)
         else:
             self.interpreter = tflite.Interpreter(
                 model_path='/edgetpu_model.tflite',
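For context (not part of the commit): the point of the change is that num_threads now reaches the TFLite interpreter used for CPU inference. A minimal sketch, assuming a tflite_runtime build recent enough to accept num_threads and a local model file; the path is a placeholder, not Frigate's bundled model.

# Sketch: constructing a CPU interpreter with an explicit thread count.
# 'model.tflite' is a placeholder path; num_threads=3 mirrors the new default.
import tflite_runtime.interpreter as tflite

interpreter = tflite.Interpreter(model_path='model.tflite', num_threads=3)
interpreter.allocate_tensors()
print(interpreter.get_input_details()[0]['shape'])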
@@ -106,7 +106,7 @@ class LocalObjectDetector(ObjectDetector):
 
         return detections
 
-def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device):
+def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device, num_threads):
     threading.current_thread().name = f"detector:{name}"
     logger = logging.getLogger(f"detector.{name}")
     logger.info(f"Starting detection process: {os.getpid()}")
@@ -120,7 +120,7 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
     signal.signal(signal.SIGINT, receiveSignal)
 
     frame_manager = SharedMemoryFrameManager()
-    object_detector = LocalObjectDetector(tf_device=tf_device)
+    object_detector = LocalObjectDetector(tf_device=tf_device, num_threads=num_threads)
 
     outputs = {}
     for name in out_events.keys():
@@ -155,7 +155,7 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
         avg_speed.value = (avg_speed.value*9 + duration)/10
 
 class EdgeTPUProcess():
-    def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None):
+    def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None, num_threads=3):
         self.name = name
         self.out_events = out_events
         self.detection_queue = detection_queue
@@ -164,6 +164,7 @@ class EdgeTPUProcess():
         self.detect_process = None
         self.model_shape = model_shape
         self.tf_device = tf_device
+        self.num_threads = num_threads
         self.start_or_restart()
 
     def stop(self):
@@ -179,7 +180,7 @@ class EdgeTPUProcess():
         self.detection_start.value = 0.0
         if (not self.detect_process is None) and self.detect_process.is_alive():
             self.stop()
-        self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device))
+        self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device, self.num_threads))
         self.detect_process.daemon = True
         self.detect_process.start()
 
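For context (not part of the commit): the remaining hunks are plumbing. num_threads rides along in the mp.Process args tuple so the child detector process can hand it to LocalObjectDetector. A stripped-down sketch of that hand-off, with run_detector_stub standing in for Frigate's run_detector.

# Sketch of the hand-off pattern: extra constructor arguments are appended to
# the args tuple and unpacked in the child process. run_detector_stub is a
# placeholder, not Frigate's run_detector.
import multiprocessing as mp

def run_detector_stub(name, tf_device, num_threads):
    print(f"detector:{name} device={tf_device} num_threads={num_threads}")

if __name__ == '__main__':
    proc = mp.Process(target=run_detector_stub, name="detector:cpu1",
                      args=("cpu1", "cpu", 3))
    proc.daemon = True
    proc.start()
    proc.join()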