allow defining model shape and switch to mobiledet as default model

Blake Blackshear 2020-12-09 07:18:53 -06:00
parent 5053305e17
commit d0470fffcc
5 changed files with 28 additions and 26 deletions

View File

@@ -36,10 +36,9 @@ RUN pip3 install \
COPY nginx/nginx.conf /etc/nginx/nginx.conf
# get model and labels
-ARG MODEL_REFS=7064b94dd5b996189242320359dbab8b52c94a84
COPY labelmap.txt /labelmap.txt
-RUN wget -q https://github.com/google-coral/edgetpu/raw/$MODEL_REFS/test_data/ssd_mobilenet_v2_coco_quant_postprocess_edgetpu.tflite -O /edgetpu_model.tflite
-RUN wget -q https://github.com/google-coral/edgetpu/raw/$MODEL_REFS/test_data/ssd_mobilenet_v2_coco_quant_postprocess.tflite -O /cpu_model.tflite
+RUN wget -q https://github.com/google-coral/test_data/raw/master/ssdlite_mobiledet_coco_qat_postprocess_edgetpu.tflite -O /edgetpu_model.tflite
+RUN wget -q https://github.com/google-coral/test_data/raw/master/ssdlite_mobiledet_coco_qat_postprocess.tflite -O /cpu_model.tflite
WORKDIR /opt/frigate/
ADD frigate frigate/
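
The change above swaps the default model from SSD MobileNet v2 (300x300 input, pinned to an edgetpu repo ref) to SSDLite MobileDet (320x320 input) fetched from google-coral/test_data. A minimal sketch for confirming a downloaded model's expected input shape with tflite_runtime, assuming the /cpu_model.tflite path from the Dockerfile above:

    import tflite_runtime.interpreter as tflite

    interpreter = tflite.Interpreter(model_path="/cpu_model.tflite")
    interpreter.allocate_tensors()
    # MobileDet should report an input tensor of shape [1, 320, 320, 3]
    print(interpreter.get_input_details()[0]["shape"])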

View File

@@ -115,18 +115,19 @@ class FrigateApp():
self.mqtt_client = create_mqtt_client(self.config.mqtt)
def start_detectors(self):
+model_shape = (self.config.model.height, self.config.model.width)
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
-shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=300*300*3)
+shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=self.config.model.height*self.config.model.width*3)
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
for name, detector in self.config.detectors.items():
if detector.type == 'cpu':
-self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, out_events=self.detection_out_events, tf_device='cpu')
+self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, tf_device='cpu')
if detector.type == 'edgetpu':
-self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, out_events=self.detection_out_events, tf_device=detector.device)
+self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, tf_device=detector.device)
def start_detected_frames_processor(self):
self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
@@ -134,8 +135,9 @@ class FrigateApp():
self.detected_frames_processor.start()
def start_camera_processors(self):
+model_shape = (self.config.model.height, self.config.model.width)
for name, config in self.config.cameras.items():
-camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config,
+camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config, model_shape,
self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
self.camera_metrics[name]))
camera_process.daemon = True
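
Because the model shape is no longer hardcoded, start_detectors sizes each camera's input segment from the configured dimensions: one uint8 RGB tensor of height*width*3 bytes, and the same (height, width) tuple is handed to every detector and camera process. A standalone sketch of the producer side, with a hypothetical camera name:

    import numpy as np
    from multiprocessing import shared_memory

    height, width = 320, 320  # from config.model in the real app
    shm_in = shared_memory.SharedMemory(name="back_yard", create=True, size=height * width * 3)
    # consumers map the same buffer as a (1, height, width, 3) uint8 tensor
    tensor = np.ndarray((1, height, width, 3), dtype=np.uint8, buffer=shm_in.buf)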

View File

@@ -193,7 +193,7 @@ CAMERAS_SCHEMA = vol.Schema(vol.All(
FRIGATE_CONFIG_SCHEMA = vol.Schema(
{
-vol.Optional('model', default={'width': 300, 'height': 300}): {
+vol.Optional('model', default={'width': 320, 'height': 320}): {
vol.Required('width'): int,
vol.Required('height'): int
},
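
The schema default moves to 320x320 to match MobileDet, while a custom model can still declare its own width and height. A self-contained sketch of how voluptuous fills that default when the model key is omitted from the config:

    import voluptuous as vol

    schema = vol.Schema({
        vol.Optional('model', default={'width': 320, 'height': 320}): {
            vol.Required('width'): int,
            vol.Required('height'): int,
        }
    })
    print(schema({}))  # {'model': {'width': 320, 'height': 320}}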

View File

@@ -106,7 +106,7 @@ class LocalObjectDetector(ObjectDetector):
return detections
-def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, tf_device):
+def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device):
threading.current_thread().name = f"detector:{name}"
logger = logging.getLogger(f"detector.{name}")
logger.info(f"Starting detection process: {os.getpid()}")
@@ -139,7 +139,7 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
connection_id = detection_queue.get(timeout=5)
except queue.Empty:
continue
-input_frame = frame_manager.get(connection_id, (1,300,300,3))
+input_frame = frame_manager.get(connection_id, (1,model_shape[0],model_shape[1],3))
if input_frame is None:
continue
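
The detector now maps the incoming frame with the configured shape instead of the hardcoded (1,300,300,3). SharedMemoryFrameManager itself is not part of this diff; a hedged sketch of what this lookup presumably does under the hood:

    import numpy as np
    from multiprocessing import shared_memory

    def get_frame(name, shape):
        # attach to an existing segment created by the camera process;
        # return None if it is gone (e.g. the camera was restarted)
        try:
            shm = shared_memory.SharedMemory(name=name, create=False)
        except FileNotFoundError:
            return None
        return np.ndarray(shape, dtype=np.uint8, buffer=shm.buf)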
@@ -155,13 +155,14 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
avg_speed.value = (avg_speed.value*9 + duration)/10
class EdgeTPUProcess():
-def __init__(self, name, detection_queue, out_events, tf_device=None):
+def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None):
self.name = name
self.out_events = out_events
self.detection_queue = detection_queue
self.avg_inference_speed = mp.Value('d', 0.01)
self.detection_start = mp.Value('d', 0.0)
self.detect_process = None
+self.model_shape = model_shape
self.tf_device = tf_device
self.start_or_restart()
@@ -178,19 +179,19 @@ class EdgeTPUProcess():
self.detection_start.value = 0.0
if (not self.detect_process is None) and self.detect_process.is_alive():
self.stop()
-self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.tf_device))
+self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device))
self.detect_process.daemon = True
self.detect_process.start()
class RemoteObjectDetector():
-def __init__(self, name, labels, detection_queue, event):
+def __init__(self, name, labels, detection_queue, event, model_shape):
self.labels = load_labels(labels)
self.name = name
self.fps = EventsPerSecond()
self.detection_queue = detection_queue
self.event = event
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
-self.np_shm = np.ndarray((1,300,300,3), dtype=np.uint8, buffer=self.shm.buf)
+self.np_shm = np.ndarray((1,model_shape[0],model_shape[1],3), dtype=np.uint8, buffer=self.shm.buf)
self.out_shm = mp.shared_memory.SharedMemory(name=f"out-{self.name}", create=False)
self.out_np_shm = np.ndarray((20,6), dtype=np.float32, buffer=self.out_shm.buf)
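
RemoteObjectDetector sizes its input view from model_shape, while the result segment stays a fixed 20*6*4 bytes: up to 20 detections of 6 float32 values each (presumably a label index, a score, and four box coordinates, though the layout is not shown in this diff). A sketch of the consumer-side mapping with a hypothetical camera name:

    import numpy as np
    from multiprocessing import shared_memory

    name = "back_yard"
    out_shm = shared_memory.SharedMemory(name=f"out-{name}", create=False)
    # 20 rows x 6 float32 columns = 20*6*4 bytes, matching the size in app.py
    out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)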

View File

@@ -64,14 +64,14 @@ def filtered(obj, objects_to_track, object_filters, mask=None):
return False
-def create_tensor_input(frame, region):
+def create_tensor_input(frame, model_shape, region):
cropped_frame = yuv_region_2_rgb(frame, region)
# Resize to 300x300 if needed
-if cropped_frame.shape != (300, 300, 3):
-cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
+if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
+cropped_frame = cv2.resize(cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR)
-# Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
+# Expand dimensions since the model expects images to have shape: [1, height, width, 3]
return np.expand_dims(cropped_frame, axis=0)
def stop_ffmpeg(ffmpeg_process, logger):
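
One caveat on create_tensor_input above: cv2.resize takes dsize as (width, height), while model_shape is built as (height, width) in app.py, so dsize=model_shape is only shape-correct for square models such as the 320x320 default. A standalone sketch of the resize-and-expand step with the axes written out (input dimensions are hypothetical):

    import cv2
    import numpy as np

    model_shape = (320, 320)  # (height, width), as built in app.py
    rgb = np.zeros((480, 640, 3), dtype=np.uint8)  # stand-in for the cropped region
    if rgb.shape != (model_shape[0], model_shape[1], 3):
        # dsize is (width, height) in OpenCV
        rgb = cv2.resize(rgb, dsize=(model_shape[1], model_shape[0]), interpolation=cv2.INTER_LINEAR)
    tensor = np.expand_dims(rgb, axis=0)  # shape: (1, 320, 320, 3)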
@@ -241,7 +241,7 @@ def capture_camera(name, config: CameraConfig, process_info):
camera_watchdog.start()
camera_watchdog.join()
-def track_camera(name, config: CameraConfig, detection_queue, result_connection, detected_objects_queue, process_info):
+def track_camera(name, config: CameraConfig, model_shape, detection_queue, result_connection, detected_objects_queue, process_info):
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
@@ -260,13 +260,13 @@ def track_camera(name, config: CameraConfig, detection_queue, result_connection,
mask = config.mask
motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
-object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection)
+object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)
object_tracker = ObjectTracker(10)
frame_manager = SharedMemoryFrameManager()
-process_frames(name, frame_queue, frame_shape, frame_manager, motion_detector, object_detector,
+process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, mask, stop_event)
logger.info(f"{name}: exiting subprocess")
@@ -277,8 +277,8 @@ def reduce_boxes(boxes):
reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
return [tuple(b) for b in reduced_boxes]
-def detect(object_detector, frame, region, objects_to_track, object_filters, mask):
-tensor_input = create_tensor_input(frame, region)
+def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask):
+tensor_input = create_tensor_input(frame, model_shape, region)
detections = []
region_detections = object_detector.detect(tensor_input)
@@ -300,7 +300,7 @@ def detect(object_detector, frame, region, objects_to_track, object_filters, mas
detections.append(det)
return detections
-def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
+def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_shape,
frame_manager: FrameManager, motion_detector: MotionDetector,
object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue, process_info: Dict,
@@ -357,7 +357,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
# resize regions and detect
detections = []
for region in regions:
-detections.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
#########
# merge objects, check for clipped objects and look again up to 4 times
@@ -390,7 +390,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
box[0], box[1],
box[2], box[3])
-selected_objects.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
refining = True
else: