From 39040c18741a20678f2a3c76983636f7f18cd37a Mon Sep 17 00:00:00 2001
From: Blake Blackshear
Date: Fri, 15 Jan 2021 20:52:59 -0600
Subject: [PATCH] enable and disable detection via mqtt

---
 frigate/app.py   |  5 +++--
 frigate/mqtt.py  | 26 +++++++++++++++++++++++++-
 frigate/video.py | 27 ++++++++++++++++++---------
 3 files changed, 46 insertions(+), 12 deletions(-)

diff --git a/frigate/app.py b/frigate/app.py
index 3cecf6489..9bcd4d5b9 100644
--- a/frigate/app.py
+++ b/frigate/app.py
@@ -70,11 +70,12 @@ class FrigateApp():
                 'camera_fps': mp.Value('d', 0.0),
                 'skipped_fps': mp.Value('d', 0.0),
                 'process_fps': mp.Value('d', 0.0),
+                'detection_enabled': mp.Value('i', 1),
                 'detection_fps': mp.Value('d', 0.0),
                 'detection_frame': mp.Value('d', 0.0),
                 'read_start': mp.Value('d', 0.0),
                 'ffmpeg_pid': mp.Value('i', 0),
-                'frame_queue': mp.Queue(maxsize=2)
+                'frame_queue': mp.Queue(maxsize=2),
             }

     def check_config(self):
@@ -129,7 +130,7 @@ class FrigateApp():
         self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor)

     def init_mqtt(self):
-        self.mqtt_client = create_mqtt_client(self.config)
+        self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)

     def start_detectors(self):
         model_shape = (self.config.model.height, self.config.model.width)
diff --git a/frigate/mqtt.py b/frigate/mqtt.py
index 9d44304b0..44c3edfb3 100644
--- a/frigate/mqtt.py
+++ b/frigate/mqtt.py
@@ -7,7 +7,7 @@ from frigate.config import FrigateConfig

 logger = logging.getLogger(__name__)

-def create_mqtt_client(config: FrigateConfig):
+def create_mqtt_client(config: FrigateConfig, camera_metrics):
     mqtt_config = config.mqtt

     def on_clips_command(client, userdata, message):
@@ -57,6 +57,28 @@ def create_mqtt_client(config: FrigateConfig):
         if command == "set":
             state_topic = f"{message.topic[:-4]}/state"
             client.publish(state_topic, payload, retain=True)
+
+    def on_detect_command(client, userdata, message):
+        payload = message.payload.decode()
+        logger.debug(f"on_detect_toggle: {message.topic} {payload}")
+
+        camera_name = message.topic.split('/')[-3]
+        command = message.topic.split('/')[-1]
+
+        if payload == 'ON':
+            if not camera_metrics[camera_name]["detection_enabled"].value:
+                logger.info(f"Turning on detection for {camera_name} via mqtt")
+                camera_metrics[camera_name]["detection_enabled"].value = True
+        elif payload == 'OFF':
+            if camera_metrics[camera_name]["detection_enabled"].value:
+                logger.info(f"Turning off detection for {camera_name} via mqtt")
+                camera_metrics[camera_name]["detection_enabled"].value = False
+        else:
+            logger.warning(f"Received unsupported value at {message.topic}: {payload}")
+
+        if command == "set":
+            state_topic = f"{message.topic[:-4]}/state"
+            client.publish(state_topic, payload, retain=True)

     def on_connect(client, userdata, flags, rc):
         threading.current_thread().name = "mqtt"
@@ -81,6 +103,7 @@ def create_mqtt_client(config: FrigateConfig):
     for name in config.cameras.keys():
         client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/clips/#", on_clips_command)
         client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/snapshots/#", on_snapshots_command)
+        client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/detection/#", on_detect_command)

     if not mqtt_config.user is None:
         client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
@@ -93,5 +116,6 @@ def create_mqtt_client(config: FrigateConfig):
     client.loop_start()
     client.subscribe(f"{mqtt_config.topic_prefix}/+/clips/#")
     client.subscribe(f"{mqtt_config.topic_prefix}/+/snapshots/#")
+    client.subscribe(f"{mqtt_config.topic_prefix}/+/detection/#")

     return client

diff --git a/frigate/video.py b/frigate/video.py
index e2f5cff7c..e3625b00d 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -255,6 +255,7 @@ def track_camera(name, config: CameraConfig, model_shape, detection_queue, resul
     listen()

     frame_queue = process_info['frame_queue']
+    detection_enabled = process_info['detection_enabled']

     frame_shape = config.frame_shape
     objects_to_track = config.objects.track
@@ -268,7 +269,7 @@ def track_camera(name, config: CameraConfig, model_shape, detection_queue, resul
     frame_manager = SharedMemoryFrameManager()

     process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
-        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, stop_event)
+        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, detection_enabled, stop_event)

     logger.info(f"{name}: exiting subprocess")

@@ -305,7 +306,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
     frame_manager: FrameManager, motion_detector: MotionDetector,
     object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
     detected_objects_queue: mp.Queue, process_info: Dict,
-    objects_to_track: List[str], object_filters, stop_event,
+    objects_to_track: List[str], object_filters, detection_enabled: mp.Value, stop_event,
     exit_on_empty: bool = False):

     fps = process_info['process_fps']
@@ -336,6 +337,14 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
             logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue

+        if not detection_enabled.value:
+            fps.value = fps_tracker.eps()
+            object_tracker.match_and_update(frame_time, [])
+            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, [], []))
+            detection_fps.value = object_detector.fps.eps()
+            frame_manager.close(f"{camera_name}{frame_time}")
+            continue
+
         # look for motion
         motion_boxes = motion_detector.detect(frame)

@@ -410,11 +419,11 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
         # add to the queue if not full
         if(detected_objects_queue.full()):
-          frame_manager.delete(f"{camera_name}{frame_time}")
-          continue
+            frame_manager.delete(f"{camera_name}{frame_time}")
+            continue
         else:
-          fps_tracker.update()
-          fps.value = fps_tracker.eps()
-          detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
-          detection_fps.value = object_detector.fps.eps()
-          frame_manager.close(f"{camera_name}{frame_time}")
+            fps_tracker.update()
+            fps.value = fps_tracker.eps()
+            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
+            detection_fps.value = object_detector.fps.eps()
+            frame_manager.close(f"{camera_name}{frame_time}")
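
Note (not part of the patch): the new handler accepts payloads of ON or OFF on <topic_prefix>/<camera_name>/detection/set and, for "set" commands, echoes the value back on <topic_prefix>/<camera_name>/detection/state with retain=True. Below is a minimal sketch of a client exercising the new topics, using the same paho-mqtt library the patch already relies on; the broker host "mqtt.local", the default "frigate" topic prefix, and the camera name "back_yard" are illustrative placeholders.

import paho.mqtt.client as mqtt

TOPIC_PREFIX = "frigate"   # assumed default topic prefix
CAMERA = "back_yard"       # hypothetical camera name

def on_message(client, userdata, message):
    # Frigate publishes the accepted value back on .../detection/state (retained).
    print(f"{message.topic}: {message.payload.decode()}")

client = mqtt.Client()
client.on_message = on_message
client.connect("mqtt.local", 1883, 60)  # placeholder broker address
client.subscribe(f"{TOPIC_PREFIX}/{CAMERA}/detection/state")

# Publish ON or OFF to the /set topic to toggle detection for a single camera.
client.publish(f"{TOPIC_PREFIX}/{CAMERA}/detection/set", "OFF")

client.loop_forever()

Because the toggle lives in a shared mp.Value inside camera_metrics, the camera subprocess picks up the change on the next frame without a restart; while detection is off, process_frames still drains the frame queue and updates the tracker with an empty detection list so previously tracked objects can age out.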