From bee99ca6ff7a8d1c47c4d75cde76e10e22a56b05 Mon Sep 17 00:00:00 2001
From: Blake Blackshear
Date: Sat, 14 Dec 2019 15:18:21 -0600
Subject: [PATCH] track and report all detected object types

---
 README.md                   | 12 +++--
 config/config.example.yml   | 38 ++++++++++----
 detect_objects.py           | 16 +++---
 frigate/mqtt.py             | 49 +++++++++---------
 frigate/object_detection.py | 15 +++---
 frigate/objects.py          | 62 ++++++++++-------------
 frigate/video.py            | 99 +++++++++++++++++++++----------------
 7 files changed, 160 insertions(+), 131 deletions(-)

diff --git a/README.md b/README.md
index 01e4d027c..63d522054 100644
--- a/README.md
+++ b/README.md
@@ -55,20 +55,22 @@ Example docker-compose:
 
 A `config.yml` file must exist in the `config` directory. See example [here](config/config.example.yml) and device specific info can be found [here](docs/DEVICES.md).
 
-Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best person snapshot at `http://localhost:5000/<camera_name>/best_person.jpg`
+Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best snapshot for any object type at `http://localhost:5000/<camera_name>/<object_name>/best.jpg`
 
 ## Integration with HomeAssistant
 
 ```
 camera:
   - name: Camera Last Person
     platform: mqtt
-    topic: frigate/<camera_name>/snapshot
+    topic: frigate/<camera_name>/person/snapshot
+  - name: Camera Last Car
+    platform: mqtt
+    topic: frigate/<camera_name>/car/snapshot
 
 binary_sensor:
   - name: Camera Person
     platform: mqtt
-    state_topic: "frigate/<camera_name>/objects"
-    value_template: '{{ value_json.person }}'
+    state_topic: "frigate/<camera_name>/person"
     device_class: motion
     availability_topic: "frigate/available"
@@ -89,7 +91,7 @@ automation:
       message: "A person was detected."
       data:
         photo:
-          - url: http://<ip>:5000/<camera_name>/best_person.jpg
+          - url: http://<ip>:5000/<camera_name>/person/best.jpg
             caption: A person was detected.
 ```
 
diff --git a/config/config.example.yml b/config/config.example.yml
index 330f0cadb..f8a2c4ccd 100644
--- a/config/config.example.yml
+++ b/config/config.example.yml
@@ -45,7 +45,19 @@ mqtt:
 #     - rawvideo
 #     - -pix_fmt
 #     - rgb24
-
+
+####################
+# Global object configuration. Applies to all cameras and regions
+# unless overridden at the camera/region levels.
+# Keys must be valid labels. By default, the model uses coco (https://dl.google.com/coral/canned_models/coco_labels.txt).
+# All labels from the model are reported over MQTT. These values are used to filter out false positives.
+####################
+objects:
+  person:
+    min_area: 5000
+    max_area: 100000
+    threshold: 0.5
+
 cameras:
   back:
     ffmpeg:
@@ -78,6 +90,12 @@ cameras:
     # 3 every 3rd frame, etc.
     ################
     take_frame: 1
+
+    objects:
+      person:
+        min_area: 5000
+        max_area: 100000
+        threshold: 0.5
 
     ################
     # size: size of the region in pixels
@@ -93,18 +111,18 @@ cameras:
       - size: 350
         x_offset: 0
         y_offset: 300
-        min_person_area: 5000
-        max_person_area: 100000
-        threshold: 0.5
+        objects:
+          car:
+            threshold: 0.2
       - size: 400
         x_offset: 350
         y_offset: 250
-        min_person_area: 2000
-        max_person_area: 100000
-        threshold: 0.5
+        objects:
+          person:
+            min_area: 2000
       - size: 400
         x_offset: 750
         y_offset: 250
-        min_person_area: 2000
-        max_person_area: 100000
-        threshold: 0.5
+        objects:
+          person:
+            min_area: 2000
diff --git a/detect_objects.py b/detect_objects.py
index 86c69d057..9ac2adfb6 100644
--- a/detect_objects.py
+++ b/detect_objects.py
@@ -42,6 +42,8 @@ FFMPEG_DEFAULT_CONFIG = {
         '-pix_fmt', 'rgb24'])
 }
 
+GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
+
 WEB_PORT = CONFIG.get('web_port', 5000)
 DEBUG = (CONFIG.get('debug', '0') == '1')
 
@@ -74,7 +76,7 @@ def main():
 
     cameras = {}
     for name, config in CONFIG['cameras'].items():
-        cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
+        cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
 
     prepped_queue_processor = PreppedQueueProcessor(
         cameras,
@@ -94,13 +96,13 @@ def main():
         # return a healh
         return "Frigate is running. Alive and healthy!"
 
-    @app.route('/<camera_name>/best_person.jpg')
-    def best_person(camera_name):
+    @app.route('/<camera_name>/
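
The config changes above introduce layered per-label filters (a global `objects` section, overridable per camera and per region) with `min_area`, `max_area`, and `threshold` keys. As a rough illustration of that layering, here is a minimal Python sketch of how such filters could be merged and applied to detections. The helper names, merge order, and detection dict keys are assumptions for illustration only; they are not the patch's actual implementation in `frigate/video.py` and `frigate/objects.py`.

```python
# Hypothetical sketch: merge global/camera/region object filters, then drop
# detections that fail the per-label area/score checks. Names here
# (merge_object_config, filter_detections) are illustrative, not from the patch.
from copy import deepcopy

def merge_object_config(global_cfg, camera_cfg, region_cfg):
    """Overlay per-label filter settings; the most specific level wins."""
    merged = deepcopy(global_cfg or {})
    for overlay in (camera_cfg or {}, region_cfg or {}):
        for label, settings in overlay.items():
            merged.setdefault(label, {}).update(settings)
    return merged

def filter_detections(detections, object_cfg):
    """Keep detections that pass the min_area/max_area/threshold filters."""
    kept = []
    for det in detections:
        cfg = object_cfg.get(det['name'], {})
        area = (det['xmax'] - det['xmin']) * (det['ymax'] - det['ymin'])
        if area < cfg.get('min_area', 0):
            continue
        if area > cfg.get('max_area', float('inf')):
            continue
        if det['score'] < cfg.get('threshold', 0.0):
            continue
        kept.append(det)
    return kept

# Mirrors the example config above: a global person filter plus a
# region-level car entry with a lower threshold.
global_objects = {'person': {'min_area': 5000, 'max_area': 100000, 'threshold': 0.5}}
region_objects = {'car': {'threshold': 0.2}}
merged = merge_object_config(global_objects, {}, region_objects)

detections = [
    {'name': 'car', 'score': 0.3, 'xmin': 0, 'ymin': 0, 'xmax': 100, 'ymax': 80},
    {'name': 'person', 'score': 0.6, 'xmin': 0, 'ymin': 0, 'xmax': 20, 'ymax': 40},
]
print(filter_detections(detections, merged))
# The car is kept (0.3 >= 0.2); the person is dropped (area 800 < min_area 5000).
```

Under this reading, a label with no configured filters is reported as-is, which matches the example config's comment that all model labels are published over MQTT and the filters exist only to suppress false positives.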