diff --git a/README.md b/README.md
index 01e4d027c..63d522054 100644
--- a/README.md
+++ b/README.md
@@ -55,20 +55,22 @@ Example docker-compose:
 
 A `config.yml` file must exist in the `config` directory. See example [here](config/config.example.yml) and device specific info can be found [here](docs/DEVICES.md).
 
-Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best person snapshot at `http://localhost:5000/<camera_name>/best_person.jpg`
+Access the mjpeg stream at `http://localhost:5000/<camera_name>` and the best snapshot for any object type at `http://localhost:5000/<camera_name>/<object_name>/best.jpg`
 
 ## Integration with HomeAssistant
 ```
 camera:
   - name: Camera Last Person
     platform: mqtt
-    topic: frigate/<camera_name>/snapshot
+    topic: frigate/<camera_name>/person/snapshot
+  - name: Camera Last Car
+    platform: mqtt
+    topic: frigate/<camera_name>/car/snapshot
 
 binary_sensor:
   - name: Camera Person
     platform: mqtt
-    state_topic: "frigate/<camera_name>/objects"
-    value_template: '{{ value_json.person }}'
+    state_topic: "frigate/<camera_name>/person"
     device_class: motion
     availability_topic: "frigate/available"
@@ -89,7 +91,7 @@ automation:
       message: "A person was detected."
       data:
         photo:
-          - url: http://<ip>:5000/<camera_name>/best_person.jpg
+          - url: http://<ip>:5000/<camera_name>/person/best.jpg
             caption: A person was detected.
 ```
 
diff --git a/config/config.example.yml b/config/config.example.yml
index 330f0cadb..f8a2c4ccd 100644
--- a/config/config.example.yml
+++ b/config/config.example.yml
@@ -45,7 +45,19 @@ mqtt:
 #       - rawvideo
 #       - -pix_fmt
 #       - rgb24
-
+
+####################
+# Global object configuration. Applies to all cameras and regions
+# unless overridden at the camera/region levels.
+# Keys must be valid labels. By default, the model uses coco (https://dl.google.com/coral/canned_models/coco_labels.txt).
+# All labels from the model are reported over MQTT. These values are used to filter out false positives.
+####################
+objects:
+  person:
+    min_area: 5000
+    max_area: 100000
+    threshold: 0.5
+
 cameras:
   back:
     ffmpeg:
@@ -78,6 +90,12 @@ cameras:
     #   3 every 3rd frame, etc.
     ################
     take_frame: 1
+
+    objects:
+      person:
+        min_area: 5000
+        max_area: 100000
+        threshold: 0.5
 
     ################
     # size: size of the region in pixels
@@ -93,18 +111,18 @@ cameras:
       - size: 350
         x_offset: 0
         y_offset: 300
-        min_person_area: 5000
-        max_person_area: 100000
-        threshold: 0.5
+        objects:
+          car:
+            threshold: 0.2
       - size: 400
         x_offset: 350
         y_offset: 250
-        min_person_area: 2000
-        max_person_area: 100000
-        threshold: 0.5
+        objects:
+          person:
+            min_area: 2000
       - size: 400
         x_offset: 750
         y_offset: 250
-        min_person_area: 2000
-        max_person_area: 100000
-        threshold: 0.5
+        objects:
+          person:
+            min_area: 2000
diff --git a/detect_objects.py b/detect_objects.py
index 86c69d057..9ac2adfb6 100644
--- a/detect_objects.py
+++ b/detect_objects.py
@@ -42,6 +42,8 @@ FFMPEG_DEFAULT_CONFIG = {
          '-pix_fmt', 'rgb24'])
 }
 
+GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
+
 WEB_PORT = CONFIG.get('web_port', 5000)
 DEBUG = (CONFIG.get('debug', '0') == '1')
 
@@ -74,7 +76,7 @@ def main():
 
     cameras = {}
     for name, config in CONFIG['cameras'].items():
-        cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
+        cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, GLOBAL_OBJECT_CONFIG, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
 
     prepped_queue_processor = PreppedQueueProcessor(
         cameras,
@@ -94,13 +96,13 @@ def main():
         # return a healh
         return "Frigate is running. Alive and healthy!"
 
-    @app.route('/<camera_name>/best_person.jpg')
-    def best_person(camera_name):
+    @app.route('/<camera_name>/
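
The diff layers `objects` settings at three levels (global, camera, region), with `min_area`, `max_area`, and `threshold` used to filter out false positives. The sketch below is a minimal illustration of how such a layered config could be merged and applied; the function names (`merge_object_config`, `passes_filters`) and the detection dict keys (`name`, `score`, `area`) are assumptions for illustration, not code taken from this diff.

```python
# Illustrative sketch only -- function names and detection keys are assumptions,
# not taken from detect_objects.py.

def merge_object_config(global_cfg, camera_cfg, region_cfg):
    """Merge per-label settings; region overrides camera, camera overrides global."""
    merged = {}
    for label in set(global_cfg) | set(camera_cfg) | set(region_cfg):
        settings = {}
        settings.update(global_cfg.get(label, {}))
        settings.update(camera_cfg.get(label, {}))
        settings.update(region_cfg.get(label, {}))
        merged[label] = settings
    return merged

def passes_filters(detection, object_config):
    """Apply the min_area/max_area/threshold filters for the detection's label."""
    settings = object_config.get(detection['name'], {})
    if detection['score'] < settings.get('threshold', 0.5):
        return False
    if detection['area'] < settings.get('min_area', 0):
        return False
    if detection['area'] > settings.get('max_area', float('inf')):
        return False
    return True

# Example: a 0.3-confidence car detection in the first region is kept because
# that region lowers the car threshold to 0.2.
region_cfg = merge_object_config(
    {'person': {'min_area': 5000, 'max_area': 100000, 'threshold': 0.5}},
    {},
    {'car': {'threshold': 0.2}})
print(passes_filters({'name': 'car', 'score': 0.3, 'area': 12000}, region_cfg))  # True
```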