allow detection to be disabled

Allows detection to be enabled or disabled via MQTT

fix minor mistake

fix minor bug

change how thread is started

fix some more small problems

added changes for watchdog

didn't disable watchdog and it would restart the process; fixed that

change how the watchdog is disabled

reinitialize watchdog when enable sent
rourke750 2019-12-18 12:25:27 -05:00 committed by Rourke750
parent 5c01720567
commit d69a4ef427
4 changed files with 62 additions and 7 deletions

README.md

@@ -93,6 +93,9 @@ automation:
caption: A person was detected.
```
## Disabling Detection
You can enable or disable detection over MQTT by publishing a payload of either `enable` or `disable` to `topic_prefix/detection`.
## Tips
- Lower the framerate of the video feed on the camera to reduce the CPU usage for capturing the feed
@@ -109,7 +112,7 @@ automation:
- [ ] See if motion detection is even worth running
- [ ] Scan for people across the entire image rather than specific regions
- [ ] Dynamically resize detection area and follow people
- [ ] Add ability to turn detection on and off via MQTT
- [x] Add ability to turn detection on and off via MQTT
- [ ] Output movie clips of people for notifications, etc.
- [ ] Integrate with homeassistant push camera
- [ ] Merge bounding boxes that span multiple regions
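
The new control topic lends itself to a quick smoke test. Below is a minimal sketch, assuming the `frigate` topic prefix used in the code further down and a broker reachable at `mqtt.local`; the host, port, credentials, and helper name are placeholders, not values from this commit.

```python
# Toggle detection by publishing to <prefix>/detection.
# Broker host, port, credentials, and the 'frigate' prefix are assumptions;
# substitute the values from your own config.yml.
import paho.mqtt.publish as publish

def set_detection(enabled, host='mqtt.local', port=1883, prefix='frigate', auth=None):
    payload = 'enable' if enabled else 'disable'
    publish.single('{}/detection'.format(prefix), payload,
                   hostname=host, port=port, auth=auth)

if __name__ == '__main__':
    set_detection(False)  # pause detection and stop the per-camera watchdogs
    set_detection(True)   # restart capture and reinitialize the watchdogs
```

Any MQTT client works equally well; the only contract is the topic and the literal `enable`/`disable` payloads.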

detect_objects.py

@@ -9,6 +9,8 @@ import paho.mqtt.client as mqtt
from frigate.video import Camera
from frigate.object_detection import PreppedQueueProcessor
from frigate.mqtt import MqttObjectConsumer
with open('/config/config.yml') as f:
CONFIG = yaml.safe_load(f)
@@ -60,13 +62,25 @@ def main():
print ("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
# publish a message to signal that the service is running
client.publish(MQTT_TOPIC_PREFIX+'/available', 'online', retain=True)
# start a thread to listen for responses over mqtt
listen_queue = queue.Queue(10) # since we are listening for messages shouldn't need a very large queue
def on_message(client, obj, msg):
if msg.topic.startswith(MQTT_TOPIC_PREFIX):
payload = str(msg.payload.decode("utf-8"))
listen_queue.put({
'topic': msg.topic,
'payload': payload
})
client = mqtt.Client(client_id=MQTT_CLIENT_ID)
client.on_connect = on_connect
client.on_message = on_message
client.will_set(MQTT_TOPIC_PREFIX+'/available', payload='offline', qos=1, retain=True)
if not MQTT_USER is None:
client.username_pw_set(MQTT_USER, password=MQTT_PASS)
client.connect(MQTT_HOST, MQTT_PORT, 60)
client.loop_start()
client.subscribe(MQTT_TOPIC_PREFIX+'/detection')
# Queue for prepped frames, max size set to (number of cameras * 5)
max_queue_size = len(CONFIG['cameras'].items())*5
@@ -76,6 +90,9 @@ def main():
for name, config in CONFIG['cameras'].items():
cameras[name] = Camera(name, FFMPEG_DEFAULT_CONFIG, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
mqtt_listener = MqttObjectConsumer(client, MQTT_TOPIC_PREFIX, listen_queue, cameras.values())
mqtt_listener.start()
prepped_queue_processor = PreppedQueueProcessor(
cameras,
prepped_frame_queue
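
The wiring above follows a common paho-mqtt pattern: `on_message` runs on the client's network thread, so it only decodes the payload and enqueues it, and the dedicated `MqttObjectConsumer` thread does the real work. A standalone sketch of the same handoff, with illustrative names that are not from the repository:

```python
# Minimal sketch of the callback -> bounded queue -> worker-thread handoff.
# Keeping on_message cheap avoids stalling paho's network loop; the small
# bound (10) simply applies backpressure if control messages ever flood in.
import queue
import threading

listen_queue = queue.Queue(10)

def on_message(client, userdata, msg):
    # runs on the MQTT network thread -- just hand the message off
    listen_queue.put({'topic': msg.topic, 'payload': msg.payload.decode('utf-8')})

def worker():
    while True:
        item = listen_queue.get()   # blocks until a message arrives
        print('handling', item['topic'], item['payload'])

threading.Thread(target=worker, daemon=True).start()
```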

frigate/mqtt.py

@@ -1,6 +1,7 @@
import json
import cv2
import threading
import subprocess as sp
class MqttObjectPublisher(threading.Thread):
def __init__(self, client, topic_prefix, objects_parsed, detected_objects, best_person_frame):
@@ -38,4 +39,28 @@ class MqttObjectPublisher(threading.Thread):
ret, jpg = cv2.imencode('.jpg', self.best_person_frame.best_frame)
if ret:
jpg_bytes = jpg.tobytes()
self.client.publish(self.topic_prefix+'/snapshot', jpg_bytes, retain=True)
class MqttObjectConsumer(threading.Thread):
def __init__(self, client, topic_prefix, listen_queue, cameras):
threading.Thread.__init__(self)
self.client = client
self.topic_prefix = topic_prefix
self.listen_queue = listen_queue
self.cameras = cameras
def run(self):
while True:
# Now we want to see if there are more messages and then process them
item = self.listen_queue.get()
topic = item['topic']
payload = item['payload']
if topic == ("%s/detection" % self.topic_prefix):
if payload == 'enable':
for camera in self.cameras:
camera.start_or_restart_capture()
elif payload == 'disable':
for camera in self.cameras:
camera.disable_capture()
camera.watchdog.disable = True
camera.watchdog = None
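
Because the consumer only reads from its queue, it can be exercised without a broker at all. This sketch drives the class above directly, using a hypothetical `FakeCamera` stand-in for `frigate.video.Camera`; none of these names or values come from the commit.

```python
# Drive MqttObjectConsumer by hand: put dicts shaped like on_message's output
# onto its queue and watch the enable/disable dispatch.
import queue
import time

class FakeCamera:
    def __init__(self):
        self.watchdog = type('FakeWatchdog', (), {'disable': False})()
    def start_or_restart_capture(self):
        print('capture (re)started')
    def disable_capture(self):
        print('capture disabled')

listen_queue = queue.Queue(10)
consumer = MqttObjectConsumer(None, 'frigate', listen_queue, [FakeCamera()])
consumer.daemon = True  # the run() loop never exits on its own
consumer.start()

listen_queue.put({'topic': 'frigate/detection', 'payload': 'disable'})
listen_queue.put({'topic': 'frigate/detection', 'payload': 'enable'})
time.sleep(0.5)  # give the consumer thread a moment to process both messages
```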

frigate/video.py

@@ -7,6 +7,7 @@ import ctypes
import multiprocessing as mp
import subprocess as sp
import numpy as np
import queue
from . util import tonumpyarray, draw_box_with_label
from . object_detection import FramePrepper
from . objects import ObjectCleaner, BestPersonFrame
@@ -63,10 +64,11 @@ class CameraWatchdog(threading.Thread):
def __init__(self, camera):
threading.Thread.__init__(self)
self.camera = camera
self.disable = False
def run(self):
while True:
while not self.disable:
# wait a bit before checking
time.sleep(10)
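
The `disable` flag makes the watchdog cooperatively cancellable: nothing kills the thread, the loop simply re-checks the flag each pass. Below is a standalone sketch of the same idea using `threading.Event` as a common variant (the commit itself uses a plain boolean attribute), which also lets the 10-second sleep be interrupted as soon as the watchdog is disabled. Setting the flag, as the MQTT consumer above does with `camera.watchdog.disable = True`, is all that is needed; the thread exits on its next check.

```python
# Cooperative-shutdown sketch: the watchdog loop checks a flag each pass.
# threading.Event.wait(timeout) doubles as an interruptible sleep, so
# disabling takes effect immediately instead of after up to 10 seconds.
import threading

class StoppableWatchdog(threading.Thread):
    def __init__(self, check, interval=10):
        threading.Thread.__init__(self)
        self.check = check                 # callable that inspects the watched resource
        self.interval = interval
        self._disabled = threading.Event()

    def run(self):
        while not self._disabled.is_set():
            self._disabled.wait(self.interval)   # sleep, but wake early if disabled
            if not self._disabled.is_set():
                self.check()

    def disable(self):
        self._disabled.set()
```
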
@@ -194,9 +196,10 @@ class Camera:
self.mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
self.mask[:] = 255
def start_or_restart_capture(self):
if not self.ffmpeg_process is None:
def disable_capture(self):
if self.ffmpeg_process is None:
print("Attempted to disable capture but was already disabled")
else:
print("Terminating the existing ffmpeg process...")
self.ffmpeg_process.terminate()
try:
@@ -211,6 +214,10 @@ class Camera:
self.capture_thread.join()
self.ffmpeg_process = None
self.capture_thread = None
def start_or_restart_capture(self):
if not self.ffmpeg_process is None:
self.disable_capture()
# create the process to capture frames from the input stream and store in a shared array
print("Creating a new ffmpeg process...")
@@ -220,7 +227,10 @@ class Camera:
self.capture_thread = CameraCapture(self)
print("Starting a new capture thread...")
self.capture_thread.start()
if self.watchdog == None:
self.watchdog = CameraWatchdog(self)
def start_ffmpeg(self):
ffmpeg_cmd = (['ffmpeg'] +
self.ffmpeg_global_args +
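
For reference, `disable_capture()` above follows the usual terminate → wait → kill → join sequence for shutting down a long-running child process. A condensed sketch of that sequence is below; the helper name and the 30-second timeout are illustrative, not values taken from the commit.

```python
# Condensed sketch of the ffmpeg teardown used by disable_capture():
# ask the process to exit, wait briefly, force-kill if it ignores us,
# then join the thread that was reading frames from its stdout.
import subprocess as sp

def stop_ffmpeg(process, capture_thread, timeout=30):
    if process is None:
        print("Attempted to disable capture but it was already disabled")
        return
    print("Terminating the existing ffmpeg process...")
    process.terminate()
    try:
        process.communicate(timeout=timeout)  # drain pipes and wait for exit
    except sp.TimeoutExpired:
        print("FFmpeg did not exit. Forcing shutdown...")
        process.kill()
        process.communicate()
    if capture_thread is not None:
        capture_thread.join()  # the reader sees EOF once ffmpeg is gone
```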