Adjust threshold and contour_area with mqtt

commit 7845995dfd (parent afe88d6e3a)
Author: Josh Hawkins
Date: 2022-04-27 09:52:45 -05:00
Committed by: Blake Blackshear
6 changed files with 109 additions and 7 deletions

@@ -140,3 +140,19 @@ Topic to turn improve_contrast for a camera on and off. Expected values are `ON`
 ### `frigate/<camera_name>/improve_contrast/state`
 Topic with current state of improve_contrast for a camera. Published values are `ON` and `OFF`.
+### `frigate/<camera_name>/motion_threshold/set`
+Topic to adjust motion threshold for a camera. Expected value is an integer.
+### `frigate/<camera_name>/motion_threshold/state`
+Topic with current motion threshold for a camera. Published value is an integer.
+### `frigate/<camera_name>/motion_contour_area/set`
+Topic to adjust motion contour area for a camera. Expected value is an integer.
+### `frigate/<camera_name>/motion_contour_area/state`
+Topic with current motion contour area for a camera. Published value is an integer.
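
The following is a minimal sketch (not part of this commit) of exercising the new topics from an external client with paho-mqtt; the broker host, the default `frigate` topic prefix, and the camera name `front_door` are assumed values for illustration.

```python
import paho.mqtt.publish as publish
import paho.mqtt.subscribe as subscribe

BROKER = "mqtt.local"  # assumed broker address

# Payloads must parse as integers; anything else is logged and ignored.
publish.single("frigate/front_door/motion_threshold/set", "30", hostname=BROKER)
publish.single("frigate/front_door/motion_contour_area/set", "100", hostname=BROKER)

# The /state topics are published with retain=True, so the current values
# arrive immediately after subscribing.
for topic in (
    "frigate/front_door/motion_threshold/state",
    "frigate/front_door/motion_contour_area/state",
):
    msg = subscribe.simple(topic, hostname=BROKER, retained=True)
    print(f"{msg.topic}: {msg.payload.decode()}")
```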

@@ -95,6 +95,12 @@ class FrigateApp:
                 "improve_contrast_enabled": mp.Value(
                     "i", self.config.cameras[camera_name].motion.improve_contrast
                 ),
+                "motion_threshold": mp.Value(
+                    "i", self.config.cameras[camera_name].motion.threshold
+                ),
+                "motion_contour_area": mp.Value(
+                    "i", self.config.cameras[camera_name].motion.contour_area
+                ),
                 "detection_fps": mp.Value("d", 0.0),
                 "detection_frame": mp.Value("d", 0.0),
                 "read_start": mp.Value("d", 0.0),

@@ -5,7 +5,14 @@ from frigate.config import MotionConfig
 class MotionDetector:
-    def __init__(self, frame_shape, config: MotionConfig, improve_contrast_enabled):
+    def __init__(
+        self,
+        frame_shape,
+        config: MotionConfig,
+        improve_contrast_enabled,
+        motion_threshold,
+        motion_contour_area,
+    ):
         self.config = config
         self.frame_shape = frame_shape
         self.resize_factor = frame_shape[0] / config.frame_height
@@ -25,6 +32,8 @@
         self.mask = np.where(resized_mask == [0])
         self.save_images = False
         self.improve_contrast = improve_contrast_enabled
+        self.threshold = motion_threshold
+        self.contour_area = motion_contour_area

     def detect(self, frame):
         motion_boxes = []
@@ -69,7 +78,7 @@
         # compute the threshold image for the current frame
         current_thresh = cv2.threshold(
-            frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
+            frameDelta, self.threshold.value, 255, cv2.THRESH_BINARY
         )[1]

         # black out everything in the avg_delta where there isnt motion in the current frame
@@ -79,7 +88,7 @@
         # then look for deltas above the threshold, but only in areas where there is a delta
         # in the current frame. this prevents deltas from previous frames from being included
         thresh = cv2.threshold(
-            avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY
+            avg_delta_image, self.threshold.value, 255, cv2.THRESH_BINARY
         )[1]

         # dilate the thresholded image to fill in holes, then find contours
@@ -94,7 +103,7 @@
         for c in cnts:
             # if the contour is big enough, count it as motion
             contour_area = cv2.contourArea(c)
-            if contour_area > self.config.contour_area:
+            if contour_area > self.contour_area.value:
                 x, y, w, h = cv2.boundingRect(c)
                 motion_boxes.append(
                     (
@@ -111,8 +120,7 @@
            # print(self.frame_counter)
            for c in cnts:
                contour_area = cv2.contourArea(c)
-               # print(contour_area)
-               if contour_area > self.config.contour_area:
+               if contour_area > self.contour_area.value:
                    x, y, w, h = cv2.boundingRect(c)
                    cv2.rectangle(
                        thresh_dilated,
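
With these hunks the detector dereferences the shared values (`self.threshold.value`, `self.contour_area.value`) on every frame instead of reading static config fields, so an MQTT update takes effect on the next processed frame without a restart. Below is a self-contained sketch (not Frigate code) of the thresholding and contour-area filtering involved, using synthetic frames and assuming the OpenCV 4 `findContours` return signature.

```python
import cv2
import numpy as np

threshold = 25      # stands in for self.threshold.value
contour_area = 30   # stands in for self.contour_area.value

previous = np.zeros((120, 160), dtype=np.uint8)
current = previous.copy()
current[40:60, 50:80] = 200  # a bright "moving" region

# difference against the previous frame, then keep pixels above the threshold
frame_delta = cv2.absdiff(current, previous)
thresh = cv2.threshold(frame_delta, threshold, 255, cv2.THRESH_BINARY)[1]

# keep only contours whose area exceeds the configured minimum
cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
motion_boxes = [
    cv2.boundingRect(c) for c in cnts if cv2.contourArea(c) > contour_area
]
print(motion_boxes)  # one box roughly covering the bright region
```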

@@ -145,6 +145,52 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
         state_topic = f"{message.topic[:-4]}/state"
         client.publish(state_topic, payload, retain=True)

+    def on_motion_threshold_command(client, userdata, message):
+        try:
+            payload = int(message.payload.decode())
+        except ValueError:
+            logger.warning(
+                f"Received unsupported value at {message.topic}: {message.payload.decode()}"
+            )
+            return
+
+        logger.debug(f"on_motion_threshold_toggle: {message.topic} {payload}")
+
+        camera_name = message.topic.split("/")[-3]
+
+        motion_settings = config.cameras[camera_name].motion
+        logger.info(f"Setting motion threshold for {camera_name} via mqtt: {payload}")
+        camera_metrics[camera_name]["motion_threshold"].value = payload
+        motion_settings.threshold = payload
+
+        state_topic = f"{message.topic[:-4]}/state"
+        client.publish(state_topic, payload, retain=True)
+
+    def on_motion_contour_area_command(client, userdata, message):
+        try:
+            payload = int(message.payload.decode())
+        except ValueError:
+            logger.warning(
+                f"Received unsupported value at {message.topic}: {message.payload.decode()}"
+            )
+            return
+
+        logger.debug(f"on_motion_contour_area_toggle: {message.topic} {payload}")
+
+        camera_name = message.topic.split("/")[-3]
+
+        motion_settings = config.cameras[camera_name].motion
+        logger.info(
+            f"Setting motion contour area for {camera_name} via mqtt: {payload}"
+        )
+        camera_metrics[camera_name]["motion_contour_area"].value = payload
+        motion_settings.contour_area = payload
+
+        state_topic = f"{message.topic[:-4]}/state"
+        client.publish(state_topic, payload, retain=True)
+
     def on_restart_command(client, userdata, message):
         restart_frigate()
@@ -195,6 +241,14 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
             f"{mqtt_config.topic_prefix}/{name}/improve_contrast/set",
             on_improve_contrast_command,
         )
+        client.message_callback_add(
+            f"{mqtt_config.topic_prefix}/{name}/motion_threshold/set",
+            on_motion_threshold_command,
+        )
+        client.message_callback_add(
+            f"{mqtt_config.topic_prefix}/{name}/motion_contour_area/set",
+            on_motion_contour_area_command,
+        )

     client.message_callback_add(
         f"{mqtt_config.topic_prefix}/restart", on_restart_command
@@ -250,6 +304,16 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
             "ON" if config.cameras[name].motion.improve_contrast else "OFF",
             retain=True,
         )
+        client.publish(
+            f"{mqtt_config.topic_prefix}/{name}/motion_threshold/state",
+            config.cameras[name].motion.threshold,
+            retain=True,
+        )
+        client.publish(
+            f"{mqtt_config.topic_prefix}/{name}/motion_contour_area/state",
+            config.cameras[name].motion.contour_area,
+            retain=True,
+        )

     return client
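
The new callbacks follow the existing pattern: paho's `message_callback_add` routes each `/set` topic to its handler, the camera name is recovered from the topic path, and the accepted value is echoed back on the retained `/state` topic. Below is a standalone sketch of that routing (not Frigate code); the broker host is assumed, and a single wildcard filter stands in for the per-camera registrations in the loop above.

```python
import paho.mqtt.client as mqtt


def on_motion_threshold_command(client, userdata, message):
    try:
        payload = int(message.payload.decode())
    except ValueError:
        return  # non-integer payloads are ignored, as in the commit
    camera_name = message.topic.split("/")[-3]
    print(f"set motion threshold for {camera_name} to {payload}")
    # echo the accepted value back on the retained /state topic
    client.publish(f"{message.topic[:-4]}/state", payload, retain=True)


# paho-mqtt 1.x constructor (the version available when this commit landed);
# paho 2.x additionally requires a CallbackAPIVersion argument.
client = mqtt.Client()
client.message_callback_add(
    "frigate/+/motion_threshold/set", on_motion_threshold_command
)
client.connect("mqtt.local", 1883)
client.subscribe("frigate/#")
client.loop_forever()
```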

@@ -16,6 +16,8 @@ class CameraMetricsTypes(TypedDict):
     frame_queue: Queue
     motion_enabled: Synchronized
     improve_contrast_enabled: Synchronized
+    motion_threshold: Synchronized
+    motion_contour_area: Synchronized
     process: Optional[Process]
     process_fps: Synchronized
     read_start: Synchronized
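
`Synchronized` is the wrapper type that `multiprocessing.Value` returns, so the two new keys get the same annotation as the existing toggles. A quick sketch (not Frigate code) confirming the relationship:

```python
import multiprocessing as mp
from multiprocessing.sharedctypes import Synchronized

motion_threshold = mp.Value("i", 25)
print(isinstance(motion_threshold, Synchronized))  # True
print(motion_threshold.value)                      # 25
```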

@@ -363,13 +363,19 @@ def track_camera(
     detection_enabled = process_info["detection_enabled"]
     motion_enabled = process_info["motion_enabled"]
     improve_contrast_enabled = process_info["improve_contrast_enabled"]
+    motion_threshold = process_info["motion_threshold"]
+    motion_contour_area = process_info["motion_contour_area"]

     frame_shape = config.frame_shape
     objects_to_track = config.objects.track
     object_filters = config.objects.filters

     motion_detector = MotionDetector(
-        frame_shape, config.motion, improve_contrast_enabled
+        frame_shape,
+        config.motion,
+        improve_contrast_enabled,
+        motion_threshold,
+        motion_contour_area,
     )
     object_detector = RemoteObjectDetector(
         name, labelmap, detection_queue, result_connection, model_shape