mirror of https://github.com/blakeblackshear/frigate.git

commit 3bc7cdaab6 (parent: 724d8187c6)

    configurable motion and detect settings

README.md (+32 lines)
README.md
@@ -281,6 +281,38 @@ objects:
       # Optional: minimum decimal percentage for tracked object's computed score to be considered a true positive (default: shown below)
       threshold: 0.7
+
+# Optional: Global motion detection config. These may also be defined at the camera level.
+# ADVANCED: Most users will not need to set these values in their config
+motion:
+  # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
+  # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
+  # The value should be between 1 and 255.
+  threshold: 25
+  # Optional: Minimum size in pixels in the resized motion image that counts as motion
+  # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will make motion detection more sensitive to smaller
+  # moving objects.
+  contour_area: 100
+  # Optional: Alpha value passed to cv2.accumulateWeighted when averaging the motion delta across multiple frames (default: shown below)
+  # Higher values mean the current frame impacts the delta a lot, and a single raindrop may register as motion.
+  # Too low and a fast moving person won't be detected as motion.
+  delta_alpha: 0.2
+  # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below)
+  # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster.
+  # Low values will cause things like moving shadows to be detected as motion for longer.
+  # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
+  frame_alpha: 0.2
+  # Optional: Height of the resized motion frame (default: 1/6th of the original frame height)
+  # This operates as an efficient blur alternative. Higher values will result in more granular motion detection at the expense of higher CPU usage.
+  # Lower values result in less CPU, but small changes may not register as motion.
+  frame_height: 180
+
+# Optional: Global detection settings. These may also be defined at the camera level.
+# ADVANCED: Most users will not need to set these values in their config
+detect:
+  # Optional: Number of frames without a detection before frigate considers an object to be gone. (default: double the frame rate)
+  max_disappeared: 10
+
 # Required: configuration section for cameras
 cameras:
   # Required: name of the camera
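For intuition about the motion `threshold` option, a minimal sketch (illustrative values, not part of the commit) of the cv2.threshold call the comments refer to:

import cv2
import numpy as np

# Hypothetical frame delta (absolute difference from the background average),
# one row of pixels with increasing amounts of change.
delta = np.array([[10, 24, 25, 26, 200]], dtype=np.uint8)

# With threshold=25, only pixels that differ by MORE than 25 count as motion.
_, motion_mask = cv2.threshold(delta, 25, 255, cv2.THRESH_BINARY)
print(motion_mask)  # [[  0   0   0 255 255]]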
frigate/config.py
@@ -84,6 +84,22 @@ GLOBAL_FFMPEG_SCHEMA = vol.Schema(
     }
 )
 
+MOTION_SCHEMA = vol.Schema(
+    {
+        'threshold': vol.Range(min=1, max=255),
+        'contour_area': int,
+        'delta_alpha': float,
+        'frame_alpha': float,
+        'frame_height': int
+    }
+)
+
+DETECT_SCHEMA = vol.Schema(
+    {
+        'max_disappeared': int
+    }
+)
+
 FILTER_SCHEMA = vol.Schema(
     {
         str: {
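A quick sketch of how these voluptuous schemas behave, assuming voluptuous's standard semantics (bare string keys are optional, vol.Range rejects out-of-range values); the snippet is illustrative, not part of the commit:

import voluptuous as vol

MOTION_SCHEMA = vol.Schema({'threshold': vol.Range(min=1, max=255)})

print(MOTION_SCHEMA({}))                 # {} -- bare keys are optional
print(MOTION_SCHEMA({'threshold': 25}))  # {'threshold': 25}

try:
    MOTION_SCHEMA({'threshold': 300})
except vol.Invalid as err:
    print(err)  # e.g. "value must be at most 255 ... @ data['threshold']"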
@@ -109,16 +125,6 @@ OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects,
     }
 ))
 
-DEFAULT_CAMERA_SAVE_CLIPS = {
-    'enabled': False
-}
-DEFAULT_CAMERA_SNAPSHOTS = {
-    'show_timestamp': True,
-    'draw_zones': False,
-    'draw_bounding_boxes': True,
-    'crop_to_region': True
-}
-
 def each_role_used_once(inputs):
     roles = [role for i in inputs for role in i['roles']]
     roles_set = set(roles)
@@ -166,7 +172,7 @@ CAMERAS_SCHEMA = vol.Schema(vol.All(
                 vol.Optional('filters', default={}): FILTER_SCHEMA
             }
         },
-        vol.Optional('save_clips', default=DEFAULT_CAMERA_SAVE_CLIPS): {
+        vol.Optional('save_clips', default={}): {
            vol.Optional('enabled', default=False): bool,
            vol.Optional('pre_capture', default=30): int,
            'objects': [str],
@@ -179,14 +185,16 @@ CAMERAS_SCHEMA = vol.Schema(vol.All(
        vol.Optional('rtmp', default={}): {
            vol.Required('enabled', default=True): bool,
        },
-       vol.Optional('snapshots', default=DEFAULT_CAMERA_SNAPSHOTS): {
+       vol.Optional('snapshots', default={}): {
            vol.Optional('show_timestamp', default=True): bool,
            vol.Optional('draw_zones', default=False): bool,
            vol.Optional('draw_bounding_boxes', default=True): bool,
            vol.Optional('crop_to_region', default=True): bool,
            vol.Optional('height', default=175): int
        },
-       'objects': OBJECTS_SCHEMA
+       'objects': OBJECTS_SCHEMA,
+       vol.Optional('motion', default={}): MOTION_SCHEMA,
+       vol.Optional('detect', default={}): DETECT_SCHEMA
     }
 }, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras'))
)
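Replacing default=DEFAULT_CAMERA_SNAPSHOTS with default={} leans on voluptuous validating the default value itself, so the nested vol.Optional defaults fill in the missing keys; that is what makes the DEFAULT_CAMERA_* constants removable above. A minimal sketch with a hypothetical miniature of the snapshots sub-schema, assuming standard voluptuous behavior:

import voluptuous as vol

schema = vol.Schema({
    vol.Optional('snapshots', default={}): {
        vol.Optional('show_timestamp', default=True): bool,
        vol.Optional('draw_zones', default=False): bool,
    }
})

# An empty config still comes back fully populated from the nested defaults.
print(schema({}))
# {'snapshots': {'show_timestamp': True, 'draw_zones': False}}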
@@ -213,6 +221,8 @@ FRIGATE_CONFIG_SCHEMA = vol.Schema(
        },
        vol.Optional('ffmpeg', default={}): GLOBAL_FFMPEG_SCHEMA,
        vol.Optional('objects', default={}): OBJECTS_SCHEMA,
+       vol.Optional('motion', default={}): MOTION_SCHEMA,
+       vol.Optional('detect', default={}): DETECT_SCHEMA,
        vol.Required('cameras', default={}): CAMERAS_SCHEMA
    }
)
@@ -561,6 +571,58 @@ class CameraRtmpConfig():
             'enabled': self.enabled,
         }
 
+class MotionConfig():
+    def __init__(self, global_config, config, camera_height: int):
+        self._threshold = config.get('threshold', global_config.get('threshold', 25))
+        self._contour_area = config.get('contour_area', global_config.get('contour_area', 100))
+        self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2))
+        self._frame_alpha = config.get('frame_alpha', global_config.get('frame_alpha', 0.2))
+        self._frame_height = config.get('frame_height', global_config.get('frame_height', camera_height//6))
+
+    @property
+    def threshold(self):
+        return self._threshold
+
+    @property
+    def contour_area(self):
+        return self._contour_area
+
+    @property
+    def delta_alpha(self):
+        return self._delta_alpha
+
+    @property
+    def frame_alpha(self):
+        return self._frame_alpha
+
+    @property
+    def frame_height(self):
+        return self._frame_height
+
+    def to_dict(self):
+        return {
+            'threshold': self.threshold,
+            'contour_area': self.contour_area,
+            'delta_alpha': self.delta_alpha,
+            'frame_alpha': self.frame_alpha,
+            'frame_height': self.frame_height,
+        }
+
+
+
+class DetectConfig():
+    def __init__(self, global_config, config, camera_fps):
+        self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*2))
+
+    @property
+    def max_disappeared(self):
+        return self._max_disappeared
+
+    def to_dict(self):
+        return {
+            'max_disappeared': self._max_disappeared,
+        }
+
 class ZoneConfig():
     def __init__(self, name, config):
         self._coordinates = config['coordinates']
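The lookup chain in these classes resolves the camera-level value first, then the global value, then a built-in default. An illustrative check, with plain dicts standing in for the parsed config sections:

# Camera-level settings win over global ones, which win over the defaults.
global_config = {'threshold': 30}
camera_config = {'threshold': 40}

# Mirrors MotionConfig's lookup: camera value, else global, else built-in 25.
threshold = camera_config.get('threshold', global_config.get('threshold', 25))
print(threshold)  # 40; without the camera key it falls back to 30, with neither to 25

# DetectConfig's built-in default ties max_disappeared to the camera frame rate.
camera_fps = 5
max_disappeared = {}.get('max_disappeared', {}.get('max_disappeared', camera_fps * 2))
print(max_disappeared)  # 10, i.e. double the frame rate, matching the README note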
@@ -623,6 +685,8 @@ class CameraConfig():
         self._rtmp = CameraRtmpConfig(global_config, config['rtmp'])
         self._snapshots = CameraSnapshotsConfig(config['snapshots'])
         self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}))
+        self._motion = MotionConfig(global_config['motion'], config['motion'], self._height)
+        self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5))
 
         self._ffmpeg_cmds = []
         for ffmpeg_input in self._ffmpeg.inputs:
@@ -756,6 +820,14 @@ class CameraConfig():
     def objects(self):
         return self._objects
 
+    @property
+    def motion(self):
+        return self._motion
+
+    @property
+    def detect(self):
+        return self._detect
+
     @property
     def frame_shape(self):
         return self._frame_shape
@@ -781,6 +853,8 @@ class CameraConfig():
             'rtmp': self.rtmp.to_dict(),
             'snapshots': self.snapshots.to_dict(),
             'objects': self.objects.to_dict(),
+            'motion': self.motion.to_dict(),
+            'detect': self.detect.to_dict(),
             'frame_shape': self.frame_shape,
             'ffmpeg_cmds': [{'roles': c['roles'], 'cmd': ' '.join(c['cmd'])} for c in self.ffmpeg_cmds],
         }
frigate/motion.py
@@ -1,13 +1,15 @@
 import cv2
 import imutils
 import numpy as np
+from frigate.config import MotionConfig
+
 
 class MotionDetector():
-    def __init__(self, frame_shape, mask, resize_factor=4):
+    def __init__(self, frame_shape, mask, config: MotionConfig):
+        self.config = config
         self.frame_shape = frame_shape
-        self.resize_factor = resize_factor
-        self.motion_frame_size = (int(frame_shape[0]/resize_factor), int(frame_shape[1]/resize_factor))
+        self.resize_factor = frame_shape[0]/config.frame_height
+        self.motion_frame_size = (config.frame_height, config.frame_height*frame_shape[1]//frame_shape[0])
         self.avg_frame = np.zeros(self.motion_frame_size, np.float)
         self.avg_delta = np.zeros(self.motion_frame_size, np.float)
         self.motion_frame_count = 0
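With the fixed resize_factor gone, the detector's working resolution now follows config.frame_height. Illustrative arithmetic, assuming a 1080p camera and the default frame_height of 180:

# Assumed: a 1080p camera with the default frame_height of 180 (1080 // 6).
frame_shape = (1080, 1920)  # (height, width)
frame_height = 180

resize_factor = frame_shape[0] / frame_height                          # 6.0
motion_frame_size = (frame_height,
                     frame_height * frame_shape[1] // frame_shape[0])  # (180, 320)

# Boxes found on the small frame scale back up by resize_factor, so a box
# edge at x=50 on the 320-pixel-wide frame maps to x=300 on the full frame.
print(resize_factor, motion_frame_size, int(50 * resize_factor))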
@@ -23,6 +25,8 @@ class MotionDetector():
         # resize frame
         resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
 
+        # TODO: can I improve the contrast of the grayscale image here?
+
         # convert to grayscale
         # resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
 
@@ -38,14 +42,13 @@ class MotionDetector():
         frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
 
         # compute the average delta over the past few frames
-        # the alpha value can be modified to configure how sensitive the motion detection is.
         # higher values mean the current frame impacts the delta a lot, and a single raindrop may
         # register as motion, too low and a fast moving person wont be detected as motion
-        # this also assumes that a person is in the same location across more than a single frame
-        cv2.accumulateWeighted(frameDelta, self.avg_delta, 0.2)
+        cv2.accumulateWeighted(frameDelta, self.avg_delta, self.config.delta_alpha)
 
         # compute the threshold image for the current frame
-        current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+        # TODO: threshold
+        current_thresh = cv2.threshold(frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
 
         # black out everything in the avg_delta where there isnt motion in the current frame
         avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
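For intuition about delta_alpha and frame_alpha: cv2.accumulateWeighted computes dst = (1 - alpha) * dst + alpha * src. A small illustrative run:

import cv2
import numpy as np

# Running average with alpha = 0.2: each new frame contributes 20%, so a
# one-frame spike (a raindrop, say) decays instead of dominating the delta.
avg_delta = np.zeros((1, 3), np.float64)
spike = np.array([[0, 0, 100]], np.float32)  # brief single-frame change
still = np.zeros((1, 3), np.float32)

cv2.accumulateWeighted(spike, avg_delta, 0.2)  # avg_delta -> [0, 0, 20]
cv2.accumulateWeighted(still, avg_delta, 0.2)  # decays to  [0, 0, 16]
print(avg_delta)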
@@ -53,7 +56,7 @@ class MotionDetector():
 
         # then look for deltas above the threshold, but only in areas where there is a delta
         # in the current frame. this prevents deltas from previous frames from being included
-        thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]
+        thresh = cv2.threshold(avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
 
         # dilate the thresholded image to fill in holes, then find contours
         # on thresholded image
@@ -65,19 +68,18 @@ class MotionDetector():
         for c in cnts:
             # if the contour is big enough, count it as motion
             contour_area = cv2.contourArea(c)
-            if contour_area > 100:
+            if contour_area > self.config.contour_area:
                 x, y, w, h = cv2.boundingRect(c)
                 motion_boxes.append((x*self.resize_factor, y*self.resize_factor, (x+w)*self.resize_factor, (y+h)*self.resize_factor))
 
         if len(motion_boxes) > 0:
             self.motion_frame_count += 1
-            # TODO: this really depends on FPS
             if self.motion_frame_count >= 10:
-                # only average in the current frame if the difference persists for at least 3 frames
-                cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
+                # only average in the current frame if the difference persists for a bit
+                cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
         else:
             # when no motion, just keep averaging the frames together
-            cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
+            cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
             self.motion_frame_count = 0
 
         return motion_boxes
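The contour_area check drops tiny blobs of motion. A hedged sketch using OpenCV 4's findContours return convention (synthetic mask, values illustrative):

import cv2
import numpy as np

# A 5x5 blob and a single stray pixel on an otherwise empty motion mask.
mask = np.zeros((50, 50), np.uint8)
mask[10:15, 10:15] = 255
mask[40, 40] = 255

cnts = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]
areas = sorted(cv2.contourArea(c) for c in cnts)
print(areas)  # [0.0, 16.0] -- only the blob could pass a contour_area filter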
frigate/objects.py
@@ -12,14 +12,15 @@ import cv2
 import numpy as np
 from scipy.spatial import distance as dist
 
+from frigate.config import DetectConfig
 from frigate.util import draw_box_with_label
 
 
 class ObjectTracker():
-    def __init__(self, max_disappeared):
+    def __init__(self, config: DetectConfig):
         self.tracked_objects = {}
         self.disappeared = {}
-        self.max_disappeared = max_disappeared
+        self.max_disappeared = config.max_disappeared
 
     def register(self, index, obj):
         rand_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
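max_disappeared feeds the tracker's disappearance bookkeeping. A toy sketch of the idea, not frigate's actual implementation:

# Toy bookkeeping: with max_disappeared = 10, an object missing from more
# than 10 consecutive frames is deregistered.
max_disappeared = 10
disappeared = {'abc123': 0}  # hypothetical object id -> missed-frame count

for frame in range(12):
    detected = False  # pretend the object is never re-detected
    if not detected:
        disappeared['abc123'] += 1
        if disappeared['abc123'] > max_disappeared:
            print(f'deregister abc123 at frame {frame}')  # the 11th miss
            del disappeared['abc123']
            break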
process_clip.py
@@ -81,10 +81,10 @@ class ProcessClip():
     def process_frames(self, objects_to_track=['person'], object_filters={}):
         mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
         mask[:] = 255
-        motion_detector = MotionDetector(self.frame_shape, mask)
+        motion_detector = MotionDetector(self.frame_shape, mask, self.camera_config.motion)
 
         object_detector = LocalObjectDetector(labels='/labelmap.txt')
-        object_tracker = ObjectTracker(10)
+        object_tracker = ObjectTracker(self.camera_config.detect)
         process_info = {
             'process_fps': mp.Value('d', 0.0),
             'detection_fps': mp.Value('d', 0.0),
frigate/video.py
@@ -258,10 +258,10 @@ def track_camera(name, config: CameraConfig, model_shape, detection_queue, resul
     object_filters = config.objects.filters
     mask = config.mask
 
-    motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
+    motion_detector = MotionDetector(frame_shape, mask, config.motion)
     object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)
 
-    object_tracker = ObjectTracker(10)
+    object_tracker = ObjectTracker(config.detect)
 
     frame_manager = SharedMemoryFrameManager()