use yuv420p pixel format for motion
This commit is contained in:
parent f946813ccb
commit a611cbb942
@@ -66,7 +66,7 @@ save_clips:
 # - -f
 # - rawvideo
 # - -pix_fmt
-# - rgb24
+# - yuv420p
 
 ####################
 # Global object configuration. Applies to all cameras
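For context on the config change: yuv420p stores a full-resolution luma plane plus two quarter-resolution chroma planes, so a raw frame is half the size of its rgb24 equivalent. A minimal sketch of the arithmetic (the resolution is chosen for illustration, not taken from the commit):

# raw frame sizes for a hypothetical 1280x720 stream
width, height = 1280, 720
rgb24_size   = width * height * 3        # 2,764,800 bytes per frame
yuv420p_size = width * height * 3 // 2   # 1,382,400 bytes per frame, half the pipe traffic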
@@ -55,7 +55,7 @@ FFMPEG_DEFAULT_CONFIG = {
                              '-use_wallclock_as_timestamps', '1']),
     'output_args': FFMPEG_CONFIG.get('output_args',
         ['-f', 'rawvideo',
-         '-pix_fmt', 'rgb24'])
+         '-pix_fmt', 'yuv420p'])
 }
 
 GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
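With the new output args, ffmpeg emits raw I420 frames on stdout. A sketch of how such a stream can be consumed (the function and variable names are illustrative, not the project's actual helpers):

import subprocess
import numpy as np

def read_one_frame(ffmpeg_cmd, width, height):
    # ffmpeg writes raw yuv420p to stdout; each frame is exactly
    # height * width * 3 // 2 bytes
    proc = subprocess.Popen(ffmpeg_cmd, stdout=subprocess.PIPE)
    raw = proc.stdout.read(height * width * 3 // 2)
    # OpenCV treats an I420 frame as a single (height * 3 // 2, width) uint8 array
    return np.frombuffer(raw, dtype=np.uint8).reshape((height * 3 // 2, width))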
@@ -4,6 +4,7 @@ import numpy as np
 
 class MotionDetector():
     def __init__(self, frame_shape, mask, resize_factor=4):
+        self.frame_shape = frame_shape
         self.resize_factor = resize_factor
         self.motion_frame_size = (int(frame_shape[0]/resize_factor), int(frame_shape[1]/resize_factor))
         self.avg_frame = np.zeros(self.motion_frame_size, np.float)
@@ -16,14 +17,16 @@ class MotionDetector():
     def detect(self, frame):
         motion_boxes = []
 
+        gray = frame[0:self.frame_shape[0], 0:self.frame_shape[1]]
+
         # resize frame
-        resized_frame = cv2.resize(frame, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+        resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
 
         # convert to grayscale
-        gray = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
+        # resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
 
         # mask frame
-        gray[self.mask] = [255]
+        resized_frame[self.mask] = [255]
 
         # it takes ~30 frames to establish a baseline
         # dont bother looking for motion
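The detect() changes rely on the I420 memory layout: the first frame_shape[0] rows of the buffer are the full-resolution luma (Y) plane, which is already a grayscale image, so the old BGR-to-gray conversion becomes unnecessary. A standalone sketch of the idea (shapes illustrative):

import numpy as np

height, width = 720, 1280
i420 = np.zeros((height * 3 // 2, width), dtype=np.uint8)  # raw yuv420p frame

# the top `height` rows are the luma plane, i.e. a (height, width) grayscale image
gray = i420[0:height, 0:width]
assert gray.shape == (height, width)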
@@ -31,7 +34,7 @@ class MotionDetector():
             self.frame_counter += 1
         else:
             # compare to average
-            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg_frame))
+            frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
 
             # compute the average delta over the past few frames
             # the alpha value can be modified to configure how sensitive the motion detection is.
@@ -70,10 +73,10 @@ class MotionDetector():
                 # TODO: this really depends on FPS
                 if self.motion_frame_count >= 10:
                     # only average in the current frame if the difference persists for at least 3 frames
-                    cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
+                    cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
             else:
                 # when no motion, just keep averaging the frames together
-                cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
+                cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
                 self.motion_frame_count = 0
 
         return motion_boxes
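Since the luma plane is already grayscale, the rest of the detector just swaps gray for resized_frame; the running-average comparison itself is unchanged. A condensed sketch of that update step (the 0.2 alpha mirrors the diff, and 180x320 is 720x1280 at the default resize_factor of 4; surrounding logic omitted):

import cv2
import numpy as np

avg_frame = np.zeros((180, 320), np.float32)  # running average at motion resolution

def motion_delta(resized_frame):
    # compare the current luma frame to the running average
    delta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(avg_frame))
    # fold the frame into the average; a higher alpha adapts faster
    cv2.accumulateWeighted(resized_frame, avg_frame, 0.2)
    return delta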
@@ -93,7 +93,7 @@ class CameraState():
         # get the new frame and delete the old frame
         frame_id = f"{self.name}{frame_time}"
         with self.current_frame_lock:
-            self._current_frame = self.frame_manager.get(frame_id, self.config['frame_shape'])
+            self._current_frame = self.frame_manager.get(frame_id, (self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]))
             if not self.previous_frame_id is None:
                 self.frame_manager.delete(self.previous_frame_id)
             self.previous_frame_id = frame_id
@@ -120,7 +120,7 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
                    stop_event: mp.Event, current_frame: mp.Value):
 
     frame_num = 0
-    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
+    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
     skipped_fps.start()
     while True:
         if stop_event.is_set():
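Both sides of the shared-memory handoff have to agree on the new buffer size: capture_frames reads frame_shape[0] * frame_shape[1] * 3 // 2 bytes per frame, and every frame_manager.get() now asks for shape (height * 3 // 2, width). A quick consistency check (the frame_shape value is chosen for illustration):

frame_shape = (720, 1280, 3)                             # (height, width, channels)
yuv_shape   = (frame_shape[0] * 3 // 2, frame_shape[1])  # (1080, 1280)
frame_size  = frame_shape[0] * frame_shape[1] * 3 // 2   # 1,382,400 bytes
assert yuv_shape[0] * yuv_shape[1] == frame_size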
@@ -276,7 +276,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
 
         current_frame_time.value = frame_time
 
-        frame = frame_manager.get(f"{camera_name}{frame_time}", frame_shape)
+        frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))
 
         if frame is None:
             print(f"{camera_name}: frame {frame_time} is not in memory store.")
@@ -304,10 +304,13 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
         regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
             for a in combined_regions]
 
+        if len(regions) > 0:
+            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
+
         # resize regions and detect
         detections = []
         for region in regions:
-            detections.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+            detections.extend(detect(object_detector, rgb_frame, region, objects_to_track, object_filters, mask))
 
         #########
         # merge objects, check for clipped objects and look again up to 4 times
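Object detection still wants color input, so the diff converts to RGB on demand, and only when motion produced regions, which skips the conversion cost on idle frames. A minimal sketch of the conversion (shapes illustrative):

import cv2
import numpy as np

height, width = 720, 1280
frame = np.zeros((height * 3 // 2, width), dtype=np.uint8)  # I420 buffer

rgb_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
assert rgb_frame.shape == (height, width, 3)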
@@ -340,7 +343,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
                         box[0], box[1],
                         box[2], box[3])
 
-                    selected_objects.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+                    selected_objects.extend(detect(object_detector, rgb_frame, region, objects_to_track, object_filters, mask))
 
                     refining = True
                 else: