From a611cbb9423700d482c73ea381abc9ae297cf3f0 Mon Sep 17 00:00:00 2001
From: Blake Blackshear
Date: Sat, 10 Oct 2020 10:07:14 -0500
Subject: [PATCH] use yuv420p pixel format for motion

---
 config/config.example.yml    |  2 +-
 detect_objects.py            |  2 +-
 frigate/motion.py            | 15 +++++++++------
 frigate/object_processing.py |  2 +-
 frigate/video.py             | 11 +++++++----
 5 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/config/config.example.yml b/config/config.example.yml
index 5abe3c41d..20b10e556 100644
--- a/config/config.example.yml
+++ b/config/config.example.yml
@@ -66,7 +66,7 @@ save_clips:
 #           - -f
 #           - rawvideo
 #           - -pix_fmt
-#           - rgb24
+#           - yuv420p
 
 ####################
 # Global object configuration. Applies to all cameras
diff --git a/detect_objects.py b/detect_objects.py
index 0cc2377b5..91455e950 100644
--- a/detect_objects.py
+++ b/detect_objects.py
@@ -55,7 +55,7 @@ FFMPEG_DEFAULT_CONFIG = {
             '-use_wallclock_as_timestamps', '1']),
     'output_args': FFMPEG_CONFIG.get('output_args',
         ['-f', 'rawvideo',
-         '-pix_fmt', 'rgb24'])
+         '-pix_fmt', 'yuv420p'])
 }
 
 GLOBAL_OBJECT_CONFIG = CONFIG.get('objects', {})
diff --git a/frigate/motion.py b/frigate/motion.py
index 57f27afa6..2210ba481 100644
--- a/frigate/motion.py
+++ b/frigate/motion.py
@@ -4,6 +4,7 @@ import numpy as np
 
 class MotionDetector():
     def __init__(self, frame_shape, mask, resize_factor=4):
+        self.frame_shape = frame_shape
         self.resize_factor = resize_factor
         self.motion_frame_size = (int(frame_shape[0]/resize_factor), int(frame_shape[1]/resize_factor))
         self.avg_frame = np.zeros(self.motion_frame_size, np.float)
@@ -16,14 +17,16 @@ class MotionDetector():
     def detect(self, frame):
         motion_boxes = []
 
+        gray = frame[0:self.frame_shape[0], 0:self.frame_shape[1]]
+
         # resize frame
-        resized_frame = cv2.resize(frame, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+        resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
 
         # convert to grayscale
-        gray = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
+        # resized_frame = cv2.cvtColor(resized_frame, cv2.COLOR_BGR2GRAY)
 
         # mask frame
-        gray[self.mask] = [255]
+        resized_frame[self.mask] = [255]
 
         # it takes ~30 frames to establish a baseline
         # dont bother looking for motion
@@ -31,7 +34,7 @@ class MotionDetector():
             self.frame_counter += 1
         else:
             # compare to average
-            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg_frame))
+            frameDelta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))
 
             # compute the average delta over the past few frames
             # the alpha value can be modified to configure how sensitive the motion detection is.
@@ -70,10 +73,10 @@ class MotionDetector():
             # TODO: this really depends on FPS
             if self.motion_frame_count >= 10:
                 # only average in the current frame if the difference persists for at least 3 frames
-                cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
+                cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
             else:
                 # when no motion, just keep averaging the frames together
-                cv2.accumulateWeighted(gray, self.avg_frame, 0.2)
+                cv2.accumulateWeighted(resized_frame, self.avg_frame, 0.2)
                 self.motion_frame_count = 0
 
         return motion_boxes
\ No newline at end of file
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
index f763015bb..6ea58925b 100644
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -93,7 +93,7 @@ class CameraState():
         # get the new frame and delete the old frame
         frame_id = f"{self.name}{frame_time}"
         with self.current_frame_lock:
-            self._current_frame = self.frame_manager.get(frame_id, self.config['frame_shape'])
+            self._current_frame = self.frame_manager.get(frame_id, (self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]))
             if not self.previous_frame_id is None:
                 self.frame_manager.delete(self.previous_frame_id)
             self.previous_frame_id = frame_id
diff --git a/frigate/video.py b/frigate/video.py
index c5a5c4365..3dd7d0f63 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -120,7 +120,7 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
     stop_event: mp.Event, current_frame: mp.Value):
 
     frame_num = 0
-    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
+    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
     skipped_fps.start()
     while True:
         if stop_event.is_set():
@@ -276,7 +276,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
 
         current_frame_time.value = frame_time
 
-        frame = frame_manager.get(f"{camera_name}{frame_time}", frame_shape)
+        frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))
 
         if frame is None:
             print(f"{camera_name}: frame {frame_time} is not in memory store.")
@@ -304,10 +304,13 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
         regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
             for a in combined_regions]
 
+        if len(regions) > 0:
+            rgb_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
+
         # resize regions and detect
         detections = []
         for region in regions:
-            detections.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+            detections.extend(detect(object_detector, rgb_frame, region, objects_to_track, object_filters, mask))
 
         #########
         # merge objects, check for clipped objects and look again up to 4 times
@@ -340,7 +343,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
                         box[0], box[1],
                         box[2], box[3])
 
-                    selected_objects.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
+                    selected_objects.extend(detect(object_detector, rgb_frame, region, objects_to_track, object_filters, mask))
 
                     refining = True
                 else:
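
Note on the layout this patch relies on: the `* 3 // 2` arithmetic, the `(frame_shape[0]*3//2, frame_shape[1])` buffer shapes, and the Y-plane slice in MotionDetector all follow from how yuv420p/I420 frames are packed. The sketch below is illustrative only and is not part of the commit; the 1920x1080 dimensions and the random bytes standing in for ffmpeg pipe output are hypothetical, while in Frigate the dimensions come from the camera config.

    import cv2
    import numpy as np

    # Hypothetical dimensions for illustration; Frigate reads these from config.
    height, width = 1080, 1920

    # yuv420p/I420 stores 12 bits per pixel: a full-resolution Y (luma) plane
    # followed by quarter-resolution U and V planes. Read as a flat byte
    # buffer from the ffmpeg pipe, one frame is height * width * 3 // 2 bytes,
    # which reshapes to a single-channel array of (height * 3 // 2) rows.
    frame_size = height * width * 3 // 2
    raw = np.random.randint(0, 255, frame_size, dtype=np.uint8)  # stand-in for pipe output
    frame = raw.reshape((height * 3 // 2, width))

    # The Y plane is the first `height` rows and is already grayscale, so the
    # motion detector can slice it out without any cvtColor call.
    gray = frame[0:height, 0:width]

    # Full color is only needed for regions handed to the object detector, so
    # the I420 -> RGB conversion is deferred until at least one region exists.
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
    assert rgb_frame.shape == (height, width, 3)

The payoff of the commit is visible in this sketch: ffmpeg ships 12 bits per pixel instead of rgb24's 24, motion detection runs on the luma plane with no color conversion at all, and the full RGB frame is only materialized for frames that actually reach the object detector.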