diff --git a/benchmark_motion.py b/benchmark_motion.py
index a9ecc56b2..167770280 100644
--- a/benchmark_motion.py
+++ b/benchmark_motion.py
@@ -1,14 +1,13 @@
 import datetime
 import multiprocessing as mp
 import os
-from statistics import mean
 
 import cv2
 import numpy as np
 
 from frigate.config import MotionConfig
-from frigate.motion.frigate_motion import FrigateMotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
+from frigate.util import create_mask
 
 # get info on the video
 # cap = cv2.VideoCapture("debug/front_cam_2023_05_23_08_41__2023_05_23_08_43.mp4")
@@ -20,84 +19,85 @@ height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 fps = cap.get(cv2.CAP_PROP_FPS)
 frame_shape = (height, width, 3)
 
+mask = create_mask(
+    (height, width),
+    [],
+)
+
 # create the motion config
-motion_config = MotionConfig()
-motion_config.mask = np.zeros((height, width), np.uint8)
-motion_config.mask[:] = 255
-motion_config.improve_contrast = 1
-motion_config.frame_alpha = 0.02
-motion_config.threshold = 40
-motion_config.contour_area = 15
+motion_config_1 = MotionConfig()
+motion_config_1.mask = np.zeros((height, width), np.uint8)
+motion_config_1.mask[:] = mask
+# motion_config_1.improve_contrast = 1
+# motion_config_1.frame_height = 150
+# motion_config_1.frame_alpha = 0.02
+# motion_config_1.threshold = 30
+# motion_config_1.contour_area = 10
+
+motion_config_2 = MotionConfig()
+motion_config_2.mask = np.zeros((height, width), np.uint8)
+motion_config_2.mask[:] = mask
+# motion_config_2.improve_contrast = 1
+# motion_config_2.frame_height = 150
+# motion_config_2.frame_alpha = 0.01
+# motion_config_2.threshold = 20
+# motion_config_2.contour_area = 10
 
 save_images = True
 
-# create motion detectors
-frigate_motion_detector = FrigateMotionDetector(
+improved_motion_detector_1 = ImprovedMotionDetector(
     frame_shape=frame_shape,
-    config=motion_config,
+    config=motion_config_1,
     fps=fps,
-    improve_contrast=mp.Value("i", motion_config.improve_contrast),
-    threshold=mp.Value("i", motion_config.threshold),
-    contour_area=mp.Value("i", motion_config.contour_area),
+    improve_contrast=mp.Value("i", motion_config_1.improve_contrast),
+    threshold=mp.Value("i", motion_config_1.threshold),
+    contour_area=mp.Value("i", motion_config_1.contour_area),
+    name="default",
+    clipLimit=2.0,
+    tileGridSize=(8, 8),
 )
-frigate_motion_detector.save_images = save_images
+improved_motion_detector_1.save_images = save_images
 
-improved_motion_detector = ImprovedMotionDetector(
+improved_motion_detector_2 = ImprovedMotionDetector(
     frame_shape=frame_shape,
-    config=motion_config,
+    config=motion_config_2,
     fps=fps,
-    improve_contrast=mp.Value("i", motion_config.improve_contrast),
-    threshold=mp.Value("i", motion_config.threshold),
-    contour_area=mp.Value("i", motion_config.contour_area),
+    improve_contrast=mp.Value("i", motion_config_2.improve_contrast),
+    threshold=mp.Value("i", motion_config_2.threshold),
+    contour_area=mp.Value("i", motion_config_2.contour_area),
+    name="compare",
 )
-improved_motion_detector.save_images = save_images
+improved_motion_detector_2.save_images = save_images
 
 # read and process frames
-frame_times = {"frigate": [], "improved": []}
 ret, frame = cap.read()
 frame_counter = 1
 while ret:
     yuv_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV_I420)
     start_frame = datetime.datetime.now().timestamp()
-    frigate_motion_detector.detect(yuv_frame)
-    frame_times["frigate"].append(datetime.datetime.now().timestamp() - start_frame)
+    improved_motion_detector_1.detect(yuv_frame)
 
     start_frame = datetime.datetime.now().timestamp()
-    improved_motion_detector.detect(yuv_frame)
-    frame_times["improved"].append(datetime.datetime.now().timestamp() - start_frame)
+    improved_motion_detector_2.detect(yuv_frame)
 
-    frigate_frame = f"debug/frames/frigate-{frame_counter}.jpg"
-    improved_frame = f"debug/frames/improved-{frame_counter}.jpg"
-    if os.path.exists(frigate_frame) and os.path.exists(improved_frame):
-        image_row_1 = cv2.hconcat(
-            [
-                cv2.imread(frigate_frame),
-                cv2.imread(improved_frame),
-            ]
-        )
-
-        image_row_2 = cv2.resize(
-            frame,
-            dsize=(
-                frigate_motion_detector.motion_frame_size[1] * 2,
-                frigate_motion_detector.motion_frame_size[0] * 2,
-            ),
-            interpolation=cv2.INTER_LINEAR,
-        )
+    default_frame = f"debug/frames/default-{frame_counter}.jpg"
+    compare_frame = f"debug/frames/compare-{frame_counter}.jpg"
+    if os.path.exists(default_frame) and os.path.exists(compare_frame):
+        images = [
+            cv2.imread(default_frame),
+            cv2.imread(compare_frame),
+        ]
 
         cv2.imwrite(
             f"debug/frames/all-{frame_counter}.jpg",
-            cv2.vconcat([image_row_1, image_row_2]),
+            cv2.vconcat(images)
+            if frame_shape[0] > frame_shape[1]
+            else cv2.hconcat(images),
         )
-        os.unlink(frigate_frame)
-        os.unlink(improved_frame)
+        os.unlink(default_frame)
+        os.unlink(compare_frame)
    frame_counter += 1
 
     ret, frame = cap.read()
 
 cap.release()
-
-print("Frigate Motion Detector")
-print(f"Average frame processing time: {mean(frame_times['frigate'])*1000:.2f}ms")
-print("Improved Motion Detector")
-print(f"Average frame processing time: {mean(frame_times['improved'])*1000:.2f}ms")
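Note on the benchmark above: it now compares two ImprovedMotionDetector configurations ("default" vs "compare") side by side instead of pitting FrigateMotionDetector against ImprovedMotionDetector, and the frame_times/mean bookkeeping went away with the removed imports. If average processing time is still of interest, a small helper along these lines could be reinstated around the detect() calls (a minimal sketch, not part of the patch; the detector and loop variable names are the ones defined in the script above):

import datetime
from statistics import mean

def timed(fn, *args):
    # Return elapsed wall-clock seconds for a single call to fn.
    start = datetime.datetime.now().timestamp()
    fn(*args)
    return datetime.datetime.now().timestamp() - start

# inside the read loop:
#   durations.append(timed(improved_motion_detector_1.detect, yuv_frame))
# after the loop:
#   print(f"Average frame processing time: {mean(durations) * 1000:.2f}ms")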
diff --git a/docs/docs/configuration/index.md b/docs/docs/configuration/index.md
index 4d8cc4f17..ac65a1018 100644
--- a/docs/docs/configuration/index.md
+++ b/docs/docs/configuration/index.md
@@ -230,7 +230,7 @@ detect:
   # especially when using separate streams for detect and record.
   # Use this setting to make the timeline bounding boxes more closely align
   # with the recording. The value can be positive or negative.
-  # TIP: Imagine there is an event clip with a person walking from left to right. 
+  # TIP: Imagine there is an event clip with a person walking from left to right.
   # If the event timeline bounding box is consistently to the left of the person
   # then the value should be decreased. Similarly, if a person is walking from
   # left to right and the bounding box is consistently ahead of the person
@@ -275,7 +275,7 @@ motion:
   # Optional: The threshold passed to cv2.threshold to determine if a pixel is different enough to be counted as motion. (default: shown below)
   # Increasing this value will make motion detection less sensitive and decreasing it will make motion detection more sensitive.
   # The value should be between 1 and 255.
-  threshold: 40
+  threshold: 20
   # Optional: The percentage of the image used to detect lightning or other substantial changes where motion detection
   # needs to recalibrate. (default: shown below)
   # Increasing this value will make motion detection more likely to consider lightning or ir mode changes as valid motion.
@@ -286,19 +286,19 @@ motion:
   # Increasing this value will prevent smaller areas of motion from being detected. Decreasing will
   # make motion detection more sensitive to smaller moving objects.
   # As a rule of thumb:
-  #  - 15 - high sensitivity
+  #  - 10 - high sensitivity
   #  - 30 - medium sensitivity
   #  - 50 - low sensitivity
-  contour_area: 15
+  contour_area: 10
   # Optional: Alpha value passed to cv2.accumulateWeighted when averaging frames to determine the background (default: shown below)
   # Higher values mean the current frame impacts the average a lot, and a new object will be averaged into the background faster.
   # Low values will cause things like moving shadows to be detected as motion for longer.
   # https://www.geeksforgeeks.org/background-subtraction-in-an-image-using-concept-of-running-average/
-  frame_alpha: 0.02
+  frame_alpha: 0.01
-  # Optional: Height of the resized motion frame (default: 50)
+  # Optional: Height of the resized motion frame (default: 100)
   # Higher values will result in more granular motion detection at the expense of higher CPU usage.
   # Lower values result in less CPU, but small changes may not register as motion.
-  frame_height: 50
+  frame_height: 100
   # Optional: motion mask
   # NOTE: see docs for more detailed info on creating masks
   mask: 0,900,1080,900,1080,1920,0,1920
diff --git a/frigate/config.py b/frigate/config.py
index 2d9bb102a..b71ba1907 100644
--- a/frigate/config.py
+++ b/frigate/config.py
@@ -187,7 +187,7 @@ class RecordConfig(FrigateBaseModel):
 
 class MotionConfig(FrigateBaseModel):
     threshold: int = Field(
-        default=30,
+        default=20,
         title="Motion detection threshold (1-255).",
         ge=1,
         le=255,
@@ -198,7 +198,7 @@ class MotionConfig(FrigateBaseModel):
     improve_contrast: bool = Field(default=True, title="Improve Contrast")
     contour_area: Optional[int] = Field(default=10, title="Contour Area")
     delta_alpha: float = Field(default=0.2, title="Delta Alpha")
-    frame_alpha: float = Field(default=0.02, title="Frame Alpha")
+    frame_alpha: float = Field(default=0.01, title="Frame Alpha")
     frame_height: Optional[int] = Field(default=100, title="Frame Height")
     mask: Union[str, List[str]] = Field(
         default="", title="Coordinates polygon for the motion mask."
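Taken together, the retuned values keep docs and code in sync: threshold default 30→20 (docs example 40→20), the documented contour_area example drops to the existing code default of 10, frame_alpha halves to 0.01, and the docs example now shows the frame_height default of 100. A quick consistency check against the new pydantic defaults (a sketch, not part of the patch; assumes frigate and its dependencies are importable):

from frigate.config import MotionConfig

mc = MotionConfig()
# These mirror the new defaults introduced by this patch.
assert mc.threshold == 20
assert mc.frame_alpha == 0.01
assert mc.contour_area == 10
assert mc.frame_height == 100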
diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py
index 0aa259940..525854b82 100644
--- a/frigate/motion/improved_motion.py
+++ b/frigate/motion/improved_motion.py
@@ -15,7 +15,11 @@ class ImprovedMotionDetector(MotionDetector):
         improve_contrast,
         threshold,
         contour_area,
+        clipLimit=2.0,
+        tileGridSize=(2, 2),
+        name="improved",
     ):
+        self.name = name
         self.config = config
         self.frame_shape = frame_shape
         self.resize_factor = frame_shape[0] / config.frame_height
@@ -38,7 +42,7 @@ class ImprovedMotionDetector(MotionDetector):
         self.improve_contrast = improve_contrast
         self.threshold = threshold
         self.contour_area = contour_area
-        self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+        self.clahe = cv2.createCLAHE(clipLimit=clipLimit, tileGridSize=tileGridSize)
 
     def detect(self, frame):
         motion_boxes = []
@@ -52,12 +56,21 @@
             interpolation=cv2.INTER_LINEAR,
         )
 
+        if self.save_images:
+            resized_saved = resized_frame.copy()
+
         resized_frame = cv2.GaussianBlur(resized_frame, (3, 3), cv2.BORDER_DEFAULT)
 
+        if self.save_images:
+            blurred_saved = resized_frame.copy()
+
         # Improve contrast
         if self.improve_contrast.value:
             resized_frame = self.clahe.apply(resized_frame)
 
+        if self.save_images:
+            contrasted_saved = resized_frame.copy()
+
         # mask frame
         resized_frame[self.mask] = [255]
 
@@ -119,8 +132,19 @@
                     (0, 0, 255),
                     2,
                 )
+            frames = [
+                cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(frameDelta, cv2.COLOR_GRAY2BGR),
+                cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
+                thresh_dilated,
+            ]
             cv2.imwrite(
-                f"debug/frames/improved-{self.frame_counter}.jpg", thresh_dilated
+                f"debug/frames/{self.name}-{self.frame_counter}.jpg",
+                cv2.hconcat(frames)
+                if self.frame_shape[0] > self.frame_shape[1]
+                else cv2.vconcat(frames),
             )
 
         if len(motion_boxes) > 0:
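For context on the constructor knobs exposed above: clipLimit caps how much contrast CLAHE may add within each tile, and tileGridSize controls how many tiles the frame is divided into, so the new (2, 2) default adapts contrast less locally than the previously hard-coded (8, 8). A standalone sketch of just that step (not part of the patch; the input frame here is synthetic):

import cv2
import numpy as np

# Synthetic grayscale frame standing in for the resized motion frame.
gray = np.random.randint(0, 256, (100, 150), dtype=np.uint8)

clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(2, 2))
equalized = clahe.apply(gray)  # same shape and dtype, contrast equalized per tile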