diff --git a/frigate/motion/__init__.py b/frigate/motion/__init__.py
index 73d2ee1fd..248c37092 100644
--- a/frigate/motion/__init__.py
+++ b/frigate/motion/__init__.py
@@ -20,3 +20,7 @@ class MotionDetector(ABC):
     @abstractmethod
     def detect(self, frame):
         pass
+
+    @abstractmethod
+    def is_calibrating(self):
+        pass
diff --git a/frigate/motion/frigate_motion.py b/frigate/motion/frigate_motion.py
index 71fb35981..dc9c6b9ec 100644
--- a/frigate/motion/frigate_motion.py
+++ b/frigate/motion/frigate_motion.py
@@ -38,6 +38,9 @@ class FrigateMotionDetector(MotionDetector):
         self.threshold = threshold
         self.contour_area = contour_area
 
+    def is_calibrating(self):
+        return False
+
     def detect(self, frame):
         motion_boxes = []
 
diff --git a/frigate/motion/improved_motion.py b/frigate/motion/improved_motion.py
index b281cbbeb..b9d72da29 100644
--- a/frigate/motion/improved_motion.py
+++ b/frigate/motion/improved_motion.py
@@ -49,6 +49,9 @@ class ImprovedMotionDetector(MotionDetector):
         self.contrast_values[:, 1:2] = 255
         self.contrast_values_index = 0
 
+    def is_calibrating(self):
+        return self.calibrating
+
     def detect(self, frame):
         motion_boxes = []
 
@@ -141,7 +144,6 @@ class ImprovedMotionDetector(MotionDetector):
 
         # if calibrating or the motion contours are > 80% of the image area (lightning, ir, ptz) recalibrate
         if self.calibrating or pct_motion > self.config.lightning_threshold:
-            motion_boxes = []
             self.calibrating = True
 
         if self.save_images:
diff --git a/frigate/video.py b/frigate/video.py
index 97f903816..47e65811d 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -21,7 +21,6 @@ from frigate.log import LogPipe
 from frigate.motion import MotionDetector
 from frigate.motion.improved_motion import ImprovedMotionDetector
 from frigate.object_detection import RemoteObjectDetector
-from frigate.ptz.autotrack import ptz_moving_at_frame_time
 from frigate.track import ObjectTracker
 from frigate.track.norfair_tracker import NorfairTracker
 from frigate.types import PTZMetricsTypes
@@ -777,19 +776,8 @@ def process_frames(
             logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
             continue
 
-        # look for motion if enabled and ptz is not moving
-        # ptz_moving_at_frame_time() always returns False for
-        # non ptz/autotracking cameras
-        motion_boxes = (
-            motion_detector.detect(frame)
-            if motion_enabled.value
-            and not ptz_moving_at_frame_time(
-                frame_time,
-                ptz_metrics["ptz_start_time"].value,
-                ptz_metrics["ptz_stop_time"].value,
-            )
-            else []
-        )
+        # look for motion if enabled
+        motion_boxes = motion_detector.detect(frame) if motion_enabled.value else []
 
         regions = []
         consolidated_detections = []
@@ -814,8 +802,10 @@ def process_frames(
             )
             # and it hasn't disappeared
             and object_tracker.disappeared[obj["id"]] == 0
-            # and it doesn't overlap with any current motion boxes
-            and not intersects_any(obj["box"], motion_boxes)
+            # and it doesn't overlap with any current motion boxes when not calibrating
+            and not intersects_any(
+                obj["box"], [] if motion_detector.is_calibrating() else motion_boxes
+            )
         ]
 
         # get tracked object boxes that aren't stationary
@@ -825,7 +815,10 @@ def process_frames(
             if obj["id"] not in stationary_object_ids
         ]
 
-        combined_boxes = motion_boxes + tracked_object_boxes
+        combined_boxes = tracked_object_boxes
+        # only add in the motion boxes when not calibrating
+        if not motion_detector.is_calibrating():
+            combined_boxes += motion_boxes
 
         cluster_candidates = get_cluster_candidates(
             frame_shape, region_min_size, combined_boxes
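
For reference, below is a minimal, self-contained Python sketch of the pattern this patch introduces. It is an illustration, not Frigate's actual code: ToyMotionDetector and select_region_boxes are hypothetical stand-ins. The point it demonstrates is that motion detection keeps running while the detector recalibrates, but the resulting motion boxes are excluded from stationary-object checks and region selection until is_calibrating() returns False.

# Sketch only: ToyMotionDetector and select_region_boxes are hypothetical,
# illustrating the calibration-aware flow added by this patch.
from abc import ABC, abstractmethod


class MotionDetector(ABC):
    @abstractmethod
    def detect(self, frame):
        pass

    @abstractmethod
    def is_calibrating(self):
        pass


class ToyMotionDetector(MotionDetector):
    """Treats the first few frames as calibration, like a background model settling."""

    def __init__(self, calibration_frames=3):
        self.frames_seen = 0
        self.calibration_frames = calibration_frames

    def detect(self, frame):
        # Detection still runs during calibration; only the consumers ignore the boxes.
        self.frames_seen += 1
        return [(10, 10, 50, 50)]  # pretend one motion box (x1, y1, x2, y2) per frame

    def is_calibrating(self):
        return self.frames_seen < self.calibration_frames


def select_region_boxes(detector, frame, tracked_object_boxes, motion_enabled=True):
    """Reduced mirror of the patched process_frames() box selection."""
    # look for motion if enabled
    motion_boxes = detector.detect(frame) if motion_enabled else []

    combined_boxes = list(tracked_object_boxes)
    # only add in the motion boxes when not calibrating
    if not detector.is_calibrating():
        combined_boxes += motion_boxes
    return combined_boxes


if __name__ == "__main__":
    detector = ToyMotionDetector()
    tracked = [(100, 100, 150, 150)]
    for i in range(5):
        boxes = select_region_boxes(detector, frame=None, tracked_object_boxes=tracked)
        print(f"frame {i}: calibrating={detector.is_calibrating()}, boxes={boxes}")

Running the sketch shows motion boxes only entering the combined list once the toy calibration window has passed, which mirrors why the patch no longer clears motion_boxes inside ImprovedMotionDetector.detect(): the caller can now decide how to treat boxes produced during calibration.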