2023-06-11 15:45:11 +02:00
|
|
|
import cv2
|
|
|
|
import imutils
|
|
|
|
import numpy as np
|
2023-06-30 14:27:31 +02:00
|
|
|
from scipy.ndimage import gaussian_filter
|
2023-06-11 15:45:11 +02:00
|
|
|
|
|
|
|
from frigate.config import MotionConfig
|
|
|
|
from frigate.motion import MotionDetector
|
|
|
|
|
|
|
|
|
|
|
|
class ImprovedMotionDetector(MotionDetector):
    """Motion detector that compares each frame against a running average.

    Each incoming frame is downscaled, optionally contrast-stretched,
    masked, and blurred, then differenced against a rolling average frame.
    Contours of the thresholded difference that exceed a minimum area are
    reported as motion boxes scaled back to full resolution.
    """

    def __init__(
        self,
        frame_shape,
        config: MotionConfig,
        fps: int,
        improve_contrast,
        threshold,
        contour_area,
        name="improved",
        blur_radius=1,
        interpolation=cv2.INTER_NEAREST,
        contrast_frame_history=50,
    ):
        """
        Args:
            frame_shape: (height, width) of the full-resolution frames.
            config: motion settings (frame_height, mask, frame_alpha,
                lightning_threshold, ...).
            fps: camera frame rate; accepted for interface compatibility
                but not used by this implementation.
            improve_contrast: shared value object; when ``.value`` is truthy,
                frames are contrast-stretched before differencing.
            threshold: shared value object; per-pixel delta (``.value``)
                required for a pixel to count as changed.
            contour_area: shared value object; minimum contour area
                (``.value``) for a contour to be reported as motion.
            name: label used in debug image filenames.
            blur_radius: radius passed to the gaussian blur.
            interpolation: cv2 interpolation flag used when downscaling.
            contrast_frame_history: number of recent frames whose min/max
                values are averaged for contrast stretching.
        """
        self.name = name
        self.config = config
        self.frame_shape = frame_shape
        # factor to scale motion-frame coordinates back to full resolution
        self.resize_factor = frame_shape[0] / config.frame_height
        # (height, width) of the downscaled frame motion is computed on,
        # preserving the original aspect ratio
        self.motion_frame_size = (
            config.frame_height,
            config.frame_height * frame_shape[1] // frame_shape[0],
        )
        # rolling average of recent frames; float32 as required by
        # cv2.accumulateWeighted
        self.avg_frame = np.zeros(self.motion_frame_size, np.float32)
        self.motion_frame_count = 0
        self.frame_counter = 0
        resized_mask = cv2.resize(
            config.mask,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=cv2.INTER_AREA,
        )
        # indices of masked-out (zero) pixels in the downscaled mask
        self.mask = np.where(resized_mask == [0])
        self.save_images = False
        self.calibrating = True
        self.improve_contrast = improve_contrast
        self.threshold = threshold
        self.contour_area = contour_area
        self.blur_radius = blur_radius
        self.interpolation = interpolation
        # rolling history of per-frame (min, max) for contrast stretching;
        # seeded with (0, 255) so early averages are close to a no-op
        self.contrast_values = np.zeros((contrast_frame_history, 2), np.uint8)
        self.contrast_values[:, 1:2] = 255
        self.contrast_values_index = 0

    def is_calibrating(self):
        """Return True while the detector is still calibrating its average frame."""
        return self.calibrating

    def detect(self, frame):
        """Detect motion in ``frame``.

        Returns:
            List of (x_min, y_min, x_max, y_max) boxes in full-resolution
            coordinates; empty when no motion exceeds the configured area.
        """
        motion_boxes = []

        # take the top frame_shape[0] rows as the grayscale plane
        # (presumably the luma plane of a YUV frame — confirm at caller)
        gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]

        # resize frame
        resized_frame = cv2.resize(
            gray,
            dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
            interpolation=self.interpolation,
        )

        if self.save_images:
            resized_saved = resized_frame.copy()

        # Improve contrast
        if self.improve_contrast.value:
            # TODO tracking moving average of min/max to avoid sudden contrast changes
            minval = np.percentile(resized_frame, 4).astype(np.uint8)
            maxval = np.percentile(resized_frame, 96).astype(np.uint8)
            # skip contrast calcs if the image is a single color
            if minval < maxval:
                # keep track of the last N contrast values
                self.contrast_values[self.contrast_values_index] = [minval, maxval]
                self.contrast_values_index += 1
                if self.contrast_values_index == len(self.contrast_values):
                    self.contrast_values_index = 0

            avg_min, avg_max = np.mean(self.contrast_values, axis=0)

            # stretch the averaged [avg_min, avg_max] range to [0, 255]
            resized_frame = np.clip(resized_frame, avg_min, avg_max)
            resized_frame = (
                ((resized_frame - avg_min) / (avg_max - avg_min)) * 255
            ).astype(np.uint8)

        if self.save_images:
            contrasted_saved = resized_frame.copy()

        # mask frame
        # this has to come after contrast improvement
        resized_frame[self.mask] = [255]

        resized_frame = gaussian_filter(resized_frame, sigma=1, radius=self.blur_radius)

        if self.save_images:
            blurred_saved = resized_frame.copy()

        if self.save_images or self.calibrating:
            self.frame_counter += 1
        # compare to average
        frame_delta = cv2.absdiff(resized_frame, cv2.convertScaleAbs(self.avg_frame))

        # compute the threshold image for the current frame
        thresh = cv2.threshold(
            frame_delta, self.threshold.value, 255, cv2.THRESH_BINARY
        )[1]

        # dilate the thresholded image to fill in holes, then find contours
        # on thresholded image
        thresh_dilated = cv2.dilate(thresh, None, iterations=1)
        cnts = cv2.findContours(
            thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        cnts = imutils.grab_contours(cnts)

        # loop over the contours
        total_contour_area = 0
        for c in cnts:
            # if the contour is big enough, count it as motion
            contour_area = cv2.contourArea(c)
            total_contour_area += contour_area
            if contour_area > self.contour_area.value:
                x, y, w, h = cv2.boundingRect(c)
                motion_boxes.append(
                    (
                        int(x * self.resize_factor),
                        int(y * self.resize_factor),
                        int((x + w) * self.resize_factor),
                        int((y + h) * self.resize_factor),
                    )
                )

        pct_motion = total_contour_area / (
            self.motion_frame_size[0] * self.motion_frame_size[1]
        )

        # once the motion drops to less than 1% for the first time, assume its calibrated
        if pct_motion < 0.01:
            self.calibrating = False

        # if calibrating or the motion contours exceed the lightning threshold
        # (lightning, ir, ptz) recalibrate
        if self.calibrating or pct_motion > self.config.lightning_threshold:
            self.calibrating = True

        if self.save_images:
            thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
            # draw detected boxes (back in motion-frame coordinates) for debugging
            for b in motion_boxes:
                cv2.rectangle(
                    thresh_dilated,
                    (int(b[0] / self.resize_factor), int(b[1] / self.resize_factor)),
                    (int(b[2] / self.resize_factor), int(b[3] / self.resize_factor)),
                    (0, 0, 255),
                    2,
                )
            frames = [
                cv2.cvtColor(resized_saved, cv2.COLOR_GRAY2BGR),
                cv2.cvtColor(contrasted_saved, cv2.COLOR_GRAY2BGR),
                cv2.cvtColor(blurred_saved, cv2.COLOR_GRAY2BGR),
                cv2.cvtColor(frame_delta, cv2.COLOR_GRAY2BGR),
                cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR),
                thresh_dilated,
            ]
            cv2.imwrite(
                f"debug/frames/{self.name}-{self.frame_counter}.jpg",
                cv2.hconcat(frames)
                if self.frame_shape[0] > self.frame_shape[1]
                else cv2.vconcat(frames),
            )

        # update the running average; use a faster alpha while calibrating.
        # (previously this call was duplicated verbatim in both branches)
        avg_alpha = 0.2 if self.calibrating else self.config.frame_alpha
        if len(motion_boxes) > 0:
            self.motion_frame_count += 1
            # only average in the current frame if the difference persists for a bit
            if self.motion_frame_count >= 10:
                cv2.accumulateWeighted(resized_frame, self.avg_frame, avg_alpha)
        else:
            # when no motion, just keep averaging the frames together
            cv2.accumulateWeighted(resized_frame, self.avg_frame, avg_alpha)
            self.motion_frame_count = 0

        return motion_boxes
|