add mask as object filter
commit b2c7fc8f5b
parent 96ac2c29d6
@@ -500,7 +500,7 @@ class FilterConfig():
         self._threshold = config['threshold']
         self._min_score = config.get('min_score')
         self._raw_mask = config.get('mask')
-        self._mask = create_mask(frame_shape, self._raw_mask) if frame_shape else None
+        self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None

     @property
     def min_area(self):
@@ -686,7 +686,12 @@ class CameraRtmpConfig():
 class MotionConfig():
     def __init__(self, global_config, config, frame_shape):
         self._raw_mask = config.get('mask')
-        self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None
+        if self._raw_mask:
+            self._mask = create_mask(frame_shape, self._raw_mask)
+        else:
+            default_mask = np.zeros(frame_shape, np.uint8)
+            default_mask[:] = 255
+            self._mask = default_mask
         self._threshold = config.get('threshold', global_config.get('threshold', 25))
         self._contour_area = config.get('contour_area', global_config.get('contour_area', 100))
         self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2))
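For context, a minimal standalone sketch (not part of this commit) of the mask contract the two config classes now follow: FilterConfig keeps its mask as None when none is configured, while MotionConfig always ends up with a full-frame mask of 255s so motion detection covers the whole image. The frame_shape value below is an assumption for illustration.

import numpy as np

# Assumed values for illustration only.
frame_shape = (1080, 1920)   # (height, width)
raw_mask = None              # no 'mask' key in the config

# MotionConfig fallback: with no configured mask, build an all-255 mask,
# i.e. every pixel participates in motion detection.
if raw_mask:
    motion_mask = None  # frigate would call create_mask(frame_shape, raw_mask) here
else:
    default_mask = np.zeros(frame_shape, np.uint8)
    default_mask[:] = 255
    motion_mask = default_mask

# FilterConfig, by contrast, leaves its mask as None; callers must check for it.
filter_mask = motion_mask if raw_mask else None

assert motion_mask.min() == 255
assert filter_mask is None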
@@ -5,7 +5,7 @@ from frigate.config import MotionConfig


 class MotionDetector():
-    def __init__(self, frame_shape, mask, config: MotionConfig):
+    def __init__(self, frame_shape, config: MotionConfig):
         self.config = config
         self.frame_shape = frame_shape
         self.resize_factor = frame_shape[0]/config.frame_height
@@ -14,7 +14,7 @@ class MotionDetector():
         self.avg_delta = np.zeros(self.motion_frame_size, np.float)
         self.motion_frame_count = 0
         self.frame_counter = 0
-        resized_mask = cv2.resize(mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
+        resized_mask = cv2.resize(config.mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
         self.mask = np.where(resized_mask==[0])

     def detect(self, frame):
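A self-contained sketch of how MotionDetector consumes config.mask after this change: the full-resolution mask is shrunk to the downscaled motion frame, and np.where(resized_mask == [0]) yields index arrays used to zero out masked pixels before detection. The resolutions below are assumptions, not frigate defaults.

import cv2
import numpy as np

frame_shape = (1080, 1920)                    # assumed camera resolution (h, w)
frame_height = 180                            # assumed config.frame_height
resize_factor = frame_shape[0] / frame_height
motion_frame_size = (frame_height, int(frame_shape[1] / resize_factor))

# Full-resolution mask: 255 everywhere except a blocked-out band at the top.
mask = np.full(frame_shape, 255, np.uint8)
mask[:100, :] = 0

# The same two steps MotionDetector performs with config.mask.
resized_mask = cv2.resize(mask, dsize=(motion_frame_size[1], motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
masked_idx = np.where(resized_mask == [0])

# A downscaled frame can now have its masked pixels zeroed before differencing.
resized_frame = np.random.randint(0, 255, motion_frame_size, np.uint8)
resized_frame[masked_idx] = 0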
@@ -291,7 +291,7 @@ class CameraState():
                 cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)

             if draw_options.get('mask'):
-                mask_overlay = np.where(self.camera_config.mask==[0])
+                mask_overlay = np.where(self.camera_config.motion.mask==[0])
                 frame_copy[mask_overlay] = [0,0,0]

             if draw_options.get('motion_boxes'):
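The debug overlay now reads the mask from the camera's motion config rather than a camera-level mask. A minimal sketch of that drawing path, with stand-in values in place of frigate's config objects:

import numpy as np

frame_copy = np.zeros((360, 640, 3), np.uint8)      # stand-in BGR debug frame
motion_mask = np.full((360, 640), 255, np.uint8)    # stand-in for camera_config.motion.mask
motion_mask[300:, :] = 0                            # pretend the bottom strip is masked

# Same indexing the CameraState code uses: black out masked pixels in the overlay.
mask_overlay = np.where(motion_mask == [0])
frame_copy[mask_overlay] = [0, 0, 0]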
@@ -31,7 +31,7 @@ from frigate.util import (EventsPerSecond, FrameManager,

 logger = logging.getLogger(__name__)

-def filtered(obj, objects_to_track, object_filters, mask=None):
+def filtered(obj, objects_to_track, object_filters):
     object_name = obj[0]

     if not object_name in objects_to_track:
@@ -54,14 +54,15 @@ def filtered(obj, objects_to_track, object_filters, mask=None):
         if obj_settings.min_score > obj[1]:
             return True

-        # compute the coordinates of the object and make sure
-        # the location isnt outside the bounds of the image (can happen from rounding)
-        y_location = min(int(obj[2][3]), len(mask)-1)
-        x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(mask[0])-1)
+        if not obj_settings.mask is None:
+            # compute the coordinates of the object and make sure
+            # the location isnt outside the bounds of the image (can happen from rounding)
+            y_location = min(int(obj[2][3]), len(obj_settings.mask)-1)
+            x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(obj_settings.mask[0])-1)

-        # if the object is in a masked location, don't add it to detected objects
-        if (not mask is None) and (mask[y_location][x_location] == 0):
-            return True
+            # if the object is in a masked location, don't add it to detected objects
+            if obj_settings.mask[y_location][x_location] == 0:
+                return True

     return False

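The core behavioral change: the mask now lives on each object's FilterConfig (obj_settings) instead of being passed to filtered() separately. A self-contained sketch of the same bottom-center check, using a hypothetical detection tuple in the (label, score, box, area, region) layout the surrounding code assumes:

import numpy as np

# Hypothetical per-object mask: 255 = keep, 0 = filter out.
obj_mask = np.full((1080, 1920), 255, np.uint8)
obj_mask[:, :400] = 0   # ignore objects whose base falls in the left strip

# Detection tuple: (label, score, (x_min, y_min, x_max, y_max), area, region).
obj = ('person', 0.87, (150, 200, 350, 900), 140000, (0, 0, 1080, 1080))
box = obj[2]

# Bottom-center of the bounding box, clamped to the image bounds.
y_location = min(int(box[3]), len(obj_mask) - 1)
x_location = min(int((box[2] - box[0]) / 2.0) + box[0], len(obj_mask[0]) - 1)

# If that point lands on a masked (0) pixel, the detection is dropped.
filtered_out = obj_mask[y_location][x_location] == 0
assert filtered_out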
@@ -258,9 +259,8 @@ def track_camera(name, config: CameraConfig, model_shape, detection_queue, resul
     frame_shape = config.frame_shape
     objects_to_track = config.objects.track
     object_filters = config.objects.filters
-    mask = config.mask

-    motion_detector = MotionDetector(frame_shape, mask, config.motion)
+    motion_detector = MotionDetector(frame_shape, config.motion)
     object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection, model_shape)

     object_tracker = ObjectTracker(config.detect)
@@ -268,7 +268,7 @@
     frame_manager = SharedMemoryFrameManager()

     process_frames(name, frame_queue, frame_shape, model_shape, frame_manager, motion_detector, object_detector,
-        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, mask, stop_event)
+        object_tracker, detected_objects_queue, process_info, objects_to_track, object_filters, stop_event)

     logger.info(f"{name}: exiting subprocess")

@@ -278,7 +278,7 @@ def reduce_boxes(boxes):
     reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
     return [tuple(b) for b in reduced_boxes]

-def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask):
+def detect(object_detector, frame, model_shape, region, objects_to_track, object_filters):
     tensor_input = create_tensor_input(frame, model_shape, region)

     detections = []
@@ -296,7 +296,7 @@ def detect(object_detector, frame, model_shape, region, objects_to_track, object
             (x_max-x_min)*(y_max-y_min),
             region)
         # apply object filters
-        if filtered(det, objects_to_track, object_filters, mask):
+        if filtered(det, objects_to_track, object_filters):
             continue
         detections.append(det)
     return detections
@@ -305,7 +305,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
     frame_manager: FrameManager, motion_detector: MotionDetector,
     object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
     detected_objects_queue: mp.Queue, process_info: Dict,
-    objects_to_track: List[str], object_filters, mask, stop_event,
+    objects_to_track: List[str], object_filters, stop_event,
     exit_on_empty: bool = False):

     fps = process_info['process_fps']
@@ -358,7 +358,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
         # resize regions and detect
         detections = []
         for region in regions:
-            detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
+            detections.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))

         #########
         # merge objects, check for clipped objects and look again up to 4 times
@@ -393,7 +393,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s

                     regions.append(region)

-                    selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
+                    selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters))

                     refining = True
                 else: