mirror of https://github.com/blakeblackshear/frigate.git
synced 2024-11-21 19:07:46 +01:00

commit 4f829e818e
parent 3e803b6a03

implement person filtering with min/max by y position
@@ -17,33 +17,25 @@ cameras:
       - size: 350
         x_offset: 0
         y_offset: 300
-        min_person_area: 5000
       - size: 400
         x_offset: 350
         y_offset: 250
-        min_person_area: 2000
       - size: 400
         x_offset: 750
         y_offset: 250
-        min_person_area: 2000
-  back2:
-    rtsp:
-      user: viewer
-      host: 10.0.10.10
-      port: 554
-      # values that begin with a "$" will be replaced with environment variable
-      password: $RTSP_PASSWORD
-      path: /cam/realmonitor?channel=1&subtype=2
-    regions:
-      - size: 350
-        x_offset: 0
-        y_offset: 300
-        min_person_area: 5000
-      - size: 400
-        x_offset: 350
-        y_offset: 250
-        min_person_area: 2000
-      - size: 400
-        x_offset: 750
-        y_offset: 250
-        min_person_area: 2000
+    known_sizes:
+      - y: 300
+        min: 700
+        max: 1800
+      - y: 400
+        min: 3000
+        max: 7200
+      - y: 500
+        min: 8500
+        max: 20400
+      - y: 600
+        min: 10000
+        max: 50000
+      - y: 700
+        min: 10000
+        max: 125000
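Each known_sizes entry is a calibration point: a person whose feet are at image row y should have a bounding-box area between min and max pixels. The compute_sizes function added below fills in every row between calibration points by linear interpolation. A minimal sketch of that interpolation with numpy (variable names here are illustrative, not from the commit; note that np.interp clamps outside the calibrated range, while the commit's loop ramps up from zero at row 0):

    import numpy as np

    # calibration points copied from the example config above
    known_sizes = [
        {'y': 300, 'min': 700,   'max': 1800},
        {'y': 400, 'min': 3000,  'max': 7200},
        {'y': 500, 'min': 8500,  'max': 20400},
        {'y': 600, 'min': 10000, 'max': 50000},
        {'y': 700, 'min': 10000, 'max': 125000},
    ]

    ys = [p['y'] for p in known_sizes]
    # expected person-area bounds halfway between the y=400 and y=500 rows
    print(np.interp(450, ys, [p['min'] for p in known_sizes]))  # 5750.0
    print(np.interp(450, ys, [p['max'] for p in known_sizes]))  # 13800.0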
@@ -41,7 +41,7 @@ def main():
 
     cameras = {}
     for name, config in CONFIG['cameras'].items():
-        cameras[name] = Camera(name, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX)
+        cameras[name] = Camera(name, config, prepped_frame_queue, client, MQTT_TOPIC_PREFIX, DEBUG)
 
     prepped_queue_processor = PreppedQueueProcessor(
         cameras,
@@ -106,5 +106,3 @@ class FramePrepper(threading.Thread):
                 'region_x_offset': self.region_x_offset,
                 'region_y_offset': self.region_y_offset
             })
-        else:
-            print("queue full. moving on")
@@ -5,6 +5,7 @@ import cv2
 import threading
 import ctypes
 import multiprocessing as mp
+import numpy as np
 from object_detection.utils import visualization_utils as vis_util
 from . util import tonumpyarray
 from . object_detection import FramePrepper
@@ -108,8 +109,59 @@ def get_rtsp_url(rtsp_config):
         rtsp_config['password'], rtsp_config['host'], rtsp_config['port'],
         rtsp_config['path'])
 
+def compute_sizes(frame_shape, known_sizes, mask):
+    # create a 3 dimensional numpy array to store estimated sizes
+    estimated_sizes = np.zeros((frame_shape[0], frame_shape[1], 2), np.uint32)
+
+    sorted_positions = sorted(known_sizes, key=lambda s: s['y'])
+
+    last_position = {'y': 0, 'min': 0, 'max': 0}
+    next_position = sorted_positions.pop(0)
+    # if the next position has the same y coordinate, skip
+    while next_position['y'] == last_position['y']:
+        next_position = sorted_positions.pop(0)
+    y_change = next_position['y']-last_position['y']
+    min_size_change = next_position['min']-last_position['min']
+    max_size_change = next_position['max']-last_position['max']
+    min_step_size = min_size_change/y_change
+    max_step_size = max_size_change/y_change
+
+    min_current_size = 0
+    max_current_size = 0
+
+    for y_position in range(frame_shape[0]):
+        # fill the row with the estimated size
+        estimated_sizes[y_position,:] = [min_current_size, max_current_size]
+
+        # if you have reached the next size
+        if y_position == next_position['y']:
+            last_position = next_position
+            # if there are still positions left
+            if len(sorted_positions) > 0:
+                next_position = sorted_positions.pop(0)
+                # if the next position has the same y coordinate, skip
+                while next_position['y'] == last_position['y']:
+                    next_position = sorted_positions.pop(0)
+                y_change = next_position['y']-last_position['y']
+                min_size_change = next_position['min']-last_position['min']
+                max_size_change = next_position['max']-last_position['max']
+                min_step_size = min_size_change/y_change
+                max_step_size = max_size_change/y_change
+            else:
+                min_step_size = 0
+                max_step_size = 0
+
+        min_current_size += min_step_size
+        max_current_size += max_step_size
+
+    # apply mask by filling 0s for all locations a person could not be standing
+    if mask is not None:
+        pass
+
+    return estimated_sizes
+
 class Camera:
-    def __init__(self, name, config, prepped_frame_queue, mqtt_client, mqtt_prefix):
+    def __init__(self, name, config, prepped_frame_queue, mqtt_client, mqtt_prefix, debug=False):
         self.name = name
         self.config = config
         self.detected_objects = []
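A quick sanity check of the lookup table the new function builds (a sketch under assumed inputs, not part of the commit; the frame shape and calibration points are invented):

    # assumes compute_sizes from the hunk above is in scope
    sizes = compute_sizes((720, 1280), [
        {'y': 300, 'min': 700, 'max': 1800},
        {'y': 700, 'min': 10000, 'max': 125000},
    ], None)

    print(sizes.shape)      # (720, 1280, 2): one [min, max] pair per pixel
    print(sizes[0, 0])      # [0 0]: the ramp starts from zero at the top row
    print(sizes[300, 640])  # [700 1800]: the first calibration row
    print(sizes[500, 640])  # [5350 63400]: halfway between the two rows

Every column in a row holds the same pair, so the x index is currently irrelevant; the mask parameter that would zero out locations where no one could be standing is accepted but not yet implemented.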
@@ -119,6 +171,7 @@ class Camera:
         self.frame_shape = get_frame_shape(self.rtsp_url)
         self.mqtt_client = mqtt_client
         self.mqtt_topic_prefix = '{}/{}'.format(mqtt_prefix, self.name)
+        self.debug = debug
 
         # compute the flattened array length from the shape of the frame
         flat_array_length = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2]
@@ -170,6 +223,13 @@ class Camera:
         # start a thread to publish object scores (currently only person)
         mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self.objects_parsed, self.detected_objects)
         mqtt_publisher.start()
 
+        # pre-compute estimated person size for every pixel in the image
+        if 'known_sizes' in self.config:
+            self.calculated_person_sizes = compute_sizes((self.frame_shape[0], self.frame_shape[1]),
+                self.config['known_sizes'], None)
+        else:
+            self.calculated_person_sizes = None
+
     def start(self):
         self.capture_process.start()
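Precomputing a [min, max] pair per pixel is cheap in memory; a rough estimate for a 720p stream, assuming the np.uint32 dtype used in compute_sizes:

    height, width = 720, 1280             # hypothetical frame shape
    table_bytes = height * width * 2 * 4  # two 4-byte uint32 values per pixel
    print(table_bytes / 1024**2)          # ~7.0 MiB per camera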
@@ -188,23 +248,22 @@ class Camera:
             return
 
         for obj in objects:
-            if obj['name'] == 'person':
-                person_area = (obj['xmax']-obj['xmin'])*(obj['ymax']-obj['ymin'])
-                # find the matching region
-                region = None
-                for r in self.regions:
-                    if (
-                        obj['xmin'] >= r['x_offset'] and
-                        obj['ymin'] >= r['y_offset'] and
-                        obj['xmax'] <= r['x_offset']+r['size'] and
-                        obj['ymax'] <= r['y_offset']+r['size']
-                    ):
-                        region = r
-                        break
-
-                # if the min person area is larger than the
-                # detected person, don't add it to detected objects
-                if region and region['min_person_area'] > person_area:
-                    continue
+            if self.debug:
+                # print out the detected objects, scores and locations
+                print(self.name, obj['name'], obj['score'], obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'])
+
+            if self.calculated_person_sizes is not None and obj['name'] == 'person':
+                standing_location = (int(obj['ymax']), int((obj['xmax']-obj['xmin'])/2))
+                person_size_range = self.calculated_person_sizes[standing_location[0]][standing_location[1]]
+
+                # if the person isnt on the ground, continue
+                if(person_size_range[0] == 0 and person_size_range[1] == 0):
+                    continue
+
+                person_size = (obj['xmax']-obj['xmin'])*(obj['ymax']-obj['ymin'])
+
+                # if the person is not within 20% of the estimated size for that location, continue
+                if person_size < person_size_range[0] or person_size > person_size_range[1]:
+                    continue
 
             self.detected_objects.append(obj)
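Two details in this hunk are worth flagging. First, the x coordinate of standing_location is (xmax-xmin)/2, which is half the box width rather than the horizontal center (xmax+xmin)/2; this is harmless today because compute_sizes fills every column of a row with the same pair, but it would misread the table once a mask varies values within a row. Second, the "20%" in the comment describes a tolerance the code does not apply; the check enforces the interpolated min/max range directly. A hypothetical standalone version of the filter with the centering fixed:

    def keep_person(obj, calculated_person_sizes):
        # hypothetical helper, not from the commit: returns True if the
        # detection survives the size-by-y-position filter
        if calculated_person_sizes is None or obj['name'] != 'person':
            return True
        # bottom-center of the bounding box approximates the standing location
        x_center = int((obj['xmax'] + obj['xmin']) / 2)
        min_size, max_size = calculated_person_sizes[int(obj['ymax'])][x_center]
        # [0, 0] marks locations where no one could be standing
        if min_size == 0 and max_size == 0:
            return False
        person_size = (obj['xmax'] - obj['xmin']) * (obj['ymax'] - obj['ymin'])
        return min_size <= person_size <= max_size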