mirror of https://github.com/blakeblackshear/frigate.git
fix multiple object type tracking
This commit is contained in:
parent d87f4407a0
commit 2aada930e3
@@ -4,7 +4,9 @@ import threading
 import cv2
 import prctl
 import itertools
+import copy
 import numpy as np
+import multiprocessing as mp
 from collections import defaultdict
 from scipy.spatial import distance as dist
 from frigate.util import draw_box_with_label, LABELS, compute_intersection_rectangle, compute_intersection_over_union, calculate_region
@@ -24,6 +26,14 @@ class ObjectCleaner(threading.Thread):
             for frame_time in list(self.camera.detected_objects.keys()).copy():
                 if not frame_time in self.camera.frame_cache:
                     del self.camera.detected_objects[frame_time]
+
+            with self.camera.object_tracker.tracked_objects_lock:
+                now = datetime.datetime.now().timestamp()
+                for id, obj in list(self.camera.object_tracker.tracked_objects.items()):
+                    # if the object is more than 10 seconds old
+                    # and not in the most recent frame, deregister
+                    if (now - obj['frame_time']) > 10 and self.camera.object_tracker.most_recent_frame_time > obj['frame_time']:
+                        self.camera.object_tracker.deregister(id)
 
 class DetectedObjectsProcessor(threading.Thread):
     def __init__(self, camera):
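For context on the cleanup pass added above: any tracked object that has gone more than 10 seconds without an update, and that is older than the most recently processed frame, gets deregistered. A minimal standalone sketch of that pruning rule (the dictionary layout and timestamps below are illustrative, not the project's actual data structures):

    import time

    # illustrative tracker state: object id -> last frame_time it was matched
    tracked_objects = {
        'person-1': {'frame_time': time.time() - 30},  # stale
        'car-2': {'frame_time': time.time()},          # fresh
    }
    most_recent_frame_time = time.time()

    now = time.time()
    for obj_id, obj in list(tracked_objects.items()):
        # prune if unmatched for more than 10s and frames have moved past it
        if (now - obj['frame_time']) > 10 and most_recent_frame_time > obj['frame_time']:
            del tracked_objects[obj_id]

    print(list(tracked_objects.keys()))  # ['car-2']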
@@ -222,15 +232,17 @@ class ObjectTracker(threading.Thread):
         threading.Thread.__init__(self)
         self.camera = camera
         self.tracked_objects = {}
-        self.disappeared = {}
-        self.max_disappeared = max_disappeared
+        self.tracked_objects_lock = mp.Lock()
+        self.most_recent_frame_time = None
 
     def run(self):
         prctl.set_name(self.__class__.__name__)
         while True:
             frame_time = self.camera.refined_frame_queue.get()
-            self.match_and_update(self.camera.detected_objects[frame_time])
-            self.camera.frame_output_queue.put(frame_time)
+            with self.tracked_objects_lock:
+                self.match_and_update(self.camera.detected_objects[frame_time])
+                self.most_recent_frame_time = frame_time
+            self.camera.frame_output_queue.put((frame_time, copy.deepcopy(self.tracked_objects)))
             if len(self.tracked_objects) > 0:
                 with self.camera.objects_tracked:
                     self.camera.objects_tracked.notify_all()
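In the run loop above, the tracker now mutates its state while holding a lock and pushes a deep copy onto the output queue, so consumers see a snapshot tied to a specific frame_time rather than a dict that keeps changing underneath them. A rough sketch of that pattern (the queue, worker, and detection payloads here are invented for illustration; this sketch takes the copy while still holding the lock):

    import copy
    import queue
    import threading

    tracked_objects_lock = threading.Lock()
    tracked_objects = {}
    frame_output_queue = queue.Queue()

    def process_frame(frame_time, detection):
        # update shared state atomically, then snapshot it for downstream threads
        with tracked_objects_lock:
            tracked_objects[f"{frame_time}-0"] = detection
            snapshot = copy.deepcopy(tracked_objects)
        frame_output_queue.put((frame_time, snapshot))

    process_frame(1.0, {'name': 'person', 'score': 0.87})
    frame_time, snapshot = frame_output_queue.get()
    print(frame_time, list(snapshot.keys()))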
@@ -241,10 +253,8 @@ class ObjectTracker(threading.Thread):
         obj['top_score'] = obj['score']
         self.add_history(obj)
         self.tracked_objects[id] = obj
-        self.disappeared[id] = 0
 
     def deregister(self, id):
-        del self.disappeared[id]
         del self.tracked_objects[id]
 
     def update(self, id, new_obj):
@@ -267,22 +277,7 @@ class ObjectTracker(threading.Thread):
             obj['history'] = [entry]
 
     def match_and_update(self, new_objects):
         # check to see if the list of input bounding box rectangles
         # is empty
-        if len(new_objects) == 0:
-            # loop over any existing tracked objects and mark them
-            # as disappeared
-            for objectID in list(self.disappeared.keys()):
-                self.disappeared[objectID] += 1
-
-                # if we have reached a maximum number of consecutive
-                # frames where a given object has been marked as
-                # missing, deregister it
-                if self.disappeared[objectID] > self.max_disappeared:
-                    self.deregister(objectID)
-
-            # return early as there are no centroids or tracking info
-            # to update
-            return
 
         # group by name
@@ -291,13 +286,12 @@ class ObjectTracker(threading.Thread):
             new_object_groups[obj['name']].append(obj)
 
         # track objects for each label type
-        # TODO: this is going to miss deregistering objects that are not in the new groups
         for label, group in new_object_groups.items():
             current_objects = [o for o in self.tracked_objects.values() if o['name'] == label]
             current_ids = [o['id'] for o in current_objects]
             current_centroids = np.array([o['centroid'] for o in current_objects])
 
-            # compute centroids
+            # compute centroids of new objects
             for obj in group:
                 centroid_x = int((obj['box']['xmin']+obj['box']['xmax']) / 2.0)
                 centroid_y = int((obj['box']['ymin']+obj['box']['ymax']) / 2.0)
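The hunk above relies on detections having already been bucketed by label, so each object type is matched only against tracks of the same type. A tiny illustration of that grouping idiom (the detection dicts are made-up examples, and defaultdict(list) stands in for however the project builds the groups):

    from collections import defaultdict

    detections = [
        {'name': 'person', 'box': {'xmin': 10, 'ymin': 10, 'xmax': 50, 'ymax': 90}},
        {'name': 'car', 'box': {'xmin': 200, 'ymin': 40, 'xmax': 320, 'ymax': 120}},
        {'name': 'person', 'box': {'xmin': 60, 'ymin': 12, 'xmax': 100, 'ymax': 95}},
    ]

    new_object_groups = defaultdict(list)
    for obj in detections:
        new_object_groups[obj['name']].append(obj)

    # each label is now matched and tracked independently
    for label, group in new_object_groups.items():
        print(label, len(group))  # person 2, car 1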
@@ -339,7 +333,6 @@ class ObjectTracker(threading.Thread):
             for (row, col) in zip(rows, cols):
                 # if we have already examined either the row or
                 # column value before, ignore it
-                # val
                 if row in usedRows or col in usedCols:
                     continue
 
@@ -347,43 +340,22 @@ class ObjectTracker(threading.Thread):
-                # set its new centroid, and reset the disappeared
-                # counter
                 objectID = current_ids[row]
-                self.update(objectID, new_objects[col])
-                self.disappeared[objectID] = 0
+                self.update(objectID, group[col])
 
                 # indicate that we have examined each of the row and
                 # column indexes, respectively
                 usedRows.add(row)
                 usedCols.add(col)
 
             # compute both the row and column index we have NOT yet
             # examined
             unusedRows = set(range(0, D.shape[0])).difference(usedRows)
+            # compute the column index we have NOT yet examined
             unusedCols = set(range(0, D.shape[1])).difference(usedCols)
-
-            # in the event that the number of object centroids is
-            # equal or greater than the number of input centroids
-            # we need to check and see if some of these objects have
-            # potentially disappeared
-            if D.shape[0] >= D.shape[1]:
-                # loop over the unused row indexes
-                for row in unusedRows:
-                    # grab the object ID for the corresponding row
-                    # index and increment the disappeared counter
-                    objectID = current_ids[row]
-                    self.disappeared[objectID] += 1
-
-                    # check to see if the number of consecutive
-                    # frames the object has been marked "disappeared"
-                    # for warrants deregistering the object
-                    if self.disappeared[objectID] > self.max_disappeared:
-                        self.deregister(objectID)
-
-            # otherwise, if the number of input centroids is greater
+            # if the number of input centroids is greater
             # than the number of existing object centroids we need to
             # register each new input centroid as a trackable object
-            else:
-                for col in unusedCols:
-                    self.register(col, new_objects[col])
+            # if D.shape[0] < D.shape[1]:
+            for col in unusedCols:
+                self.register(col, group[col])
 
 # Maintains the frame and object with the highest score
 class BestFrames(threading.Thread):
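The substantive fix in the hunk above is the indexing: the row/column indices come from a distance matrix built for a single label's group, so they must index back into group, not into the full new_objects list, both when updating a matched track and when registering leftovers. A compact sketch of why that matters, using scipy's cdist the same way the tracker does (all data below is invented for illustration):

    import numpy as np
    from scipy.spatial import distance as dist

    new_objects = [
        {'name': 'car', 'centroid': (400, 60)},      # index 0 in the full list
        {'name': 'person', 'centroid': (105, 205)},  # index 1 in the full list
    ]
    group = [o for o in new_objects if o['name'] == 'person']  # index 0 within the group

    current_centroids = np.array([(100, 200)])  # one existing person track
    new_centroids = np.array([o['centroid'] for o in group])

    D = dist.cdist(current_centroids, new_centroids)
    col = D.argmin(axis=1)[0]  # column index is relative to the GROUP

    print(group[col]['name'])        # person: the correct match
    print(new_objects[col]['name'])  # car: what indexing the full list would pick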
@@ -400,18 +372,18 @@ class BestFrames(threading.Thread):
             with self.camera.objects_tracked:
                 self.camera.objects_tracked.wait()
 
-            # make a copy of detected objects
-            detected_objects = list(self.camera.object_tracker.tracked_objects.values()).copy()
+            # make a copy of tracked objects
+            tracked_objects = list(self.camera.object_tracker.tracked_objects.values())
 
-            for obj in detected_objects:
+            for obj in tracked_objects:
                 if obj['name'] in self.best_objects:
                     now = datetime.datetime.now().timestamp()
                     # if the object is a higher score than the current best score
                     # or the current object is more than 1 minute old, use the new object
                     if obj['score'] > self.best_objects[obj['name']]['score'] or (now - self.best_objects[obj['name']]['frame_time']) > 60:
-                        self.best_objects[obj['name']] = obj
+                        self.best_objects[obj['name']] = copy.deepcopy(obj)
                 else:
-                    self.best_objects[obj['name']] = obj
+                    self.best_objects[obj['name']] = copy.deepcopy(obj)
 
             for name, obj in self.best_objects.items():
                 if obj['frame_time'] in self.camera.frame_cache:
@@ -75,12 +75,13 @@ def compute_intersection_over_union(box_a, box_b):
 def tonumpyarray(mp_arr):
     return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
 
-def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info):
-    color = COLOR_MAP[label]
+def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None):
+    if color is None:
+        color = COLOR_MAP[label]
     display_text = "{}: {}".format(label, info)
     cv2.rectangle(frame, (x_min, y_min), 
         (x_max, y_max), 
-        color, 2)
+        color, thickness)
     font_scale = 0.5
     font = cv2.FONT_HERSHEY_SIMPLEX
     # get the width and height of the text box
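With the optional thickness and color arguments above, callers keep the old behavior (per-label color, 2 px border) or override it per call. A simplified, self-contained stand-in is sketched below just to show the call patterns; the project's real function also measures the text box and draws a label background, and COLOR_MAP here is a placeholder:

    import cv2
    import numpy as np

    COLOR_MAP = {'person': (0, 255, 0)}  # placeholder label -> BGR color table

    def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info,
                            thickness=2, color=None):
        # fall back to the per-label color only when no override is given
        if color is None:
            color = COLOR_MAP[label]
        cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
        cv2.putText(frame, "{}: {}".format(label, info), (x_min, y_min - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    draw_box_with_label(frame, 10, 10, 100, 120, 'person', '87%')               # defaults
    draw_box_with_label(frame, 10, 10, 100, 120, 'person', '87%', thickness=3)  # thicker box
    draw_box_with_label(frame, 10, 10, 100, 120, 'person', '87% id-1',
                        color=(255, 0, 0), thickness=1)                         # explicit BGR color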
@@ -9,6 +9,7 @@ import multiprocessing as mp
 import subprocess as sp
 import numpy as np
 import prctl
+import copy
 import itertools
 from collections import defaultdict
 from frigate.util import tonumpyarray, LABELS, draw_box_with_label, calculate_region, EventsPerSecond
@@ -64,7 +65,7 @@ class CameraWatchdog(threading.Thread):
             # wait a bit before checking
             time.sleep(10)
 
-            if (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > 300:
+            if self.camera.frame_time.value != 0.0 and (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > 300:
                 print("last frame is more than 5 minutes old, restarting camera capture...")
                 self.camera.start_or_restart_capture()
                 time.sleep(5)
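The added frame_time.value != 0.0 check guards against the shared timestamp still being at its initial value before the first frame has ever been captured; without it, the age test alone would report a stall immediately at startup. A small sketch of the same guard around a shared value (the 300 second threshold matches the hunk above, the rest is illustrative):

    import datetime
    import multiprocessing as mp

    frame_time = mp.Value('d', 0.0)  # shared double, stays 0.0 until the first frame arrives

    def capture_is_stalled(frame_time, max_age_seconds=300):
        now = datetime.datetime.now().timestamp()
        # never report a stall before a real timestamp has been written
        return frame_time.value != 0.0 and (now - frame_time.value) > max_age_seconds

    print(capture_is_stalled(frame_time))  # False: no frame yet, but not treated as stalled
    frame_time.value = datetime.datetime.now().timestamp() - 600
    print(capture_is_stalled(frame_time))  # True: last frame is 10 minutes old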
@@ -116,12 +117,12 @@ class VideoWriter(threading.Thread):
     def run(self):
         prctl.set_name(self.__class__.__name__)
         while True:
-            frame_time = self.camera.frame_output_queue.get()
-            if len(self.camera.object_tracker.tracked_objects) == 0:
-                continue
-            f = open(f"/debug/{self.camera.name}-{str(frame_time)}.jpg", 'wb')
-            f.write(self.camera.frame_with_objects(frame_time))
-            f.close()
+            (frame_time, tracked_objects) = self.camera.frame_output_queue.get()
+            # if len(self.camera.object_tracker.tracked_objects) == 0:
+            #     continue
+            # f = open(f"/debug/output/{self.camera.name}-{str(format(frame_time, '.8f'))}.jpg", 'wb')
+            # f.write(self.camera.frame_with_objects(frame_time, tracked_objects))
+            # f.close()
 
 class Camera:
     def __init__(self, name, ffmpeg_config, global_objects_config, config, prepped_frame_queue, mqtt_client, mqtt_prefix):
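Because the tracker now queues (frame_time, tracked_objects) tuples, a consumer like the writer above can unpack both and render the exact snapshot that was matched for that frame, as the commented-out debug path suggests. A minimal consumer sketch (the queue contents and the writer function are stand-ins, not the project's API):

    import queue

    frame_output_queue = queue.Queue()
    frame_output_queue.put((1580000000.25, {'1580000000.25-0': {'name': 'person', 'score': 0.9}}))

    def write_debug_frame(frame_time, tracked_objects):
        # stand-in for something like camera.frame_with_objects(frame_time, tracked_objects)
        print("frame {:.8f}: {} tracked objects".format(frame_time, len(tracked_objects)))

    frame_time, tracked_objects = frame_output_queue.get()
    write_debug_frame(frame_time, tracked_objects)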
@@ -195,6 +196,14 @@ class Camera:
         for obj in objects_with_config:
             self.object_filters[obj] = {**global_object_filters.get(obj, {}), **camera_object_filters.get(obj, {})}
 
+        # start a thread to track objects
+        self.object_tracker = ObjectTracker(self, 10)
+        self.object_tracker.start()
+
+        # start a thread to write tracked frames to disk
+        self.video_writer = VideoWriter(self)
+        self.video_writer.start()
+
         # start a thread to queue resize requests for regions
         self.region_requester = RegionRequester(self)
         self.region_requester.start()
@@ -222,14 +231,6 @@ class Camera:
         self.region_refiner.start()
         self.dynamic_region_fps.start()
 
-        # start a thread to track objects
-        self.object_tracker = ObjectTracker(self, 10)
-        self.object_tracker.start()
-
-        # start a thread to write tracked frames to disk
-        self.video_writer = VideoWriter(self)
-        self.video_writer.start()
-
         # start a thread to publish object scores
         mqtt_publisher = MqttObjectPublisher(self.mqtt_client, self.mqtt_topic_prefix, self)
         mqtt_publisher.start()
@@ -312,8 +313,9 @@ class Camera:
             'dynamic_regions_per_sec': self.dynamic_region_fps.eps()
         }
 
-    def frame_with_objects(self, frame_time):
+    def frame_with_objects(self, frame_time, tracked_objects=None):
         frame = self.frame_cache[frame_time].copy()
+        detected_objects = self.detected_objects[frame_time].copy()
 
         for region in self.regions:
             color = (255,255,255)
@@ -322,13 +324,17 @@ class Camera:
                 color, 2)
 
-        # draw the bounding boxes on the screen
-        for id, obj in list(self.object_tracker.tracked_objects.items()):
-        # for obj in detected_objects[frame_time]:
-            cv2.rectangle(frame, (obj['region']['xmin'], obj['region']['ymin']), 
-                (obj['region']['xmax'], obj['region']['ymax']), 
-                (0,255,0), 1)
-            draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], f"{int(obj['score']*100)}% {obj['area']} {id}")
+
+        if tracked_objects is None:
+            tracked_objects = copy.deepcopy(self.object_tracker.tracked_objects)
+
+        for obj in detected_objects:
+            draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], f"{int(obj['score']*100)}% {obj['area']}", thickness=3)
+
+        for id, obj in tracked_objects.items():
+            color = (0, 255,0) if obj['frame_time'] == frame_time else (255, 0, 0)
+            draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], f"{int(obj['score']*100)}% {obj['area']} {id}", color=color, thickness=1)
 
         # print a timestamp
         time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
         cv2.putText(frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)