Refactor with a working false positive test

Blake Blackshear 2020-08-22 07:05:20 -05:00
parent a8556a729b
commit ea4ecae27c
9 changed files with 272 additions and 168 deletions

View File

@@ -3,7 +3,7 @@ from statistics import mean
import multiprocessing as mp
import numpy as np
import datetime
from frigate.edgetpu import ObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
from frigate.edgetpu import LocalObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
labels = load_labels('/labelmap.txt')

View File

@@ -262,7 +262,7 @@ def main():
camera_process = mp.Process(target=track_camera, args=(name, config, GLOBAL_OBJECT_CONFIG, frame_queue, frame_shape,
tflite_process.detection_queue, tracked_objects_queue, camera_processes[name]['process_fps'],
camera_processes[name]['detection_fps'],
camera_processes[name]['read_start'], camera_processes[name]['detection_frame']))
camera_processes[name]['read_start'], camera_processes[name]['detection_frame'], stop_event))
camera_process.daemon = True
camera_processes[name]['process'] = camera_process

frigate/__init__.py Normal file
View File

View File

@@ -2,6 +2,7 @@ import os
import datetime
import hashlib
import multiprocessing as mp
from abc import ABC, abstractmethod
import numpy as np
import pyarrow.plasma as plasma
import tflite_runtime.interpreter as tflite
@@ -27,8 +28,18 @@ def load_labels(path, encoding='utf-8'):
else:
return {index: line.strip() for index, line in enumerate(lines)}
class ObjectDetector():
def __init__(self):
class ObjectDetector(ABC):
@abstractmethod
def detect(self, tensor_input, threshold = .4):
pass
class LocalObjectDetector(ObjectDetector):
def __init__(self, labels=None):
if labels is None:
self.labels = {}
else:
self.labels = load_labels(labels)
edge_tpu_delegate = None
try:
edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', {"device": "usb"})
@@ -53,6 +64,21 @@ class ObjectDetector():
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
def detect(self, tensor_input, threshold=.4):
detections = []
raw_detections = self.detect_raw(tensor_input)
for d in raw_detections:
if d[1] < threshold:
break
detections.append((
self.labels[int(d[0])],
float(d[1]),
(d[2], d[3], d[4], d[5])
))
return detections
def detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
self.interpreter.invoke()
@@ -70,7 +96,7 @@ def run_detector(detection_queue, avg_speed, start):
print(f"Starting detection process: {os.getpid()}")
listen()
plasma_client = plasma.connect("/tmp/plasma")
object_detector = ObjectDetector()
object_detector = LocalObjectDetector()
while True:
object_id_str = detection_queue.get()
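This hunk splits detection into an abstract ObjectDetector interface and a concrete LocalObjectDetector, separating thresholded, labeled results (detect) from raw model output (detect_raw). The early break in detect relies on raw detections arriving sorted by descending score. A minimal self-contained sketch of that shape, with a stubbed detect_raw standing in for the TFLite interpreter (all data below is illustrative):

```python
from abc import ABC, abstractmethod

class ObjectDetector(ABC):
    @abstractmethod
    def detect(self, tensor_input, threshold=0.4):
        pass

class StubLocalDetector(ObjectDetector):
    """Stand-in for LocalObjectDetector; detect_raw normally wraps the interpreter."""
    def __init__(self, labels):
        self.labels = labels

    def detect_raw(self, tensor_input):
        # Rows of (label_id, score, y_min, x_min, y_max, x_max), sorted by score.
        return [
            (0, 0.92, 0.1, 0.1, 0.5, 0.4),
            (1, 0.31, 0.2, 0.2, 0.6, 0.7),  # below threshold, triggers the break
        ]

    def detect(self, tensor_input, threshold=0.4):
        detections = []
        for d in self.detect_raw(tensor_input):
            if d[1] < threshold:
                break  # raw results are score-sorted, so nothing later can pass
            detections.append((self.labels[int(d[0])], float(d[1]),
                               (d[2], d[3], d[4], d[5])))
        return detections

detector = StubLocalDetector({0: 'person', 1: 'car'})
print(detector.detect(None))  # [('person', 0.92, (0.1, 0.1, 0.5, 0.4))]
```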

View File

@@ -11,7 +11,7 @@ from collections import Counter, defaultdict
import itertools
import pyarrow.plasma as plasma
import matplotlib.pyplot as plt
from frigate.util import draw_box_with_label, PlasmaManager
from frigate.util import draw_box_with_label, PlasmaFrameManager
from frigate.edgetpu import load_labels
PATH_TO_LABELS = '/labelmap.txt'
@@ -91,7 +91,7 @@ class TrackedObjectProcessor(threading.Thread):
for i, zone in enumerate(self.zone_data.values()):
zone['color'] = tuple(int(round(255 * c)) for c in colors(i)[:3])
self.plasma_client = PlasmaManager(self.stop_event)
self.plasma_client = PlasmaFrameManager(self.stop_event)
def get_best(self, camera, label):
if label in self.camera_data[camera]['best_objects']:

frigate/test/__init__.py Normal file
View File

View File

@@ -0,0 +1,71 @@
import datetime
from unittest import TestCase, main
from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames
from frigate.util import DictFrameManager, EventsPerSecond, draw_box_with_label
from frigate.motion import MotionDetector
from frigate.edgetpu import LocalObjectDetector
from frigate.objects import ObjectTracker
import multiprocessing as mp
import numpy as np
import cv2
from frigate.object_processing import COLOR_MAP
class FalsePositiveTests(TestCase):
def test_back_1594395958_675351_0(self):
### load in frames
frame_shape = (1080,1920,3)
frame_manager = DictFrameManager()
frame_queue = mp.Queue()
fps = EventsPerSecond()
skipped_fps = EventsPerSecond()
stop_event = mp.Event()
detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)
ffmpeg_cmd = "ffmpeg -hide_banner -loglevel panic -i /debug/false_positives/back-1595647759.228381-0.mp4 -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, frame_shape[0]*frame_shape[1]*frame_shape[2])
capture_frames(ffmpeg_process, "back", frame_shape, frame_manager, frame_queue, 1, fps, skipped_fps, stop_event, detection_frame)
ffmpeg_process.wait()
ffmpeg_process.communicate()
assert(frame_queue.qsize() > 0)
### process frames
mask = np.zeros((frame_shape[0], frame_shape[1], 1), np.uint8)
mask[:] = 255
motion_detector = MotionDetector(frame_shape, mask)
object_detector = LocalObjectDetector(labels='/labelmap.txt')
object_tracker = ObjectTracker(10)
detected_objects_queue = mp.Queue()
process_fps = EventsPerSecond()
current_frame = mp.Value('d', 0.0)
process_frames("back", frame_queue, frame_shape, frame_manager, motion_detector, object_detector, object_tracker, detected_objects_queue,
process_fps, current_frame, ['person'], {}, mask, stop_event, exit_on_empty=True)
assert(detected_objects_queue.qsize() > 0)
### check result
while(not detected_objects_queue.empty()):
camera_name, frame_time, current_tracked_objects = detected_objects_queue.get()
current_frame = frame_manager.get(f"{camera_name}{frame_time}")
# draw the bounding boxes on the frame
for obj in current_tracked_objects.values():
thickness = 2
color = COLOR_MAP[obj['label']]
if obj['frame_time'] != frame_time:
thickness = 1
color = (255,0,0)
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
# draw the regions on the frame
region = obj['region']
draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', f"{region[2]-region[0]}", thickness=1, color=(0,255,0))
cv2.imwrite(f"/debug/frames/{int(frame_time*1000000)}.jpg", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
if __name__ == '__main__':
main()
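The test drives the real capture and detection code against a recorded clip, using exit_on_empty=True so the otherwise-endless process_frames loop drains the queue and returns. A reduced, runnable sketch of just that loop shape (the function and names here are illustrative, and a thread-safe queue.Queue stands in for the mp.Queue used above):

```python
import queue

def process_frames_sketch(frame_queue, handle_frame, exit_on_empty=False):
    # Same control flow as process_frames: block with a timeout in
    # production, but return once the queue drains when under test.
    while True:
        if exit_on_empty and frame_queue.empty():
            print("Exiting track_objects...")
            return
        try:
            frame_time = frame_queue.get(True, 10)
        except queue.Empty:
            continue
        handle_frame(frame_time)

q = queue.Queue()
for t in (1.0, 2.0, 3.0):
    q.put(t)
seen = []
process_frames_sketch(q, seen.append, exit_on_empty=True)
assert seen == [1.0, 2.0, 3.0]
```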

View File

@@ -1,3 +1,4 @@
from abc import ABC, abstractmethod
import datetime
import time
import signal
@@ -139,7 +140,33 @@ def print_stack(sig, frame):
def listen():
signal.signal(signal.SIGUSR1, print_stack)
class PlasmaManager:
class FrameManager(ABC):
@abstractmethod
def get(self, name, timeout_ms=0):
pass
@abstractmethod
def put(self, name, frame):
pass
@abstractmethod
def delete(self, name):
pass
class DictFrameManager(FrameManager):
def __init__(self):
self.frames = {}
def get(self, name, timeout_ms=0):
return self.frames.get(name)
def put(self, name, frame):
self.frames[name] = frame
def delete(self, name):
del self.frames[name]
class PlasmaFrameManager(FrameManager):
def __init__(self, stop_event=None):
self.stop_event = stop_event
self.connect()
@@ -161,18 +188,21 @@ class PlasmaManager:
if self.stop_event != None and self.stop_event.is_set():
return
try:
return self.plasma_client.get(object_id, timeout_ms=timeout_ms)
frame = self.plasma_client.get(object_id, timeout_ms=timeout_ms)
if frame is plasma.ObjectNotAvailable:
return None
return frame
except:
self.connect()
time.sleep(1)
def put(self, name, obj):
def put(self, name, frame):
object_id = plasma.ObjectID(hashlib.sha1(str.encode(name)).digest())
while True:
if self.stop_event != None and self.stop_event.is_set():
return
try:
self.plasma_client.put(obj, object_id)
self.plasma_client.put(frame, object_id)
return
except Exception as e:
print(f"Failed to put in plasma: {e}")

View File

@@ -13,8 +13,9 @@ import copy
import itertools
import json
import base64
from typing import Dict, List
from collections import defaultdict
from frigate.util import draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, PlasmaManager
from frigate.util import draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, FrameManager, PlasmaFrameManager
from frigate.objects import ObjectTracker
from frigate.edgetpu import RemoteObjectDetector
from frigate.motion import MotionDetector
@@ -53,7 +54,7 @@ def get_ffmpeg_input(ffmpeg_input):
frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
return ffmpeg_input.format(**frigate_vars)
def filtered(obj, objects_to_track, object_filters, mask):
def filtered(obj, objects_to_track, object_filters, mask=None):
object_name = obj[0]
if not object_name in objects_to_track:
@@ -82,7 +83,7 @@ def filtered(obj, objects_to_track, object_filters, mask):
x_location = min(int((obj[2][2]-obj[2][0])/2.0)+obj[2][0], len(mask[0])-1)
# if the object is in a masked location, don't add it to detected objects
if mask[y_location][x_location] == [0]:
if mask != None and mask[y_location][x_location] == [0]:
return True
return False
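One caveat with the new guard: when mask is a NumPy array, mask != None is an elementwise comparison that yields an array rather than a bool, so mask is not None is the safer identity check in a condition. A small illustration:

```python
import numpy as np

mask = np.full((1080, 1920, 1), 255, np.uint8)

# Elementwise: produces a boolean array, which is ambiguous inside `if`.
print(type(mask != None))  # <class 'numpy.ndarray'>

# Identity: produces a plain bool, safe to combine with the pixel test.
y_location, x_location = 500, 900
if mask is not None and mask[y_location][x_location] == [0]:
    print("object is in a masked location")
```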
@@ -115,6 +116,53 @@ def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):
process = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, stdin = sp.DEVNULL, bufsize=frame_size*10, start_new_session=True)
return process
def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager,
frame_queue, take_frame: int, fps:EventsPerSecond, skipped_fps: EventsPerSecond,
stop_event: mp.Event, detection_frame: mp.Value):
frame_num = 0
last_frame = 0
frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
skipped_fps.start()
while True:
if stop_event.is_set():
print(f"{camera_name}: stop event set. exiting capture thread...")
break
frame_bytes = ffmpeg_process.stdout.read(frame_size)
current_frame = datetime.datetime.now().timestamp()
if len(frame_bytes) == 0:
print(f"{camera_name}: ffmpeg didnt return a frame. something is wrong.")
if ffmpeg_process.poll() != None:
print(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
break
else:
continue
fps.update()
frame_num += 1
if (frame_num % take_frame) != 0:
skipped_fps.update()
continue
# if the detection process is more than 1 second behind, skip this frame
if detection_frame.value > 0.0 and (last_frame - detection_frame.value) > 1:
skipped_fps.update()
continue
# put the frame in the frame manager
frame_manager.put(f"{camera_name}{current_frame}",
np
.frombuffer(frame_bytes, np.uint8)
.reshape(frame_shape)
)
# add to the queue
frame_queue.put(current_frame)
last_frame = current_frame
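capture_frames is now a free function so the test can call it directly, and it keeps the two skip paths from CameraCapture.run: decimation by take_frame, and dropping frames when detection lags more than a second behind. A minimal sketch of just that skip logic (the helper name is illustrative):

```python
def should_skip(frame_num, take_frame, last_frame_ts, detection_ts):
    # Decimate: keep only every take_frame-th frame.
    if (frame_num % take_frame) != 0:
        return True
    # Backpressure: if detection is over a second behind the last queued
    # frame, drop this one rather than let the queue grow unbounded.
    if detection_ts > 0.0 and (last_frame_ts - detection_ts) > 1:
        return True
    return False

assert should_skip(3, 2, 100.0, 100.0)      # decimated
assert should_skip(4, 2, 101.5, 100.0)      # detector lagging > 1s
assert not should_skip(4, 2, 100.5, 100.0)  # kept
```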
class CameraCapture(threading.Thread):
def __init__(self, name, ffmpeg_process, frame_shape, frame_queue, take_frame, fps, detection_frame, stop_event):
threading.Thread.__init__(self)
@@ -125,7 +173,7 @@ class CameraCapture(threading.Thread):
self.take_frame = take_frame
self.fps = fps
self.skipped_fps = EventsPerSecond()
self.plasma_client = PlasmaManager(stop_event)
self.plasma_client = PlasmaFrameManager(stop_event)
self.ffmpeg_process = ffmpeg_process
self.current_frame = 0
self.last_frame = 0
@@ -133,47 +181,11 @@ class CameraCapture(threading.Thread):
self.stop_event = stop_event
def run(self):
frame_num = 0
self.skipped_fps.start()
while True:
if self.stop_event.is_set():
print(f"{self.name}: stop event set. exiting capture thread...")
break
capture_frames(self.ffmpeg_process, self.name, self.frame_shape, self.plasma_client, self.frame_queue, self.take_frame,
self.fps, self.skipped_fps, self.stop_event, self.detection_frame)
if self.ffmpeg_process.poll() != None:
print(f"{self.name}: ffmpeg process is not running. exiting capture thread...")
break
frame_bytes = self.ffmpeg_process.stdout.read(self.frame_size)
self.current_frame = datetime.datetime.now().timestamp()
if len(frame_bytes) == 0:
print(f"{self.name}: ffmpeg didnt return a frame. something is wrong.")
continue
self.fps.update()
frame_num += 1
if (frame_num % self.take_frame) != 0:
self.skipped_fps.update()
continue
# if the detection process is more than 1 second behind, skip this frame
if self.detection_frame.value > 0.0 and (self.last_frame - self.detection_frame.value) > 1:
self.skipped_fps.update()
continue
# put the frame in the plasma store
self.plasma_client.put(f"{self.name}{self.current_frame}",
np
.frombuffer(frame_bytes, np.uint8)
.reshape(self.frame_shape)
)
# add to the queue
self.frame_queue.put(self.current_frame)
self.last_frame = self.current_frame
def track_camera(name, config, global_objects_config, frame_queue, frame_shape, detection_queue, detected_objects_queue, fps, detection_fps, read_start, detection_frame):
def track_camera(name, config, global_objects_config, frame_queue, frame_shape, detection_queue, detected_objects_queue, fps, detection_fps, read_start, detection_frame, stop_event):
print(f"Starting process for {name}: {os.getpid()}")
listen()
@@ -191,8 +203,6 @@ def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
for obj in objects_with_config:
object_filters[obj] = {**global_object_filters.get(obj, {}), **camera_object_filters.get(obj, {})}
frame = np.zeros(frame_shape, np.uint8)
# load in the mask for object detection
if 'mask' in config:
if config['mask'].startswith('base64,'):
@@ -213,109 +223,96 @@ def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
object_tracker = ObjectTracker(10)
plasma_client = PlasmaManager()
avg_wait = 0.0
plasma_client = PlasmaFrameManager()
process_frames(name, frame_queue, frame_shape, plasma_client, motion_detector, object_detector,
object_tracker, detected_objects_queue, fps, detection_frame, objects_to_track, object_filters, mask, stop_event)
print(f"{name}: exiting subprocess")
def reduce_boxes(boxes):
if len(boxes) == 0:
return []
reduced_boxes = cv2.groupRectangles([list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
return [tuple(b) for b in reduced_boxes]
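reduce_boxes concatenates the list with itself before calling cv2.groupRectangles because that function discards clusters with groupThreshold or fewer members; duplicating every box guarantees each singleton forms a cluster of two and survives, while genuinely overlapping boxes still collapse to their average. A runnable illustration (assumes opencv-python is installed):

```python
import itertools
import cv2

def reduce_boxes(boxes):
    if len(boxes) == 0:
        return []
    # Double the list: groupRectangles(rects, groupThreshold=1, eps=0.2) drops
    # clusters with <= 1 member, so each lone box must appear twice to survive.
    reduced_boxes = cv2.groupRectangles(
        [list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2)[0]
    return [tuple(b) for b in reduced_boxes]

# The two near-identical boxes merge into one; the distant box is kept.
print(reduce_boxes([(10, 10, 110, 110), (12, 12, 112, 112), (500, 500, 600, 600)]))
```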
def detect(object_detector, frame, region, objects_to_track, object_filters, mask):
tensor_input = create_tensor_input(frame, region)
detections = []
region_detections = object_detector.detect(tensor_input)
for d in region_detections:
box = d[2]
size = region[2]-region[0]
x_min = int((box[1] * size) + region[0])
y_min = int((box[0] * size) + region[1])
x_max = int((box[3] * size) + region[0])
y_max = int((box[2] * size) + region[1])
det = (d[0],
d[1],
(x_min, y_min, x_max, y_max),
(x_max-x_min)*(y_max-y_min),
region)
# apply object filters
if filtered(det, objects_to_track, object_filters, mask):
continue
detections.append(det)
return detections
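The new detect helper centralizes the coordinate math that was previously duplicated in the main loop and the refinement pass: the model reports boxes as (y_min, x_min, y_max, x_max) fractions of the square crop, so each coordinate is scaled by the region's side length and offset by the region's origin. A worked example of that mapping:

```python
# Region is a square crop of the full frame: (x_min, y_min, x_max, y_max).
region = (600, 200, 900, 500)   # a 300x300 crop
box = (0.2, 0.1, 0.8, 0.5)      # model output: (y_min, x_min, y_max, x_max), normalized

size = region[2] - region[0]                # 300
x_min = int((box[1] * size) + region[0])    # 0.1*300 + 600 = 630
y_min = int((box[0] * size) + region[1])    # 0.2*300 + 200 = 260
x_max = int((box[3] * size) + region[0])    # 0.5*300 + 600 = 750
y_max = int((box[2] * size) + region[1])    # 0.8*300 + 200 = 440

assert (x_min, y_min, x_max, y_max) == (630, 260, 750, 440)
area = (x_max - x_min) * (y_max - y_min)    # 120 * 180 = 21600
```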
def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
frame_manager: FrameManager, motion_detector: MotionDetector,
object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
detected_objects_queue: mp.Queue, fps: mp.Value, current_frame_time: mp.Value,
objects_to_track: List[str], object_filters: Dict, mask, stop_event: mp.Event,
exit_on_empty: bool = False):
fps_tracker = EventsPerSecond()
fps_tracker.start()
object_detector.fps.start()
while True:
read_start.value = datetime.datetime.now().timestamp()
frame_time = frame_queue.get()
duration = datetime.datetime.now().timestamp()-read_start.value
read_start.value = 0.0
avg_wait = (avg_wait*99+duration)/100
detection_frame.value = frame_time
if stop_event.is_set() or (exit_on_empty and frame_queue.empty()):
print(f"Exiting track_objects...")
break
# Get frame from plasma store
frame = plasma_client.get(f"{name}{frame_time}")
if frame is plasma.ObjectNotAvailable:
try:
frame_time = frame_queue.get(True, 10)
except queue.Empty:
continue
current_frame_time.value = frame_time
frame = frame_manager.get(f"{camera_name}{frame_time}")
fps_tracker.update()
fps.value = fps_tracker.eps()
detection_fps.value = object_detector.fps.eps()
# look for motion
motion_boxes = motion_detector.detect(frame)
tracked_objects = object_tracker.tracked_objects.values()
tracked_object_boxes = [obj['box'] for obj in object_tracker.tracked_objects.values()]
# merge areas of motion that intersect with a known tracked object into a single area to look at
areas_of_interest = []
used_motion_boxes = []
for obj in tracked_objects:
x_min, y_min, x_max, y_max = obj['box']
for m_index, motion_box in enumerate(motion_boxes):
if intersection_over_union(motion_box, obj['box']) > .2:
used_motion_boxes.append(m_index)
x_min = min(obj['box'][0], motion_box[0])
y_min = min(obj['box'][1], motion_box[1])
x_max = max(obj['box'][2], motion_box[2])
y_max = max(obj['box'][3], motion_box[3])
areas_of_interest.append((x_min, y_min, x_max, y_max))
unused_motion_boxes = set(range(0, len(motion_boxes))).difference(used_motion_boxes)
# combine motion boxes with known locations of existing objects
combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
# compute motion regions
motion_regions = [calculate_region(frame_shape, motion_boxes[i][0], motion_boxes[i][1], motion_boxes[i][2], motion_boxes[i][3], 1.2)
for i in unused_motion_boxes]
# compute regions
regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
for a in combined_boxes]
# compute tracked object regions
object_regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
for a in areas_of_interest]
# combine overlapping regions
combined_regions = reduce_boxes(regions)
# merge regions with high IOU
merged_regions = motion_regions+object_regions
while True:
max_iou = 0.0
max_indices = None
region_indices = range(len(merged_regions))
for a, b in itertools.combinations(region_indices, 2):
iou = intersection_over_union(merged_regions[a], merged_regions[b])
if iou > max_iou:
max_iou = iou
max_indices = (a, b)
if max_iou > 0.1:
a = merged_regions[max_indices[0]]
b = merged_regions[max_indices[1]]
merged_regions.append(calculate_region(frame_shape,
min(a[0], b[0]),
min(a[1], b[1]),
max(a[2], b[2]),
max(a[3], b[3]),
1
))
del merged_regions[max(max_indices[0], max_indices[1])]
del merged_regions[min(max_indices[0], max_indices[1])]
else:
break
# re-compute regions
regions = [calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
for a in combined_regions]
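The merge loop above repeatedly finds the pair of regions with the highest intersection-over-union and replaces it with a single region covering both, until no remaining pair overlaps by more than 0.1. A self-contained sketch of that greedy merge, with a plain bounding-box union standing in for calculate_region (which additionally squares the box and clips it to the frame):

```python
import itertools

def intersection_over_union(a, b):
    # Boxes are (x_min, y_min, x_max, y_max).
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    union = ((a[2]-a[0])*(a[3]-a[1]) + (b[2]-b[0])*(b[3]-b[1]) - inter)
    return inter / union if union else 0.0

def merge_regions(regions, iou_threshold=0.1):
    regions = list(regions)
    while True:
        best = max(itertools.combinations(range(len(regions)), 2),
                   key=lambda p: intersection_over_union(regions[p[0]], regions[p[1]]),
                   default=None)
        if best is None or intersection_over_union(regions[best[0]], regions[best[1]]) <= iou_threshold:
            return regions
        a, b = regions[best[0]], regions[best[1]]
        merged = (min(a[0], b[0]), min(a[1], b[1]), max(a[2], b[2]), max(a[3], b[3]))
        # Delete the higher index first so the lower index stays valid.
        for i in sorted(best, reverse=True):
            del regions[i]
        regions.append(merged)

# The first two regions overlap enough to merge; the third is untouched.
print(merge_regions([(0, 0, 100, 100), (50, 50, 150, 150), (400, 400, 500, 500)]))
```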
# resize regions and detect
detections = []
for region in merged_regions:
tensor_input = create_tensor_input(frame, region)
region_detections = object_detector.detect(tensor_input)
for d in region_detections:
box = d[2]
size = region[2]-region[0]
x_min = int((box[1] * size) + region[0])
y_min = int((box[0] * size) + region[1])
x_max = int((box[3] * size) + region[0])
y_max = int((box[2] * size) + region[1])
det = (d[0],
d[1],
(x_min, y_min, x_max, y_max),
(x_max-x_min)*(y_max-y_min),
region)
if filtered(det, objects_to_track, object_filters, mask):
continue
detections.append(det)
for region in regions:
detections.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
#########
# merge objects, check for clipped objects and look again up to N times
# merge objects, check for clipped objects and look again up to 4 times
#########
refining = True
refine_count = 0
@@ -345,29 +342,11 @@ def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
box[0], box[1],
box[2], box[3])
tensor_input = create_tensor_input(frame, region)
# run detection on new region
refined_detections = object_detector.detect(tensor_input)
for d in refined_detections:
box = d[2]
size = region[2]-region[0]
x_min = int((box[1] * size) + region[0])
y_min = int((box[0] * size) + region[1])
x_max = int((box[3] * size) + region[0])
y_max = int((box[2] * size) + region[1])
det = (d[0],
d[1],
(x_min, y_min, x_max, y_max),
(x_max-x_min)*(y_max-y_min),
region)
if filtered(det, objects_to_track, object_filters, mask):
continue
selected_objects.append(det)
selected_objects.extend(detect(object_detector, frame, region, objects_to_track, object_filters, mask))
refining = True
else:
selected_objects.append(obj)
# set the detections list to only include top, complete objects
# and new detections
detections = selected_objects
@@ -379,6 +358,4 @@ def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
object_tracker.match_and_update(frame_time, detections)
# add to the queue
detected_objects_queue.put((name, frame_time, object_tracker.tracked_objects))
print(f"{name}: exiting subprocess")
detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))