mirror of https://github.com/blakeblackshear/frigate.git
synced 2024-11-21 19:07:46 +01:00

cleanup old code

This commit is contained in:
parent 68c3a069ba
commit 1089a40943
@@ -1,139 +0,0 @@
-import datetime
-import time
-import cv2
-import threading
-import copy
-# import prctl
-import numpy as np
-from edgetpu.detection.engine import DetectionEngine
-
-from frigate.util import tonumpyarray, LABELS, PATH_TO_CKPT, calculate_region
-
-class PreppedQueueProcessor(threading.Thread):
-    def __init__(self, cameras, prepped_frame_queue, fps):
-
-        threading.Thread.__init__(self)
-        self.cameras = cameras
-        self.prepped_frame_queue = prepped_frame_queue
-
-        # Load the edgetpu engine and labels
-        self.engine = DetectionEngine(PATH_TO_CKPT)
-        self.labels = LABELS
-        self.fps = fps
-        self.avg_inference_speed = 10
-
-    def run(self):
-        prctl.set_name(self.__class__.__name__)
-        # process queue...
-        while True:
-            frame = self.prepped_frame_queue.get()
-
-            # Actual detection.
-            frame['detected_objects'] = self.engine.detect_with_input_tensor(frame['frame'], threshold=0.2, top_k=5)
-            self.fps.update()
-            self.avg_inference_speed = (self.avg_inference_speed*9 + self.engine.get_inference_time())/10
-
-            self.cameras[frame['camera_name']].detected_objects_queue.put(frame)
-
-class RegionRequester(threading.Thread):
-    def __init__(self, camera):
-        threading.Thread.__init__(self)
-        self.camera = camera
-
-    def run(self):
-        prctl.set_name(self.__class__.__name__)
-        frame_time = 0.0
-        while True:
-            now = datetime.datetime.now().timestamp()
-
-            with self.camera.frame_ready:
-                # if there isnt a frame ready for processing or it is old, wait for a new frame
-                if self.camera.frame_time.value == frame_time or (now - self.camera.frame_time.value) > 0.5:
-                    self.camera.frame_ready.wait()
-
-            # make a copy of the frame_time
-            frame_time = self.camera.frame_time.value
-
-            # grab the current tracked objects
-            with self.camera.object_tracker.tracked_objects_lock:
-                tracked_objects = copy.deepcopy(self.camera.object_tracker.tracked_objects).values()
-
-            with self.camera.regions_in_process_lock:
-                self.camera.regions_in_process[frame_time] = len(self.camera.config['regions'])
-                self.camera.regions_in_process[frame_time] += len(tracked_objects)
-
-            for index, region in enumerate(self.camera.config['regions']):
-                self.camera.resize_queue.put({
-                    'camera_name': self.camera.name,
-                    'frame_time': frame_time,
-                    'region_id': index,
-                    'size': region['size'],
-                    'x_offset': region['x_offset'],
-                    'y_offset': region['y_offset']
-                })
-
-            # request a region for tracked objects
-            for tracked_object in tracked_objects:
-                box = tracked_object['box']
-                # calculate a new region that will hopefully get the entire object
-                (size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
-                    box['xmin'], box['ymin'],
-                    box['xmax'], box['ymax'])
-
-                self.camera.resize_queue.put({
-                    'camera_name': self.camera.name,
-                    'frame_time': frame_time,
-                    'region_id': -1,
-                    'size': size,
-                    'x_offset': x_offset,
-                    'y_offset': y_offset
-                })
-
-class RegionPrepper(threading.Thread):
-    def __init__(self, camera, frame_cache, resize_request_queue, prepped_frame_queue):
-        threading.Thread.__init__(self)
-        self.camera = camera
-        self.frame_cache = frame_cache
-        self.resize_request_queue = resize_request_queue
-        self.prepped_frame_queue = prepped_frame_queue
-
-    def run(self):
-        prctl.set_name(self.__class__.__name__)
-        while True:
-            resize_request = self.resize_request_queue.get()
-
-            # if the queue is over 100 items long, only prep dynamic regions
-            if resize_request['region_id'] != -1 and self.prepped_frame_queue.qsize() > 100:
-                with self.camera.regions_in_process_lock:
-                    self.camera.regions_in_process[resize_request['frame_time']] -= 1
-                    if self.camera.regions_in_process[resize_request['frame_time']] == 0:
-                        del self.camera.regions_in_process[resize_request['frame_time']]
-                self.camera.skipped_region_tracker.update()
-                continue
-
-            frame = self.frame_cache.get(resize_request['frame_time'], None)
-
-            if frame is None:
-                print("RegionPrepper: frame_time not in frame_cache")
-                with self.camera.regions_in_process_lock:
-                    self.camera.regions_in_process[resize_request['frame_time']] -= 1
-                    if self.camera.regions_in_process[resize_request['frame_time']] == 0:
-                        del self.camera.regions_in_process[resize_request['frame_time']]
-                self.camera.skipped_region_tracker.update()
-                continue
-
-            # make a copy of the region
-            cropped_frame = frame[resize_request['y_offset']:resize_request['y_offset']+resize_request['size'], resize_request['x_offset']:resize_request['x_offset']+resize_request['size']].copy()
-
-            # Resize to 300x300 if needed
-            if cropped_frame.shape != (300, 300, 3):
-                # TODO: use Pillow-SIMD?
-                cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
-            # Expand dimensions since the model expects images to have shape: [1, 300, 300, 3]
-            frame_expanded = np.expand_dims(cropped_frame, axis=0)
-
-            # add the frame to the queue
-            resize_request['frame'] = frame_expanded.flatten().copy()
-            self.prepped_frame_queue.put(resize_request)
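The deleted PreppedQueueProcessor above smoothed its Edge TPU timing with a 9:1 exponential moving average seeded at 10. A minimal standalone sketch of that smoothing, with hypothetical sample values:

# Sketch of the rolling-average logic from the removed thread; the sample
# values are made up, not real Edge TPU measurements.
avg_inference_speed = 10  # same seed the thread used

def update_average(avg, new_sample):
    # weight history 9:1 against the newest measurement
    return (avg * 9 + new_sample) / 10

for sample in [12.0, 11.5, 13.2]:
    avg_inference_speed = update_average(avg_inference_speed, sample)
    print(f"avg inference speed: {avg_inference_speed:.2f} ms")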
@@ -10,11 +10,12 @@ import itertools
 import pyarrow.plasma as plasma
 import SharedArray as sa
 import matplotlib.pyplot as plt
-from frigate.util import draw_box_with_label, ReadLabelFile
+from frigate.util import draw_box_with_label
+from frigate.edgetpu import load_labels

 PATH_TO_LABELS = '/lab/labelmap.txt'

-LABELS = ReadLabelFile(PATH_TO_LABELS)
+LABELS = load_labels(PATH_TO_LABELS)
 cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))

 COLOR_MAP = {}
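This hunk swaps the old ReadLabelFile helper for frigate.edgetpu.load_labels. For reference, a sketch of a labelmap parser equivalent to the removed helper (the real load_labels implementation may differ):

# Parse "id label" lines, e.g. "0 person", into {0: 'person', ...};
# equivalent to the removed ReadLabelFile, not necessarily to load_labels.
def parse_labelmap(file_path):
    labels = {}
    with open(file_path, 'r') as f:
        for line in f:
            pair = line.strip().split(maxsplit=1)
            if len(pair) == 2:
                labels[int(pair[0])] = pair[1].strip()
    return labels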
@@ -2,7 +2,6 @@ import time
 import datetime
 import threading
 import cv2
-# import prctl
 import itertools
 import copy
 import numpy as np
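The `# import prctl` line removed here was dead weight: the only callers of prctl.set_name() were themselves commented out or deleted, and with the import commented out those calls would have raised NameError anyway. When thread naming is wanted, a guarded import keeps it optional; a sketch, not frigate's actual approach:

# Fall back to a no-op when python-prctl (Linux-only) is unavailable,
# instead of leaving live prctl.set_name() calls behind a commented import.
try:
    import prctl
except ImportError:
    prctl = None

def set_thread_name(name):
    if prctl is not None:
        prctl.set_name(name)  # makes the thread name visible in ps/top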
@@ -11,237 +10,6 @@ from collections import defaultdict
 from scipy.spatial import distance as dist
 from frigate.util import draw_box_with_label, calculate_region

-# class ObjectCleaner(threading.Thread):
-#     def __init__(self, camera):
-#         threading.Thread.__init__(self)
-#         self.camera = camera
-
-#     def run(self):
-#         prctl.set_name("ObjectCleaner")
-#         while True:
-
-#             # wait a bit before checking for expired frames
-#             time.sleep(0.2)
-
-#             for frame_time in list(self.camera.detected_objects.keys()).copy():
-#                 if not frame_time in self.camera.frame_cache:
-#                     del self.camera.detected_objects[frame_time]
-
-#             objects_deregistered = False
-#             with self.camera.object_tracker.tracked_objects_lock:
-#                 now = datetime.datetime.now().timestamp()
-#                 for id, obj in list(self.camera.object_tracker.tracked_objects.items()):
-#                     # if the object is more than 10 seconds old
-#                     # and not in the most recent frame, deregister
-#                     if (now - obj['frame_time']) > 10 and self.camera.object_tracker.most_recent_frame_time > obj['frame_time']:
-#                         self.camera.object_tracker.deregister(id)
-#                         objects_deregistered = True
-
-#             if objects_deregistered:
-#                 with self.camera.objects_tracked:
-#                     self.camera.objects_tracked.notify_all()
-
-# class DetectedObjectsProcessor(threading.Thread):
-#     def __init__(self, camera):
-#         threading.Thread.__init__(self)
-#         self.camera = camera
-
-#     def run(self):
-#         prctl.set_name(self.__class__.__name__)
-#         while True:
-#             frame = self.camera.detected_objects_queue.get()
-
-#             objects = frame['detected_objects']
-
-#             for raw_obj in objects:
-#                 name = str(LABELS[raw_obj.label_id])
-
-#                 if not name in self.camera.objects_to_track:
-#                     continue
-
-#                 obj = {
-#                     'name': name,
-#                     'score': float(raw_obj.score),
-#                     'box': {
-#                         'xmin': int((raw_obj.bounding_box[0][0] * frame['size']) + frame['x_offset']),
-#                         'ymin': int((raw_obj.bounding_box[0][1] * frame['size']) + frame['y_offset']),
-#                         'xmax': int((raw_obj.bounding_box[1][0] * frame['size']) + frame['x_offset']),
-#                         'ymax': int((raw_obj.bounding_box[1][1] * frame['size']) + frame['y_offset'])
-#                     },
-#                     'region': {
-#                         'xmin': frame['x_offset'],
-#                         'ymin': frame['y_offset'],
-#                         'xmax': frame['x_offset']+frame['size'],
-#                         'ymax': frame['y_offset']+frame['size']
-#                     },
-#                     'frame_time': frame['frame_time'],
-#                     'region_id': frame['region_id']
-#                 }
-
-#                 # if the object is within 5 pixels of the region border, and the region is not on the edge
-#                 # consider the object to be clipped
-#                 obj['clipped'] = False
-#                 if ((obj['region']['xmin'] > 5 and obj['box']['xmin']-obj['region']['xmin'] <= 5) or
-#                     (obj['region']['ymin'] > 5 and obj['box']['ymin']-obj['region']['ymin'] <= 5) or
-#                     (self.camera.frame_shape[1]-obj['region']['xmax'] > 5 and obj['region']['xmax']-obj['box']['xmax'] <= 5) or
-#                     (self.camera.frame_shape[0]-obj['region']['ymax'] > 5 and obj['region']['ymax']-obj['box']['ymax'] <= 5)):
-#                     obj['clipped'] = True
-
-#                 # Compute the area
-#                 # TODO: +1 right?
-#                 obj['area'] = (obj['box']['xmax']-obj['box']['xmin'])*(obj['box']['ymax']-obj['box']['ymin'])
-
-#                 self.camera.detected_objects[frame['frame_time']].append(obj)
-
-#             # TODO: use in_process and processed counts instead to avoid lock
-#             with self.camera.regions_in_process_lock:
-#                 if frame['frame_time'] in self.camera.regions_in_process:
-#                     self.camera.regions_in_process[frame['frame_time']] -= 1
-#                     # print(f"{frame['frame_time']} remaining regions {self.camera.regions_in_process[frame['frame_time']]}")
-
-#                     if self.camera.regions_in_process[frame['frame_time']] == 0:
-#                         del self.camera.regions_in_process[frame['frame_time']]
-#                         # print(f"{frame['frame_time']} no remaining regions")
-#                         self.camera.finished_frame_queue.put(frame['frame_time'])
-#                 else:
-#                     self.camera.finished_frame_queue.put(frame['frame_time'])

-# # Thread that checks finished frames for clipped objects and sends back
-# # for processing if needed
-# # TODO: evaluate whether or not i really need separate threads/queues for each step
-# #       given that only 1 thread will really be able to run at a time. you need a
-# #       separate process to actually do things in parallel for when you are CPU bound.
-# #       threads are good when you are waiting and could be processing while you wait
-# class RegionRefiner(threading.Thread):
-#     def __init__(self, camera):
-#         threading.Thread.__init__(self)
-#         self.camera = camera
-
-#     def run(self):
-#         prctl.set_name(self.__class__.__name__)
-#         while True:
-#             frame_time = self.camera.finished_frame_queue.get()
-
-#             detected_objects = self.camera.detected_objects[frame_time].copy()
-#             # print(f"{frame_time} finished")
-
-#             # group by name
-#             detected_object_groups = defaultdict(lambda: [])
-#             for obj in detected_objects:
-#                 detected_object_groups[obj['name']].append(obj)
-
-#             look_again = False
-#             selected_objects = []
-#             for group in detected_object_groups.values():
-
-#                 # apply non-maxima suppression to suppress weak, overlapping bounding boxes
-#                 boxes = [(o['box']['xmin'], o['box']['ymin'], o['box']['xmax']-o['box']['xmin'], o['box']['ymax']-o['box']['ymin'])
-#                     for o in group]
-#                 confidences = [o['score'] for o in group]
-#                 idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
-
-#                 for index in idxs:
-#                     obj = group[index[0]]
-#                     selected_objects.append(obj)
-#                     if obj['clipped']:
-#                         box = obj['box']
-#                         # calculate a new region that will hopefully get the entire object
-#                         (size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
-#                             box['xmin'], box['ymin'],
-#                             box['xmax'], box['ymax'])
-#                         # print(f"{frame_time} new region: {size} {x_offset} {y_offset}")
-
-#                         with self.camera.regions_in_process_lock:
-#                             if not frame_time in self.camera.regions_in_process:
-#                                 self.camera.regions_in_process[frame_time] = 1
-#                             else:
-#                                 self.camera.regions_in_process[frame_time] += 1
-
-#                         # add it to the queue
-#                         self.camera.resize_queue.put({
-#                             'camera_name': self.camera.name,
-#                             'frame_time': frame_time,
-#                             'region_id': -1,
-#                             'size': size,
-#                             'x_offset': x_offset,
-#                             'y_offset': y_offset
-#                         })
-#                         self.camera.dynamic_region_fps.update()
-#                         look_again = True
-
-#             # if we are looking again, then this frame is not ready for processing
-#             if look_again:
-#                 # remove the clipped objects
-#                 self.camera.detected_objects[frame_time] = [o for o in selected_objects if not o['clipped']]
-#                 continue
-
-#             # filter objects based on camera settings
-#             selected_objects = [o for o in selected_objects if not self.filtered(o)]
-
-#             self.camera.detected_objects[frame_time] = selected_objects
-
-#             # print(f"{frame_time} is actually finished")
-
-#             # keep adding frames to the refined queue as long as they are finished
-#             with self.camera.regions_in_process_lock:
-#                 while self.camera.frame_queue.qsize() > 0 and self.camera.frame_queue.queue[0] not in self.camera.regions_in_process:
-#                     self.camera.last_processed_frame = self.camera.frame_queue.get()
-#                     self.camera.refined_frame_queue.put(self.camera.last_processed_frame)
-
-#     def filtered(self, obj):
-#         object_name = obj['name']
-
-#         if object_name in self.camera.object_filters:
-#             obj_settings = self.camera.object_filters[object_name]
-
-#             # if the min area is larger than the
-#             # detected object, don't add it to detected objects
-#             if obj_settings.get('min_area',-1) > obj['area']:
-#                 return True
-
-#             # if the detected object is larger than the
-#             # max area, don't add it to detected objects
-#             if obj_settings.get('max_area', self.camera.frame_shape[0]*self.camera.frame_shape[1]) < obj['area']:
-#                 return True
-
-#             # if the score is lower than the threshold, skip
-#             if obj_settings.get('threshold', 0) > obj['score']:
-#                 return True
-
-#         # compute the coordinates of the object and make sure
-#         # the location isnt outside the bounds of the image (can happen from rounding)
-#         y_location = min(int(obj['box']['ymax']), len(self.camera.mask)-1)
-#         x_location = min(int((obj['box']['xmax']-obj['box']['xmin'])/2.0)+obj['box']['xmin'], len(self.camera.mask[0])-1)
-
-#         # if the object is in a masked location, don't add it to detected objects
-#         if self.camera.mask[y_location][x_location] == [0]:
-#             return True
-
-#         return False
-
-#     def has_overlap(self, new_obj, obj, overlap=.7):
-#         # compute intersection rectangle with existing object and new objects region
-#         existing_obj_current_region = compute_intersection_rectangle(obj['box'], new_obj['region'])
-
-#         # compute intersection rectangle with new object and existing objects region
-#         new_obj_existing_region = compute_intersection_rectangle(new_obj['box'], obj['region'])
-
-#         # compute iou for the two intersection rectangles that were just computed
-#         iou = compute_intersection_over_union(existing_obj_current_region, new_obj_existing_region)
-
-#         # if intersection is greater than overlap
-#         if iou > overlap:
-#             return True
-#         else:
-#             return False
-
-#     def find_group(self, new_obj, groups):
-#         for index, group in enumerate(groups):
-#             for obj in group:
-#                 if self.has_overlap(new_obj, obj):
-#                     return index
-#         return None
-
 class ObjectTracker():
     def __init__(self, max_disappeared):
         self.tracked_objects = {}
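The removed RegionRefiner grouped detections by label and ran OpenCV's non-maximum suppression over each group. A standalone sketch of that NMS step, using made-up boxes rather than real detections:

import cv2
import numpy as np

# Boxes in the (x, y, w, h) form the removed code built; values are hypothetical.
boxes = [[10, 10, 50, 80], [12, 14, 50, 78], [200, 40, 60, 60]]
confidences = [0.9, 0.6, 0.8]

# same thresholds the removed code used: score threshold 0.5, NMS threshold 0.4
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

# older OpenCV returned nested indices (hence group[index[0]] above);
# flattening copes with both old and new return shapes
keep = [boxes[int(i)] for i in np.array(idxs).flatten()]
print(keep)  # the heavily overlapping lower-confidence box is suppressed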
@@ -385,45 +153,3 @@ class ObjectTracker():
         else:
             for col in unusedCols:
                 self.register(col, group[col])
-
-# Maintains the frame and object with the highest score
-# class BestFrames(threading.Thread):
-#     def __init__(self, camera):
-#         threading.Thread.__init__(self)
-#         self.camera = camera
-#         self.best_objects = {}
-#         self.best_frames = {}
-
-#     def run(self):
-#         prctl.set_name(self.__class__.__name__)
-#         while True:
-#             # wait until objects have been tracked
-#             with self.camera.objects_tracked:
-#                 self.camera.objects_tracked.wait()
-
-#             # make a copy of tracked objects
-#             tracked_objects = list(self.camera.object_tracker.tracked_objects.values())
-
-#             for obj in tracked_objects:
-#                 if obj['name'] in self.best_objects:
-#                     now = datetime.datetime.now().timestamp()
-#                     # if the object is a higher score than the current best score
-#                     # or the current object is more than 1 minute old, use the new object
-#                     if obj['score'] > self.best_objects[obj['name']]['score'] or (now - self.best_objects[obj['name']]['frame_time']) > 60:
-#                         self.best_objects[obj['name']] = copy.deepcopy(obj)
-#                 else:
-#                     self.best_objects[obj['name']] = copy.deepcopy(obj)
-
-#             for name, obj in self.best_objects.items():
-#                 if obj['frame_time'] in self.camera.frame_cache:
-#                     best_frame = self.camera.frame_cache[obj['frame_time']]
-
-#                     draw_box_with_label(best_frame, obj['box']['xmin'], obj['box']['ymin'],
-#                         obj['box']['xmax'], obj['box']['ymax'], obj['name'], "{}% {}".format(int(obj['score']*100), obj['area']))
-
-#                     # print a timestamp
-#                     if self.camera.snapshot_config['show_timestamp']:
-#                         time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
-#                         cv2.putText(best_frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-#                     self.best_frames[name] = best_frame
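The commented-out BestFrames thread kept the highest-scoring object per label, replacing the stored one whenever a better score arrived or the stored object aged past 60 seconds. A sketch of that policy in isolation (the sample object is hypothetical):

import copy
import datetime

best_objects = {}

def update_best(obj, max_age=60):
    # replace when the new score is higher or the stored object is stale
    now = datetime.datetime.now().timestamp()
    current = best_objects.get(obj['name'])
    if current is None or obj['score'] > current['score'] or (now - current['frame_time']) > max_age:
        best_objects[obj['name']] = copy.deepcopy(obj)

update_best({'name': 'person', 'score': 0.87,
             'frame_time': datetime.datetime.now().timestamp()})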
@@ -5,16 +5,6 @@ import cv2
 import threading
 import matplotlib.pyplot as plt

-# Function to read labels from text files.
-def ReadLabelFile(file_path):
-    with open(file_path, 'r') as f:
-        lines = f.readlines()
-    ret = {}
-    for line in lines:
-        pair = line.strip().split(maxsplit=1)
-        ret[int(pair[0])] = pair[1].strip()
-    return ret
-
 def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
     if color is None:
         color = (0,0,255)
@@ -117,10 +107,6 @@ def clipped(obj, frame_shape):
     else:
         return False

-# convert shared memory array into numpy array
-def tonumpyarray(mp_arr):
-    return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
-
 class EventsPerSecond:
     def __init__(self, max_events=1000):
         self._start = None
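The removed tonumpyarray helper wrapped a multiprocessing shared array in a zero-copy numpy view. A self-contained sketch of the same idea (the frame size is hypothetical):

import multiprocessing as mp
import numpy as np

shared = mp.Array('B', 640 * 480 * 3)  # 'B' = unsigned bytes

def tonumpyarray(mp_arr):
    # numpy view over the shared buffer; no copy is made
    return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)

frame = tonumpyarray(shared).reshape((480, 640, 3))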
@@ -11,43 +11,15 @@ import numpy as np
 import hashlib
 import pyarrow.plasma as plasma
 import SharedArray as sa
-# import prctl
 import copy
 import itertools
 import json
 from collections import defaultdict
-from frigate.util import tonumpyarray, draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond
-# from frigate.object_detection import RegionPrepper, RegionRequester
+from frigate.util import draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond
 from frigate.objects import ObjectTracker
-# from frigate.mqtt import MqttObjectPublisher
 from frigate.edgetpu import RemoteObjectDetector
 from frigate.motion import MotionDetector
-
-# Stores 2 seconds worth of frames so they can be used for other threads
-# TODO: we do actually know when these frames are no longer needed
-# class FrameTracker(threading.Thread):
-#     def __init__(self, frame_time, frame_ready, frame_lock, recent_frames):
-#         threading.Thread.__init__(self)
-#         self.frame_time = frame_time
-#         self.frame_ready = frame_ready
-#         self.frame_lock = frame_lock
-#         self.recent_frames = recent_frames
-
-#     def run(self):
-#         prctl.set_name(self.__class__.__name__)
-#         while True:
-#             # wait for a frame
-#             with self.frame_ready:
-#                 self.frame_ready.wait()
-
-#             # delete any old frames
-#             stored_frame_times = list(self.recent_frames.keys())
-#             stored_frame_times.sort(reverse=True)
-#             if len(stored_frame_times) > 100:
-#                 frames_to_delete = stored_frame_times[50:]
-#                 for k in frames_to_delete:
-#                     del self.recent_frames[k]
-
 # TODO: add back opencv fallback
 def get_frame_shape(source):
     ffprobe_cmd = " ".join([
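The commented-out FrameTracker pruned its frame cache by sorting frame times newest-first and, once the cache exceeded 100 entries, dropping everything past the 50 newest. A sketch of that rule:

# Keep only the 50 newest frames once more than 100 are cached.
def prune_frame_cache(recent_frames):
    stored_frame_times = sorted(recent_frames.keys(), reverse=True)
    if len(stored_frame_times) > 100:
        for frame_time in stored_frame_times[50:]:
            del recent_frames[frame_time]

cache = {float(i): object() for i in range(120)}  # hypothetical frame_time -> frame
prune_frame_cache(cache)
assert len(cache) == 50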
@@ -302,23 +274,7 @@ class Camera:
         self.capture_thread.join()
         self.ffmpeg_process = None
         self.capture_thread = None
-=======
-
-# class CameraWatchdog(threading.Thread):
-#     def __init__(self, camera):
-#         threading.Thread.__init__(self)
-#         self.camera = camera
-
-#     def run(self):
-#         prctl.set_name(self.__class__.__name__)
-#         while True:
-#             # wait a bit before checking
-#             time.sleep(10)
-
-#             if self.camera.frame_time.value != 0.0 and (datetime.datetime.now().timestamp() - self.camera.frame_time.value) > self.camera.watchdog_timeout:
-#                 print(self.camera.name + ": last frame is more than 5 minutes old, restarting camera capture...")
-#                 self.camera.start_or_restart_capture()
-#                 time.sleep(5)
-
 # # Thread to read the stdout of the ffmpeg process and update the current frame
 # class CameraCapture(threading.Thread):
 #     def __init__(self, camera):
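The removed CameraWatchdog restarted capture whenever the newest frame grew older than the configured timeout; note its log message said "5 minutes" even though the comparison used the configurable watchdog_timeout. A sketch of the loop, where `camera` is a hypothetical object with frame_time and start_or_restart_capture():

import datetime
import time

def watch(camera):
    while True:
        time.sleep(10)  # wait a bit between checks
        now = datetime.datetime.now().timestamp()
        stale = camera.frame_time.value != 0.0 and (now - camera.frame_time.value) > camera.watchdog_timeout
        if stale:
            print(f"{camera.name}: last frame is stale, restarting camera capture...")
            camera.start_or_restart_capture()
            time.sleep(5)  # give the new capture process time to produce a frame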
@@ -518,7 +474,6 @@ class Camera:
 #         self.capture_thread.join()
 #         self.ffmpeg_process = None
 #         self.capture_thread = None
->>>>>>> 9b1c7e9... split into separate processes

 # # create the process to capture frames from the input stream and store in a shared array
 # print("Creating a new ffmpeg process...")
@@ -626,6 +581,8 @@ class Camera:

 # return frame_bytes

+=======
+>>>>>>> 2a2fbe7... cleanup old code
 def filtered(obj, objects_to_track, object_filters, mask):
     object_name = obj[0]
