Mirror of https://github.com/blakeblackshear/frigate.git

commit 02efb6f415 (parent 5b4c6e50bc)

    fixing a few things
@@ -133,9 +133,6 @@ class DetectedObjectsProcessor(threading.Thread):
                     # print(f"{frame['frame_time']} no remaining regions")
                     self.camera.finished_frame_queue.put(frame['frame_time'])
 
-            with self.camera.objects_parsed:
-                self.camera.objects_parsed.notify_all()
-
 # Thread that checks finished frames for clipped objects and sends back
 # for processing if needed
 class RegionRefiner(threading.Thread):
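Note: the notification on objects_parsed is not dropped for good here; the hunk at -220,14 below re-adds it in RegionRefiner after deduplication, so waiters only wake once a frame is fully refined. A minimal, standalone sketch of the condition-variable pattern these lines rely on (the module-level names below are hypothetical, not frigate's API):

    import threading

    objects_parsed = threading.Condition()  # guards detected_objects
    detected_objects: list = []             # shared producer/consumer state

    def publish(obj) -> None:
        # producer side: mutate shared state, then wake every waiter
        with objects_parsed:
            detected_objects.append(obj)
            objects_parsed.notify_all()

    def next_object():
        # consumer side: sleep until the producer signals new data
        with objects_parsed:
            while not detected_objects:
                objects_parsed.wait()
            return detected_objects.pop(0)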
@@ -166,7 +163,7 @@ class RegionRefiner(threading.Thread):
             self.camera.detected_objects[frame_time] = [obj for obj in self.camera.detected_objects[frame_time] if obj['clipped'] == False]
 
             # print(f"{frame_time} found {len(object_groups)} groups")
-            clipped_object = False
+            look_again = False
             # find the largest unclipped object in each group
             for group in object_groups:
                 unclipped_objects = [obj for obj in group if obj['clipped'] == False]
@@ -198,10 +195,12 @@ class RegionRefiner(threading.Thread):
                         'y_offset': y_offset
                     })
                     self.camera.dynamic_region_fps.update()
-                    clipped_object = True
+                    look_again = True
+                # TODO: zoom in on unclipped low confidence objects
+                # else: ...
 
-            # if we found a clipped object, then this frame is not ready for processing
-            if clipped_object:
+            # if we are looking again, then this frame is not ready for processing
+            if look_again:
                 continue
 
             # dedupe the unclipped objects
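The rename from clipped_object to look_again makes the flag's role explicit: the frame goes back for another detection pass, and the new TODO suggests clipping will not stay the only trigger (low-confidence objects could also warrant a zoomed re-check). A condensed, self-contained sketch of that control flow, assuming the same object dicts with a 'clipped' key; the function below is a hypothetical stand-in, much simpler than RegionRefiner.run:

    from typing import Any, Dict, List

    def frame_is_ready(object_groups: List[List[Dict[str, Any]]]) -> bool:
        look_again = False
        for group in object_groups:
            unclipped_objects = [obj for obj in group if obj['clipped'] == False]
            if not unclipped_objects:
                # every object in the group is clipped, so a larger region
                # gets requested and the frame must be examined again
                look_again = True
            # TODO (from the commit): zoom in on unclipped low confidence objects

        # if we are looking again, then this frame is not ready for processing
        return not look_again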
@@ -220,14 +219,19 @@ class RegionRefiner(threading.Thread):
                 else:
                     if deduped_objects[duplicate]['score'] < obj['score']:
                         deduped_objects[duplicate] = obj
 
             self.camera.detected_objects[frame_time] = deduped_objects
 
+            with self.camera.objects_parsed:
+                self.camera.objects_parsed.notify_all()
+
             # print(f"{frame_time} is actually finished")
 
             # keep adding frames to the refined queue as long as they are finished
             with self.camera.regions_in_process_lock:
                 while self.camera.frame_queue.qsize() > 0 and self.camera.frame_queue.queue[0] not in self.camera.regions_in_process:
-                    self.camera.refined_frame_queue.put(self.camera.frame_queue.get())
+                    self.camera.last_processed_frame = self.camera.frame_queue.get()
+                    self.camera.refined_frame_queue.put(self.camera.last_processed_frame)
 
     def has_overlap(self, new_obj, obj, overlap=.7):
         # compute intersection rectangle with existing object and new objects region
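Recording the dequeued frame time in last_processed_frame lets Camera.get_current_frame_with_objects (last hunk) render the most recent fully processed frame instead of the raw current frame. A standalone sketch of the drain loop, assuming float frame times; note that frame_queue.queue[0] peeks at the Queue's underlying deque, which bypasses the Queue's own mutex and is only reasonable here because every writer holds regions_in_process_lock:

    import queue
    import threading
    from typing import Dict, Optional

    frame_queue: "queue.Queue[float]" = queue.Queue()
    refined_frame_queue: "queue.Queue[float]" = queue.Queue()
    regions_in_process: Dict[float, int] = {}
    regions_in_process_lock = threading.Lock()
    last_processed_frame: Optional[float] = None

    def drain_finished_frames() -> None:
        # forward frames in arrival order, stopping at the first frame
        # that still has regions in flight
        global last_processed_frame
        with regions_in_process_lock:
            while frame_queue.qsize() > 0 and frame_queue.queue[0] not in regions_in_process:
                last_processed_frame = frame_queue.get()
                refined_frame_queue.put(last_processed_frame)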
@@ -265,9 +269,9 @@ class ObjectTracker(threading.Thread):
         while True:
             # TODO: track objects
             frame_time = self.camera.refined_frame_queue.get()
-            f = open(f"/debug/{str(frame_time)}.jpg", 'wb')
-            f.write(self.camera.frame_with_objects(frame_time))
-            f.close()
+            # f = open(f"/debug/{str(frame_time)}.jpg", 'wb')
+            # f.write(self.camera.frame_with_objects(frame_time))
+            # f.close()
 
 
     def register(self, index, obj):
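The per-frame debug JPEGs are commented out rather than deleted. If they are needed again, one option (an assumption on my part, not something this commit does) is to gate them behind an environment variable so they can be toggled without editing code:

    import os

    DEBUG_FRAMES = os.environ.get('DEBUG_FRAMES') == '1'  # hypothetical flag

    def maybe_dump_frame(frame_time: float, jpg_bytes: bytes) -> None:
        # write the annotated frame to /debug only when explicitly enabled
        if DEBUG_FRAMES:
            with open(f"/debug/{frame_time}.jpg", 'wb') as f:
                f.write(jpg_bytes)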
@@ -116,6 +116,7 @@ class Camera:
         self.detected_objects = defaultdict(lambda: [])
         self.tracked_objects = []
         self.frame_cache = {}
+        self.last_processed_frame = None
         # queue for re-assembling frames in order
         self.frame_queue = queue.Queue()
         # track how many regions have been requested for a frame so we know when a frame is complete
@@ -332,45 +333,11 @@ class Camera:
         return jpg.tobytes()
 
     def get_current_frame_with_objects(self):
-        # lock and make a copy of the current frame
-        with self.frame_lock:
-            frame = self.current_frame.copy()
-            frame_time = self.frame_time.value
+        frame_time = self.last_processed_frame
 
         if frame_time == self.cached_frame_with_objects['frame_time']:
             return self.cached_frame_with_objects['frame_bytes']
 
-        # make a copy of the current detected objects
-        detected_objects = self.detected_objects.copy()
-
-        # draw the bounding boxes on the screen
-        for obj in [obj for frame_list in detected_objects.values() for obj in frame_list]:
-        # for obj in detected_objects[frame_time]:
-            draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], f"{int(obj['score']*100)}% {obj['area']} {obj['clipped']}")
-            cv2.rectangle(frame, (obj['region']['xmin'], obj['region']['ymin']),
-                (obj['region']['xmax'], obj['region']['ymax']),
-                (0,255,0), 2)
-
-        for region in self.regions:
-            color = (255,255,255)
-            cv2.rectangle(frame, (region['x_offset'], region['y_offset']),
-                (region['x_offset']+region['size'], region['y_offset']+region['size']),
-                color, 2)
-
-        # print a timestamp
-        time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
-        cv2.putText(frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-        # print fps
-        cv2.putText(frame, str(self.fps.eps())+'FPS', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-        # convert to BGR
-        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-
-        # encode the image into a jpg
-        ret, jpg = cv2.imencode('.jpg', frame)
-
-        frame_bytes = jpg.tobytes()
-
+        frame_bytes = self.frame_with_objects(frame_time)
         self.cached_frame_with_objects = {
             'frame_bytes': frame_bytes,
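After this rewrite, get_current_frame_with_objects no longer copies and annotates self.current_frame itself; it keys a one-entry cache on last_processed_frame and delegates all drawing and JPEG encoding to the existing frame_with_objects helper. A self-contained sketch of that memoization pattern, with render() standing in for frame_with_objects (the class and its defaults are illustrative, not frigate's code):

    class CachedRenderer:
        """Single-entry cache keyed on the last processed frame time."""

        def __init__(self) -> None:
            self.last_processed_frame: float = 0.0
            # -1.0 is a sentinel that can never match a real frame time
            self.cached_frame_with_objects = {'frame_time': -1.0, 'frame_bytes': b''}

        def render(self, frame_time: float) -> bytes:
            # placeholder for drawing boxes + cv2.imencode in frame_with_objects
            return f"jpeg-for-{frame_time}".encode()

        def get_current_frame_with_objects(self) -> bytes:
            frame_time = self.last_processed_frame
            # reuse the previous encoding if this frame was already rendered
            if frame_time == self.cached_frame_with_objects['frame_time']:
                return self.cached_frame_with_objects['frame_bytes']
            frame_bytes = self.render(frame_time)
            self.cached_frame_with_objects = {
                'frame_bytes': frame_bytes,
                'frame_time': frame_time,
            }
            return frame_bytes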