From 02efb6f415fc8b77cbde69fb68285bd84f76b3d7 Mon Sep 17 00:00:00 2001
From: Blake Blackshear
Date: Thu, 2 Jan 2020 07:38:50 -0600
Subject: [PATCH] fixing a few things

---
 frigate/objects.py | 26 +++++++++++++++-----------
 frigate/video.py   | 39 +++------------------------------------
 2 files changed, 18 insertions(+), 47 deletions(-)

diff --git a/frigate/objects.py b/frigate/objects.py
index 5d01ff659..5f7aeba2a 100644
--- a/frigate/objects.py
+++ b/frigate/objects.py
@@ -133,9 +133,6 @@ class DetectedObjectsProcessor(threading.Thread):
             # print(f"{frame['frame_time']} no remaining regions")
             self.camera.finished_frame_queue.put(frame['frame_time'])
 
-        with self.camera.objects_parsed:
-            self.camera.objects_parsed.notify_all()
-
 # Thread that checks finished frames for clipped objects and sends back
 # for processing if needed
 class RegionRefiner(threading.Thread):
@@ -166,7 +163,7 @@ class RegionRefiner(threading.Thread):
             self.camera.detected_objects[frame_time] = [obj for obj in self.camera.detected_objects[frame_time] if obj['clipped'] == False]
 
             # print(f"{frame_time} found {len(object_groups)} groups")
-            clipped_object = False
+            look_again = False
             # find the largest unclipped object in each group
             for group in object_groups:
                 unclipped_objects = [obj for obj in group if obj['clipped'] == False]
@@ -198,10 +195,12 @@ class RegionRefiner(threading.Thread):
                         'y_offset': y_offset
                     })
                     self.camera.dynamic_region_fps.update()
-                    clipped_object = True
+                    look_again = True
+                # TODO: zoom in on unclipped low confidence objects
+                # else: ...
 
-            # if we found a clipped object, then this frame is not ready for processing
-            if clipped_object:
+            # if we are looking again, then this frame is not ready for processing
+            if look_again:
                 continue
 
             # dedupe the unclipped objects
@@ -220,14 +219,19 @@ class RegionRefiner(threading.Thread):
                 else:
                     if deduped_objects[duplicate]['score'] < obj['score']:
                         deduped_objects[duplicate] = obj
+
             self.camera.detected_objects[frame_time] = deduped_objects
+
+            with self.camera.objects_parsed:
+                self.camera.objects_parsed.notify_all()
 
             # print(f"{frame_time} is actually finished")
 
             # keep adding frames to the refined queue as long as they are finished
             with self.camera.regions_in_process_lock:
                 while self.camera.frame_queue.qsize() > 0 and self.camera.frame_queue.queue[0] not in self.camera.regions_in_process:
-                    self.camera.refined_frame_queue.put(self.camera.frame_queue.get())
+                    self.camera.last_processed_frame = self.camera.frame_queue.get()
+                    self.camera.refined_frame_queue.put(self.camera.last_processed_frame)
 
     def has_overlap(self, new_obj, obj, overlap=.7):
         # compute intersection rectangle with existing object and new objects region
@@ -265,9 +269,9 @@ class ObjectTracker(threading.Thread):
         while True:
             # TODO: track objects
             frame_time = self.camera.refined_frame_queue.get()
-            f = open(f"/debug/{str(frame_time)}.jpg", 'wb')
-            f.write(self.camera.frame_with_objects(frame_time))
-            f.close()
+            # f = open(f"/debug/{str(frame_time)}.jpg", 'wb')
+            # f.write(self.camera.frame_with_objects(frame_time))
+            # f.close()
 
     def register(self, index, obj):
 
diff --git a/frigate/video.py b/frigate/video.py
index 0c874655b..6e22dc13a 100644
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -116,6 +116,7 @@ class Camera:
         self.detected_objects = defaultdict(lambda: [])
         self.tracked_objects = []
         self.frame_cache = {}
+        self.last_processed_frame = None
         # queue for re-assembling frames in order
         self.frame_queue = queue.Queue()
         # track how many regions have been requested for a frame so we know when a frame is complete
@@ -332,45 +333,11 @@ class Camera:
         return jpg.tobytes()
 
     def get_current_frame_with_objects(self):
-        # lock and make a copy of the current frame
-        with self.frame_lock:
-            frame = self.current_frame.copy()
-            frame_time = self.frame_time.value
-
+        frame_time = self.last_processed_frame
         if frame_time == self.cached_frame_with_objects['frame_time']:
             return self.cached_frame_with_objects['frame_bytes']
 
-        # make a copy of the current detected objects
-        detected_objects = self.detected_objects.copy()
-
-        # draw the bounding boxes on the screen
-        for obj in [obj for frame_list in detected_objects.values() for obj in frame_list]:
-        # for obj in detected_objects[frame_time]:
-            draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], f"{int(obj['score']*100)}% {obj['area']} {obj['clipped']}")
-            cv2.rectangle(frame, (obj['region']['xmin'], obj['region']['ymin']),
-                (obj['region']['xmax'], obj['region']['ymax']),
-                (0,255,0), 2)
-
-        for region in self.regions:
-            color = (255,255,255)
-            cv2.rectangle(frame, (region['x_offset'], region['y_offset']),
-                (region['x_offset']+region['size'], region['y_offset']+region['size']),
-                color, 2)
-
-        # print a timestamp
-        time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
-        cv2.putText(frame, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-        # print fps
-        cv2.putText(frame, str(self.fps.eps())+'FPS', (10, 60), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-        # convert to BGR
-        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-
-        # encode the image into a jpg
-        ret, jpg = cv2.imencode('.jpg', frame)
-
-        frame_bytes = jpg.tobytes()
+        frame_bytes = self.frame_with_objects(frame_time)
 
         self.cached_frame_with_objects = {
             'frame_bytes': frame_bytes,
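
Note on the objects_parsed move: DetectedObjectsProcessor used to wake anyone
waiting on objects_parsed as soon as a frame's regions were parsed, before
RegionRefiner had discarded clipped objects and deduped the rest; the patch
defers the notify_all() until the deduped list is published. A minimal sketch
of that hand-off using Python's threading.Condition; the Camera stub and the
wait_for_refined consumer are hypothetical stand-ins, not code from this repo:

    import threading

    class Camera:
        # hypothetical stand-in with only the fields this hand-off touches
        def __init__(self):
            self.detected_objects = {}
            self.objects_parsed = threading.Condition()

    def publish_refined(camera, frame_time, deduped_objects):
        # publish the final, deduped object list for the frame first...
        camera.detected_objects[frame_time] = deduped_objects
        # ...then wake waiters, so no reader ever sees clipped/duplicate boxes
        with camera.objects_parsed:
            camera.objects_parsed.notify_all()

    def wait_for_refined(camera):
        # hypothetical consumer: blocks until a refined frame is published
        with camera.objects_parsed:
            camera.objects_parsed.wait()
        return camera.detected_objects

Ordering the write before the notify is what makes the move safe: a waiter
woken by notify_all() immediately reads detected_objects, so the deduped list
must already be in place.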
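The dedupe pass leans on has_overlap(new_obj, obj, overlap=.7), whose body is
not part of these hunks beyond the "compute intersection rectangle" comment.
One plausible reading of that comment, sketched as a free function and offered
strictly as an assumption about how such a check could work:

    def has_overlap(new_obj, obj, overlap=.7):
        # assumed implementation: intersect the two bounding boxes and treat
        # the objects as duplicates when the overlap covers most of new_obj
        xmin = max(new_obj['box']['xmin'], obj['box']['xmin'])
        ymin = max(new_obj['box']['ymin'], obj['box']['ymin'])
        xmax = min(new_obj['box']['xmax'], obj['box']['xmax'])
        ymax = min(new_obj['box']['ymax'], obj['box']['ymax'])
        if xmax <= xmin or ymax <= ymin:
            return False  # empty intersection rectangle
        intersection = (xmax - xmin) * (ymax - ymin)
        # 'area' is the precomputed box area seen elsewhere in the patch
        return intersection / new_obj['area'] > overlap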
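On the video.py side, the inline drawing/encoding code is replaced by a call to
the existing frame_with_objects() helper, keyed off the new
last_processed_frame marker that RegionRefiner now advances, so the MJPEG
endpoint serves the newest fully refined frame rather than whatever frame the
capture thread is mid-write on. The hunk is cut off after the cache
assignment; the method presumably ends approximately like this (the
'frame_time' key and the return are assumptions):

    def get_current_frame_with_objects(self):
        # render the newest frame that has finished region refinement
        frame_time = self.last_processed_frame
        # one-entry cache: only re-draw/encode when a newer frame has landed
        if frame_time == self.cached_frame_with_objects['frame_time']:
            return self.cached_frame_with_objects['frame_bytes']
        frame_bytes = self.frame_with_objects(frame_time)
        self.cached_frame_with_objects = {
            'frame_bytes': frame_bytes,
            'frame_time': frame_time  # assumed key; the hunk cuts off above
        }
        return frame_bytes  # assumed return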