merge boxes by label

Blake Blackshear 2020-01-06 20:36:04 -06:00
parent 08174d8db2
commit 49dc029c43

@@ -113,45 +113,49 @@ class RegionRefiner(threading.Thread):
             detected_objects = self.camera.detected_objects[frame_time].copy()
             # print(f"{frame_time} finished")
-            # apply non-maxima suppression to suppress weak, overlapping bounding boxes
-            boxes = [(o['box']['xmin'], o['box']['ymin'], o['box']['xmax']-o['box']['xmin'], o['box']['ymax']-o['box']['ymin'])
-                for o in detected_objects]
-            confidences = [o['score'] for o in detected_objects]
-            idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
-            # print(f"{frame_time} - NMS reduced objects from {len(detected_objects)} to {len(idxs)}")
+            detected_object_groups = defaultdict(lambda: [])
+            # group by name
+            for obj in detected_objects:
+                detected_object_groups[obj['name']].append(obj)
             look_again = False
-            # get selected objects
             selected_objects = []
-            for index in idxs:
-                obj = detected_objects[index[0]]
-                selected_objects.append(obj)
-                if obj['clipped']:
-                    box = obj['box']
-                    # calculate a new region that will hopefully get the entire object
-                    (size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
-                        box['xmin'], box['ymin'],
-                        box['xmax'], box['ymax'])
-                    # print(f"{frame_time} new region: {size} {x_offset} {y_offset}")
-                    with self.camera.regions_in_process_lock:
-                        if not frame_time in self.camera.regions_in_process:
-                            self.camera.regions_in_process[frame_time] = 1
-                        else:
-                            self.camera.regions_in_process[frame_time] += 1
-                    # add it to the queue
-                    self.camera.resize_queue.put({
-                        'camera_name': self.camera.name,
-                        'frame_time': frame_time,
-                        'region_id': -1,
-                        'size': size,
-                        'x_offset': x_offset,
-                        'y_offset': y_offset
-                    })
-                    self.camera.dynamic_region_fps.update()
-                    look_again = True
+            for name, group in detected_object_groups.items():
+                # apply non-maxima suppression to suppress weak, overlapping bounding boxes
+                boxes = [(o['box']['xmin'], o['box']['ymin'], o['box']['xmax']-o['box']['xmin'], o['box']['ymax']-o['box']['ymin'])
+                    for o in detected_objects]
+                confidences = [o['score'] for o in detected_objects]
+                idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
+                for index in idxs:
+                    obj = group[index[0]]
+                    selected_objects.append(obj)
+                    if obj['clipped']:
+                        box = obj['box']
+                        # calculate a new region that will hopefully get the entire object
+                        (size, x_offset, y_offset) = calculate_region(self.camera.frame_shape,
+                            box['xmin'], box['ymin'],
+                            box['xmax'], box['ymax'])
+                        # print(f"{frame_time} new region: {size} {x_offset} {y_offset}")
+                        with self.camera.regions_in_process_lock:
+                            if not frame_time in self.camera.regions_in_process:
+                                self.camera.regions_in_process[frame_time] = 1
+                            else:
+                                self.camera.regions_in_process[frame_time] += 1
+                        # add it to the queue
+                        self.camera.resize_queue.put({
+                            'camera_name': self.camera.name,
+                            'frame_time': frame_time,
+                            'region_id': -1,
+                            'size': size,
+                            'x_offset': x_offset,
+                            'y_offset': y_offset
+                        })
+                        self.camera.dynamic_region_fps.update()
+                        look_again = True
             # if we are looking again, then this frame is not ready for processing
             if look_again:
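
For reference, the net effect of this change is that non-maxima suppression now runs once per object label instead of once over every detection in the frame, so overlapping boxes of different classes (say a person standing in front of a car) no longer suppress each other. Below is a minimal standalone sketch of that per-label NMS step, not the commit's code: the detection dict layout mirrors the diff, but merge_boxes_by_label, the threshold defaults, and the sample data are illustrative, and the boxes/confidences lists are built from each group rather than from all detections.

    import cv2
    import numpy as np
    from collections import defaultdict

    def merge_boxes_by_label(detected_objects, score_threshold=0.5, nms_threshold=0.4):
        # group detections by label so suppression only happens within a class
        groups = defaultdict(list)
        for obj in detected_objects:
            groups[obj['name']].append(obj)

        selected = []
        for name, group in groups.items():
            # cv2.dnn.NMSBoxes expects (x, y, w, h) boxes and a parallel list of scores
            boxes = [(o['box']['xmin'], o['box']['ymin'],
                      o['box']['xmax'] - o['box']['xmin'],
                      o['box']['ymax'] - o['box']['ymin']) for o in group]
            confidences = [o['score'] for o in group]
            idxs = cv2.dnn.NMSBoxes(boxes, confidences, score_threshold, nms_threshold)
            # older OpenCV returns an Nx1 array, newer returns a flat one; flatten either way
            for index in np.array(idxs).flatten():
                selected.append(group[int(index)])
        return selected

    detections = [
        {'name': 'person', 'score': 0.9, 'box': {'xmin': 10, 'ymin': 10, 'xmax': 110, 'ymax': 210}},
        {'name': 'person', 'score': 0.6, 'box': {'xmin': 15, 'ymin': 12, 'xmax': 112, 'ymax': 205}},
        {'name': 'car',    'score': 0.8, 'box': {'xmin': 12, 'ymin': 14, 'xmax': 118, 'ymax': 208}},
    ]
    print([o['name'] for o in merge_boxes_by_label(detections)])
    # expected: one 'person' (the weaker overlapping person box is suppressed) plus the 'car'

Running NMS per label keeps the intended behavior of collapsing duplicate boxes for the same object while still reporting distinct objects of different classes that happen to overlap.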