track objects and add config for tracked objects

Repository: https://github.com/blakeblackshear/frigate.git (mirror)
Commit: cc2abe93a6
Parent: 0c6717090c
@@ -6,7 +6,7 @@ import prctl
 import numpy as np
 from edgetpu.detection.engine import DetectionEngine
 
-from frigate.util import tonumpyarray, LABELS, PATH_TO_CKPT
+from frigate.util import tonumpyarray, LABELS, PATH_TO_CKPT, calculate_region
 
 class PreppedQueueProcessor(threading.Thread):
     def __init__(self, cameras, prepped_frame_queue, fps, queue_full):
@@ -57,8 +57,12 @@ class RegionRequester(threading.Thread):
             # make a copy of the frame_time
             frame_time = self.camera.frame_time.value
 
+            # grab the current tracked objects
+            tracked_objects = self.camera.object_tracker.tracked_objects.values()
+
             with self.camera.regions_in_process_lock:
                 self.camera.regions_in_process[frame_time] = len(self.camera.config['regions'])
+                self.camera.regions_in_process[frame_time] += len(tracked_objects)
 
             for index, region in enumerate(self.camera.config['regions']):
                 self.camera.resize_queue.put({
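Note: `regions_in_process` is a per-frame counter of how many regions are still waiting on detection results, so the dynamic regions requested for tracked objects have to be added to it alongside the statically configured ones. A minimal sketch of that bookkeeping with hypothetical names (not frigate's actual classes), assuming consumers decrement the counter as each region's results come back:

    import threading
    from collections import defaultdict

    # Hypothetical, simplified bookkeeping sketch, not frigate's real classes.
    class RegionAccounting:
        def __init__(self):
            self.lock = threading.Lock()
            self.regions_in_process = defaultdict(int)

        def request(self, frame_time, num_regions):
            # called once per frame before queueing resize requests
            with self.lock:
                self.regions_in_process[frame_time] += num_regions

        def complete(self, frame_time):
            # called as each region's detections are processed
            with self.lock:
                self.regions_in_process[frame_time] -= 1
                if self.regions_in_process[frame_time] == 0:
                    del self.regions_in_process[frame_time]
                    return True  # frame is fully processed
                return False

    acct = RegionAccounting()
    acct.request(1234.5, num_regions=3 + 2)  # 3 static regions + 2 tracked objects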
@@ -70,6 +74,24 @@ class RegionRequester(threading.Thread):
                     'y_offset': region['y_offset']
                 })
             
+            # request a region for tracked objects
+            for tracked_object in tracked_objects:
+                box = tracked_object['box']
+                # calculate a new region that will hopefully get the entire object
+                (size, x_offset, y_offset) = calculate_region(self.camera.frame_shape, 
+                    box['xmin'], box['ymin'],
+                    box['xmax'], box['ymax'])
+
+                self.camera.resize_queue.put({
+                    'camera_name': self.camera.name,
+                    'frame_time': frame_time,
+                    'region_id': -1,
+                    'size': size,
+                    'x_offset': x_offset,
+                    'y_offset': y_offset
+                })
+
+
 class RegionPrepper(threading.Thread):
     def __init__(self, frame_cache, resize_request_queue, prepped_frame_queue):
         threading.Thread.__init__(self)
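`calculate_region` is imported at the top of this file but its body is not part of the diff. From the call site it takes the frame shape and the corners of a bounding box and returns `(size, x_offset, y_offset)` for a square crop. A plausible sketch, assuming it expands the box by a margin, centers the square on the box, and clamps it to the frame:

    def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2.0):
        # Hypothetical reimplementation; the real helper lives in frigate.util.
        frame_height, frame_width = frame_shape[0], frame_shape[1]

        # square size proportional to the larger box dimension
        size = int(max(xmax - xmin, ymax - ymin) * multiplier)
        size = min(size, frame_width, frame_height)

        # center the region on the box center
        x_offset = int((xmin + xmax) / 2.0 - size / 2.0)
        y_offset = int((ymin + ymax) / 2.0 - size / 2.0)

        # clamp so the region stays inside the frame
        x_offset = max(0, min(x_offset, frame_width - size))
        y_offset = max(0, min(y_offset, frame_height - size))

        return (size, x_offset, y_offset)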
@@ -50,14 +50,14 @@ class DetectedObjectsProcessor(threading.Thread):
 
             objects = frame['detected_objects']
 
-            # print(f"Processing objects for: {frame['size']} {frame['x_offset']} {frame['y_offset']}")
-
-            # if len(objects) == 0:
-            #     continue
-
             for raw_obj in objects:
+                name = str(LABELS[raw_obj.label_id])
+
+                if not name in self.camera.objects_to_track:
+                    continue
+
                 obj = {
-                    'name': str(LABELS[raw_obj.label_id]),
+                    'name': name,
                     'score': float(raw_obj.score),
                     'box': {
                         'xmin': int((raw_obj.bounding_box[0][0] * frame['size']) + frame['x_offset']),
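`LABELS` maps the model's numeric `label_id` to a class name, and any detection whose name is not in the camera's `objects_to_track` set is now dropped before an `obj` dict is built. A standalone illustration of that filter with made-up labels and detections:

    # Hypothetical label map and detections; frigate loads LABELS from the model's label file.
    LABELS = {0: 'person', 1: 'bicycle', 2: 'car', 7: 'truck'}
    objects_to_track = {'person', 'car', 'truck'}

    raw_detections = [
        {'label_id': 0, 'score': 0.92},   # person -> kept
        {'label_id': 1, 'score': 0.88},   # bicycle -> skipped
    ]

    kept = []
    for raw_obj in raw_detections:
        name = str(LABELS[raw_obj['label_id']])
        if name not in objects_to_track:
            continue
        kept.append({'name': name, 'score': float(raw_obj['score'])})

    print(kept)  # [{'name': 'person', 'score': 0.92}]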
@@ -75,9 +75,6 @@ class DetectedObjectsProcessor(threading.Thread):
                     'region_id': frame['region_id']
                 }
                 
-                if not obj['name'] == 'bicycle':
-                    continue
-                
                 # if the object is within 5 pixels of the region border, and the region is not on the edge
                 # consider the object to be clipped
                 obj['clipped'] = False
@@ -245,15 +242,14 @@ class ObjectTracker(threading.Thread):
     def run(self):
         prctl.set_name(self.__class__.__name__)
         while True:
-            # TODO: track objects
             frame_time = self.camera.refined_frame_queue.get()
+            self.match_and_update(self.camera.detected_objects[frame_time])
             # f = open(f"/debug/{str(frame_time)}.jpg", 'wb')
             # f.write(self.camera.frame_with_objects(frame_time))
             # f.close()
 
-
     def register(self, index, obj):
-        id = f"{str(obj.frame_time)}-{index}"
+        id = f"{str(obj['frame_time'])}-{index}"
         self.tracked_objects[id] = obj
         self.disappeared[id] = 0
 
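Each tracked object is registered under an id built from the frame time of its first detection plus its index in that frame's detections, alongside a `disappeared` counter for frames where it goes unmatched. A simplified, hypothetical class showing just that registration bookkeeping:

    class MiniTracker:
        # Simplified sketch of the register/deregister bookkeeping, not frigate's class.
        def __init__(self, max_disappeared=50):
            self.tracked_objects = {}
            self.disappeared = {}
            self.max_disappeared = max_disappeared

        def register(self, index, obj):
            # id is stable for the life of the track: first frame_time + index
            id = f"{str(obj['frame_time'])}-{index}"
            self.tracked_objects[id] = obj
            self.disappeared[id] = 0

        def deregister(self, id):
            del self.disappeared[id]
            del self.tracked_objects[id]

        def mark_missing(self, id):
            # called when no new detection matched this object
            self.disappeared[id] += 1
            if self.disappeared[id] > self.max_disappeared:
                self.deregister(id)

    tracker = MiniTracker()
    tracker.register(0, {'frame_time': 1234.5, 'name': 'person', 'box': {}})
    print(list(tracker.tracked_objects))  # ['1234.5-0']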
@@ -262,10 +258,12 @@ class ObjectTracker(threading.Thread):
         del self.tracked_objects[id]
     
     def update(self, id, new_obj):
-        new_obj.detections = self.tracked_objects[id].detections
-        new_obj.detections.append({
-        })
+        self.tracked_objects[id]['centroid'] = new_obj['centroid']
+        self.tracked_objects[id]['box'] = new_obj['box']
+        self.tracked_objects[id]['region'] = new_obj['region']
+        self.tracked_objects[id]['score'] = new_obj['score']
+        self.tracked_objects[id]['name'] = new_obj['name']
+        # TODO: am i missing anything? history?
 
     def match_and_update(self, new_objects):
         # check to see if the list of input bounding box rectangles
@@ -290,16 +288,16 @@ class ObjectTracker(threading.Thread):
         for obj in new_objects:
             centroid_x = int((obj['box']['xmin']+obj['box']['xmax']) / 2.0)
             centroid_y = int((obj['box']['ymin']+obj['box']['ymax']) / 2.0)
-            obj.centroid = (centroid_x, centroid_y)
+            obj['centroid'] = (centroid_x, centroid_y)
 
         if len(self.tracked_objects) == 0:
             for index, obj in enumerate(new_objects):
                 self.register(index, obj)
             return
         
-        new_centroids = np.array([o.centroid for o in new_objects])
+        new_centroids = np.array([o['centroid'] for o in new_objects])
         current_ids = list(self.tracked_objects.keys())
-        current_centroids = np.array([o.centroid for o in self.tracked_objects])
+        current_centroids = np.array([o['centroid'] for o in self.tracked_objects.values()])
 
         # compute the distance between each pair of tracked
         # centroids and new centroids, respectively -- our
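The matching that follows (and the commented-out reference implementation deleted further down) pairs existing tracks with new detections by centroid distance: build a distance matrix with `scipy.spatial.distance.cdist`, then greedily take the closest pairs. A small worked example of that pairing, assuming scipy is installed:

    import numpy as np
    from scipy.spatial import distance as dist

    # two existing tracks and two new detections (centroids in pixels)
    current_centroids = np.array([[100, 120], [400, 300]])
    new_centroids = np.array([[405, 310], [98, 125]])

    # D[i, j] = distance between existing track i and new detection j
    D = dist.cdist(current_centroids, new_centroids)

    # greedy matching: rows sorted by their best (smallest) distance,
    # each row paired with its closest column
    rows = D.min(axis=1).argsort()
    cols = D.argmin(axis=1)[rows]

    for row, col in zip(rows, cols):
        print(f"track {row} -> detection {col} (distance {D[row, col]:.1f})")
    # track 0 -> detection 1 (distance 5.4)
    # track 1 -> detection 0 (distance 11.2)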
@@ -376,110 +374,6 @@ class ObjectTracker(threading.Thread):
             for col in unusedCols:
                 self.register(col, new_objects[col])
 
-
-        # -------------
-
-        # # initialize an array of input centroids for the current frame
-        # inputCentroids = np.zeros((len(rects), 2), dtype="int")
-
-        # # loop over the bounding box rectangles
-        # for (i, (startX, startY, endX, endY)) in enumerate(rects):
-        #     # use the bounding box coordinates to derive the centroid
-        #     cX = int((startX + endX) / 2.0)
-        #     cY = int((startY + endY) / 2.0)
-        #     inputCentroids[i] = (cX, cY)
-
-        # # if we are currently not tracking any objects take the input
-        # # centroids and register each of them
-        # if len(self.objects) == 0:
-        #     for i in range(0, len(inputCentroids)):
-        #         self.register(inputCentroids[i])
-        # # otherwise, are are currently tracking objects so we need to
-        # # try to match the input centroids to existing object
-        # # centroids
-        # else:
-        #     # grab the set of object IDs and corresponding centroids
-        #     objectIDs = list(self.objects.keys())
-        #     objectCentroids = list(self.objects.values())
-
-        #     # compute the distance between each pair of object
-        #     # centroids and input centroids, respectively -- our
-        #     # goal will be to match an input centroid to an existing
-        #     # object centroid
-        #     D = dist.cdist(np.array(objectCentroids), inputCentroids)
-
-        #     # in order to perform this matching we must (1) find the
-        #     # smallest value in each row and then (2) sort the row
-        #     # indexes based on their minimum values so that the row
-        #     # with the smallest value is at the *front* of the index
-        #     # list
-        #     rows = D.min(axis=1).argsort()
-
-        #     # next, we perform a similar process on the columns by
-        #     # finding the smallest value in each column and then
-        #     # sorting using the previously computed row index list
-        #     cols = D.argmin(axis=1)[rows]
-
-        #     # in order to determine if we need to update, register,
-        #     # or deregister an object we need to keep track of which
-        #     # of the rows and column indexes we have already examined
-        #     usedRows = set()
-        #     usedCols = set()
-
-        #     # loop over the combination of the (row, column) index
-        #     # tuples
-        #     for (row, col) in zip(rows, cols):
-        #         # if we have already examined either the row or
-        #         # column value before, ignore it
-        #         # val
-        #         if row in usedRows or col in usedCols:
-        #             continue
-
-        #         # otherwise, grab the object ID for the current row,
-        #         # set its new centroid, and reset the disappeared
-        #         # counter
-        #         objectID = objectIDs[row]
-        #         self.objects[objectID] = inputCentroids[col]
-        #         self.disappeared[objectID] = 0
-
-        #         # indicate that we have examined each of the row and
-        #         # column indexes, respectively
-        #         usedRows.add(row)
-        #         usedCols.add(col)
-
-        #     # compute both the row and column index we have NOT yet
-        #     # examined
-        #     unusedRows = set(range(0, D.shape[0])).difference(usedRows)
-        #     unusedCols = set(range(0, D.shape[1])).difference(usedCols)
-
-        #     # in the event that the number of object centroids is
-        #     # equal or greater than the number of input centroids
-        #     # we need to check and see if some of these objects have
-        #     # potentially disappeared
-        #     if D.shape[0] >= D.shape[1]:
-        #         # loop over the unused row indexes
-        #         for row in unusedRows:
-        #             # grab the object ID for the corresponding row
-        #             # index and increment the disappeared counter
-        #             objectID = objectIDs[row]
-        #             self.disappeared[objectID] += 1
-
-        #             # check to see if the number of consecutive
-        #             # frames the object has been marked "disappeared"
-        #             # for warrants deregistering the object
-        #             if self.disappeared[objectID] > self.maxDisappeared:
-        #                 self.deregister(objectID)
-
-        #     # otherwise, if the number of input centroids is greater
-        #     # than the number of existing object centroids we need to
-        #     # register each new input centroid as a trackable object
-        #     else:
-        #         for col in unusedCols:
-        #             self.register(inputCentroids[col])
-
-        # # return the set of trackable objects
-        # return self.objects
-
 # Maintains the frame and object with the highest score
 class BestFrames(threading.Thread):
     def __init__(self, objects_parsed, recent_frames, detected_objects):
@@ -114,7 +114,6 @@ class Camera:
         self.name = name
         self.config = config
         self.detected_objects = defaultdict(lambda: [])
-        self.tracked_objects = []
         self.frame_cache = {}
         self.last_processed_frame = None
         # queue for re-assembling frames in order
@@ -172,21 +171,13 @@ class Camera:
         self.capture_thread = None
         self.fps = EventsPerSecond()
 
-        # merge object filter config
-        objects_with_config = set().union(global_objects_config.keys(), camera_objects_config.keys())
+        # combine tracked objects lists
+        self.objects_to_track = set().union(global_objects_config.get('track', ['person', 'car', 'truck']), camera_objects_config.get('track', []))
+
+        # merge object filters
+        objects_with_config = set().union(global_objects_config.get('filters', {}).keys(), camera_objects_config.get('filters', {}).keys())
         for obj in objects_with_config:
-            self.object_filters = {**global_objects_config.get(obj,{}), **camera_objects_config.get(obj, {})}
+            self.object_filters = {**global_objects_config.get(obj, {}), **camera_objects_config.get(obj, {})}
-
-        # # for each region, merge the object config
-        # for region in self.config['regions']:
-        #     region_objects = region.get('objects', {})
-        #     # build objects config for region
-        #     objects_with_config = set().union(global_objects_config.keys(), camera_objects_config.keys(), region_objects.keys())
-        #     merged_objects_config = defaultdict(lambda: {})
-        #     for obj in objects_with_config:
-        #         merged_objects_config[obj] = {**global_objects_config.get(obj,{}), **camera_objects_config.get(obj, {}), **region_objects.get(obj, {})}
-            
-        #     region['objects'] = merged_objects_config
 
         # start a thread to queue resize requests for regions
         self.region_requester = RegionRequester(self)
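The `objects` config section is now expected to provide a `track` list and a `filters` map at both the global and the per-camera level; the camera values are unioned with the global ones, and `['person', 'car', 'truck']` is the default track list. A sketch with hypothetical values showing how the two parsed sections combine, reusing the exact expressions from the diff:

    # Hypothetical parsed config sections (frigate reads these from its config file).
    global_objects_config = {
        'track': ['person', 'car', 'truck'],
        'filters': {'person': {'min_area': 5000}},
    }
    camera_objects_config = {
        'track': ['dog'],
        'filters': {'car': {'threshold': 0.5}},
    }

    # same expressions as in the diff above
    objects_to_track = set().union(
        global_objects_config.get('track', ['person', 'car', 'truck']),
        camera_objects_config.get('track', []))
    print(objects_to_track)     # {'person', 'car', 'truck', 'dog'}

    objects_with_config = set().union(
        global_objects_config.get('filters', {}).keys(),
        camera_objects_config.get('filters', {}).keys())
    print(objects_with_config)  # {'person', 'car'}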
@@ -311,12 +302,12 @@ class Camera:
                 color, 2)
 
         # draw the bounding boxes on the screen
-        for obj in self.detected_objects[frame_time]:
+        for id, obj in self.object_tracker.tracked_objects.items():
         # for obj in detected_objects[frame_time]:
             cv2.rectangle(frame, (obj['region']['xmin'], obj['region']['ymin']), 
                 (obj['region']['xmax'], obj['region']['ymax']), 
                 (0,255,0), 1)
-            draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], f"{int(obj['score']*100)}% {obj['area']} {obj['clipped']}")
+            draw_box_with_label(frame, obj['box']['xmin'], obj['box']['ymin'], obj['box']['xmax'], obj['box']['ymax'], obj['name'], f"{int(obj['score']*100)}% {obj['area']} {id}")
             
         # print a timestamp
         time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
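`draw_box_with_label` is not shown in this diff; from the call it takes the frame, the box corners, the object name, and a caption string, which now ends with the tracker id instead of the clipped flag. A rough OpenCV stand-in, offered only as an assumption about what the helper does:

    import cv2

    def draw_box_with_label(frame, xmin, ymin, xmax, ymax, label, info):
        # Hypothetical stand-in for the real helper in frigate.util.
        color = (0, 0, 255)
        cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
        caption = f"{label}: {info}"
        # put the caption just above the box, clamped to the frame
        text_y = max(ymin - 5, 15)
        cv2.putText(frame, caption, (xmin, text_y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)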