From 80f8256422253d28f387589f8473e468aae5daf1 Mon Sep 17 00:00:00 2001
From: Sean Vig
Date: Fri, 21 May 2021 22:11:36 -0400
Subject: [PATCH] Improve handling of object matching

Use `np.unique` to determine the correct set of row/col pairs to
iterate over when doing the object matching without needing to track
which rows or columns have already been seen. Add to some of the
accompanying documentation to clarify this algorithm. Also fix what
looks to be an erroneous early return, and change this to a continue.
---
 frigate/objects.py | 57 +++++++++++++++++++---------------------------
 1 file changed, 23 insertions(+), 34 deletions(-)

diff --git a/frigate/objects.py b/frigate/objects.py
index 229eb3694..4da7f12d4 100644
--- a/frigate/objects.py
+++ b/frigate/objects.py
@@ -82,56 +82,45 @@ class ObjectTracker:
             if len(current_objects) == 0:
                 for index, obj in enumerate(group):
                     self.register(index, obj)
-                return
+                continue
 
             new_centroids = np.array([o["centroid"] for o in group])
 
             # compute the distance between each pair of tracked
             # centroids and new centroids, respectively -- our
-            # goal will be to match each new centroid to an existing
+            # goal will be to match each current centroid to a new
             # object centroid
             D = dist.cdist(current_centroids, new_centroids)
 
-            # in order to perform this matching we must (1) find the
-            # smallest value in each row and then (2) sort the row
-            # indexes based on their minimum values so that the row
-            # with the smallest value is at the *front* of the index
-            # list
+            # in order to perform this matching we must (1) find the smallest
+            # value in each row (i.e. the distance from each current object to
+            # the closest new object) and then (2) sort the row indexes based
+            # on their minimum values so that the row with the smallest
+            # distance (the best match) is at the *front* of the index list
             rows = D.min(axis=1).argsort()
 
-            # next, we perform a similar process on the columns by
-            # finding the smallest value in each column and then
-            # sorting using the previously computed row index list
+            # next, we determine which new object each existing object matched
+            # against, and apply the same sorting as was applied previously
             cols = D.argmin(axis=1)[rows]
 
-            # in order to determine if we need to update, register,
-            # or deregister an object we need to keep track of which
-            # of the rows and column indexes we have already examined
-            usedRows = set()
-            usedCols = set()
+            # many current objects may register with each new object, so only
+            # match the closest ones.  np.unique returns the indices of the first
+            # occurrences of each value, and because the rows are sorted by
+            # distance, this will be index of the closest match
+            _, index = np.unique(cols, return_index=True)
+            rows = rows[index]
+            cols = cols[index]
 
-            # loop over the combination of the (row, column) index
-            # tuples
-            for (row, col) in zip(rows, cols):
-                # if we have already examined either the row or
-                # column value before, ignore it
-                if row in usedRows or col in usedCols:
-                    continue
-
-                # otherwise, grab the object ID for the current row,
-                # set its new centroid, and reset the disappeared
-                # counter
+            # loop over the combination of the (row, column) index tuples
+            for row, col in zip(rows, cols):
+                # grab the object ID for the current row, set its new centroid,
+                # and reset the disappeared counter
                 objectID = current_ids[row]
                 self.update(objectID, group[col])
 
-                # indicate that we have examined each of the row and
-                # column indexes, respectively
-                usedRows.add(row)
-                usedCols.add(col)
-
-            # compute the column index we have NOT yet examined
-            unusedRows = set(range(0, D.shape[0])).difference(usedRows)
-            unusedCols = set(range(0, D.shape[1])).difference(usedCols)
+            # compute the row and column indices we have NOT yet examined
+            unusedRows = set(range(D.shape[0])).difference(rows)
+            unusedCols = set(range(D.shape[1])).difference(cols)
 
             # in the event that the number of object centroids is
             # equal or greater than the number of input centroids
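
Note: the example below is not part of the patch. It is a minimal, self-contained sketch of the np.unique matching step described in the commit message, using made-up centroids. The variable names (D, rows, cols, index) mirror the patch, but the sample data and the final print are illustrative only.

    import numpy as np
    from scipy.spatial import distance as dist

    # hypothetical centroids: three tracked objects, two new detections
    current_centroids = np.array([[10, 10], [50, 50], [90, 90]])
    new_centroids = np.array([[12, 11], [52, 48]])

    # pairwise distances: rows are current objects, columns are new objects
    D = dist.cdist(current_centroids, new_centroids)

    # sort current objects by their distance to the closest new object
    rows = D.min(axis=1).argsort()
    # for each sorted row, the column (new object) it is closest to
    cols = D.argmin(axis=1)[rows]  # here: [0, 1, 1] -- two rows want column 1

    # np.unique returns the index of the first occurrence of each column value;
    # because the rows are sorted by distance, that first occurrence is the
    # closest current object for that new object
    _, index = np.unique(cols, return_index=True)
    rows, cols = rows[index], cols[index]

    # [(0, 0), (1, 1)]; row 2 is left unmatched and falls into unusedRows
    print([(int(r), int(c)) for r, c in zip(rows, cols)])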