Face recognition improvements (#17387)

* Increase frequency of updates when internal face detection is used

* Adjust number of required faces based on detection type

* Adjust min_score config to unknown_score

* Only for person

* Improve typing

* Update face rec docs

* Cleanup ui colors

* Cleanup
This commit is contained in:
Nicolas Mowen
2025-03-26 07:23:01 -06:00
committed by GitHub
parent 395fc33ccc
commit e3d4b84803
8 changed files with 97 additions and 65 deletions

View File

@@ -164,9 +164,7 @@ class LBPHRecognizer(FaceRecognizer):
return
self.recognizer: cv2.face.LBPHFaceRecognizer = (
cv2.face.LBPHFaceRecognizer_create(
radius=2, threshold=(1 - self.config.face_recognition.min_score) * 1000
)
cv2.face.LBPHFaceRecognizer_create(radius=2, threshold=400)
)
self.recognizer.train(faces, np.array(labels))
@@ -243,6 +241,8 @@ class ArcFaceRecognizer(FaceRecognizer):
for name, embs in face_embeddings_map.items():
self.mean_embs[name] = stats.trim_mean(embs, 0.15)
logger.debug("Finished building ArcFace model")
def similarity_to_confidence(
self, cosine_similarity: float, median=0.3, range_width=0.6, slope_factor=12
):
@@ -302,7 +302,4 @@ class ArcFaceRecognizer(FaceRecognizer):
score = confidence
label = name
if score < self.config.face_recognition.min_score:
return None
return label, round(score * blur_factor, 2)

View File

@@ -36,36 +36,6 @@ MAX_DETECTION_HEIGHT = 1080
MIN_MATCHING_FACES = 2
def weighted_average_by_area(
    results_list: list[tuple[str, float, int]],
) -> tuple[str, float]:
    """Return the prominent face name and its area-weighted average score.

    Each entry in ``results_list`` is ``(name, score, face_area)``; larger
    faces carry more weight in the averaged score. Returns
    ``("unknown", 0.0)`` when fewer than 3 samples exist, when no single
    name accounts for at least 65% of the samples, or when the prominent
    name's total face area is zero (avoids a ZeroDivisionError).
    """
    if len(results_list) < 3:
        return "unknown", 0.0

    score_count: dict[str, int] = {}
    weighted_scores: dict[str, float] = {}
    total_face_areas: dict[str, float] = {}

    for name, score, face_area in results_list:
        if name not in weighted_scores:
            score_count[name] = 1
            weighted_scores[name] = 0.0
            total_face_areas[name] = 0.0
        else:
            score_count[name] += 1

        # every sample contributes, including the first occurrence of a name
        weighted_scores[name] += score * face_area
        total_face_areas[name] += face_area

    # fix: pick the MOST FREQUENT name; a bare max() over a dict compares
    # keys and would return the lexicographically greatest name instead
    prominent_name = max(score_count, key=score_count.get)

    # if a single name is not prominent in the history then we are not confident
    if score_count[prominent_name] / len(results_list) < 0.65:
        return "unknown", 0.0

    # all-zero face areas would otherwise divide by zero
    if total_face_areas[prominent_name] == 0:
        return "unknown", 0.0

    return prominent_name, weighted_scores[prominent_name] / total_face_areas[
        prominent_name
    ]
class FaceRealTimeProcessor(RealTimeProcessorApi):
def __init__(
self,
@@ -271,6 +241,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
sub_label, score = res
if score < self.face_config.unknown_score:
sub_label = "unknown"
logger.debug(
f"Detected best face for person as: {sub_label} with probability {score}"
)
@@ -288,7 +261,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.person_face_history[id].append(
(sub_label, score, face_frame.shape[0] * face_frame.shape[1])
)
(weighted_sub_label, weighted_score) = weighted_average_by_area(
(weighted_sub_label, weighted_score) = self.weighted_average_by_area(
self.person_face_history[id]
)
@@ -415,3 +388,34 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
def expire_object(self, object_id: str):
    """Drop the accumulated face history for an expired tracked object."""
    # pop with a default is a no-op when the id was never tracked
    self.person_face_history.pop(object_id, None)
def weighted_average_by_area(
    self, results_list: list[tuple[str, float, int]]
) -> tuple[str, float]:
    """Return the prominent face name and its area-weighted average score.

    Each entry in ``results_list`` is ``(name, score, face_area)``; larger
    faces carry more weight in the averaged score. When internal face
    detection is used (``self.requires_face_detection``), fewer samples
    accumulate, so only 1 is required before deciding; otherwise 3.
    Returns ``("unknown", 0.0)`` when too few samples exist, when no single
    name accounts for at least 65% of the samples, or when the prominent
    name's total face area is zero (avoids a ZeroDivisionError).
    """
    # face detection yields fewer crops per object, so lower the threshold
    min_faces = 1 if self.requires_face_detection else 3

    if len(results_list) < min_faces:
        return "unknown", 0.0

    score_count: dict[str, int] = {}
    weighted_scores: dict[str, float] = {}
    total_face_areas: dict[str, float] = {}

    for name, score, face_area in results_list:
        if name not in weighted_scores:
            score_count[name] = 1
            weighted_scores[name] = 0.0
            total_face_areas[name] = 0.0
        else:
            score_count[name] += 1

        # every sample contributes, including the first occurrence of a name
        weighted_scores[name] += score * face_area
        total_face_areas[name] += face_area

    # fix: pick the MOST FREQUENT name; a bare max() over a dict compares
    # keys and would return the lexicographically greatest name instead
    prominent_name = max(score_count, key=score_count.get)

    # if a single name is not prominent in the history then we are not confident
    if score_count[prominent_name] / len(results_list) < 0.65:
        return "unknown", 0.0

    # all-zero face areas would otherwise divide by zero
    if total_face_areas[prominent_name] == 0:
        return "unknown", 0.0

    return prominent_name, weighted_scores[prominent_name] / total_face_areas[
        prominent_name
    ]