Face recognition reprocess (#16212)

* Implement update topic

* Add API for reprocessing face

* Get reprocess working

* Fix crash when no faces exist

* Simplify
Nicolas Mowen 2025-01-29 07:41:35 -07:00 committed by Blake Blackshear
parent 6f4002a56f
commit 1c3527f5c4
5 changed files with 112 additions and 4 deletions

View File

@@ -100,6 +100,39 @@ def train_face(request: Request, name: str, body: dict = None):
    )


@router.post("/faces/reprocess")
def reclassify_face(request: Request, body: dict = None):
    if not request.app.frigate_config.face_recognition.enabled:
        return JSONResponse(
            status_code=400,
            content={"message": "Face recognition is not enabled.", "success": False},
        )

    json: dict[str, any] = body or {}
    training_file = os.path.join(
        FACE_DIR, f"train/{sanitize_filename(json.get('training_file', ''))}"
    )

    if not training_file or not os.path.isfile(training_file):
        return JSONResponse(
            content=(
                {
                    "success": False,
                    "message": f"Invalid filename or no file exists: {training_file}",
                }
            ),
            status_code=404,
        )

    context: EmbeddingsContext = request.app.embeddings
    response = context.reprocess_face(training_file)

    return JSONResponse(
        content=response,
        status_code=200,
    )


@router.post("/faces/{name}/delete")
def deregister_faces(request: Request, name: str, body: dict = None):
    if not request.app.frigate_config.face_recognition.enabled:
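
For reference, the new endpoint can be exercised directly over HTTP. A minimal sketch in Python, assuming the router above is mounted under Frigate's /api prefix, that the API is reachable at http://localhost:5000, and that "some_attempt.webp" is a hypothetical file in clips/faces/train:

# Minimal sketch of calling the new reprocess endpoint (assumptions noted above).
import requests

resp = requests.post(
    "http://localhost:5000/api/faces/reprocess",
    # Only the filename is sent; the handler sanitizes it and joins it
    # with FACE_DIR/train before handing the full path to the processor.
    json={"training_file": "some_attempt.webp"},
)
print(resp.status_code, resp.json())

Per the handler above, a missing file comes back as 404 with "Invalid filename or no file exists", and a disabled face recognition config comes back as 400.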

View File

@@ -14,6 +14,7 @@ class EmbeddingsRequestEnum(Enum):
    embed_thumbnail = "embed_thumbnail"
    generate_search = "generate_search"
    register_face = "register_face"
    reprocess_face = "reprocess_face"


class EmbeddingsResponder:
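
The new enum member is only a topic string: it is what ties EmbeddingsContext.reprocess_face() (further down) to the matching elif branch in the face processor's request handler (next file). A minimal standalone sketch of that dispatch pattern, not Frigate's actual responder code:

# Standalone sketch of topic-string dispatch (illustration only, not Frigate code).
from enum import Enum
from typing import Optional


class Topic(Enum):
    register_face = "register_face"
    reprocess_face = "reprocess_face"


def handle_request(topic: str, request_data: dict) -> Optional[dict]:
    # Each topic maps to one handler branch; unknown topics fall through to None.
    if topic == Topic.register_face.value:
        return {"success": True, "message": "registered"}
    elif topic == Topic.reprocess_face.value:
        return {"success": True, "message": f"reprocessed {request_data['image_file']}"}
    return None


print(handle_request(Topic.reprocess_face.value, {"image_file": "some_attempt.webp"}))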

View File

@@ -5,6 +5,7 @@ import datetime
import logging
import os
import random
import shutil
import string
from typing import Optional
@@ -32,7 +33,7 @@ class FaceProcessor(RealTimeProcessorApi):
        self.face_config = config.face_recognition
        self.face_detector: cv2.FaceDetectorYN = None
        self.landmark_detector: cv2.face.FacemarkLBF = None
        self.face_recognizer: cv2.face.LBPHFaceRecognizer = None
        self.recognizer: cv2.face.LBPHFaceRecognizer = None
        self.requires_face_detection = "face" not in self.config.objects.all_objects
        self.detected_faces: dict[str, float] = {}
@@ -113,6 +114,9 @@ class FaceProcessor(RealTimeProcessorApi):
                faces.append(img)
                labels.append(idx)

        if not faces:
            return

        self.recognizer: cv2.face.LBPHFaceRecognizer = (
            cv2.face.LBPHFaceRecognizer_create(
                radius=2, threshold=(1 - self.face_config.min_score) * 1000
@@ -211,9 +215,12 @@ class FaceProcessor(RealTimeProcessorApi):
        if not self.landmark_detector:
            return None

        if not self.label_map:
        if not self.recognizer:
            self.__build_classifier()

        if not self.recognizer:
            return None

        img = cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)
        img = self.__align_face(img, img.shape[1], img.shape[0])
        index, distance = self.recognizer.predict(img)
@@ -400,6 +407,35 @@ class FaceProcessor(RealTimeProcessorApi):
                "message": "Successfully registered face.",
                "success": True,
            }
        elif topic == EmbeddingsRequestEnum.reprocess_face.value:
            current_file: str = request_data["image_file"]
            # the event id is everything up to the second "-" in the path
            id = current_file[0 : current_file.index("-", current_file.index("-") + 1)]
            # keep the trailing "-<score>" segment of the original filename
            face_score = current_file[current_file.rfind("-") : current_file.rfind(".")]
            img = None

            if current_file:
                img = cv2.imread(current_file)

            if img is None:
                return {
                    "message": "Invalid image file.",
                    "success": False,
                }

            res = self.__classify_face(img)

            if not res:
                return

            sub_label, score = res

            if self.config.face_recognition.save_attempts:
                # write face to library
                folder = os.path.join(FACE_DIR, "train")
                new_file = os.path.join(
                    folder, f"{id}-{sub_label}-{score}-{face_score}.webp"
                )
                shutil.move(current_file, new_file)

    def expire_object(self, object_id: str):
        if object_id in self.detected_faces:
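
Two details in this file are worth noting: the classifier is now built lazily the first time __classify_face() needs it (and classification bails out when no training faces exist, which is the crash fix from the commit message), and the configured min_score is converted into an LBPH distance threshold via (1 - min_score) * 1000. A small sketch of that mapping; the 0.8 value is only an example rather than a config default, and cv2.face requires opencv-contrib-python:

# Sketch: mapping a min_score in [0, 1] to an LBPH distance threshold.
# LBPH predict() returns (label, distance) where a lower distance means a
# closer match, so a higher min_score must become a smaller allowed distance.
import cv2  # cv2.face needs the opencv-contrib-python build

min_score = 0.8  # example value only
threshold = (1 - min_score) * 1000  # -> 200.0

recognizer = cv2.face.LBPHFaceRecognizer_create(radius=2, threshold=threshold)
# With the threshold set, predict() reports label -1 for faces whose best
# distance exceeds it, which the processor can treat as "unknown".
print(threshold)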

View File

@@ -211,6 +211,11 @@ class EmbeddingsContext:
        return self.db.execute_sql(sql_query).fetchall()

    def reprocess_face(self, face_file: str) -> dict[str, any]:
        return self.requestor.send_data(
            EmbeddingsRequestEnum.reprocess_face.value, {"image_file": face_file}
        )

    def clear_face_classifier(self) -> None:
        self.requestor.send_data(
            EmbeddingsRequestEnum.clear_face_classifier.value, None
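
reprocess_face() takes the full path to the attempt file; the API handler builds that path from FACE_DIR/train plus the sanitized filename. A hedged sketch of batch usage, assuming an already-initialized EmbeddingsContext and that FACE_DIR is importable from frigate.const (import path assumed):

# Sketch: reprocess every saved training attempt through EmbeddingsContext.
import os

from frigate.const import FACE_DIR  # assumed import path


def reprocess_all_attempts(context) -> None:
    train_dir = os.path.join(FACE_DIR, "train")

    if not os.path.isdir(train_dir):
        return

    for name in os.listdir(train_dir):
        file = os.path.join(train_dir, name)

        if os.path.isfile(file):
            # Same call the reprocess API handler makes for a single file.
            print(name, context.reprocess_face(file))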

View File

@@ -23,7 +23,7 @@ import { cn } from "@/lib/utils";
import { FrigateConfig } from "@/types/frigateConfig";
import axios from "axios";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { LuImagePlus, LuTrash2 } from "react-icons/lu";
import { LuImagePlus, LuRefreshCw, LuTrash2 } from "react-icons/lu";
import { toast } from "sonner";
import useSWR from "swr";
@@ -274,6 +274,30 @@ function FaceAttempt({
    [image, onRefresh],
  );

  const onReprocess = useCallback(() => {
    axios
      .post(`/faces/reprocess`, { training_file: image })
      .then((resp) => {
        if (resp.status == 200) {
          toast.success(`Successfully reprocessed face.`, {
            position: "top-center",
          });
          onRefresh();
        }
      })
      .catch((error) => {
        if (error.response?.data?.message) {
          toast.error(`Failed to reprocess: ${error.response.data.message}`, {
            position: "top-center",
          });
        } else {
          toast.error(`Failed to reprocess: ${error.message}`, {
            position: "top-center",
          });
        }
      });
  }, [image, onRefresh]);

  const onDelete = useCallback(() => {
    axios
      .post(`/faces/train/delete`, { ids: [image] })
@@ -301,7 +325,7 @@ function FaceAttempt({
  return (
    <div className="relative flex flex-col rounded-lg">
      <div className="w-full overflow-hidden rounded-t-lg border border-t-0 *:text-card-foreground">
        <img className="h-40" src={`${baseUrl}clips/faces/train/${image}`} />
        <img className="size-40" src={`${baseUrl}clips/faces/train/${image}`} />
      </div>
      <div className="rounded-b-lg bg-card p-2">
        <div className="flex w-full flex-row items-center justify-between gap-2">
@@ -340,6 +364,15 @@ function FaceAttempt({
            </DropdownMenu>
            <TooltipContent>Train Face as Person</TooltipContent>
          </Tooltip>
          <Tooltip>
            <TooltipTrigger>
              <LuRefreshCw
                className="size-5 cursor-pointer text-primary-variant hover:text-primary"
                onClick={() => onReprocess()}
              />
            </TooltipTrigger>
            <TooltipContent>Reprocess Face Attempt</TooltipContent>
          </Tooltip>
          <Tooltip>
            <TooltipTrigger>
              <LuTrash2