Face UI cleanup (#17472)
* Add note
* Sort by event id
* Fix reprocess causing shift
* Move event group to separate comp
* Handle selecting events
* Implement event selection
* Implement selected handler
* Handle right click
* Toggle ctrl + a
* Stop propagation
* Fix
parent 1dd5007fa8
commit 207d1d2806
@@ -108,6 +108,10 @@ Once straight-on images are performing well, start choosing slightly off-angle i
 
 It is important to methodically add photos to the library, bulk importing photos (especially from a general photo library) will lead to over-fitting in that particular scenario and hurt recognition performance.
 
+### Why can't I bulk reprocess faces?
+
+Face embedding models work by breaking apart faces into different features. This means that when reprocessing an image, only images from a similar angle will have its score affected.
+
 ### Why do unknown people score similarly to known people?
 
 This can happen for a few different reasons, but this is usually an indicator that the training set needs to be improved. This is often related to over-fitting:
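The added FAQ entry ("only images from a similar angle will have its score affected") follows from how embedding comparison generally works: each training image contributes its own vector, and a query face is scored against those vectors individually, so adding or reprocessing an off-angle image barely moves the score of a frontal query. A minimal TypeScript sketch of that idea — illustrative only; the function names and the cosine-similarity metric are assumptions, not Frigate's actual scoring code:

```typescript
// Cosine similarity between two face-embedding vectors.
function cosineSimilarity(a: number[], b: number[]): number {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

// Score a query embedding against every training embedding for a person and
// keep the best match; a newly added off-angle training vector only changes
// the result for queries that are themselves close to that angle.
function bestScore(query: number[], trainingSet: number[][]): number {
  return Math.max(...trainingSet.map((t) => cosineSimilarity(query, t)));
}
```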
@@ -272,23 +272,10 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             f"Detected best face for person as: {sub_label} with probability {score}"
         )
 
-        if self.config.face_recognition.save_attempts:
-            # write face to library
-            folder = os.path.join(FACE_DIR, "train")
-            file = os.path.join(folder, f"{id}-{sub_label}-{score}-0.webp")
-            os.makedirs(folder, exist_ok=True)
-            cv2.imwrite(file, face_frame)
-
-            files = sorted(
-                filter(lambda f: (f.endswith(".webp")), os.listdir(folder)),
-                key=lambda f: os.path.getctime(os.path.join(folder, f)),
-                reverse=True,
-            )
-
-            # delete oldest face image if maximum is reached
-            if len(files) > self.config.face_recognition.save_attempts:
-                os.unlink(os.path.join(folder, files[-1]))
+        self.write_face_attempt(
+            face_frame, id, datetime.datetime.now().timestamp(), sub_label, score
+        )
 
         if id not in self.person_face_history:
             self.person_face_history[id] = []
@@ -383,9 +370,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             }
         elif topic == EmbeddingsRequestEnum.reprocess_face.value:
             current_file: str = request_data["image_file"]
-            id = current_file[0 : current_file.index("-", current_file.index("-") + 1)]
-            face_score = current_file[current_file.rfind("-") : current_file.rfind(".")]
+            (id_time, id_rand, timestamp, _, _) = current_file.split("-")
             img = None
+            id = f"{id_time}-{id_rand}"
 
             if current_file:
                 img = cv2.imread(current_file)
@@ -411,7 +398,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             folder = os.path.join(FACE_DIR, "train")
             os.makedirs(folder, exist_ok=True)
             new_file = os.path.join(
-                folder, f"{id}-{sub_label}-{score}-{face_score}.webp"
+                folder, f"{id}-{timestamp}-{sub_label}-{score}.webp"
             )
             shutil.move(current_file, new_file)
 
@@ -461,3 +448,30 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         weighted_average = weighted_scores[best_name] / total_weights[best_name]
 
         return best_name, weighted_average
+
+    def write_face_attempt(
+        self,
+        frame: np.ndarray,
+        event_id: str,
+        timestamp: float,
+        sub_label: str,
+        score: float,
+    ) -> None:
+        if self.config.face_recognition.save_attempts:
+            # write face to library
+            folder = os.path.join(FACE_DIR, "train")
+            file = os.path.join(
+                folder, f"{event_id}-{timestamp}-{sub_label}-{score}.webp"
+            )
+            os.makedirs(folder, exist_ok=True)
+            cv2.imwrite(file, frame)
+
+            files = sorted(
+                filter(lambda f: (f.endswith(".webp")), os.listdir(folder)),
+                key=lambda f: os.path.getctime(os.path.join(folder, f)),
+                reverse=True,
+            )
+
+            # delete oldest face image if maximum is reached
+            if len(files) > self.config.face_recognition.save_attempts:
+                os.unlink(os.path.join(folder, files[-1]))
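After these backend hunks, attempt files follow one layout, `{event_id}-{timestamp}-{sub_label}-{score}.webp`, where the event id itself contains a dash (`{id_time}-{id_rand}`), so a filename splits into five dash-separated fields; the reprocess handler above and the web UI grouping further down both parse it this way. A small TypeScript sketch of that parsing, mirroring the `split("-")` logic in the diff — the type and function names are mine, not Frigate's, and the sample filename is illustrative:

```typescript
type RecognizedFace = {
  filename: string;
  eventId: string;
  timestamp: number;
  name: string;
  score: number;
};

// Parse "<idTime>-<idRand>-<timestamp>-<name>-<score>.webp" into its fields.
// Returns null for names that do not match the expected five-part layout.
function parseAttemptFilename(filename: string): RecognizedFace | null {
  const parts = filename.split("-");
  if (parts.length < 5) {
    return null;
  }
  return {
    filename,
    eventId: `${parts[0]}-${parts[1]}`,
    timestamp: Number.parseFloat(parts[2]),
    name: parts[3],
    score: Number.parseFloat(parts[4]), // parseFloat stops before the ".webp" suffix
  };
}

// Example (hypothetical filename):
// parseAttemptFilename("1714000000.123456-abc123-1714000100.5-jane-0.92.webp")
// -> { eventId: "1714000000.123456-abc123", timestamp: 1714000100.5, name: "jane", score: 0.92 }
```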
@@ -33,6 +33,7 @@ export default function useContextMenu(
       };
     } else {
       const context = (e: MouseEvent) => {
+        e.stopPropagation();
         e.preventDefault();
         callback();
       };
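The one-line change adds `e.stopPropagation()` to the hook's `contextmenu` handler, which keeps a right-click that is already handled here from also bubbling to ancestor handlers (the new `FaceAttemptGroup` below attaches its own `onContextMenu`). A minimal plain-DOM sketch of that pattern — the element ids are placeholders, not Frigate markup:

```typescript
// Nested contextmenu handlers: without stopPropagation() on the inner element,
// a right-click on the face image would also trigger the group's handler.
const group = document.getElementById("face-group")!;
const face = document.getElementById("face-image")!;

group.addEventListener("contextmenu", (e) => {
  e.preventDefault();
  console.log("toggle selection for the whole group");
});

face.addEventListener("contextmenu", (e) => {
  e.stopPropagation(); // keep the event from reaching the group handler
  e.preventDefault();
  console.log("toggle selection for this face only");
});
```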
@@ -142,29 +142,33 @@ export default function FaceLibrary() {
 
   const [selectedFaces, setSelectedFaces] = useState<string[]>([]);
 
-  const onClickFace = useCallback(
-    (imageId: string, ctrl: boolean) => {
+  const onClickFaces = useCallback(
+    (images: string[], ctrl: boolean) => {
       if (selectedFaces.length == 0 && !ctrl) {
         return;
       }
 
-      const index = selectedFaces.indexOf(imageId);
-
-      if (index != -1) {
-        if (selectedFaces.length == 1) {
-          setSelectedFaces([]);
-        } else {
-          const copy = [
-            ...selectedFaces.slice(0, index),
-            ...selectedFaces.slice(index + 1),
-          ];
-          setSelectedFaces(copy);
-        }
-      } else {
-        const copy = [...selectedFaces];
-        copy.push(imageId);
-        setSelectedFaces(copy);
-      }
+      let newSelectedFaces = [...selectedFaces];
+
+      images.forEach((imageId) => {
+        const index = newSelectedFaces.indexOf(imageId);
+
+        if (index != -1) {
+          if (selectedFaces.length == 1) {
+            newSelectedFaces = [];
+          } else {
+            const copy = [
+              ...newSelectedFaces.slice(0, index),
+              ...newSelectedFaces.slice(index + 1),
+            ];
+            newSelectedFaces = copy;
+          }
+        } else {
+          newSelectedFaces.push(imageId);
+        }
+      });
+
+      setSelectedFaces(newSelectedFaces);
     },
     [selectedFaces, setSelectedFaces],
   );
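The reworked `onClickFaces` toggles a whole batch of images and calls `setSelectedFaces` once at the end, instead of updating state per image, which is what lets a group click select or deselect several attempts at a time. The same semantics as a standalone helper, for reference — a sketch, not the component code; the name `toggleFaces` is mine and the hook's early-return guard (nothing selected and no ctrl key) is omitted:

```typescript
// Toggle each clicked image in the current selection and return the new
// selection: already-selected images are removed, the rest are added.
function toggleFaces(selected: string[], clicked: string[]): string[] {
  let next = [...selected];
  for (const imageId of clicked) {
    const index = next.indexOf(imageId);
    if (index !== -1) {
      next = [...next.slice(0, index), ...next.slice(index + 1)];
    } else {
      next.push(imageId);
    }
  }
  return next;
}

// toggleFaces(["a.webp"], ["a.webp", "b.webp"]) -> ["b.webp"]
```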
@@ -212,8 +216,12 @@ export default function FaceLibrary() {
       switch (key) {
         case "a":
           if (modifiers.ctrl) {
+            if (selectedFaces.length) {
+              setSelectedFaces([]);
+            } else {
               setSelectedFaces([...trainImages]);
+            }
           }
           break;
         case "Escape":
           setSelectedFaces([]);
@@ -253,6 +261,16 @@ export default function FaceLibrary() {
           />
           {selectedFaces?.length > 0 ? (
             <div className="flex items-center justify-center gap-2">
+              <div className="mx-1 flex w-48 items-center justify-center text-sm text-muted-foreground">
+                <div className="p-1">{`${selectedFaces.length} selected`}</div>
+                <div className="p-1">{"|"}</div>
+                <div
+                  className="cursor-pointer p-2 text-primary hover:rounded-lg hover:bg-secondary"
+                  onClick={() => setSelectedFaces([])}
+                >
+                  {t("button.unselect", { ns: "common" })}
+                </div>
+              </div>
               <Button
                 className="flex gap-2"
                 onClick={() => onDelete("train", selectedFaces)}
@@ -283,7 +301,7 @@ export default function FaceLibrary() {
             attemptImages={trainImages}
             faceNames={faces}
             selectedFaces={selectedFaces}
-            onClickFace={onClickFace}
+            onClickFaces={onClickFaces}
             onRefresh={refreshFaces}
           />
         ) : (
@@ -391,7 +409,7 @@ type TrainingGridProps = {
   attemptImages: string[];
   faceNames: string[];
   selectedFaces: string[];
-  onClickFace: (image: string, ctrl: boolean) => void;
+  onClickFaces: (images: string[], ctrl: boolean) => void;
   onRefresh: () => void;
 };
 function TrainingGrid({
@@ -399,34 +417,42 @@ function TrainingGrid({
   attemptImages,
   faceNames,
   selectedFaces,
-  onClickFace,
+  onClickFaces,
   onRefresh,
 }: TrainingGridProps) {
-  const { t } = useTranslation(["views/faceLibrary", "views/explore"]);
-  const navigate = useNavigate();
+  const { t } = useTranslation(["views/faceLibrary"]);
 
   // face data
 
   const faceGroups = useMemo(() => {
     const groups: { [eventId: string]: RecognizedFaceData[] } = {};
 
-    Array.from(new Set(attemptImages))
-      .sort()
-      .reverse()
-      .forEach((image) => {
+    const faces = attemptImages
+      .map((image) => {
         const parts = image.split("-");
-        const data = {
-          filename: image,
-          timestamp: Number.parseFloat(parts[0]),
-          eventId: `${parts[0]}-${parts[1]}`,
-          name: parts[2],
-          score: Number.parseFloat(parts[3]),
-        };
 
-        if (groups[data.eventId]) {
-          groups[data.eventId].push(data);
+        try {
+          return {
+            filename: image,
+            timestamp: Number.parseFloat(parts[2]),
+            eventId: `${parts[0]}-${parts[1]}`,
+            name: parts[3],
+            score: Number.parseFloat(parts[4]),
+          };
+        } catch {
+          return null;
+        }
+      })
+      .filter((v) => v != null);
+
+    faces
+      .sort((a, b) => a.eventId.localeCompare(b.eventId))
+      .reverse()
+      .forEach((face) => {
+        if (groups[face.eventId]) {
+          groups[face.eventId].push(face);
         } else {
-          groups[data.eventId] = [data];
+          groups[face.eventId] = [face];
         }
       });
 
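With the new filename layout, the grid groups attempts by the parsed `eventId` and orders the groups by event id in reverse; event ids appear to begin with a timestamp, which is presumably why sorting by id gives a roughly chronological, newest-first order. A small sketch of the resulting shape, building on the `parseAttemptFilename` and `RecognizedFace` sketch above — again my own helper, not the component code:

```typescript
// Group parsed attempts by event id, newest event id first.
function groupByEvent(filenames: string[]): Record<string, RecognizedFace[]> {
  const groups: Record<string, RecognizedFace[]> = {};
  filenames
    .map(parseAttemptFilename)
    .filter((face): face is RecognizedFace => face !== null)
    .sort((a, b) => b.eventId.localeCompare(a.eventId))
    .forEach((face) => {
      if (!groups[face.eventId]) {
        groups[face.eventId] = [];
      }
      groups[face.eventId].push(face);
    });
  return groups;
}
```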
@@ -507,14 +533,102 @@ function TrainingGrid({
       <div className="scrollbar-container flex flex-wrap gap-2 overflow-y-scroll p-1">
         {Object.entries(faceGroups).map(([key, group]) => {
           const event = events?.find((ev) => ev.id == key);
+
+          return (
+            <FaceAttemptGroup
+              key={key}
+              config={config}
+              group={group}
+              event={event}
+              faceNames={faceNames}
+              selectedFaces={selectedFaces}
+              onClickFaces={onClickFaces}
+              onSelectEvent={setSelectedEvent}
+              onRefresh={onRefresh}
+            />
+          );
+        })}
+      </div>
+    </>
+  );
+}
+
+type FaceAttemptGroupProps = {
+  config: FrigateConfig;
+  group: RecognizedFaceData[];
+  event?: Event;
+  faceNames: string[];
+  selectedFaces: string[];
+  onClickFaces: (image: string[], ctrl: boolean) => void;
+  onSelectEvent: (event: Event) => void;
+  onRefresh: () => void;
+};
+function FaceAttemptGroup({
+  config,
+  group,
+  event,
+  faceNames,
+  selectedFaces,
+  onClickFaces,
+  onSelectEvent,
+  onRefresh,
+}: FaceAttemptGroupProps) {
+  const navigate = useNavigate();
+  const { t } = useTranslation(["views/faceLibrary", "views/explore"]);
+
+  // data
+
+  const allFacesSelected = useMemo(
+    () => group.every((face) => selectedFaces.includes(face.filename)),
+    [group, selectedFaces],
+  );
+
+  // interaction
+
+  const handleClickEvent = useCallback(
+    (meta: boolean) => {
+      if (event && selectedFaces.length == 0 && !meta) {
+        onSelectEvent(event);
+      } else {
+        const anySelected =
+          group.find((face) => selectedFaces.includes(face.filename)) !=
+          undefined;
+
+        if (anySelected) {
+          // deselect all
+          const toDeselect: string[] = [];
+          group.forEach((face) => {
+            if (selectedFaces.includes(face.filename)) {
+              toDeselect.push(face.filename);
+            }
+          });
+          onClickFaces(toDeselect, false);
+        } else {
+          // select all
+          onClickFaces(
+            group.map((face) => face.filename),
+            true,
+          );
+        }
+      }
+    },
+    [event, group, selectedFaces, onClickFaces, onSelectEvent],
+  );
 
   return (
     <div
-      key={key}
       className={cn(
-        "flex flex-col gap-2 rounded-lg bg-card p-2",
+        "flex cursor-pointer flex-col gap-2 rounded-lg bg-card p-2 outline outline-[3px]",
         isMobile && "w-full",
+        allFacesSelected
+          ? "shadow-selected outline-selected"
+          : "outline-transparent duration-500",
       )}
+      onClick={(e) => handleClickEvent(e.metaKey)}
+      onContextMenu={(e) => {
+        e.stopPropagation();
+        e.preventDefault();
+        handleClickEvent(true);
+      }}
    >
       <div className="flex flex-row justify-between">
         <div className="capitalize">
@@ -560,12 +674,14 @@ function TrainingGrid({
             data={data}
             faceNames={faceNames}
             recognitionConfig={config.face_recognition}
-            selected={selectedFaces.includes(data.filename)}
+            selected={
+              allFacesSelected ? false : selectedFaces.includes(data.filename)
+            }
             onClick={(data, meta) => {
               if (meta || selectedFaces.length > 0) {
-                onClickFace(data.filename, true);
+                onClickFaces([data.filename], true);
               } else if (event) {
-                setSelectedEvent(event);
+                onSelectEvent(event);
               }
             }}
             onRefresh={onRefresh}
@@ -574,10 +690,6 @@ function TrainingGrid({
         </div>
       </div>
   );
-      })}
-    </div>
-  </>
-  );
 }
 
 type FaceAttemptProps = {
@@ -693,7 +805,10 @@ function FaceAttempt({
           ref={imgRef}
           className={cn("size-44", isMobile && "w-full")}
           src={`${baseUrl}clips/faces/train/${data.filename}`}
-          onClick={(e) => onClick(data, e.metaKey || e.ctrlKey)}
+          onClick={(e) => {
+            e.stopPropagation();
+            onClick(data, e.metaKey || e.ctrlKey);
+          }}
         />
         <div className="absolute bottom-1 right-1 z-10 rounded-lg bg-black/50 px-2 py-1 text-xs text-white">
           <TimeAgo
@@ -805,7 +920,7 @@ function FaceImage({ name, image, onDelete }: FaceImageProps) {
     <div className="relative flex flex-col rounded-lg">
       <div
         className={cn(
-          "w-full overflow-hidden rounded-t-lg border border-t-0 *:text-card-foreground",
+          "w-full overflow-hidden rounded-t-lg *:text-card-foreground",
           isMobile && "flex justify-center",
         )}
       >