mirror of https://github.com/blakeblackshear/frigate.git
synced 2025-04-24 01:16:47 +02:00
Improve face recognition (#15205)
* Validate faces using cosine distance and SVC
* Formatting
* Use opencv instead of face embedding
* Update docs for training data
* Adjust to score system
* Set bounds
* remove face embeddings
* Update writing images
* Add face library page
* Add ability to select file
* Install opencv deps
* Cleanup
* Use different deps
* Move deps
* Cleanup
* Only show face library for desktop
* Implement deleting
* Add ability to upload image
* Add support for uploading images
This commit is contained in:
parent dd7b1be7f4
commit 0e4ff91d6b
@@ -16,7 +16,9 @@ apt-get -qq install --no-install-recommends -y \
     curl \
     lsof \
     jq \
-    nethogs
+    nethogs \
+    libgl1 \
+    libglib2.0-0
 
 # ensure python3 defaults to python3.9
 update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1
@@ -34,8 +34,8 @@ unidecode == 1.3.*
 # Image Manipulation
 numpy == 1.26.*
-opencv-python-headless == 4.9.0.*
+opencv-contrib-python == 4.9.0.*
 scipy == 1.13.*
 scikit-learn == 1.5.*
 # OpenVino & ONNX
 openvino == 2024.3.*
 onnxruntime-openvino == 1.19.* ; platform_machine == 'x86_64'
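The swap from the headless build to opencv-contrib-python matters because the cv2.face module used by the new FaceClassificationModel further down only ships in the contrib distribution. A quick sanity check, not part of the commit:

```python
# Verify the contrib build is present; cv2.face does not exist in
# opencv-python-headless, only in opencv-contrib-python.
import cv2

assert hasattr(cv2, "face"), "cv2.face missing; install opencv-contrib-python"
print(cv2.__version__)
```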
@@ -18,4 +18,18 @@ Face recognition is disabled by default and requires semantic search to be enabled.
 ```yaml
 face_recognition:
   enabled: true
 ```
+
+## Dataset
+
+The number of images needed for a sufficient training set for face recognition varies depending on several factors:
+
+- Complexity of the task: A simple task like recognizing faces of known individuals may require fewer images than a complex task like identifying unknown individuals in a large crowd.
+- Diversity of the dataset: A dataset with diverse images, including variations in lighting, pose, and facial expressions, will require fewer images per person than a less diverse dataset.
+- Desired accuracy: The higher the desired accuracy, the more images are typically needed.
+
+However, here are some general guidelines:
+
+- Minimum: For basic face recognition tasks, a minimum of 10-20 images per person is often recommended.
+- Recommended: For more robust and accurate systems, 30-50 images per person is a good starting point.
+- Ideal: For optimal performance, especially in challenging conditions, 100 or more images per person can be beneficial.
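Training images can also be registered through the face library API added in this commit (see the /faces route changes below). A minimal sketch, assuming a Frigate instance at http://localhost:5000, the usual /api prefix, the requests package, and an illustrative folder of images:

```python
# Hypothetical bulk registration against the new POST /api/faces/{name}
# endpoint; host, port, and folder layout are assumptions.
import os

import requests

FRIGATE = "http://localhost:5000"

def register_face_images(name: str, folder: str) -> None:
    """Upload every image in `folder` as a training example for `name`."""
    for filename in os.listdir(folder):
        with open(os.path.join(folder, filename), "rb") as f:
            resp = requests.post(
                f"{FRIGATE}/api/faces/{name}",
                files={"file": (filename, f, "image/jpeg")},
            )
            resp.raise_for_status()

register_face_images("jane", "./training/jane")
```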
@@ -1,11 +1,14 @@
 """Object classification APIs."""
 
 import logging
+import os
 
 from fastapi import APIRouter, Request, UploadFile
 from fastapi.responses import JSONResponse
+from pathvalidate import sanitize_filename
 
 from frigate.api.defs.tags import Tags
+from frigate.const import FACE_DIR
 from frigate.embeddings import EmbeddingsContext
 
 logger = logging.getLogger(__name__)
@@ -15,20 +18,18 @@ router = APIRouter(tags=[Tags.events])
 @router.get("/faces")
 def get_faces():
-    return JSONResponse(content={"message": "there are faces"})
+    face_dict: dict[str, list[str]] = {}
+
+    for name in os.listdir(FACE_DIR):
+        face_dict[name] = []
+        for file in os.listdir(os.path.join(FACE_DIR, name)):
+            face_dict[name].append(file)
+
+    return JSONResponse(status_code=200, content=face_dict)
 
 
 @router.post("/faces/{name}")
 async def register_face(request: Request, name: str, file: UploadFile):
     # if not file.content_type.startswith("image"):
     #     return JSONResponse(
     #         status_code=400,
     #         content={
     #             "success": False,
     #             "message": "Only an image can be used to register a face.",
     #         },
     #     )
 
     context: EmbeddingsContext = request.app.embeddings
     context.register_face(name, await file.read())
     return JSONResponse(
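get_faces now walks FACE_DIR and returns a mapping of face names to stored crop filenames instead of a placeholder message. An illustrative payload (names and ids are invented; the {label}-{random}.webp pattern comes from the registration path later in this diff):

```python
# Illustrative shape of a GET /api/faces response, not real output.
example_response = {
    "jane": ["jane-ab12cd.webp", "jane-ef34gh.webp"],
    "john": ["john-ij56kl.webp"],
}
```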
@@ -37,8 +38,8 @@ async def register_face(request: Request, name: str, file: UploadFile):
     )
 
 
-@router.delete("/faces")
-def deregister_faces(request: Request, body: dict = None):
+@router.post("/faces/{name}/delete")
+def deregister_faces(request: Request, name: str, body: dict = None):
     json: dict[str, any] = body or {}
     list_of_ids = json.get("ids", "")
 
@@ -49,7 +50,9 @@ def deregister_faces(request: Request, body: dict = None):
         )
 
     context: EmbeddingsContext = request.app.embeddings
-    context.delete_face_ids(list_of_ids)
+    context.delete_face_ids(
+        name, map(lambda file: sanitize_filename(file), list_of_ids)
+    )
     return JSONResponse(
         content=({"success": True, "message": "Successfully deleted faces."}),
         status_code=200,
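Deletion moves from DELETE /faces to POST /faces/{name}/delete with an ids body, which is exactly what the new FaceLibrary page sends (see FaceImage.onDelete below). A hedged example call; host and filename are assumptions:

```python
# Hypothetical call to the renamed delete endpoint.
import requests

resp = requests.post(
    "http://localhost:5000/api/faces/jane/delete",
    json={"ids": ["jane-ab12cd.webp"]},  # filenames as listed by GET /api/faces
)
print(resp.json())
```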
@@ -24,7 +24,10 @@ class SemanticSearchConfig(FrigateBaseModel):
 class FaceRecognitionConfig(FrigateBaseModel):
     enabled: bool = Field(default=False, title="Enable face recognition.")
     threshold: float = Field(
-        default=0.9, title="Face similarity score required to be considered a match."
+        default=170,
+        title="minimum face distance score required to be considered a match.",
+        gt=0.0,
+        le=1.0,
     )
     min_area: int = Field(
         default=500, title="Min area of face box to consider running face recognition."
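This threshold is consumed by FaceClassificationModel further down in this diff, where it is folded into the LBPH recognizer's distance cutoff. A toy illustration of the two conversions; the 0.9 here is an assumed value (the old default), not the new one:

```python
# Both formulas appear in the classifier changes later in this diff; the
# inputs here are assumptions chosen for illustration.
threshold = 0.9
cutoff = (1 - threshold) * 1000  # LBPH rejects candidates with distance > 100
distance = 42.0                  # hypothetical distance from predict()
score = 1.0 - (distance / 1000)  # 0.958, the score compared per tracked object
print(cutoff, score)
```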
@@ -29,10 +29,6 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase):
         ids = ",".join(["?" for _ in event_ids])
         self.execute_sql(f"DELETE FROM vec_descriptions WHERE id IN ({ids})", event_ids)
 
-    def delete_embeddings_face(self, face_ids: list[str]) -> None:
-        ids = ",".join(["?" for _ in face_ids])
-        self.execute_sql(f"DELETE FROM vec_faces WHERE id IN ({ids})", face_ids)
-
     def drop_embeddings_tables(self) -> None:
         self.execute_sql("""
             DROP TABLE vec_descriptions;
@@ -40,11 +36,8 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase):
         self.execute_sql("""
             DROP TABLE vec_thumbnails;
         """)
-        self.execute_sql("""
-            DROP TABLE vec_faces;
-        """)
 
-    def create_embeddings_tables(self, face_recognition: bool) -> None:
+    def create_embeddings_tables(self) -> None:
         """Create vec0 virtual table for embeddings"""
         self.execute_sql("""
             CREATE VIRTUAL TABLE IF NOT EXISTS vec_thumbnails USING vec0(
@@ -58,11 +51,3 @@ class SqliteVecQueueDatabase(SqliteQueueDatabase):
                 description_embedding FLOAT[768] distance_metric=cosine
             );
         """)
-
-        if face_recognition:
-            self.execute_sql("""
-                CREATE VIRTUAL TABLE IF NOT EXISTS vec_faces USING vec0(
-                    id TEXT PRIMARY KEY,
-                    face_embedding FLOAT[512] distance_metric=cosine
-                );
-            """)
@@ -14,7 +14,7 @@ from setproctitle import setproctitle
 
 from frigate.comms.embeddings_updater import EmbeddingsRequestEnum, EmbeddingsRequestor
 from frigate.config import FrigateConfig
-from frigate.const import CONFIG_DIR
+from frigate.const import CONFIG_DIR, FACE_DIR
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event
 from frigate.util.builtin import serialize
@@ -209,8 +209,13 @@ class EmbeddingsContext:
 
         return self.db.execute_sql(sql_query).fetchall()
 
-    def delete_face_ids(self, ids: list[str]) -> None:
-        self.db.delete_embeddings_face(ids)
+    def delete_face_ids(self, face: str, ids: list[str]) -> None:
+        folder = os.path.join(FACE_DIR, face)
+        for id in ids:
+            file_path = os.path.join(folder, id)
+
+            if os.path.isfile(file_path):
+                os.unlink(file_path)
 
     def update_description(self, event_id: str, description: str) -> None:
         self.requestor.send_data(
@@ -3,8 +3,6 @@
 import base64
 import logging
 import os
-import random
-import string
 import time
 
 from numpy import ndarray
@@ -14,7 +12,6 @@ from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import FrigateConfig
 from frigate.const import (
     CONFIG_DIR,
-    FACE_DIR,
     UPDATE_EMBEDDINGS_REINDEX_PROGRESS,
     UPDATE_MODEL_STATE,
 )
@@ -68,7 +65,7 @@ class Embeddings:
         self.requestor = InterProcessRequestor()
 
         # Create tables if they don't exist
-        self.db.create_embeddings_tables(self.config.face_recognition.enabled)
+        self.db.create_embeddings_tables()
 
         models = [
             "jinaai/jina-clip-v1-text_model_fp16.onnx",
@@ -126,22 +123,6 @@ class Embeddings:
             device="GPU" if config.semantic_search.model_size == "large" else "CPU",
         )
 
-        self.face_embedding = None
-
-        if self.config.face_recognition.enabled:
-            self.face_embedding = GenericONNXEmbedding(
-                model_name="facenet",
-                model_file="facenet.onnx",
-                download_urls={
-                    "facenet.onnx": "https://github.com/NickM-27/facenet-onnx/releases/download/v1.0/facenet.onnx",
-                    "facedet.onnx": "https://github.com/opencv/opencv_zoo/raw/refs/heads/main/models/face_detection_yunet/face_detection_yunet_2023mar_int8.onnx",
-                },
-                model_size="large",
-                model_type=ModelTypeEnum.face,
-                requestor=self.requestor,
-                device="GPU",
-            )
-
         self.lpr_detection_model = None
         self.lpr_classification_model = None
         self.lpr_recognition_model = None
@@ -277,40 +258,12 @@ class Embeddings:
 
         return embeddings
 
-    def embed_face(self, label: str, thumbnail: bytes, upsert: bool = False) -> ndarray:
-        embedding = self.face_embedding(thumbnail)[0]
-
-        if upsert:
-            rand_id = "".join(
-                random.choices(string.ascii_lowercase + string.digits, k=6)
-            )
-            id = f"{label}-{rand_id}"
-
-            # write face to library
-            folder = os.path.join(FACE_DIR, label)
-            file = os.path.join(folder, f"{id}.webp")
-            os.makedirs(folder, exist_ok=True)
-
-            # save face image
-            with open(file, "wb") as output:
-                output.write(thumbnail)
-
-            self.db.execute_sql(
-                """
-                INSERT OR REPLACE INTO vec_faces(id, face_embedding)
-                VALUES(?, ?)
-                """,
-                (id, serialize(embedding)),
-            )
-
-        return embedding
-
     def reindex(self) -> None:
         logger.info("Indexing tracked object embeddings...")
 
         self.db.drop_embeddings_tables()
         logger.debug("Dropped embeddings tables.")
-        self.db.create_embeddings_tables(self.config.face_recognition.enabled)
+        self.db.create_embeddings_tables()
         logger.debug("Created embeddings tables.")
 
         # Delete the saved stats file
@@ -3,7 +3,9 @@
 import base64
 import logging
 import os
+import random
 import re
+import string
 import threading
 from multiprocessing.synchronize import Event as MpEvent
 from pathlib import Path
@@ -23,7 +25,12 @@ from frigate.comms.event_metadata_updater import (
 from frigate.comms.events_updater import EventEndSubscriber, EventUpdateSubscriber
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import FrigateConfig
-from frigate.const import CLIPS_DIR, FRIGATE_LOCALHOST, UPDATE_EVENT_DESCRIPTION
+from frigate.const import (
+    CLIPS_DIR,
+    FACE_DIR,
+    FRIGATE_LOCALHOST,
+    UPDATE_EVENT_DESCRIPTION,
+)
 from frigate.embeddings.lpr.lpr import LicensePlateRecognition
 from frigate.events.types import EventTypeEnum
 from frigate.genai import get_genai_client
@@ -70,7 +77,9 @@ class EmbeddingMaintainer(threading.Thread):
         self.requires_face_detection = "face" not in self.config.objects.all_objects
         self.detected_faces: dict[str, float] = {}
         self.face_classifier = (
-            FaceClassificationModel(db) if self.face_recognition_enabled else None
+            FaceClassificationModel(self.config.face_recognition, db)
+            if self.face_recognition_enabled
+            else None
         )
 
         # create communication for updating event descriptions
@@ -145,12 +154,14 @@ class EmbeddingMaintainer(threading.Thread):
         if not self.face_recognition_enabled:
             return False
 
+        rand_id = "".join(
+            random.choices(string.ascii_lowercase + string.digits, k=6)
+        )
+        label = data["face_name"]
+        id = f"{label}-{rand_id}"
+
         if data.get("cropped"):
-            self.embeddings.embed_face(
-                data["face_name"],
-                base64.b64decode(data["image"]),
-                upsert=True,
-            )
+            pass
         else:
             img = cv2.imdecode(
                 np.frombuffer(
@@ -164,12 +175,18 @@ class EmbeddingMaintainer(threading.Thread):
                 return False
 
             face = img[face_box[1] : face_box[3], face_box[0] : face_box[2]]
-            ret, webp = cv2.imencode(
+            ret, thumbnail = cv2.imencode(
                 ".webp", face, [int(cv2.IMWRITE_WEBP_QUALITY), 100]
             )
-            self.embeddings.embed_face(
-                data["face_name"], webp.tobytes(), upsert=True
-            )
+
+        # write face to library
+        folder = os.path.join(FACE_DIR, label)
+        file = os.path.join(folder, f"{id}.webp")
+        os.makedirs(folder, exist_ok=True)
+
+        # save face image
+        with open(file, "wb") as output:
+            output.write(thumbnail.tobytes())
 
         self.face_classifier.clear_classifier()
         return True
@@ -202,7 +219,9 @@ class EmbeddingMaintainer(threading.Thread):
 
         # Create our own thumbnail based on the bounding box and the frame time
         try:
-            yuv_frame = self.frame_manager.get(frame_name, camera_config.frame_shape_yuv)
+            yuv_frame = self.frame_manager.get(
+                frame_name, camera_config.frame_shape_yuv
+            )
         except FileNotFoundError:
             pass
@@ -479,16 +498,7 @@ class EmbeddingMaintainer(threading.Thread):
             ),
         ]
 
-        ret, webp = cv2.imencode(
-            ".webp", face_frame, [int(cv2.IMWRITE_WEBP_QUALITY), 100]
-        )
-
-        if not ret:
-            logger.debug("Not processing face due to error creating cropped image.")
-            return
-
-        embedding = self.embeddings.embed_face("unknown", webp.tobytes(), upsert=False)
-        res = self.face_classifier.classify_face(embedding)
+        res = self.face_classifier.classify_face(face_frame)
 
         if not res:
             return
@@ -499,11 +509,9 @@ class EmbeddingMaintainer(threading.Thread):
             f"Detected best face for person as: {sub_label} with score {score}"
         )
 
-        if score < self.config.face_recognition.threshold or (
-            id in self.detected_faces and score <= self.detected_faces[id]
-        ):
+        if id in self.detected_faces and score <= self.detected_faces[id]:
             logger.debug(
-                f"Recognized face score {score} is less than threshold ({self.config.face_recognition.threshold}) / previous face score ({self.detected_faces.get(id)})."
+                f"Recognized face distance {score} is less than previous face distance ({self.detected_faces.get(id)})."
             )
             return
@@ -4,13 +4,12 @@ import logging
 import os
 from typing import Any, Optional
 
+import cv2
 import numpy as np
 import onnxruntime as ort
 from playhouse.sqliteq import SqliteQueueDatabase
-from sklearn.preprocessing import LabelEncoder, Normalizer
-from sklearn.svm import SVC
 
-from frigate.util.builtin import deserialize
+from frigate.config.semantic_search import FaceRecognitionConfig
 
 try:
     import openvino as ov
@@ -21,6 +20,9 @@ except ImportError:
 logger = logging.getLogger(__name__)
 
 
+MIN_MATCHING_FACES = 2
+
+
 def get_ort_providers(
     force_cpu: bool = False, device: str = "AUTO", requires_fp16: bool = False
 ) -> tuple[list[str], list[dict[str, any]]]:
@@ -157,38 +159,42 @@ class ONNXModelRunner:
 
 
 class FaceClassificationModel:
-    def __init__(self, db: SqliteQueueDatabase):
+    def __init__(self, config: FaceRecognitionConfig, db: SqliteQueueDatabase):
+        self.config = config
         self.db = db
-        self.labeler: Optional[LabelEncoder] = None
-        self.classifier: Optional[SVC] = None
+        self.recognizer = cv2.face.LBPHFaceRecognizer_create(radius=4, threshold=(1 - config.threshold) * 1000)
+        self.label_map: dict[int, str] = {}
 
     def __build_classifier(self) -> None:
-        faces: list[tuple[str, bytes]] = self.db.execute_sql(
-            "SELECT id, face_embedding FROM vec_faces"
-        ).fetchall()
-        embeddings = np.array([deserialize(f[1]) for f in faces])
-        self.labeler = LabelEncoder()
-        norms = Normalizer(norm="l2").transform(embeddings)
-        labels = self.labeler.fit_transform([f[0].split("-")[0] for f in faces])
-        self.classifier = SVC(kernel="linear", probability=True)
-        self.classifier.fit(norms, labels)
+        labels = []
+        faces = []
+
+        dir = "/media/frigate/clips/faces"
+        for idx, name in enumerate(os.listdir(dir)):
+            self.label_map[idx] = name
+            face_folder = os.path.join(dir, name)
+            for image in os.listdir(face_folder):
+                img = cv2.imread(os.path.join(face_folder, image))
+                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+                equ = cv2.equalizeHist(gray)
+                faces.append(equ)
+                labels.append(idx)
+
+        self.recognizer.train(faces, np.array(labels))
 
     def clear_classifier(self) -> None:
         self.classifier = None
         self.labeler = None
 
-    def classify_face(self, embedding: np.ndarray) -> Optional[tuple[str, float]]:
-        if not self.classifier:
+    def classify_face(self, face_image: np.ndarray) -> Optional[tuple[str, float]]:
+        if not self.label_map:
             self.__build_classifier()
 
-        res = self.classifier.predict([embedding])
+        index, distance = self.recognizer.predict(cv2.equalizeHist(cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY)))
 
-        if res is None:
+        if index == -1:
             return None
 
-        label = res[0]
-        probabilities = self.classifier.predict_proba([embedding])[0]
-        return (
-            self.labeler.inverse_transform([label])[0],
-            round(probabilities[label], 2),
-        )
+        score = 1.0 - (distance / 1000)
+        return self.label_map[index], round(score, 2)
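The SVC-on-embeddings classifier is replaced here by OpenCV's LBPH recognizer operating directly on grayscale, histogram-equalized crops. Below is a minimal standalone sketch of the same train-then-predict flow; the folder path, radius, and score mapping mirror the diff, but the query image is a hypothetical input and this is an illustration rather than Frigate's exact runtime behavior:

```python
# Standalone sketch of the LBPH flow used by FaceClassificationModel.
# Assumes opencv-contrib-python is installed and a folder of face crops
# grouped into one subfolder per person, as Frigate writes to FACE_DIR.
import os

import cv2
import numpy as np

FACE_DIR = "/media/frigate/clips/faces"  # same path hardcoded in __build_classifier

recognizer = cv2.face.LBPHFaceRecognizer_create(radius=4)
label_map: dict[int, str] = {}
images: list[np.ndarray] = []
labels: list[int] = []

for idx, name in enumerate(os.listdir(FACE_DIR)):
    label_map[idx] = name
    folder = os.path.join(FACE_DIR, name)
    for file in os.listdir(folder):
        img = cv2.imread(os.path.join(folder, file))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        images.append(cv2.equalizeHist(gray))  # equalization, as in the diff
        labels.append(idx)

recognizer.train(images, np.array(labels))

# predict() returns (label, distance); a lower distance is a closer match.
query = cv2.imread("query_face.webp")  # hypothetical crop to classify
index, distance = recognizer.predict(
    cv2.equalizeHist(cv2.cvtColor(query, cv2.COLOR_BGR2GRAY))
)
score = 1.0 - (distance / 1000)  # same distance-to-score mapping as classify_face
print(label_map.get(index, "unknown"), round(score, 2))
```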
@@ -19,6 +19,7 @@ const ConfigEditor = lazy(() => import("@/pages/ConfigEditor"));
 const System = lazy(() => import("@/pages/System"));
 const Settings = lazy(() => import("@/pages/Settings"));
 const UIPlayground = lazy(() => import("@/pages/UIPlayground"));
+const FaceLibrary = lazy(() => import("@/pages/FaceLibrary"));
 const Logs = lazy(() => import("@/pages/Logs"));
 
 function App() {
@@ -51,6 +52,7 @@ function App() {
             <Route path="/config" element={<ConfigEditor />} />
             <Route path="/logs" element={<Logs />} />
             <Route path="/playground" element={<UIPlayground />} />
+            <Route path="/faces" element={<FaceLibrary />} />
             <Route path="*" element={<Redirect to="/" />} />
           </Routes>
         </Suspense>
web/src/components/overlay/dialog/UploadImageDialog.tsx (new file, 88 lines)
@@ -0,0 +1,88 @@
import { Button } from "@/components/ui/button";
import {
  Dialog,
  DialogContent,
  DialogDescription,
  DialogFooter,
  DialogHeader,
  DialogTitle,
} from "@/components/ui/dialog";
import { Form, FormControl, FormField, FormItem } from "@/components/ui/form";
import { Input } from "@/components/ui/input";
import { zodResolver } from "@hookform/resolvers/zod";
import { useCallback } from "react";
import { useForm } from "react-hook-form";
import { z } from "zod";

type UploadImageDialogProps = {
  open: boolean;
  title: string;
  description?: string;
  setOpen: (open: boolean) => void;
  onSave: (file: File) => void;
};
export default function UploadImageDialog({
  open,
  title,
  description,
  setOpen,
  onSave,
}: UploadImageDialogProps) {
  const formSchema = z.object({
    file: z.instanceof(FileList, { message: "Please select an image file." }),
  });

  const form = useForm<z.infer<typeof formSchema>>({
    resolver: zodResolver(formSchema),
  });
  const fileRef = form.register("file");

  // upload handler

  const onSubmit = useCallback(
    (data: z.infer<typeof formSchema>) => {
      if (!data["file"]) {
        return;
      }

      onSave(data["file"]["0"]);
    },
    [onSave],
  );

  return (
    <Dialog open={open} defaultOpen={false} onOpenChange={setOpen}>
      <DialogContent>
        <DialogHeader>
          <DialogTitle>{title}</DialogTitle>
          {description && <DialogDescription>{description}</DialogDescription>}
        </DialogHeader>
        <Form {...form}>
          <form onSubmit={form.handleSubmit(onSubmit)}>
            <FormField
              control={form.control}
              name="file"
              render={() => (
                <FormItem>
                  <FormControl>
                    <Input
                      className="aspect-video h-40 w-full"
                      type="file"
                      {...fileRef}
                    />
                  </FormControl>
                </FormItem>
              )}
            />
            <DialogFooter className="pt-4">
              <Button onClick={() => setOpen(false)}>Cancel</Button>
              <Button variant="select" type="submit">
                Save
              </Button>
            </DialogFooter>
          </form>
        </Form>
      </DialogContent>
    </Dialog>
  );
}
@@ -1,20 +1,29 @@
 import { ENV } from "@/env";
+import { FrigateConfig } from "@/types/frigateConfig";
 import { NavData } from "@/types/navigation";
 import { useMemo } from "react";
+import { isDesktop } from "react-device-detect";
 import { FaCompactDisc, FaVideo } from "react-icons/fa";
 import { IoSearch } from "react-icons/io5";
 import { LuConstruction } from "react-icons/lu";
 import { MdVideoLibrary } from "react-icons/md";
+import { TbFaceId } from "react-icons/tb";
+import useSWR from "swr";
 
 export const ID_LIVE = 1;
 export const ID_REVIEW = 2;
 export const ID_EXPLORE = 3;
 export const ID_EXPORT = 4;
 export const ID_PLAYGROUND = 5;
+export const ID_FACE_LIBRARY = 6;
 
 export default function useNavigation(
   variant: "primary" | "secondary" = "primary",
 ) {
+  const { data: config } = useSWR<FrigateConfig>("config", {
+    revalidateOnFocus: false,
+  });
+
   return useMemo(
     () =>
       [
@@ -54,7 +63,15 @@ export default function useNavigation(
           url: "/playground",
           enabled: ENV !== "production",
         },
+        {
+          id: ID_FACE_LIBRARY,
+          variant,
+          icon: TbFaceId,
+          title: "Face Library",
+          url: "/faces",
+          enabled: isDesktop && config?.face_recognition.enabled,
+        },
       ] as NavData[],
-    [variant],
+    [config?.face_recognition.enabled, variant],
   );
 }
web/src/pages/FaceLibrary.tsx (new file, 170 lines)
@@ -0,0 +1,170 @@
import { baseUrl } from "@/api/baseUrl";
import Chip from "@/components/indicators/Chip";
import UploadImageDialog from "@/components/overlay/dialog/UploadImageDialog";
import { Button } from "@/components/ui/button";
import { ScrollArea, ScrollBar } from "@/components/ui/scroll-area";
import { Toaster } from "@/components/ui/sonner";
import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
import useOptimisticState from "@/hooks/use-optimistic-state";
import axios from "axios";
import { useCallback, useEffect, useMemo, useRef, useState } from "react";
import { isDesktop } from "react-device-detect";
import { LuImagePlus, LuTrash } from "react-icons/lu";
import { toast } from "sonner";
import useSWR from "swr";

export default function FaceLibrary() {
  const [page, setPage] = useState<string>();
  const [pageToggle, setPageToggle] = useOptimisticState(page, setPage, 100);
  const tabsRef = useRef<HTMLDivElement | null>(null);

  // face data

  const { data: faceData } = useSWR("faces");

  const faces = useMemo<string[]>(
    () => (faceData ? Object.keys(faceData) : []),
    [faceData],
  );
  const faceImages = useMemo<string[]>(
    () => (pageToggle && faceData ? faceData[pageToggle] : []),
    [pageToggle, faceData],
  );

  useEffect(() => {
    if (!pageToggle && faces) {
      setPageToggle(faces[0]);
    }
    // we need to listen on the value of the faces list
    // eslint-disable-next-line react-hooks/exhaustive-deps
  }, [faces]);

  // upload

  const [upload, setUpload] = useState(false);

  const onUploadImage = useCallback(
    (file: File) => {
      const formData = new FormData();
      formData.append("file", file);
      axios.post(`faces/${pageToggle}`, formData, {
        headers: {
          "Content-Type": "multipart/form-data",
        },
      });
    },
    [pageToggle],
  );

  return (
    <div className="flex size-full flex-col p-2">
      <Toaster />

      <UploadImageDialog
        open={upload}
        title="Upload Face Image"
        description={`Upload an image to scan for faces and include for ${pageToggle}`}
        setOpen={setUpload}
        onSave={onUploadImage}
      />

      <div className="relative flex h-11 w-full items-center justify-between">
        <ScrollArea className="w-full whitespace-nowrap">
          <div ref={tabsRef} className="flex flex-row">
            <ToggleGroup
              className="*:rounded-md *:px-3 *:py-4"
              type="single"
              size="sm"
              value={pageToggle}
              onValueChange={(value: string) => {
                if (value) {
                  setPageToggle(value);
                }
              }}
            >
              {Object.values(faces).map((item) => (
                <ToggleGroupItem
                  key={item}
                  className={`flex scroll-mx-10 items-center justify-between gap-2 ${page == "UI settings" ? "last:mr-20" : ""} ${pageToggle == item ? "" : "*:text-muted-foreground"}`}
                  value={item}
                  data-nav-item={item}
                  aria-label={`Select ${item}`}
                >
                  <div className="capitalize">{item}</div>
                </ToggleGroupItem>
              ))}
            </ToggleGroup>
            <ScrollBar orientation="horizontal" className="h-0" />
          </div>
        </ScrollArea>
      </div>
      {pageToggle && (
        <div className="flex flex-wrap gap-2">
          {faceImages.map((image: string) => (
            <FaceImage key={image} name={pageToggle} image={image} />
          ))}
          <Button
            key="upload"
            className="size-40"
            onClick={() => setUpload(true)}
          >
            <LuImagePlus className="size-10" />
          </Button>
        </div>
      )}
    </div>
  );
}

type FaceImageProps = {
  name: string;
  image: string;
};
function FaceImage({ name, image }: FaceImageProps) {
  const [hovered, setHovered] = useState(false);

  const onDelete = useCallback(() => {
    axios
      .post(`/faces/${name}/delete`, { ids: [image] })
      .then((resp) => {
        if (resp.status == 200) {
          toast.error(`Successfully deleted face.`, { position: "top-center" });
        }
      })
      .catch((error) => {
        if (error.response?.data?.message) {
          toast.error(`Failed to delete: ${error.response.data.message}`, {
            position: "top-center",
          });
        } else {
          toast.error(`Failed to delete: ${error.message}`, {
            position: "top-center",
          });
        }
      });
  }, [name, image]);

  return (
    <div
      className="relative h-40"
      onMouseEnter={isDesktop ? () => setHovered(true) : undefined}
      onMouseLeave={isDesktop ? () => setHovered(false) : undefined}
      onClick={isDesktop ? undefined : () => setHovered(!hovered)}
    >
      {hovered && (
        <div className="absolute right-1 top-1">
          <Chip
            className="cursor-pointer rounded-md bg-gray-500 bg-gradient-to-br from-gray-400 to-gray-500"
            onClick={() => onDelete()}
          >
            <LuTrash className="size-4 fill-destructive text-destructive" />
          </Chip>
        </div>
      )}
      <img
        className="h-40 rounded-md"
        src={`${baseUrl}clips/faces/${name}/${image}`}
      />
    </div>
  );
}
@@ -288,6 +288,10 @@ export interface FrigateConfig {
 
   environment_vars: Record<string, unknown>;
 
+  face_recognition: {
+    enabled: boolean;
+  };
+
   ffmpeg: {
     global_args: string[];
     hwaccel_args: string;