Face recognition fixes (#15222)

* Fix nginx max upload size

* Close upload dialog when done and add toasts

* Formatting

* fix ruff
This commit is contained in:
Nicolas Mowen 2024-11-27 12:48:39 -07:00 committed by Blake Blackshear
parent 0e4ff91d6b
commit 66675cf977
3 changed files with 39 additions and 12 deletions

View File

@ -81,6 +81,9 @@ http {
open_file_cache_errors on;
aio on;
# file upload size
client_max_body_size 10M;
# https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
vod_open_file_thread_pool default;
@ -246,8 +249,6 @@ http {
proxy_no_cache $should_not_cache;
add_header X-Cache-Status $upstream_cache_status;
client_max_body_size 10M;
location /api/vod/ {
include auth_request.conf;
proxy_pass http://frigate_api/vod/;

View File

@ -162,7 +162,9 @@ class FaceClassificationModel:
def __init__(self, config: FaceRecognitionConfig, db: SqliteQueueDatabase):
self.config = config
self.db = db
self.recognizer = cv2.face.LBPHFaceRecognizer_create(
radius=4, threshold=(1 - config.threshold) * 1000
)
self.label_map: dict[int, str] = {}
def __build_classifier(self) -> None:
@ -190,11 +192,12 @@ class FaceClassificationModel:
if not self.label_map:
self.__build_classifier()
index, distance = self.recognizer.predict(
cv2.equalizeHist(cv2.cvtColor(face_image, cv2.COLOR_BGR2GRAY))
)
if index == -1:
return None
score = 1.0 - (distance / 1000)
return self.label_map[index], round(score, 2)

View File

@ -20,7 +20,7 @@ export default function FaceLibrary() {
// face data // face data
const { data: faceData } = useSWR("faces"); const { data: faceData, mutate: refreshFaces } = useSWR("faces");
const faces = useMemo<string[]>(
() => (faceData ? Object.keys(faceData) : []),
@ -47,13 +47,36 @@ export default function FaceLibrary() {
(file: File) => {
const formData = new FormData();
formData.append("file", file);
axios
.post(`faces/${pageToggle}`, formData, {
headers: {
"Content-Type": "multipart/form-data",
},
})
.then((resp) => {
if (resp.status == 200) {
setUpload(false);
refreshFaces();
toast.success(
"Successfully uploaded iamge. View the file in the /exports folder.",
{ position: "top-center" },
);
}
})
.catch((error) => {
if (error.response?.data?.message) {
toast.error(
`Failed to upload image: ${error.response.data.message}`,
{ position: "top-center" },
);
} else {
toast.error(`Failed to upload image: ${error.message}`, {
position: "top-center",
});
}
});
},
[pageToggle, refreshFaces],
);
return (