Mirror of https://github.com/blakeblackshear/frigate.git, synced 2025-07-26 13:47:03 +02:00
Fixes (#18338)
* Improve spacing of face selection in mobile drawer
* Fix spacing
* Sort face names alphabetically
* Improve face selection dialog
* Use a state to track when the face image loads: naturalWidth and naturalHeight will always be 0 until the image loads, so we use onLoad and a state to track loading, then calculate the area after it has loaded
* Verify that a camera only tracks objects that are possible to track
* Fix test
* genai docs tweak
* Disable openvino model cache
* Cleanup
* Sanitize floats for estimated speed and angle: users can configure speed zones in such a way that velocity estimates from Norfair cause a value of inf to be stored as an estimated speed. FastAPI doesn't serialize inf as a float, so trying to return this value would result in an API error. Sanitizing the value before storing should correct this (see the sketch below).

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
This commit is contained in: parent 49c6073de6, commit c16e536b46
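The last bullet's rationale can be illustrated with a minimal standalone Python sketch. The standard json module stands in here for FastAPI's strict JSON serialization, and the helper mirrors the sanitize_float added in this commit:

    import json
    import math

    def sanitize_float(value):
        """Replace NaN or inf with 0.0 so the value stays JSON-serializable."""
        if isinstance(value, (int, float)) and not math.isfinite(value):
            return 0.0
        return value

    speed = float("inf")  # degenerate velocity estimate from a misconfigured speed zone

    try:
        json.dumps({"estimated_speed": speed}, allow_nan=False)  # strict JSON rejects inf/NaN
    except ValueError as err:
        print("unsanitized:", err)

    print(json.dumps({"estimated_speed": sanitize_float(speed)}, allow_nan=False))
    # -> {"estimated_speed": 0.0}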
@@ -167,7 +167,7 @@ Analyze the sequence of images containing the {label}. Focus on the likely inten

 :::tip

-Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.
+Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.

 :::

@@ -299,6 +299,22 @@ def verify_motion_and_detect(camera_config: CameraConfig) -> ValueError | None:
         )


+def verify_objects_track(
+    camera_config: CameraConfig, enabled_objects: list[str]
+) -> None:
+    """Verify that a user has not specified an object to track that is not in the labelmap."""
+    valid_objects = [
+        obj for obj in camera_config.objects.track if obj in enabled_objects
+    ]
+
+    if len(valid_objects) != len(camera_config.objects.track):
+        invalid_objects = set(camera_config.objects.track) - set(valid_objects)
+        logger.warning(
+            f"{camera_config.name} is configured to track {list(invalid_objects)} objects, which are not supported by the current model."
+        )
+        camera_config.objects.track = valid_objects
+
+
 def verify_lpr_and_face(
     frigate_config: FrigateConfig, camera_config: CameraConfig
 ) -> ValueError | None:
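As a rough illustration of how the new verify_objects_track check behaves, here is a simplified, self-contained sketch; the camera config stand-in and the label values are hypothetical, not the real CameraConfig model:

    import logging
    from types import SimpleNamespace

    logging.basicConfig(level=logging.WARNING)
    logger = logging.getLogger(__name__)

    # Hypothetical stand-ins for a parsed camera config and the model's labelmap values
    camera_config = SimpleNamespace(
        name="front_door",
        objects=SimpleNamespace(track=["person", "car", "unicorn"]),
    )
    enabled_objects = ["person", "car", "dog", "cat"]

    valid_objects = [obj for obj in camera_config.objects.track if obj in enabled_objects]

    if len(valid_objects) != len(camera_config.objects.track):
        invalid_objects = set(camera_config.objects.track) - set(valid_objects)
        logger.warning(
            f"{camera_config.name} is configured to track {list(invalid_objects)} objects, "
            "which are not supported by the current model."
        )
        camera_config.objects.track = valid_objects  # drops "unicorn", keeps ["person", "car"]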
@@ -471,6 +487,37 @@ class FrigateConfig(FrigateBaseModel):
             exclude_unset=True,
         )

+        for key, detector in self.detectors.items():
+            adapter = TypeAdapter(DetectorConfig)
+            model_dict = (
+                detector
+                if isinstance(detector, dict)
+                else detector.model_dump(warnings="none")
+            )
+            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
+
+            # users should not set model themselves
+            if detector_config.model:
+                detector_config.model = None
+
+            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
+
+            if detector_config.model_path:
+                model_config["path"] = detector_config.model_path
+
+            if "path" not in model_config:
+                if detector_config.type == "cpu":
+                    model_config["path"] = "/cpu_model.tflite"
+                elif detector_config.type == "edgetpu":
+                    model_config["path"] = "/edgetpu_model.tflite"
+
+            model = ModelConfig.model_validate(model_config)
+            model.check_and_load_plus_model(self.plus_api, detector_config.type)
+            model.compute_model_hash()
+            labelmap_objects = model.merged_labelmap.values()
+            detector_config.model = model
+            self.detectors[key] = detector_config
+
         for name, camera in self.cameras.items():
             modified_global_config = global_config.copy()

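The key addition in this hunk is labelmap_objects = model.merged_labelmap.values(), which supplies the set of names the model can actually produce to the per-camera check added below. A small sketch with hypothetical labelmap contents:

    # Illustrative only: a labelmap maps model class indices to object names; its
    # values are the names a detector can produce, which is what verify_objects_track()
    # compares each camera's configured object list against.
    merged_labelmap = {0: "person", 1: "bicycle", 2: "car", 16: "dog"}
    labelmap_objects = merged_labelmap.values()

    print("person" in labelmap_objects)   # True
    print("unicorn" in labelmap_objects)  # False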
@@ -644,6 +691,7 @@ class FrigateConfig(FrigateBaseModel):
             verify_required_zones_exist(camera_config)
             verify_autotrack_zones(camera_config)
             verify_motion_and_detect(camera_config)
+            verify_objects_track(camera_config, labelmap_objects)
             verify_lpr_and_face(self, camera_config)

         self.objects.parse_all_objects(self.cameras)
@@ -655,36 +703,6 @@ class FrigateConfig(FrigateBaseModel):
                 "Frigate+ is configured but clean snapshots are not enabled, submissions to Frigate+ will not be possible./"
             )

-        for key, detector in self.detectors.items():
-            adapter = TypeAdapter(DetectorConfig)
-            model_dict = (
-                detector
-                if isinstance(detector, dict)
-                else detector.model_dump(warnings="none")
-            )
-            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
-
-            # users should not set model themselves
-            if detector_config.model:
-                detector_config.model = None
-
-            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
-
-            if detector_config.model_path:
-                model_config["path"] = detector_config.model_path
-
-            if "path" not in model_config:
-                if detector_config.type == "cpu":
-                    model_config["path"] = "/cpu_model.tflite"
-                elif detector_config.type == "edgetpu":
-                    model_config["path"] = "/edgetpu_model.tflite"
-
-            model = ModelConfig.model_validate(model_config)
-            model.check_and_load_plus_model(self.plus_api, detector_config.type)
-            model.compute_model_hash()
-            detector_config.model = model
-            self.detectors[key] = detector_config
-
         return self

     @field_validator("cameras")
@@ -3,11 +3,9 @@ import os

 import numpy as np
 import openvino as ov
-import openvino.properties as props
 from pydantic import Field
 from typing_extensions import Literal

-from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
 from frigate.util.model import (
@@ -49,10 +47,6 @@ class OvDetector(DetectionApi):
             logger.error(f"OpenVino model file {detector_config.model.path} not found.")
             raise FileNotFoundError

-        os.makedirs(os.path.join(MODEL_CACHE_DIR, "openvino"), exist_ok=True)
-        self.ov_core.set_property(
-            {props.cache_dir: os.path.join(MODEL_CACHE_DIR, "openvino")}
-        )
         self.interpreter = self.ov_core.compile_model(
             model=detector_config.model.path, device_name=detector_config.device
         )
@@ -39,7 +39,7 @@ class TestConfig(unittest.TestCase):
             "description": "Fine tuned model",
             "trainDate": "2023-04-28T23:22:01.262Z",
             "type": "ssd",
-            "supportedDetectors": ["edgetpu"],
+            "supportedDetectors": ["cpu", "edgetpu"],
             "width": 320,
             "height": 320,
             "inputShape": "nhwc",
@@ -18,6 +18,7 @@ from frigate.config import (
 )
 from frigate.const import CLIPS_DIR, THUMB_DIR
 from frigate.review.types import SeverityEnum
+from frigate.util.builtin import sanitize_float
 from frigate.util.image import (
     area,
     calculate_region,
@@ -202,6 +203,11 @@ class TrackedObject:
                     )
                 )

+            # users can configure speed zones incorrectly, so sanitize speed_magnitude
+            # and velocity_angle in case the values come back as inf or NaN
+            speed_magnitude = sanitize_float(speed_magnitude)
+            self.velocity_angle = sanitize_float(self.velocity_angle)
+
             if self.ui_config.unit_system == "metric":
                 self.current_estimated_speed = (
                     speed_magnitude * 3.6
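For context, the metric branch shown above multiplies the sanitized speed (in m/s, as the 3.6 factor implies) by 3.6 to get km/h. Illustrative arithmetic only, with a hypothetical value:

    speed_magnitude = 5.0              # m/s, already passed through sanitize_float()
    kmh = speed_magnitude * 3.6        # 18.0 km/h for the metric unit system
    mph = speed_magnitude * 2.23694    # ~11.2 mph, the standard m/s-to-mph factor (illustration only)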
@@ -4,6 +4,7 @@ import ast
 import copy
 import datetime
 import logging
+import math
 import multiprocessing as mp
 import queue
 import re
@@ -399,3 +400,10 @@ def serialize(
 def deserialize(bytes_data: bytes) -> list[float]:
     """Deserializes a compact "raw bytes" format into a list of floats"""
     return list(struct.unpack("%sf" % (len(bytes_data) // 4), bytes_data))
+
+
+def sanitize_float(value):
+    """Replace NaN or inf with 0.0."""
+    if isinstance(value, (int, float)) and not math.isfinite(value):
+        return 0.0
+    return value
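A quick usage note for the new helper: ordinary floats and non-numeric values pass through unchanged, while NaN and inf collapse to 0.0:

    from frigate.util.builtin import sanitize_float

    print(sanitize_float(float("nan")))  # 0.0
    print(sanitize_float(float("inf")))  # 0.0
    print(sanitize_float(12.5))          # 12.5
    print(sanitize_float("n/a"))         # n/a (non-numeric values pass through unchanged)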
@@ -53,7 +53,13 @@ export default function FaceSelectionDialog({
   const Selector = isDesktop ? DropdownMenu : Drawer;
   const SelectorTrigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger;
   const SelectorContent = isDesktop ? DropdownMenuContent : DrawerContent;
-  const SelectorItem = isDesktop ? DropdownMenuItem : DrawerClose;
+  const SelectorItem = isDesktop
+    ? DropdownMenuItem
+    : (props: React.HTMLAttributes<HTMLDivElement>) => (
+        <DrawerClose asChild>
+          <div {...props} className={cn(props.className, "my-2")} />
+        </DrawerClose>
+      );

   return (
     <div className={className ?? ""}>
@@ -72,10 +78,7 @@ export default function FaceSelectionDialog({
           <TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
         </SelectorTrigger>
         <SelectorContent
-          className={cn(
-            "max-h-[75dvh] overflow-hidden",
-            isMobile && "mx-1 gap-2 rounded-t-2xl px-4",
-          )}
+          className={cn("", isMobile && "mx-1 gap-2 rounded-t-2xl px-4")}
         >
           {isMobile && (
             <DrawerHeader className="sr-only">
@@ -86,8 +89,8 @@ export default function FaceSelectionDialog({
           <DropdownMenuLabel>{t("trainFaceAs")}</DropdownMenuLabel>
           <div
             className={cn(
-              "flex flex-col",
-              isMobile && "gap-2 overflow-y-auto pb-4",
+              "flex max-h-[40dvh] flex-col overflow-y-auto",
+              isMobile && "gap-2 pb-4",
             )}
           >
             <SelectorItem
@@ -97,7 +100,7 @@ export default function FaceSelectionDialog({
               <LuPlus />
               {t("createFaceLibrary.new")}
             </SelectorItem>
-            {faceNames.map((faceName) => (
+            {faceNames.sort().map((faceName) => (
              <SelectorItem
                 key={faceName}
                 className="flex cursor-pointer gap-2 smart-capitalize"
@@ -876,6 +876,7 @@ function FaceAttempt({
   onRefresh,
 }: FaceAttemptProps) {
   const { t } = useTranslation(["views/faceLibrary"]);
+  const [imageLoaded, setImageLoaded] = useState(false);

   const scoreStatus = useMemo(() => {
     if (data.score >= recognitionConfig.recognition_threshold) {
@@ -896,14 +897,12 @@ function FaceAttempt({
   });

   const imageArea = useMemo(() => {
-    if (!imgRef.current) {
+    if (imgRef.current == null || !imageLoaded) {
       return undefined;
     }

     return imgRef.current.naturalWidth * imgRef.current.naturalHeight;
-    // only refresh when severity changes
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [imgRef.current]);
+  }, [imageLoaded]);

   // api calls

@@ -966,9 +965,10 @@ function FaceAttempt({
             : "outline-transparent duration-500",
         )}
       >
-        <div className="relative w-full select-none overflow-hidden rounded-lg *:text-card-foreground">
+        <div className="relative w-full select-none overflow-hidden rounded-lg">
           <img
             ref={imgRef}
+            onLoad={() => setImageLoaded(true)}
             className={cn("size-44", isMobile && "w-full")}
             src={`${baseUrl}clips/faces/train/${data.filename}`}
             onClick={(e) => {