Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-07-30 13:48:07 +02:00)
Fixes (#18338)
* Improve spacing of face selection in mobile drawer
* Fix spacing
* Sort face names alphabetically
* Improve face selection dialog
* Use a state to track when the face image loads

  The naturalWidth and naturalHeight will always be 0 until the image loads, so we use onLoad and a state to track loading, then calculate the area after it has loaded.

* Verify that a camera only tracks objects that are possible to track
* Fix test
* genai docs tweak
* Disable OpenVINO model cache
* Cleanup
* Sanitize floats for estimated speed and angle

  Users can configure speed zones in such a way that velocity estimates from Norfair cause a value of inf to be stored as an estimated speed. FastAPI doesn't serialize inf as a float, so trying to return this value would result in an API error. Sanitizing the value before storing should correct this.

---------

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
parent 49c6073de6
commit c16e536b46
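As background for the float-sanitizing change described in the commit message, here is a minimal standalone sketch (not part of the diff below): strict JSON encoders reject non-finite floats, which is what surfaces as the API error, and replacing such values before they are stored keeps responses serializable. The helper mirrors the `sanitize_float` function this commit adds to `frigate.util.builtin`.

```python
import json
import math


def sanitize_float(value):
    """Replace NaN or inf with 0.0 so the value stays JSON-serializable."""
    if isinstance(value, (int, float)) and not math.isfinite(value):
        return 0.0
    return value


# A misconfigured speed zone can yield a non-finite velocity estimate.
speed = float("inf")

# Strict JSON encoding rejects inf/NaN, which is what surfaces as an API error.
try:
    json.dumps({"estimated_speed": speed}, allow_nan=False)
except ValueError as err:
    print(f"serialization fails: {err}")

# Sanitizing before the value is stored keeps the response valid.
print(json.dumps({"estimated_speed": sanitize_float(speed)}, allow_nan=False))
```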
@@ -167,7 +167,7 @@ Analyze the sequence of images containing the {label}. Focus on the likely inten
 
 :::tip
 
-Prompts can use variable replacements like `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.
+Prompts can use variable replacements `{label}`, `{sub_label}`, and `{camera}` to substitute information from the tracked object as part of the prompt.
 
 :::
 
@@ -299,6 +299,22 @@ def verify_motion_and_detect(camera_config: CameraConfig) -> ValueError | None:
         )
 
 
+def verify_objects_track(
+    camera_config: CameraConfig, enabled_objects: list[str]
+) -> None:
+    """Verify that a user has not specified an object to track that is not in the labelmap."""
+    valid_objects = [
+        obj for obj in camera_config.objects.track if obj in enabled_objects
+    ]
+
+    if len(valid_objects) != len(camera_config.objects.track):
+        invalid_objects = set(camera_config.objects.track) - set(valid_objects)
+        logger.warning(
+            f"{camera_config.name} is configured to track {list(invalid_objects)} objects, which are not supported by the current model."
+        )
+        camera_config.objects.track = valid_objects
+
+
 def verify_lpr_and_face(
     frigate_config: FrigateConfig, camera_config: CameraConfig
 ) -> ValueError | None:
@@ -471,6 +487,37 @@ class FrigateConfig(FrigateBaseModel):
             exclude_unset=True,
         )
 
+        for key, detector in self.detectors.items():
+            adapter = TypeAdapter(DetectorConfig)
+            model_dict = (
+                detector
+                if isinstance(detector, dict)
+                else detector.model_dump(warnings="none")
+            )
+            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
+
+            # users should not set model themselves
+            if detector_config.model:
+                detector_config.model = None
+
+            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
+
+            if detector_config.model_path:
+                model_config["path"] = detector_config.model_path
+
+            if "path" not in model_config:
+                if detector_config.type == "cpu":
+                    model_config["path"] = "/cpu_model.tflite"
+                elif detector_config.type == "edgetpu":
+                    model_config["path"] = "/edgetpu_model.tflite"
+
+            model = ModelConfig.model_validate(model_config)
+            model.check_and_load_plus_model(self.plus_api, detector_config.type)
+            model.compute_model_hash()
+            labelmap_objects = model.merged_labelmap.values()
+            detector_config.model = model
+            self.detectors[key] = detector_config
+
         for name, camera in self.cameras.items():
             modified_global_config = global_config.copy()
 
@@ -644,6 +691,7 @@ class FrigateConfig(FrigateBaseModel):
             verify_required_zones_exist(camera_config)
             verify_autotrack_zones(camera_config)
             verify_motion_and_detect(camera_config)
+            verify_objects_track(camera_config, labelmap_objects)
             verify_lpr_and_face(self, camera_config)
 
         self.objects.parse_all_objects(self.cameras)
@@ -655,36 +703,6 @@ class FrigateConfig(FrigateBaseModel):
                 "Frigate+ is configured but clean snapshots are not enabled, submissions to Frigate+ will not be possible./"
             )
 
-        for key, detector in self.detectors.items():
-            adapter = TypeAdapter(DetectorConfig)
-            model_dict = (
-                detector
-                if isinstance(detector, dict)
-                else detector.model_dump(warnings="none")
-            )
-            detector_config: BaseDetectorConfig = adapter.validate_python(model_dict)
-
-            # users should not set model themselves
-            if detector_config.model:
-                detector_config.model = None
-
-            model_config = self.model.model_dump(exclude_unset=True, warnings="none")
-
-            if detector_config.model_path:
-                model_config["path"] = detector_config.model_path
-
-            if "path" not in model_config:
-                if detector_config.type == "cpu":
-                    model_config["path"] = "/cpu_model.tflite"
-                elif detector_config.type == "edgetpu":
-                    model_config["path"] = "/edgetpu_model.tflite"
-
-            model = ModelConfig.model_validate(model_config)
-            model.check_and_load_plus_model(self.plus_api, detector_config.type)
-            model.compute_model_hash()
-            detector_config.model = model
-            self.detectors[key] = detector_config
-
         return self
 
     @field_validator("cameras")
@@ -3,11 +3,9 @@ import os
 
 import numpy as np
 import openvino as ov
-import openvino.properties as props
 from pydantic import Field
 from typing_extensions import Literal
 
-from frigate.const import MODEL_CACHE_DIR
 from frigate.detectors.detection_api import DetectionApi
 from frigate.detectors.detector_config import BaseDetectorConfig, ModelTypeEnum
 from frigate.util.model import (
@@ -49,10 +47,6 @@ class OvDetector(DetectionApi):
             logger.error(f"OpenVino model file {detector_config.model.path} not found.")
             raise FileNotFoundError
 
-        os.makedirs(os.path.join(MODEL_CACHE_DIR, "openvino"), exist_ok=True)
-        self.ov_core.set_property(
-            {props.cache_dir: os.path.join(MODEL_CACHE_DIR, "openvino")}
-        )
         self.interpreter = self.ov_core.compile_model(
             model=detector_config.model.path, device_name=detector_config.device
         )
@@ -39,7 +39,7 @@ class TestConfig(unittest.TestCase):
             "description": "Fine tuned model",
             "trainDate": "2023-04-28T23:22:01.262Z",
             "type": "ssd",
-            "supportedDetectors": ["edgetpu"],
+            "supportedDetectors": ["cpu", "edgetpu"],
             "width": 320,
             "height": 320,
             "inputShape": "nhwc",
@@ -18,6 +18,7 @@ from frigate.config import (
 )
 from frigate.const import CLIPS_DIR, THUMB_DIR
 from frigate.review.types import SeverityEnum
+from frigate.util.builtin import sanitize_float
 from frigate.util.image import (
     area,
     calculate_region,
@@ -202,6 +203,11 @@ class TrackedObject:
                 )
             )
 
+            # users can configure speed zones incorrectly, so sanitize speed_magnitude
+            # and velocity_angle in case the values come back as inf or NaN
+            speed_magnitude = sanitize_float(speed_magnitude)
+            self.velocity_angle = sanitize_float(self.velocity_angle)
+
             if self.ui_config.unit_system == "metric":
                 self.current_estimated_speed = (
                     speed_magnitude * 3.6
@@ -4,6 +4,7 @@ import ast
 import copy
 import datetime
 import logging
+import math
 import multiprocessing as mp
 import queue
 import re
@@ -399,3 +400,10 @@ def serialize(
 def deserialize(bytes_data: bytes) -> list[float]:
     """Deserializes a compact "raw bytes" format into a list of floats"""
     return list(struct.unpack("%sf" % (len(bytes_data) // 4), bytes_data))
+
+
+def sanitize_float(value):
+    """Replace NaN or inf with 0.0."""
+    if isinstance(value, (int, float)) and not math.isfinite(value):
+        return 0.0
+    return value
@@ -53,7 +53,13 @@ export default function FaceSelectionDialog({
   const Selector = isDesktop ? DropdownMenu : Drawer;
   const SelectorTrigger = isDesktop ? DropdownMenuTrigger : DrawerTrigger;
   const SelectorContent = isDesktop ? DropdownMenuContent : DrawerContent;
-  const SelectorItem = isDesktop ? DropdownMenuItem : DrawerClose;
+  const SelectorItem = isDesktop
+    ? DropdownMenuItem
+    : (props: React.HTMLAttributes<HTMLDivElement>) => (
+        <DrawerClose asChild>
+          <div {...props} className={cn(props.className, "my-2")} />
+        </DrawerClose>
+      );
 
   return (
     <div className={className ?? ""}>
@@ -72,10 +78,7 @@ export default function FaceSelectionDialog({
           <TooltipTrigger asChild={isChildButton}>{children}</TooltipTrigger>
         </SelectorTrigger>
         <SelectorContent
-          className={cn(
-            "max-h-[75dvh] overflow-hidden",
-            isMobile && "mx-1 gap-2 rounded-t-2xl px-4",
-          )}
+          className={cn("", isMobile && "mx-1 gap-2 rounded-t-2xl px-4")}
         >
           {isMobile && (
             <DrawerHeader className="sr-only">
@@ -86,8 +89,8 @@ export default function FaceSelectionDialog({
           <DropdownMenuLabel>{t("trainFaceAs")}</DropdownMenuLabel>
           <div
             className={cn(
-              "flex flex-col",
-              isMobile && "gap-2 overflow-y-auto pb-4",
+              "flex max-h-[40dvh] flex-col overflow-y-auto",
+              isMobile && "gap-2 pb-4",
             )}
           >
             <SelectorItem
@@ -97,7 +100,7 @@ export default function FaceSelectionDialog({
               <LuPlus />
               {t("createFaceLibrary.new")}
             </SelectorItem>
-            {faceNames.map((faceName) => (
+            {faceNames.sort().map((faceName) => (
              <SelectorItem
                key={faceName}
                className="flex cursor-pointer gap-2 smart-capitalize"
@@ -876,6 +876,7 @@ function FaceAttempt({
   onRefresh,
 }: FaceAttemptProps) {
   const { t } = useTranslation(["views/faceLibrary"]);
+  const [imageLoaded, setImageLoaded] = useState(false);
 
   const scoreStatus = useMemo(() => {
     if (data.score >= recognitionConfig.recognition_threshold) {
@@ -896,14 +897,12 @@ function FaceAttempt({
   });
 
   const imageArea = useMemo(() => {
-    if (!imgRef.current) {
+    if (imgRef.current == null || !imageLoaded) {
       return undefined;
     }
 
     return imgRef.current.naturalWidth * imgRef.current.naturalHeight;
-    // only refresh when severity changes
-    // eslint-disable-next-line react-hooks/exhaustive-deps
-  }, [imgRef.current]);
+  }, [imageLoaded]);
 
   // api calls
 
@@ -966,9 +965,10 @@ function FaceAttempt({
               : "outline-transparent duration-500",
           )}
         >
-          <div className="relative w-full select-none overflow-hidden rounded-lg *:text-card-foreground">
+          <div className="relative w-full select-none overflow-hidden rounded-lg">
             <img
               ref={imgRef}
+              onLoad={() => setImageLoaded(true)}
               className={cn("size-44", isMobile && "w-full")}
               src={`${baseUrl}clips/faces/train/${data.filename}`}
               onClick={(e) => {