Add per-camera face and lpr configs (#17235)

* Add per-camera face and lpr configs

* Formatting

* Cleanup
This commit is contained in:
Nicolas Mowen 2025-03-19 09:02:40 -06:00 committed by GitHub
parent e33fa96599
commit ce43b7b3d9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 64 additions and 17 deletions

View File

@ -543,12 +543,23 @@ semantic_search:
model_size: "small"
# Optional: Configuration for face recognition capability
# NOTE: The (enabled, min_area) options can be overridden at the camera level
face_recognition:
# Optional: Enable face recognition (default: shown below)
enabled: False
# Optional: Set the model size used for embeddings. (default: shown below)
# NOTE: small model runs on CPU and large model runs on GPU
model_size: "small"
# Optional: Minimum face distance score required to save the attempt (default: shown below)
min_score: 0.8
# Optional: Minimum face detection score required to detect a face (default: shown below)
# NOTE: This only applies when not running a Frigate+ model
detection_threshold: 0.7
# Optional: Minimum face distance score required to be considered a match (default: shown below)
recognition_threshold: 0.9
# Optional: Min area of detected face box to consider running face recognition (default: shown below)
min_area: 500
# Optional: Save images of recognized faces for training (default: shown below)
save_attempts: True
# Optional: Apply a blur quality filter to adjust confidence based on the blur level of the image (default: shown below)
blur_confidence_filter: True
# Optional: Configuration for license plate recognition capability
lpr:

View File

@ -17,6 +17,10 @@ from frigate.util.builtin import (
)
from ..base import FrigateBaseModel
from ..classification import (
CameraFaceRecognitionConfig,
CameraLicensePlateRecognitionConfig,
)
from .audio import AudioConfig
from .birdseye import BirdseyeCameraConfig
from .detect import DetectConfig
@ -52,6 +56,9 @@ class CameraConfig(FrigateBaseModel):
detect: DetectConfig = Field(
default_factory=DetectConfig, title="Object detection configuration."
)
face_recognition: CameraFaceRecognitionConfig = Field(
default_factory=CameraFaceRecognitionConfig, title="Face recognition config."
)
ffmpeg: CameraFfmpegConfig = Field(title="FFmpeg configuration for the camera.")
genai: GenAICameraConfig = Field(
default_factory=GenAICameraConfig, title="Generative AI configuration."
@ -59,6 +66,9 @@ class CameraConfig(FrigateBaseModel):
live: CameraLiveConfig = Field(
default_factory=CameraLiveConfig, title="Live playback settings."
)
lpr: CameraLicensePlateRecognitionConfig = Field(
default_factory=CameraLicensePlateRecognitionConfig, title="LPR config."
)
motion: Optional[MotionConfig] = Field(
None, title="Motion detection configuration."
)

View File

@ -6,6 +6,8 @@ from pydantic import Field
from .base import FrigateBaseModel
__all__ = [
"CameraFaceRecognitionConfig",
"CameraLicensePlateRecognitionConfig",
"FaceRecognitionConfig",
"SemanticSearchConfig",
"LicensePlateRecognitionConfig",
@ -78,6 +80,13 @@ class FaceRecognitionConfig(FrigateBaseModel):
)
class CameraFaceRecognitionConfig(FrigateBaseModel):
    """Per-camera overrides for face recognition.

    Only the subset of face-recognition options that make sense per camera
    (enabled, min_area) is exposed here; the remaining options live in the
    global FaceRecognitionConfig.
    """

    # Whether face recognition runs for this camera (default: disabled).
    enabled: bool = Field(default=False, title="Enable face recognition.")
    # Detected face boxes smaller than this area (in pixels) are skipped.
    min_area: int = Field(
        default=500, title="Min area of face box to consider running face recognition."
    )
class LicensePlateRecognitionConfig(FrigateBaseModel):
enabled: bool = Field(default=False, title="Enable license plate recognition.")
detection_threshold: float = Field(
@ -112,3 +121,11 @@ class LicensePlateRecognitionConfig(FrigateBaseModel):
known_plates: Optional[Dict[str, List[str]]] = Field(
default={}, title="Known plates to track (strings or regular expressions)."
)
class CameraLicensePlateRecognitionConfig(FrigateBaseModel):
    """Per-camera overrides for license plate recognition (LPR).

    Only the subset of LPR options that make sense per camera
    (enabled, min_area) is exposed here; the remaining options live in the
    global LicensePlateRecognitionConfig.
    """

    # Whether license plate recognition runs for this camera (default: disabled).
    enabled: bool = Field(default=False, title="Enable license plate recognition.")
    # Detected plate boxes smaller than this area (in pixels) are skipped.
    min_area: int = Field(
        default=1000,
        title="Minimum area of license plate to begin running recognition.",
    )

View File

@ -331,19 +331,6 @@ class FrigateConfig(FrigateBaseModel):
default_factory=TelemetryConfig, title="Telemetry configuration."
)
tls: TlsConfig = Field(default_factory=TlsConfig, title="TLS configuration.")
classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, title="Object classification config."
)
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig,
title="License Plate recognition config.",
)
ui: UIConfig = Field(default_factory=UIConfig, title="UI configuration.")
# Detector config
@ -395,6 +382,21 @@ class FrigateConfig(FrigateBaseModel):
title="Global timestamp style configuration.",
)
# Classification Config
classification: ClassificationConfig = Field(
default_factory=ClassificationConfig, title="Object classification config."
)
semantic_search: SemanticSearchConfig = Field(
default_factory=SemanticSearchConfig, title="Semantic search configuration."
)
face_recognition: FaceRecognitionConfig = Field(
default_factory=FaceRecognitionConfig, title="Face recognition config."
)
lpr: LicensePlateRecognitionConfig = Field(
default_factory=LicensePlateRecognitionConfig,
title="License Plate recognition config.",
)
camera_groups: Dict[str, CameraGroupConfig] = Field(
default_factory=dict, title="Camera group configuration"
)

View File

@ -288,6 +288,9 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
"""Look for faces in image."""
if not self.config.cameras[obj_data["camera"]].face_recognition.enabled:
return
start = datetime.datetime.now().timestamp()
id = obj_data["id"]
@ -348,7 +351,11 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
face_box = face.get("box")
# check that face is valid
if not face_box or area(face_box) < self.config.face_recognition.min_area:
if (
not face_box
or area(face_box)
< self.config.cameras[obj_data["camera"]].face_recognition.min_area
):
logger.debug(f"Invalid face box {face}")
return