Mirror of https://github.com/blakeblackshear/frigate.git, synced 2025-08-04 13:47:37 +02:00
Implement enrichments events-per-second graph (#17436)
* Cleanup existing naming
* Add face recognitions per second
* Add lpr fps
* Add all eps
* Clean up line graph
* Translations
* Change wording
* Fix incorrect access
* Don't require plates
* Add comment
* Fix
Commit: 9e8b85a957
Parent: b14abffea3
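A note on the pattern used throughout this diff: each enrichment pipeline (face recognition, LPR, embeddings) owns an EventsPerSecond counter from frigate.util.builtin that is start()ed once at setup, update()d on every inference, and sampled with eps() when metrics are published. A minimal sketch of those assumed semantics; only the three method names are confirmed by this diff, the sliding-window internals below are illustrative:

import time


class EventsPerSecond:
    def __init__(self, last_n_seconds: int = 10) -> None:
        self._last_n_seconds = last_n_seconds
        self._start = 0.0
        self._timestamps: list[float] = []

    def start(self) -> None:
        self._start = time.monotonic()

    def update(self) -> None:
        # record one event (a face recognition, plate detection, embedding, ...)
        self._timestamps.append(time.monotonic())

    def eps(self) -> float:
        now = time.monotonic()
        # keep only events inside the trailing window
        self._timestamps = [
            t for t in self._timestamps if t > now - self._last_n_seconds
        ]
        # normalize by uptime until a full window has elapsed, avoiding /0
        seconds = min(now - self._start, self._last_n_seconds) or 1
        return len(self._timestamps) / seconds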
@@ -24,6 +24,7 @@ from frigate.comms.event_metadata_updater import (
 from frigate.config.camera.camera import CameraTypeEnum
 from frigate.const import CLIPS_DIR
 from frigate.embeddings.onnx.lpr_embedding import LPR_EMBEDDING_SIZE
+from frigate.util.builtin import EventsPerSecond
 from frigate.util.image import area
 
 logger = logging.getLogger(__name__)
@@ -34,11 +34,12 @@ WRITE_DEBUG_IMAGES = False
 class LicensePlateProcessingMixin:
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
+        self.plates_rec_second = EventsPerSecond()
+        self.plates_rec_second.start()
+        self.plates_det_second = EventsPerSecond()
+        self.plates_det_second.start()
         self.event_metadata_publisher = EventMetadataPublisher()
 
         self.ctc_decoder = CTCDecoder()
 
         self.batch_size = 6
 
         # Detection specific parameters
@@ -947,15 +949,17 @@ class LicensePlateProcessingMixin:
         """
         Update inference metrics.
         """
-        self.metrics.yolov9_lpr_fps.value = (
-            self.metrics.yolov9_lpr_fps.value * 9 + duration
+        self.metrics.yolov9_lpr_speed.value = (
+            self.metrics.yolov9_lpr_speed.value * 9 + duration
         ) / 10
 
     def __update_lpr_metrics(self, duration: float) -> None:
         """
         Update inference metrics.
         """
-        self.metrics.alpr_pps.value = (self.metrics.alpr_pps.value * 9 + duration) / 10
+        self.metrics.alpr_speed.value = (
+            self.metrics.alpr_speed.value * 9 + duration
+        ) / 10
 
     def _generate_plate_event(self, camera: str, plate: str, plate_score: float) -> str:
         """Generate a unique ID for a plate event based on camera and text."""
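The renamed *_speed metrics keep the pre-existing smoothing scheme: each new inference duration is folded in as (previous * 9 + duration) / 10, an exponential moving average with alpha = 0.1, so a single slow inference nudges rather than spikes the reported speed. A short worked example with hypothetical durations:

# EMA used by the *_speed metrics; the durations are hypothetical.
speed = 0.01  # seed value from DataProcessorMetrics.__init__
for duration in (0.20, 0.25, 0.15):  # seconds per inference
    speed = (speed * 9 + duration) / 10
print(round(speed * 1000, 2))  # 60.99, stats_snapshot reports milliseconds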
@@ -982,6 +986,8 @@ class LicensePlateProcessingMixin:
         self, obj_data: dict[str, any], frame: np.ndarray, dedicated_lpr: bool = False
     ):
         """Look for license plates in image."""
+        self.metrics.alpr_pps.value = self.plates_rec_second.eps()
+        self.metrics.yolov9_lpr_pps.value = self.plates_det_second.eps()
         camera = obj_data if dedicated_lpr else obj_data["camera"]
         current_time = int(datetime.datetime.now().timestamp())
 
@@ -1011,6 +1017,7 @@ class LicensePlateProcessingMixin:
             logger.debug(
                 f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
             )
+            self.plates_det_second.update()
             self.__update_yolov9_metrics(
                 datetime.datetime.now().timestamp() - yolov9_start
             )
@@ -1093,6 +1100,7 @@ class LicensePlateProcessingMixin:
             logger.debug(
                 f"{camera}: YOLOv9 LPD inference time: {(datetime.datetime.now().timestamp() - yolov9_start) * 1000:.2f} ms"
             )
+            self.plates_det_second.update()
             self.__update_yolov9_metrics(
                 datetime.datetime.now().timestamp() - yolov9_start
             )
@@ -1197,6 +1205,7 @@ class LicensePlateProcessingMixin:
         license_plates, confidences, areas = self._process_license_plate(
             camera, id, license_plate_frame
         )
+        self.plates_rec_second.update()
         self.__update_lpr_metrics(datetime.datetime.now().timestamp() - start)
 
         if license_plates:
@@ -24,6 +24,7 @@ from frigate.data_processing.common.face.model import (
     FaceNetRecognizer,
     FaceRecognizer,
 )
+from frigate.util.builtin import EventsPerSecond
 from frigate.util.image import area
 
 from ..types import DataProcessorMetrics
@@ -51,6 +52,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         self.requires_face_detection = "face" not in self.config.objects.all_objects
         self.person_face_history: dict[str, list[tuple[str, float, int]]] = {}
         self.recognizer: FaceRecognizer | None = None
+        self.faces_per_second = EventsPerSecond()
 
         download_path = os.path.join(MODEL_CACHE_DIR, "facedet")
         self.model_files = {
@@ -103,6 +105,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             score_threshold=0.5,
             nms_threshold=0.3,
         )
+        self.faces_per_second.start()
 
     def __detect_face(
         self, input: np.ndarray, threshold: float
@@ -146,12 +149,15 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         return face
 
     def __update_metrics(self, duration: float) -> None:
-        self.metrics.face_rec_fps.value = (
-            self.metrics.face_rec_fps.value * 9 + duration
+        self.faces_per_second.update()
+        self.metrics.face_rec_speed.value = (
+            self.metrics.face_rec_speed.value * 9 + duration
         ) / 10
 
     def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
         """Look for faces in image."""
+        self.metrics.face_rec_fps.value = self.faces_per_second.eps()
+
         if not self.config.cameras[obj_data["camera"]].face_recognition.enabled:
             return
 
@@ -6,18 +6,26 @@ from multiprocessing.sharedctypes import Synchronized
 
 
 class DataProcessorMetrics:
-    image_embeddings_fps: Synchronized
-    text_embeddings_sps: Synchronized
+    image_embeddings_speed: Synchronized
+    text_embeddings_speed: Synchronized
+    face_rec_speed: Synchronized
     face_rec_fps: Synchronized
+    alpr_speed: Synchronized
     alpr_pps: Synchronized
-    yolov9_lpr_fps: Synchronized
+    yolov9_lpr_speed: Synchronized
+    yolov9_lpr_pps: Synchronized
 
     def __init__(self):
-        self.image_embeddings_fps = mp.Value("d", 0.01)
-        self.text_embeddings_sps = mp.Value("d", 0.01)
-        self.face_rec_fps = mp.Value("d", 0.01)
-        self.alpr_pps = mp.Value("d", 0.01)
-        self.yolov9_lpr_fps = mp.Value("d", 0.01)
+        self.image_embeddings_speed = mp.Value("d", 0.01)
+        self.image_embeddings_eps = mp.Value("d", 0.0)
+        self.text_embeddings_speed = mp.Value("d", 0.01)
+        self.text_embeddings_eps = mp.Value("d", 0.0)
+        self.face_rec_speed = mp.Value("d", 0.01)
+        self.face_rec_fps = mp.Value("d", 0.0)
+        self.alpr_speed = mp.Value("d", 0.01)
+        self.alpr_pps = mp.Value("d", 0.0)
+        self.yolov9_lpr_speed = mp.Value("d", 0.01)
+        self.yolov9_lpr_pps = mp.Value("d", 0.0)
 
 
 class DataProcessorModelRunner:
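These metrics are multiprocessing doubles ("d") because the writers (the embeddings, face, and LPR processes) and the stats reader run in separate processes and must share memory rather than copies. A self-contained sketch of that pattern; the variable names and values here are illustrative only:

# Sketch of cross-process metric sharing via multiprocessing.Value.
import multiprocessing as mp


def worker(eps_value) -> None:
    eps_value.value = 4.2  # e.g. the latest faces-per-second sample


if __name__ == "__main__":
    face_rec_fps = mp.Value("d", 0.0)  # "d" = double, as in DataProcessorMetrics
    p = mp.Process(target=worker, args=(face_rec_fps,))
    p.start()
    p.join()
    print(face_rec_fps.value)  # 4.2, visible across the process boundary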
@@ -21,7 +21,7 @@ from frigate.data_processing.types import DataProcessorMetrics
 from frigate.db.sqlitevecq import SqliteVecQueueDatabase
 from frigate.models import Event
 from frigate.types import ModelStatusTypesEnum
-from frigate.util.builtin import serialize
+from frigate.util.builtin import EventsPerSecond, serialize
 from frigate.util.path import get_event_thumbnail_bytes
 
 from .onnx.jina_v1_embedding import JinaV1ImageEmbedding, JinaV1TextEmbedding
@@ -75,6 +75,11 @@ class Embeddings:
         self.metrics = metrics
         self.requestor = InterProcessRequestor()
 
+        self.image_eps = EventsPerSecond()
+        self.image_eps.start()
+        self.text_eps = EventsPerSecond()
+        self.text_eps.start()
+
         self.reindex_lock = threading.Lock()
         self.reindex_thread = None
         self.reindex_running = False
@@ -120,6 +125,10 @@ class Embeddings:
             device="GPU" if config.semantic_search.model_size == "large" else "CPU",
         )
 
+    def update_stats(self) -> None:
+        self.metrics.image_embeddings_eps.value = self.image_eps.eps()
+        self.metrics.text_embeddings_eps.value = self.text_eps.eps()
+
     def get_model_definitions(self):
         # Version-specific models
         if self.config.semantic_search.model == SemanticSearchModelEnum.jinav2:
@@ -175,9 +184,10 @@ class Embeddings:
         )
 
         duration = datetime.datetime.now().timestamp() - start
-        self.metrics.image_embeddings_fps.value = (
-            self.metrics.image_embeddings_fps.value * 9 + duration
+        self.metrics.image_embeddings_speed.value = (
+            self.metrics.image_embeddings_speed.value * 9 + duration
         ) / 10
+        self.image_eps.update()
 
         return embedding
 
@@ -199,6 +209,7 @@ class Embeddings:
         for i in range(len(ids)):
             items.append(ids[i])
             items.append(serialize(embeddings[i]))
+            self.image_eps.update()
 
         self.db.execute_sql(
             """
@@ -209,8 +220,8 @@ class Embeddings:
         )
 
         duration = datetime.datetime.now().timestamp() - start
-        self.metrics.text_embeddings_sps.value = (
-            self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
+        self.metrics.text_embeddings_speed.value = (
+            self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids))
         ) / 10
 
         return embeddings
@@ -231,9 +242,10 @@ class Embeddings:
         )
 
         duration = datetime.datetime.now().timestamp() - start
-        self.metrics.text_embeddings_sps.value = (
-            self.metrics.text_embeddings_sps.value * 9 + duration
+        self.metrics.text_embeddings_speed.value = (
+            self.metrics.text_embeddings_speed.value * 9 + duration
         ) / 10
+        self.text_eps.update()
 
         return embedding
 
@@ -254,6 +266,7 @@ class Embeddings:
         for i in range(len(ids)):
             items.append(ids[i])
             items.append(serialize(embeddings[i]))
+            self.text_eps.update()
 
         self.db.execute_sql(
             """
@@ -264,8 +277,8 @@ class Embeddings:
         )
 
         duration = datetime.datetime.now().timestamp() - start
-        self.metrics.text_embeddings_sps.value = (
-            self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
+        self.metrics.text_embeddings_speed.value = (
+            self.metrics.text_embeddings_speed.value * 9 + (duration / len(ids))
         ) / 10
 
         return embeddings
@@ -236,6 +236,7 @@ class EmbeddingMaintainer(threading.Thread):
             return
 
         camera_config = self.config.cameras[camera]
+        self.embeddings.update_stats()
 
         # no need to process updated objects if face recognition, lpr, genai are disabled
         if not camera_config.genai.enabled and len(self.realtime_processors) == 0:
@@ -293,27 +293,42 @@ def stats_snapshot(
         stats["embeddings"].update(
             {
                 "image_embedding_speed": round(
-                    embeddings_metrics.image_embeddings_fps.value * 1000, 2
+                    embeddings_metrics.image_embeddings_speed.value * 1000, 2
+                ),
+                "image_embedding": round(
+                    embeddings_metrics.image_embeddings_eps.value, 2
                 ),
                 "text_embedding_speed": round(
-                    embeddings_metrics.text_embeddings_sps.value * 1000, 2
+                    embeddings_metrics.text_embeddings_speed.value * 1000, 2
+                ),
+                "text_embedding": round(
+                    embeddings_metrics.text_embeddings_eps.value, 2
                 ),
             }
         )
 
         if config.face_recognition.enabled:
             stats["embeddings"]["face_recognition_speed"] = round(
-                embeddings_metrics.face_rec_fps.value * 1000, 2
+                embeddings_metrics.face_rec_speed.value * 1000, 2
+            )
+            stats["embeddings"]["face_recognition"] = round(
+                embeddings_metrics.face_rec_fps.value, 2
             )
 
         if config.lpr.enabled:
             stats["embeddings"]["plate_recognition_speed"] = round(
-                embeddings_metrics.alpr_pps.value * 1000, 2
+                embeddings_metrics.alpr_speed.value * 1000, 2
+            )
+            stats["embeddings"]["plate_recognition"] = round(
+                embeddings_metrics.alpr_pps.value, 2
             )
 
-        if "license_plate" not in config.objects.all_objects:
+        if embeddings_metrics.yolov9_lpr_pps.value > 0.0:
             stats["embeddings"]["yolov9_plate_detection_speed"] = round(
-                embeddings_metrics.yolov9_lpr_fps.value * 1000, 2
+                embeddings_metrics.yolov9_lpr_speed.value * 1000, 2
+            )
+            stats["embeddings"]["yolov9_plate_detection"] = round(
+                embeddings_metrics.yolov9_lpr_pps.value, 2
             )
 
     get_processing_stats(config, stats, hwaccel_errors)
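With this hunk, every enrichment reports a smoothed speed in milliseconds alongside a raw events-per-second value. An illustrative payload; every value below is invented, only the keys come from the code above:

# Illustrative shape of stats["embeddings"] after this change.
embeddings_stats = {
    "image_embedding_speed": 57.81,  # ms per inference, EMA-smoothed
    "image_embedding": 2.0,  # inferences per second
    "text_embedding_speed": 12.4,
    "text_embedding": 0.5,
    # present only when face recognition is enabled
    "face_recognition_speed": 33.2,
    "face_recognition": 1.25,
    # present only when LPR is enabled
    "plate_recognition_speed": 48.9,
    "plate_recognition": 0.75,
    # present only once the dedicated YOLOv9 plate detector has reported events
    "yolov9_plate_detection_speed": 21.7,
    "yolov9_plate_detection": 3.0,
}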
@@ -3,7 +3,7 @@
     "cameras": "Cameras Stats - Frigate",
     "storage": "Storage Stats - Frigate",
     "general": "General Stats - Frigate",
-    "features": "Features Stats - Frigate",
+    "enrichments": "Enrichments Stats - Frigate",
     "logs": {
       "frigate": "Frigate Logs - Frigate",
       "go2rtc": "Go2RTC Logs - Frigate",
@@ -144,8 +144,9 @@
     "healthy": "System is healthy",
     "reindexingEmbeddings": "Reindexing embeddings ({{processed}}% complete)"
   },
-  "features": {
-    "title": "Features",
+  "enrichments": {
+    "title": "Enrichments",
+    "infPerSecond": "Inferences Per Second",
     "embeddings": {
       "image_embedding_speed": "Image Embedding Speed",
       "face_embedding_speed": "Face Embedding Speed",
@@ -143,3 +143,118 @@ export function CameraLineGraph({
     </div>
   );
 }
+
+type EventsPerSecondLineGraphProps = {
+  graphId: string;
+  unit: string;
+  name: string;
+  updateTimes: number[];
+  data: ApexAxisChartSeries;
+};
+export function EventsPerSecondsLineGraph({
+  graphId,
+  unit,
+  name,
+  updateTimes,
+  data,
+}: EventsPerSecondLineGraphProps) {
+  const { data: config } = useSWR<FrigateConfig>("config", {
+    revalidateOnFocus: false,
+  });
+
+  const { theme, systemTheme } = useTheme();
+
+  const lastValue = useMemo<number>(
+    // @ts-expect-error y is valid
+    () => data[0].data[data[0].data.length - 1]?.y ?? 0,
+    [data],
+  );
+
+  const formatTime = useCallback(
+    (val: unknown) => {
+      return formatUnixTimestampToDateTime(
+        updateTimes[Math.round(val as number) - 1],
+        {
+          timezone: config?.ui.timezone,
+          strftime_fmt:
+            config?.ui.time_format == "24hour" ? "%H:%M" : "%I:%M %p",
+        },
+      );
+    },
+    [config, updateTimes],
+  );
+
+  const options = useMemo(() => {
+    return {
+      chart: {
+        id: graphId,
+        selection: {
+          enabled: false,
+        },
+        toolbar: {
+          show: false,
+        },
+        zoom: {
+          enabled: false,
+        },
+      },
+      colors: GRAPH_COLORS,
+      grid: {
+        show: false,
+      },
+      legend: {
+        show: false,
+      },
+      dataLabels: {
+        enabled: false,
+      },
+      stroke: {
+        width: 1,
+      },
+      tooltip: {
+        theme: systemTheme || theme,
+      },
+      markers: {
+        size: 0,
+      },
+      xaxis: {
+        tickAmount: isMobileOnly ? 2 : 3,
+        tickPlacement: "on",
+        labels: {
+          rotate: 0,
+          formatter: formatTime,
+        },
+        axisBorder: {
+          show: false,
+        },
+        axisTicks: {
+          show: false,
+        },
+      },
+      yaxis: {
+        show: true,
+        labels: {
+          formatter: (val: number) => Math.ceil(val).toString(),
+        },
+        min: 0,
+      },
+    } as ApexCharts.ApexOptions;
+  }, [graphId, systemTheme, theme, formatTime]);
+
+  useEffect(() => {
+    ApexCharts.exec(graphId, "updateOptions", options, true, true);
+  }, [graphId, options]);
+
+  return (
+    <div className="flex w-full flex-col">
+      <div className="flex items-center gap-1">
+        <div className="text-xs text-muted-foreground">{name}</div>
+        <div className="text-xs text-primary">
+          {lastValue}
+          {unit}
+        </div>
+      </div>
+      <Chart type="line" options={options} series={data} height="120" />
+    </div>
+  );
+}
@@ -14,10 +14,10 @@ import CameraMetrics from "@/views/system/CameraMetrics";
 import { useHashState } from "@/hooks/use-overlay-state";
 import { Toaster } from "@/components/ui/sonner";
 import { FrigateConfig } from "@/types/frigateConfig";
-import FeatureMetrics from "@/views/system/FeatureMetrics";
+import EnrichmentMetrics from "@/views/system/EnrichmentMetrics";
 import { useTranslation } from "react-i18next";
 
-const allMetrics = ["general", "features", "storage", "cameras"] as const;
+const allMetrics = ["general", "enrichments", "storage", "cameras"] as const;
 type SystemMetric = (typeof allMetrics)[number];
 
 function System() {
@@ -34,7 +34,7 @@ function System() {
     !config?.lpr.enabled &&
     !config?.face_recognition.enabled
   ) {
-    const index = metrics.indexOf("features");
+    const index = metrics.indexOf("enrichments");
     metrics.splice(index, 1);
   }
 
@@ -89,7 +89,7 @@ function System() {
               aria-label={`Select ${item}`}
             >
               {item == "general" && <LuActivity className="size-4" />}
-              {item == "features" && <LuSearchCode className="size-4" />}
+              {item == "enrichments" && <LuSearchCode className="size-4" />}
               {item == "storage" && <LuHardDrive className="size-4" />}
               {item == "cameras" && <FaVideo className="size-4" />}
               {isDesktop && (
@@ -122,8 +122,8 @@ function System() {
             setLastUpdated={setLastUpdated}
           />
         )}
-        {page == "features" && (
-          <FeatureMetrics
+        {page == "enrichments" && (
+          <EnrichmentMetrics
             lastUpdated={lastUpdated}
             setLastUpdated={setLastUpdated}
           />
@@ -1,5 +1,5 @@
 import { useFrigateStats } from "@/api/ws";
-import { CameraLineGraph } from "@/components/graph/CameraGraph";
+import { CameraLineGraph } from "@/components/graph/LineGraph";
 import CameraInfoDialog from "@/components/overlay/CameraInfoDialog";
 import { Skeleton } from "@/components/ui/skeleton";
 import { FrigateConfig } from "@/types/frigateConfig";
@@ -7,15 +7,16 @@ import { Skeleton } from "@/components/ui/skeleton";
 import { ThresholdBarGraph } from "@/components/graph/SystemGraph";
 import { cn } from "@/lib/utils";
 import { useTranslation } from "react-i18next";
+import { EventsPerSecondsLineGraph } from "@/components/graph/LineGraph";
 
-type FeatureMetricsProps = {
+type EnrichmentMetricsProps = {
   lastUpdated: number;
   setLastUpdated: (last: number) => void;
 };
-export default function FeatureMetrics({
+export default function EnrichmentMetrics({
   lastUpdated,
   setLastUpdated,
-}: FeatureMetricsProps) {
+}: EnrichmentMetricsProps) {
   // stats
   const { t } = useTranslation(["views/system"]);
 
@@ -102,15 +103,26 @@ export default function FeatureMetrics({
         {embeddingInferenceTimeSeries.map((series) => (
           <div className="rounded-lg bg-background_alt p-2.5 md:rounded-2xl">
             <div className="mb-5 capitalize">{series.name}</div>
-            <ThresholdBarGraph
-              key={series.name}
-              graphId={`${series.name}-inference`}
-              name={series.name}
-              unit="ms"
-              threshold={EmbeddingThreshold}
-              updateTimes={updateTimes}
-              data={[series]}
-            />
+            {series.name.endsWith("Speed") ? (
+              <ThresholdBarGraph
+                key={series.name}
+                graphId={`${series.name}-inference`}
+                name={series.name}
+                unit="ms"
+                threshold={EmbeddingThreshold}
+                updateTimes={updateTimes}
+                data={[series]}
+              />
+            ) : (
+              <EventsPerSecondsLineGraph
+                key={series.name}
+                graphId={`${series.name}-fps`}
+                unit=""
+                name={t("enrichments.infPerSecond")}
+                updateTimes={updateTimes}
+                data={[series]}
+              />
+            )}
           </div>
         ))}
       </>