Add metrics page for embeddings and face / license plate processing times (#15818)

* Get stats for embeddings inferences

* Clean up embeddings inferences

* Enable UI for feature metrics

* Change threshold

* Fix check

* Update python for actions

* Set python version

* Ignore type for now
Authored by Nicolas Mowen 2025-01-05 08:47:57 -06:00, committed by Blake Blackshear
parent 0c13227f7d
commit a6ae208fe7
15 changed files with 309 additions and 39 deletions

View File

@@ -6,7 +6,7 @@ on:
- "docs/**"
env:
-DEFAULT_PYTHON: 3.9
+DEFAULT_PYTHON: 3.11
jobs:
build_devcontainer:

View File

@@ -41,6 +41,7 @@ from frigate.const import (
)
from frigate.db.sqlitevecq import SqliteVecQueueDatabase
from frigate.embeddings import EmbeddingsContext, manage_embeddings
from frigate.embeddings.types import EmbeddingsMetrics
from frigate.events.audio import AudioProcessor
from frigate.events.cleanup import EventCleanup
from frigate.events.external import ExternalEventProcessor
@@ -89,6 +90,9 @@ class FrigateApp:
self.detection_shms: list[mp.shared_memory.SharedMemory] = []
self.log_queue: Queue = mp.Queue()
self.camera_metrics: dict[str, CameraMetrics] = {}
self.embeddings_metrics: EmbeddingsMetrics | None = (
EmbeddingsMetrics() if config.semantic_search.enabled else None
)
self.ptz_metrics: dict[str, PTZMetrics] = {}
self.processes: dict[str, int] = {}
self.embeddings: Optional[EmbeddingsContext] = None
@@ -235,7 +239,10 @@ class FrigateApp:
embedding_process = util.Process(
target=manage_embeddings,
name="embeddings_manager",
-args=(self.config,),
+args=(
+self.config,
+self.embeddings_metrics,
+),
)
embedding_process.daemon = True
self.embedding_process = embedding_process
@@ -497,7 +504,11 @@ class FrigateApp:
self.stats_emitter = StatsEmitter(
self.config,
stats_init(
-self.config, self.camera_metrics, self.detectors, self.processes
+self.config,
+self.camera_metrics,
+self.embeddings_metrics,
+self.detectors,
+self.processes,
),
self.stop_event,
)
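Note the ordering above: EmbeddingsMetrics is constructed in FrigateApp.__init__, before the embeddings process is started, so the multiprocessing.Value fields are shared with the child process rather than copied. A minimal stdlib sketch of that pattern (illustrative only, not Frigate code):

import multiprocessing as mp

def worker(metric) -> None:
    # The child writes to the shared double; the parent observes the change.
    metric.value = 42.0

if __name__ == "__main__":
    metric = mp.Value("d", 0.01)  # "d" = C double, as in EmbeddingsMetrics
    p = mp.Process(target=worker, args=(metric,))
    p.start()
    p.join()
    print(metric.value)  # 42.0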

View File

@@ -21,12 +21,13 @@ from frigate.util.builtin import serialize
from frigate.util.services import listen
from .maintainer import EmbeddingMaintainer
from .types import EmbeddingsMetrics
from .util import ZScoreNormalization
logger = logging.getLogger(__name__)
-def manage_embeddings(config: FrigateConfig) -> None:
+def manage_embeddings(config: FrigateConfig, metrics: EmbeddingsMetrics) -> None:
# Only initialize embeddings if semantic search is enabled
if not config.semantic_search.enabled:
return
@@ -60,6 +61,7 @@ def manage_embeddings(config: FrigateConfig) -> None:
maintainer = EmbeddingMaintainer(
db,
config,
metrics,
stop_event,
)
maintainer.start()

View File

@@ -1,6 +1,7 @@
"""SQLite-vec embeddings database."""
import base64
import datetime
import logging
import os
import time
@@ -21,6 +22,7 @@ from frigate.types import ModelStatusTypesEnum
from frigate.util.builtin import serialize
from .functions.onnx import GenericONNXEmbedding, ModelTypeEnum
from .types import EmbeddingsMetrics
logger = logging.getLogger(__name__)
@@ -59,9 +61,15 @@ def get_metadata(event: Event) -> dict:
class Embeddings:
"""SQLite-vec embeddings database."""
-def __init__(self, config: FrigateConfig, db: SqliteVecQueueDatabase) -> None:
+def __init__(
+self,
+config: FrigateConfig,
+db: SqliteVecQueueDatabase,
+metrics: EmbeddingsMetrics,
+) -> None:
self.config = config
self.db = db
self.metrics = metrics
self.requestor = InterProcessRequestor()
# Create tables if they don't exist
@@ -173,6 +181,7 @@
@param: thumbnail bytes in jpg format
@param: upsert If embedding should be upserted into vec DB
"""
start = datetime.datetime.now().timestamp()
# Convert thumbnail bytes to PIL Image
embedding = self.vision_embedding([thumbnail])[0]
@@ -185,6 +194,11 @@
(event_id, serialize(embedding)),
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.image_embeddings_fps.value = (
self.metrics.image_embeddings_fps.value * 9 + duration
) / 10
return embedding
def batch_embed_thumbnail(
@@ -195,6 +209,7 @@
@param: event_thumbs Map of Event IDs in DB to thumbnail bytes in jpg format
@param: upsert If embedding should be upserted into vec DB
"""
start = datetime.datetime.now().timestamp()
ids = list(event_thumbs.keys())
embeddings = self.vision_embedding(list(event_thumbs.values()))
@@ -213,11 +228,17 @@
items,
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.image_embeddings_fps.value = (
self.metrics.image_embeddings_fps.value * 9 + (duration / len(ids))
) / 10
return embeddings
def embed_description(
self, event_id: str, description: str, upsert: bool = True
) -> ndarray:
start = datetime.datetime.now().timestamp()
embedding = self.text_embedding([description])[0]
if upsert:
@@ -229,11 +250,17 @@
(event_id, serialize(embedding)),
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.text_embeddings_sps.value = (
self.metrics.text_embeddings_sps.value * 9 + duration
) / 10
return embedding
def batch_embed_description(
self, event_descriptions: dict[str, str], upsert: bool = True
) -> ndarray:
start = datetime.datetime.now().timestamp()
# upsert embeddings one by one to avoid token limit
embeddings = []
@@ -256,6 +283,11 @@
items,
)
duration = datetime.datetime.now().timestamp() - start
self.metrics.text_embeddings_sps.value = (
self.metrics.text_embeddings_sps.value * 9 + (duration / len(ids))
) / 10
return embeddings
def reindex(self) -> None:
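Each duration update above is the same exponential moving average, new = (old * 9 + sample) / 10, i.e. a smoothing factor of 0.1 over per-inference durations in seconds. Despite the _fps/_sps field names, the stored value is seconds per item; the stats layer multiplies by 1000 to report milliseconds. A standalone sketch of the smoothing (illustrative values):

def update_ema(current: float, sample: float, alpha: float = 0.1) -> float:
    # Equivalent to (current * 9 + sample) / 10 when alpha is 0.1.
    return current * (1 - alpha) + sample * alpha

speed = 0.01  # seeded at 0.01 like EmbeddingsMetrics
for duration in (0.08, 0.12, 0.10):  # seconds per embedding
    speed = update_ema(speed, duration)
print(round(speed * 1000, 2))  # milliseconds, as reported in stats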

View File

@@ -1,6 +1,7 @@
"""Maintain embeddings in SQLite-vec."""
import base64
import datetime
import logging
import os
import random
@@ -41,6 +42,7 @@ from frigate.util.image import SharedMemoryFrameManager, area, calculate_region
from frigate.util.model import FaceClassificationModel
from .embeddings import Embeddings
from .types import EmbeddingsMetrics
logger = logging.getLogger(__name__)
@@ -54,11 +56,13 @@ class EmbeddingMaintainer(threading.Thread):
self,
db: SqliteQueueDatabase,
config: FrigateConfig,
metrics: EmbeddingsMetrics,
stop_event: MpEvent,
) -> None:
super().__init__(name="embeddings_maintainer")
self.config = config
-self.embeddings = Embeddings(config, db)
+self.metrics = metrics
+self.embeddings = Embeddings(config, db, metrics)
# Check if we need to re-index events
if config.semantic_search.reindex:
@@ -135,7 +139,8 @@ class EmbeddingMaintainer(threading.Thread):
)
elif topic == EmbeddingsRequestEnum.generate_search.value:
return serialize(
-self.embeddings.text_embedding([data])[0], pack=False
+self.embeddings.embed_description("", data, upsert=False),
+pack=False,
)
elif topic == EmbeddingsRequestEnum.register_face.value:
if not self.face_recognition_enabled:
@@ -219,10 +224,24 @@
return
if self.face_recognition_enabled:
-self._process_face(data, yuv_frame)
+start = datetime.datetime.now().timestamp()
+processed = self._process_face(data, yuv_frame)
+if processed:
+duration = datetime.datetime.now().timestamp() - start
+self.metrics.face_rec_fps.value = (
+self.metrics.face_rec_fps.value * 9 + duration
+) / 10
if self.lpr_config.enabled:
-self._process_license_plate(data, yuv_frame)
+start = datetime.datetime.now().timestamp()
+processed = self._process_license_plate(data, yuv_frame)
+if processed:
+duration = datetime.datetime.now().timestamp() - start
+self.metrics.alpr_pps.value = (
+self.metrics.alpr_pps.value * 9 + duration
+) / 10
# no need to save our own thumbnails if genai is not enabled
# or if the object has become stationary
@@ -402,14 +421,14 @@ class EmbeddingMaintainer(threading.Thread):
return face
-def _process_face(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
+def _process_face(self, obj_data: dict[str, any], frame: np.ndarray) -> bool:
"""Look for faces in image."""
id = obj_data["id"]
# don't run for non person objects
if obj_data.get("label") != "person":
logger.debug("Not processing face for non-person object.")
-return
+return False
# don't overwrite sub label for objects that have a sub label
# that is not a face
@@ -417,7 +436,7 @@
logger.debug(
f"Not processing face due to existing sub label: {obj_data.get('sub_label')}."
)
-return
+return False
face: Optional[dict[str, any]] = None
@@ -426,7 +445,7 @@
person_box = obj_data.get("box")
if not person_box:
-return None
+return False
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
left, top, right, bottom = person_box
@@ -435,7 +454,7 @@
if not face_box:
logger.debug("Detected no faces for person object.")
-return
+return False
margin = int((face_box[2] - face_box[0]) * 0.25)
face_frame = person[
@@ -451,7 +470,7 @@
# don't run for object without attributes
if not obj_data.get("current_attributes"):
logger.debug("No attributes to parse.")
-return
+return False
attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
for attr in attributes:
@@ -463,14 +482,14 @@
# no faces detected in this frame
if not face:
-return
+return False
face_box = face.get("box")
# check that face is valid
if not face_box or area(face_box) < self.config.face_recognition.min_area:
logger.debug(f"Invalid face box {face}")
-return
+return False
face_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
margin = int((face_box[2] - face_box[0]) * 0.25)
@@ -487,7 +506,7 @@
res = self.face_classifier.classify_face(face_frame)
if not res:
-return
+return False
sub_label, score = res
@@ -512,13 +531,13 @@
logger.debug(
f"Recognized face distance {score} is less than threshold {self.config.face_recognition.threshold}"
)
-return
+return True
if id in self.detected_faces and face_score <= self.detected_faces[id]:
logger.debug(
f"Recognized face distance {score} and overall score {face_score} is less than previous overall face score ({self.detected_faces.get(id)})."
)
-return
+return True
resp = requests.post(
f"{FRIGATE_LOCALHOST}/api/events/{id}/sub_label",
@@ -532,6 +551,8 @@
if resp.status_code == 200:
self.detected_faces[id] = face_score
return True
def _detect_license_plate(self, input: np.ndarray) -> tuple[int, int, int, int]:
"""Return the dimensions of the input image as [x, y, width, height]."""
height, width = input.shape[:2]
@@ -539,19 +560,19 @@
def _process_license_plate(
self, obj_data: dict[str, any], frame: np.ndarray
-) -> None:
+) -> bool:
"""Look for license plates in image."""
id = obj_data["id"]
# don't run for non car objects
if obj_data.get("label") != "car":
logger.debug("Not processing license plate for non-car object.")
-return
+return False
# don't run for stationary car objects
if obj_data.get("stationary") == True:
logger.debug("Not processing license plate for a stationary car object.")
-return
+return False
# don't overwrite sub label for objects that have a sub label
# that is not a license plate
@@ -559,7 +580,7 @@
logger.debug(
f"Not processing license plate due to existing sub label: {obj_data.get('sub_label')}."
)
-return
+return False
license_plate: Optional[dict[str, any]] = None
@@ -568,7 +589,7 @@
car_box = obj_data.get("box")
if not car_box:
-return None
+return False
rgb = cv2.cvtColor(frame, cv2.COLOR_YUV2RGB_I420)
left, top, right, bottom = car_box
@@ -577,7 +598,7 @@
if not license_plate:
logger.debug("Detected no license plates for car object.")
-return
+return False
license_plate_frame = car[
license_plate[1] : license_plate[3], license_plate[0] : license_plate[2]
@@ -587,7 +608,7 @@
# don't run for object without attributes
if not obj_data.get("current_attributes"):
logger.debug("No attributes to parse.")
-return
+return False
attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
for attr in attributes:
@@ -601,7 +622,7 @@
# no license plates detected in this frame
if not license_plate:
-return
+return False
license_plate_box = license_plate.get("box")
@@ -611,7 +632,7 @@
or area(license_plate_box) < self.config.lpr.min_area
):
logger.debug(f"Invalid license plate box {license_plate}")
-return
+return False
license_plate_frame = cv2.cvtColor(frame, cv2.COLOR_YUV2BGR_I420)
license_plate_frame = license_plate_frame[
@@ -640,7 +661,7 @@
else:
# no plates found
logger.debug("No text detected")
-return
+return True
top_plate, top_char_confidences, top_area = (
license_plates[0],
@@ -686,14 +707,14 @@
f"length={len(top_plate)}, avg_conf={avg_confidence:.2f}, area={top_area} "
f"vs Previous: length={len(prev_plate)}, avg_conf={prev_avg_confidence:.2f}, area={prev_area}"
)
-return
+return True
# Check against minimum confidence threshold
if avg_confidence < self.lpr_config.threshold:
logger.debug(
f"Average confidence {avg_confidence} is less than threshold ({self.lpr_config.threshold})"
)
-return
+return True
# Determine subLabel based on known plates, use regex matching
# Default to the detected plate, use label name if there's a match
@@ -723,6 +744,8 @@
"area": top_area,
}
return True
def _create_thumbnail(self, yuv_frame, box, height=500) -> Optional[bytes]:
"""Return jpg thumbnail of a region of the frame."""
frame = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)
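The switch from -> None to -> bool is what makes the timing in _process_updates meaningful: early bail-outs (wrong label, existing sub label, missing box or attributes) return False and are excluded from the moving average, while paths that actually ran detection or classification return True even when the result is discarded as below threshold. A hedged sketch of the caller-side pattern, using a hypothetical timed_update helper:

import datetime

def timed_update(metric, fn, *args) -> None:
    # Time fn, but only fold the duration into the moving average
    # when it reports that work was actually attempted.
    start = datetime.datetime.now().timestamp()
    if fn(*args):
        duration = datetime.datetime.now().timestamp() - start
        metric.value = (metric.value * 9 + duration) / 10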

View File

@@ -0,0 +1,17 @@
"""Embeddings types."""
import multiprocessing as mp
from multiprocessing.sharedctypes import Synchronized
class EmbeddingsMetrics:
image_embeddings_fps: Synchronized
text_embeddings_sps: Synchronized
face_rec_fps: Synchronized
alpr_pps: Synchronized
def __init__(self):
self.image_embeddings_fps = mp.Value("d", 0.01)
self.text_embeddings_sps = mp.Value("d", 0.01)
self.face_rec_fps = mp.Value("d", 0.01)
self.alpr_pps = mp.Value("d", 0.01)
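A hedged example of consuming this class the way the stats layer does, assuming the module is importable as frigate.embeddings.types; the raw value is seconds per inference, scaled to milliseconds for display:

from frigate.embeddings.types import EmbeddingsMetrics

metrics = EmbeddingsMetrics()
metrics.image_embeddings_fps.value = 0.035  # normally written by the embeddings process

print(round(metrics.image_embeddings_fps.value * 1000, 2))  # 35.0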

View File

@@ -1,5 +1,5 @@
[mypy]
-python_version = 3.9
+python_version = 3.11
show_error_codes = true
follow_imports = normal
ignore_missing_imports = true

View File

@@ -26,7 +26,7 @@ class Service(ABC):
self.__dict__["name"] = name
self.__manager = manager or ServiceManager.current()
-self.__lock = asyncio.Lock(loop=self.__manager._event_loop)
+self.__lock = asyncio.Lock(loop=self.__manager._event_loop)  # type: ignore[call-arg]
self.__manager._register(self)
@property
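The ignore is needed because the loop argument was deprecated in Python 3.8 and removed from asyncio.Lock in 3.10, so the stubs for the new 3.11 mypy target no longer accept it. A hedged sketch of a loop-free alternative (a simplification, not the Frigate implementation): create the lock lazily from code already running on the manager's loop, since 3.10+ primitives bind to the running loop on first use.

import asyncio
from typing import Optional

class LazyLockHolder:
    """Hypothetical simplification: create the lock on the loop that uses it."""

    def __init__(self) -> None:
        self._lock: Optional[asyncio.Lock] = None

    async def lock(self) -> asyncio.Lock:
        if self._lock is None:
            # Binds to the currently running event loop (Python 3.10+).
            self._lock = asyncio.Lock()
        return self._lock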

View File

@@ -14,6 +14,7 @@ from requests.exceptions import RequestException
from frigate.camera import CameraMetrics
from frigate.config import FrigateConfig
from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
from frigate.embeddings.types import EmbeddingsMetrics
from frigate.object_detection import ObjectDetectProcess
from frigate.types import StatsTrackingTypes
from frigate.util.services import (
@@ -51,11 +52,13 @@ def get_latest_version(config: FrigateConfig) -> str:
def stats_init(
config: FrigateConfig,
camera_metrics: dict[str, CameraMetrics],
embeddings_metrics: EmbeddingsMetrics | None,
detectors: dict[str, ObjectDetectProcess],
processes: dict[str, int],
) -> StatsTrackingTypes:
stats_tracking: StatsTrackingTypes = {
"camera_metrics": camera_metrics,
"embeddings_metrics": embeddings_metrics,
"detectors": detectors,
"started": int(time.time()),
"latest_frigate_version": get_latest_version(config),
@@ -279,6 +282,27 @@ def stats_snapshot(
}
stats["detection_fps"] = round(total_detection_fps, 2)
if config.semantic_search.enabled:
embeddings_metrics = stats_tracking["embeddings_metrics"]
stats["embeddings"] = {
"image_embedding_speed": round(
embeddings_metrics.image_embeddings_fps.value * 1000, 2
),
"text_embedding_speed": round(
embeddings_metrics.text_embeddings_sps.value * 1000, 2
),
}
if config.face_recognition.enabled:
stats["embeddings"]["face_recognition_speed"] = round(
embeddings_metrics.face_rec_fps.value * 1000, 2
)
if config.lpr.enabled:
stats["embeddings"]["plate_recognition_speed"] = round(
embeddings_metrics.alpr_pps.value * 1000, 2
)
get_processing_stats(config, stats, hwaccel_errors)
stats["service"] = {
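For reference, the resulting embeddings block of a stats snapshot looks like the sketch below; values are milliseconds, the face and plate keys appear only when those features are enabled, and the numbers are made up:

stats = {
    "embeddings": {
        "image_embedding_speed": 52.37,
        "text_embedding_speed": 11.58,
        "face_recognition_speed": 8.42,    # only when face_recognition.enabled
        "plate_recognition_speed": 94.03,  # only when lpr.enabled
    },
    # ... detectors, cameras, service, etc. unchanged
}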

View File

@@ -2,11 +2,13 @@ from enum import Enum
from typing import TypedDict
from frigate.camera import CameraMetrics
from frigate.embeddings.types import EmbeddingsMetrics
from frigate.object_detection import ObjectDetectProcess
class StatsTrackingTypes(TypedDict):
camera_metrics: dict[str, CameraMetrics]
embeddings_metrics: EmbeddingsMetrics | None
detectors: dict[str, ObjectDetectProcess]
started: int
latest_frigate_version: str

View File

@@ -309,7 +309,7 @@ function FaceAttempt({
<div className="capitalize">{data.name}</div>
<div
className={cn(
-Number.parseFloat(data.score) > threshold
+Number.parseFloat(data.score) >= threshold
? "text-success"
: "text-danger",
)}

View File

@@ -1,12 +1,12 @@
import useSWR from "swr";
import { FrigateStats } from "@/types/stats";
-import { useEffect, useState } from "react";
+import { useEffect, useMemo, useState } from "react";
import TimeAgo from "@/components/dynamic/TimeAgo";
import { ToggleGroup, ToggleGroupItem } from "@/components/ui/toggle-group";
import { isDesktop, isMobile } from "react-device-detect";
import GeneralMetrics from "@/views/system/GeneralMetrics";
import StorageMetrics from "@/views/system/StorageMetrics";
-import { LuActivity, LuHardDrive } from "react-icons/lu";
+import { LuActivity, LuHardDrive, LuSearchCode } from "react-icons/lu";
import { FaVideo } from "react-icons/fa";
import Logo from "@/components/Logo";
import useOptimisticState from "@/hooks/use-optimistic-state";
@@ -14,11 +14,28 @@ import CameraMetrics from "@/views/system/CameraMetrics";
import { useHashState } from "@/hooks/use-overlay-state";
import { capitalizeFirstLetter } from "@/utils/stringUtil";
import { Toaster } from "@/components/ui/sonner";
import { FrigateConfig } from "@/types/frigateConfig";
import FeatureMetrics from "@/views/system/FeatureMetrics";
-const metrics = ["general", "storage", "cameras"] as const;
-type SystemMetric = (typeof metrics)[number];
+const allMetrics = ["general", "features", "storage", "cameras"] as const;
+type SystemMetric = (typeof allMetrics)[number];
function System() {
const { data: config } = useSWR<FrigateConfig>("config", {
revalidateOnFocus: false,
});
const metrics = useMemo(() => {
const metrics = [...allMetrics];
if (!config?.semantic_search.enabled) {
const index = metrics.indexOf("features");
metrics.splice(index, 1);
}
return metrics;
}, [config]);
// stats page
const [page, setPage] = useHashState<SystemMetric>();
@@ -67,6 +84,7 @@
aria-label={`Select ${item}`}
>
{item == "general" && <LuActivity className="size-4" />}
{item == "features" && <LuSearchCode className="size-4" />}
{item == "storage" && <LuHardDrive className="size-4" />}
{item == "cameras" && <FaVideo className="size-4" />}
{isDesktop && <div className="capitalize">{item}</div>}
@@ -96,6 +114,12 @@
setLastUpdated={setLastUpdated}
/>
)}
{page == "features" && (
<FeatureMetrics
lastUpdated={lastUpdated}
setLastUpdated={setLastUpdated}
/>
)}
{page == "storage" && <StorageMetrics setLastUpdated={setLastUpdated} />}
{page == "cameras" && (
<CameraMetrics

View File

@@ -18,6 +18,11 @@ export const InferenceThreshold = {
error: 100,
} as Threshold;
export const EmbeddingThreshold = {
warning: 500,
error: 1000,
} as Threshold;
export const DetectorTempThreshold = {
warning: 72,
error: 80,
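These thresholds are milliseconds of inference time and drive the graph coloring. A hedged Python sketch of the banding they imply (the actual check lives in the ThresholdBarGraph component):

def classify(speed_ms: float, warning: float = 500, error: float = 1000) -> str:
    if speed_ms >= error:
        return "error"
    if speed_ms >= warning:
        return "warning"
    return "ok"

print(classify(120))   # ok
print(classify(750))   # warning
print(classify(1500))  # error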

View File

@@ -2,6 +2,7 @@ export interface FrigateStats {
cameras: { [camera_name: string]: CameraStats };
cpu_usages: { [pid: string]: CpuStats };
detectors: { [detectorKey: string]: DetectorStats };
embeddings?: EmbeddingsStats;
gpu_usages?: { [gpuKey: string]: GpuStats };
processes: { [processKey: string]: ExtraProcessStats };
service: ServiceStats;
@@ -34,6 +35,13 @@ export type DetectorStats = {
pid: number;
};
export type EmbeddingsStats = {
image_embedding_speed: number;
face_recognition_speed: number;
plate_recognition_speed: number;
text_embedding_speed: number;
};
export type ExtraProcessStats = {
pid: number;
};

View File

@@ -0,0 +1,122 @@
import useSWR from "swr";
import { FrigateStats } from "@/types/stats";
import { useEffect, useMemo, useState } from "react";
import { useFrigateStats } from "@/api/ws";
import { EmbeddingThreshold } from "@/types/graph";
import { Skeleton } from "@/components/ui/skeleton";
import { ThresholdBarGraph } from "@/components/graph/SystemGraph";
import { cn } from "@/lib/utils";
type FeatureMetricsProps = {
lastUpdated: number;
setLastUpdated: (last: number) => void;
};
export default function FeatureMetrics({
lastUpdated,
setLastUpdated,
}: FeatureMetricsProps) {
// stats
const { data: initialStats } = useSWR<FrigateStats[]>(
["stats/history", { keys: "embeddings,service" }],
{
revalidateOnFocus: false,
},
);
const [statsHistory, setStatsHistory] = useState<FrigateStats[]>([]);
const updatedStats = useFrigateStats();
useEffect(() => {
if (initialStats == undefined || initialStats.length == 0) {
return;
}
if (statsHistory.length == 0) {
setStatsHistory(initialStats);
return;
}
if (!updatedStats) {
return;
}
if (updatedStats.service.last_updated > lastUpdated) {
setStatsHistory([...statsHistory.slice(1), updatedStats]);
setLastUpdated(Date.now() / 1000);
}
}, [initialStats, updatedStats, statsHistory, lastUpdated, setLastUpdated]);
// timestamps
const updateTimes = useMemo(
() => statsHistory.map((stats) => stats.service.last_updated),
[statsHistory],
);
// features stats
const embeddingInferenceTimeSeries = useMemo(() => {
if (!statsHistory) {
return [];
}
const series: {
[key: string]: { name: string; data: { x: number; y: number }[] };
} = {};
statsHistory.forEach((stats, statsIdx) => {
if (!stats?.embeddings) {
return;
}
Object.entries(stats.embeddings).forEach(([rawKey, stat]) => {
const key = rawKey.replaceAll("_", " ");
if (!(key in series)) {
series[key] = { name: key, data: [] };
}
series[key].data.push({ x: statsIdx + 1, y: stat });
});
});
return Object.values(series);
}, [statsHistory]);
return (
<>
<div className="scrollbar-container mt-4 flex size-full flex-col overflow-y-auto">
<div className="text-sm font-medium text-muted-foreground">
Features
</div>
<div
className={cn(
"mt-4 grid w-full grid-cols-1 gap-2 sm:grid-cols-3",
embeddingInferenceTimeSeries.length > 0 && "sm:grid-cols-4",
)}
>
{statsHistory.length != 0 ? (
<>
{embeddingInferenceTimeSeries.map((series) => (
<div
key={series.name}
className="rounded-lg bg-background_alt p-2.5 md:rounded-2xl"
>
<div className="mb-5 capitalize">{series.name}</div>
<ThresholdBarGraph
graphId={`${series.name}-inference`}
name={series.name}
unit="ms"
threshold={EmbeddingThreshold}
updateTimes={updateTimes}
data={[series]}
/>
</div>
))}
</>
) : (
<Skeleton className="aspect-video w-full rounded-lg md:rounded-2xl" />
)}
</div>
</div>
</>
);
}