Implement Wizard for Creating Classification Models (#20622)

* Implement extraction of images for classification state models

* Add object classification dataset preparation

* Add first step wizard

* Update i18n

* Add state classification image selection step

* Improve box handling

* Add object selector

* Improve object cropping implementation

* Fix state classification selection

* Finalize training and image selection step

* Cleanup

* Design optimizations

* Cleanup mobile styling

* Update no models screen

* Cleanups and fixes

* Fix bugs

* Improve model training and creation process

* Cleanup

* Dynamically add metrics for new model

* Add loading when hitting continue

* Improve image selection mechanism

* Remove unused translation keys

* Adjust wording

* Add retry button for image generation

* Make no models view more specific

* Adjust plus icon

* Adjust form label

* Start with correct type selected

* Cleanup sizing and more font colors

* Small tweaks

* Add tips and more info

* Cleanup dialog sizing

* Add cursor rule for frontend

* Cleanup

* Remove underline

* Lazy loading
This commit is contained in:
Nicolas Mowen
2025-10-23 13:27:28 -06:00
committed by GitHub
parent 4df7793587
commit f5a57edcc9
18 changed files with 2450 additions and 79 deletions

View File

@@ -53,9 +53,17 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
self.tensor_output_details: dict[str, Any] | None = None
self.labelmap: dict[int, str] = {}
self.classifications_per_second = EventsPerSecond()
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
if (
self.metrics
and self.model_config.name in self.metrics.classification_speeds
):
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
else:
self.inference_speed = None
self.last_run = datetime.datetime.now().timestamp()
self.__build_detector()
@@ -83,12 +91,14 @@ class CustomStateClassificationProcessor(RealTimeProcessorApi):
def __update_metrics(self, duration: float) -> None:
self.classifications_per_second.update()
self.inference_speed.update(duration)
if self.inference_speed:
self.inference_speed.update(duration)
def process_frame(self, frame_data: dict[str, Any], frame: np.ndarray):
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
if self.metrics and self.model_config.name in self.metrics.classification_cps:
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
camera = frame_data.get("camera")
if camera not in self.model_config.state_config.cameras:
@@ -223,9 +233,17 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
self.detected_objects: dict[str, float] = {}
self.labelmap: dict[int, str] = {}
self.classifications_per_second = EventsPerSecond()
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
if (
self.metrics
and self.model_config.name in self.metrics.classification_speeds
):
self.inference_speed = InferenceSpeed(
self.metrics.classification_speeds[self.model_config.name]
)
else:
self.inference_speed = None
self.__build_detector()
@redirect_output_to_logger(logger, logging.DEBUG)
@@ -251,12 +269,14 @@ class CustomObjectClassificationProcessor(RealTimeProcessorApi):
def __update_metrics(self, duration: float) -> None:
self.classifications_per_second.update()
self.inference_speed.update(duration)
if self.inference_speed:
self.inference_speed.update(duration)
def process_frame(self, obj_data, frame):
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
if self.metrics and self.model_config.name in self.metrics.classification_cps:
self.metrics.classification_cps[
self.model_config.name
].value = self.classifications_per_second.eps()
if obj_data["false_positive"]:
return