Fix various typing issues (#18187)

* Fix the `Any` typing hint treewide

There has been confusion between the `Any` type[1] and the `any` builtin
function[2] in type hints.

[1] https://docs.python.org/3/library/typing.html#typing.Any
[2] https://docs.python.org/3/library/functions.html#any

* Fix typing for various frame_shape members

A frame shape is defined by height and width (at minimum), so a single
int cannot express it.

* Wrap gpu stats functions in Optional[]

These can return `None`, so they need to be `Type | None`, which is what
`Optional` expresses very nicely.

* Fix return type in get_latest_segment_datetime

Returns a datetime object, not an integer.

* Make the return type of FrameManager.write optional

This is necessary since the SharedMemoryFrameManager.write function can
return None.

* Fix total_seconds() return type in get_tz_modifiers

The function returns a float, not an int.

https://docs.python.org/3/library/datetime.html#datetime.timedelta.total_seconds

* Account for floating point results in to_relative_box

Because the function uses division, the returned values may be either
int or float.

* Resolve ruff deprecation warning

The config has been split into formatter and linter, and the global
options are deprecated.
This commit is contained in:
Martin Weinelt
2025-05-13 16:27:20 +02:00
committed by GitHub
parent 2c9bfaa49c
commit 4d4d54d030
50 changed files with 191 additions and 164 deletions

View File

@@ -2,6 +2,7 @@
import logging
from abc import ABC, abstractmethod
from typing import Any
import numpy as np
@@ -24,7 +25,7 @@ class RealTimeProcessorApi(ABC):
pass
@abstractmethod
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray) -> None:
def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray) -> None:
"""Processes the frame with object data.
Args:
obj_data (dict): containing data about focused object in frame.
@@ -37,8 +38,8 @@ class RealTimeProcessorApi(ABC):
@abstractmethod
def handle_request(
self, topic: str, request_data: dict[str, any]
) -> dict[str, any] | None:
self, topic: str, request_data: dict[str, Any]
) -> dict[str, Any] | None:
"""Handle metadata requests.
Args:
topic (str): topic that dictates what work is requested.

View File

@@ -2,6 +2,7 @@
import logging
import os
from typing import Any
import cv2
import numpy as np
@@ -35,8 +36,8 @@ class BirdRealTimeProcessor(RealTimeProcessorApi):
super().__init__(config, metrics)
self.interpreter: Interpreter = None
self.sub_label_publisher = sub_label_publisher
self.tensor_input_details: dict[str, any] = None
self.tensor_output_details: dict[str, any] = None
self.tensor_input_details: dict[str, Any] = None
self.tensor_output_details: dict[str, Any] = None
self.detected_birds: dict[str, float] = {}
self.labelmap: dict[int, str] = {}

View File

@@ -6,7 +6,7 @@ import json
import logging
import os
import shutil
from typing import Optional
from typing import Any, Optional
import cv2
import numpy as np
@@ -157,7 +157,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.faces_per_second.update()
self.inference_speed.update(duration)
def process_frame(self, obj_data: dict[str, any], frame: np.ndarray):
def process_frame(self, obj_data: dict[str, Any], frame: np.ndarray):
"""Look for faces in image."""
self.metrics.face_rec_fps.value = self.faces_per_second.eps()
camera = obj_data["camera"]
@@ -198,7 +198,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
logger.debug("Not processing due to hitting max rec attempts.")
return
face: Optional[dict[str, any]] = None
face: Optional[dict[str, Any]] = None
if self.requires_face_detection:
logger.debug("Running manual face detection.")
@@ -238,7 +238,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
logger.debug("No attributes to parse.")
return
attributes: list[dict[str, any]] = obj_data.get("current_attributes", [])
attributes: list[dict[str, Any]] = obj_data.get("current_attributes", [])
for attr in attributes:
if attr.get("label") != "face":
continue
@@ -323,7 +323,7 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
self.__update_metrics(datetime.datetime.now().timestamp() - start)
def handle_request(self, topic, request_data) -> dict[str, any] | None:
def handle_request(self, topic, request_data) -> dict[str, Any] | None:
if topic == EmbeddingsRequestEnum.clear_face_classifier.value:
self.recognizer.clear()
elif topic == EmbeddingsRequestEnum.recognize_face.value:

View File

@@ -2,6 +2,7 @@
import json
import logging
from typing import Any
import numpy as np
@@ -30,7 +31,7 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
sub_label_publisher: EventMetadataPublisher,
metrics: DataProcessorMetrics,
model_runner: LicensePlateModelRunner,
detected_license_plates: dict[str, dict[str, any]],
detected_license_plates: dict[str, dict[str, Any]],
):
self.requestor = requestor
self.detected_license_plates = detected_license_plates
@@ -43,14 +44,14 @@ class LicensePlateRealTimeProcessor(LicensePlateProcessingMixin, RealTimeProcess
def process_frame(
self,
obj_data: dict[str, any],
obj_data: dict[str, Any],
frame: np.ndarray,
dedicated_lpr: bool | None = False,
):
"""Look for license plates in image."""
self.lpr_process(obj_data, frame, dedicated_lpr)
def handle_request(self, topic, request_data) -> dict[str, any] | None:
def handle_request(self, topic, request_data) -> dict[str, Any] | None:
return
def expire_object(self, object_id: str, camera: str):