Fix embeddings (#15072)

* Fix embeddings reading frames

* Fix event update reading

* Formatting

* Pin aiohttp to fix build failure

* Pin starlette
Nicolas Mowen 2024-11-19 11:20:04 -07:00 committed by GitHub
parent a67ff3843a
commit 66277fbb6c
5 changed files with 24 additions and 18 deletions

View File

@@ -1,5 +1,7 @@
 click == 8.1.*
 # FastAPI
+aiohttp == 3.11.2
+starlette == 0.41.2
 starlette-context == 0.3.6
 fastapi == 0.115.*
 uvicorn == 0.30.*

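The two added pins hold the FastAPI stack to known-good releases (aiohttp 3.11.2 and starlette 0.41.2) to address the build failure. As a purely illustrative check, not part of this commit, the installed versions can be compared against the pins with only the standard library:

from importlib.metadata import version

# Illustrative only: confirm the environment matches the pins above.
for pkg, pinned in {"aiohttp": "3.11.2", "starlette": "0.41.2"}.items():
    installed = version(pkg)
    if installed != pinned:
        raise RuntimeError(f"{pkg} is {installed}, expected {pinned}")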
View File

@@ -14,7 +14,7 @@ class EventUpdatePublisher(Publisher):
         super().__init__("update")
 
     def publish(
-        self, payload: tuple[EventTypeEnum, EventStateEnum, str, dict[str, any]]
+        self, payload: tuple[EventTypeEnum, EventStateEnum, str, str, dict[str, any]]
     ) -> None:
         super().publish(payload)
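
The payload tuple gains a second str slot so the frame name travels with each event update. A minimal sketch of a call site under that assumption; EventUpdatePublisher, EventTypeEnum, and EventStateEnum are the names from the hunk above (imports omitted), and the camera, frame name, and data literals are placeholders:

# Illustrative only: the fourth element carries the frame name, which
# subscribers can hand directly to the frame manager.
EventUpdatePublisher().publish(
    (
        EventTypeEnum.tracked_object,
        EventStateEnum.update,
        "front_door",                      # camera (placeholder)
        "front_door1731700000.000000",     # frame name (placeholder)
        {"id": "1731700000.000000-abcdef", "frame_time": 1731700000.0},
    )
)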

View File

@@ -114,7 +114,7 @@ class EmbeddingMaintainer(threading.Thread):
         if update is None:
             return
 
-        source_type, _, camera, data = update
+        source_type, _, camera, frame_name, data = update
 
         if not camera or source_type != EventTypeEnum.tracked_object:
             return
@@ -134,8 +134,9 @@ class EmbeddingMaintainer(threading.Thread):
             # Create our own thumbnail based on the bounding box and the frame time
             try:
-                frame_id = f"{camera}{data['frame_time']}"
-                yuv_frame = self.frame_manager.get(frame_id, camera_config.frame_shape_yuv)
+                yuv_frame = self.frame_manager.get(
+                    frame_name, camera_config.frame_shape_yuv
+                )
 
                 if yuv_frame is not None:
                     data["thumbnail"] = self._create_thumbnail(yuv_frame, data["box"])
@@ -147,7 +148,7 @@ class EmbeddingMaintainer(threading.Thread):
                     self.tracked_events[data["id"]].append(data)
 
-                self.frame_manager.close(frame_id)
+                self.frame_manager.close(frame_name)
             except FileNotFoundError:
                 pass
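
With the frame name unpacked from the update, the maintainer reads and releases the frame by that name instead of rebuilding an id from camera and frame_time. A condensed, illustrative version of the same flow; handle_update and create_thumbnail are stand-in names, EventTypeEnum is assumed imported, and only frame_manager.get/close and the tuple shape come from the hunks above:

# Illustrative consumer mirroring the receive path shown above.
def handle_update(frame_manager, camera_config, create_thumbnail, update):
    if update is None:
        return

    source_type, _, camera, frame_name, data = update
    if not camera or source_type != EventTypeEnum.tracked_object:
        return

    try:
        # Look the frame up by its published name rather than a rebuilt id.
        yuv_frame = frame_manager.get(frame_name, camera_config.frame_shape_yuv)
        if yuv_frame is not None:
            data["thumbnail"] = create_thumbnail(yuv_frame, data["box"])
        frame_manager.close(frame_name)
    except FileNotFoundError:
        pass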

View File

@@ -75,7 +75,7 @@ class EventProcessor(threading.Thread):
             if update == None:
                 continue
 
-            source_type, event_type, camera, event_data = update
+            source_type, event_type, camera, _, event_data = update
 
             logger.debug(
                 f"Event received: {source_type} {event_type} {camera} {event_data['id']}"
View File

@@ -262,7 +262,7 @@ class CameraState:
             # call event handlers
             for c in self.callbacks["start"]:
-                c(self.name, new_obj, frame_time)
+                c(self.name, new_obj, frame_name)
 
         for id in updated_ids:
             updated_obj = tracked_objects[id]
@@ -272,7 +272,7 @@ class CameraState:
             if autotracker_update or significant_update:
                 for c in self.callbacks["autotrack"]:
-                    c(self.name, updated_obj, frame_time)
+                    c(self.name, updated_obj, frame_name)
 
             if thumb_update and current_frame is not None:
                 # ensure this frame is stored in the cache
@@ -293,7 +293,7 @@ class CameraState:
             ) or significant_update:
                 # call event handlers
                 for c in self.callbacks["update"]:
-                    c(self.name, updated_obj, frame_time)
+                    c(self.name, updated_obj, frame_name)
                 updated_obj.last_published = frame_time
 
         for id in removed_ids:
@@ -302,7 +302,7 @@ class CameraState:
             if "end_time" not in removed_obj.obj_data:
                 removed_obj.obj_data["end_time"] = frame_time
                 for c in self.callbacks["end"]:
-                    c(self.name, removed_obj, frame_time)
+                    c(self.name, removed_obj, frame_name)
 
         # TODO: can i switch to looking this up and only changing when an event ends?
         # maintain best objects
@@ -368,11 +368,11 @@ class CameraState:
                 ):
                     self.best_objects[object_type] = obj
                     for c in self.callbacks["snapshot"]:
-                        c(self.name, self.best_objects[object_type], frame_time)
+                        c(self.name, self.best_objects[object_type], frame_name)
             else:
                 self.best_objects[object_type] = obj
                 for c in self.callbacks["snapshot"]:
-                    c(self.name, self.best_objects[object_type], frame_time)
+                    c(self.name, self.best_objects[object_type], frame_name)
 
         for c in self.callbacks["camera_activity"]:
             c(self.name, camera_activity)
@@ -447,7 +447,7 @@ class CameraState:
                     c(self.name, obj_name, 0)
                 self.active_object_counts[obj_name] = 0
                 for c in self.callbacks["snapshot"]:
-                    c(self.name, self.best_objects[obj_name], frame_time)
+                    c(self.name, self.best_objects[obj_name], frame_name)
 
         # cleanup thumbnail frame cache
         current_thumb_frames = {
@@ -518,17 +518,18 @@ class TrackedObjectProcessor(threading.Thread):
         self.zone_data = defaultdict(lambda: defaultdict(dict))
         self.active_zone_data = defaultdict(lambda: defaultdict(dict))
 
-        def start(camera, obj: TrackedObject, current_frame_time):
+        def start(camera: str, obj: TrackedObject, frame_name: str):
             self.event_sender.publish(
                 (
                     EventTypeEnum.tracked_object,
                     EventStateEnum.start,
                     camera,
+                    frame_name,
                     obj.to_dict(),
                 )
             )
 
-        def update(camera, obj: TrackedObject, current_frame_time):
+        def update(camera: str, obj: TrackedObject, frame_name: str):
             obj.has_snapshot = self.should_save_snapshot(camera, obj)
             obj.has_clip = self.should_retain_recording(camera, obj)
             after = obj.to_dict()
@@ -544,14 +545,15 @@ class TrackedObjectProcessor(threading.Thread):
                     EventTypeEnum.tracked_object,
                     EventStateEnum.update,
                     camera,
+                    frame_name,
                     obj.to_dict(include_thumbnail=True),
                 )
             )
 
-        def autotrack(camera, obj: TrackedObject, current_frame_time):
+        def autotrack(camera: str, obj: TrackedObject, frame_name: str):
             self.ptz_autotracker_thread.ptz_autotracker.autotrack_object(camera, obj)
 
-        def end(camera, obj: TrackedObject, current_frame_time):
+        def end(camera: str, obj: TrackedObject, frame_name: str):
             # populate has_snapshot
             obj.has_snapshot = self.should_save_snapshot(camera, obj)
             obj.has_clip = self.should_retain_recording(camera, obj)
@@ -606,11 +608,12 @@ class TrackedObjectProcessor(threading.Thread):
                     EventTypeEnum.tracked_object,
                     EventStateEnum.end,
                     camera,
+                    frame_name,
                     obj.to_dict(include_thumbnail=True),
                 )
             )
 
-        def snapshot(camera, obj: TrackedObject, current_frame_time):
+        def snapshot(camera, obj: TrackedObject, frame_name: str):
             mqtt_config: MqttConfig = self.config.cameras[camera].mqtt
             if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
                 jpg_bytes = obj.get_jpg_bytes(
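
Taken together, these hunks change the CameraState callback contract from (camera, obj, frame_time) to (camera, obj, frame_name), while timestamps such as last_published continue to use frame_time. A minimal sketch of the new contract; on_end, the stub object, and the literal values are stand-ins rather than repo code:

# Illustrative only: the third callback argument is now the frame name string.
class _Obj:
    """Minimal stand-in for TrackedObject."""

    obj_data = {"id": "1731700000.000000-abcdef", "frame_time": 1731700000.0}

def on_end(camera: str, obj: _Obj, frame_name: str) -> None:
    # Downstream consumers can pass frame_name straight to the frame manager.
    print(f"{camera}: {obj.obj_data['id']} ended on frame {frame_name}")

for c in [on_end]:  # analogous to self.callbacks["end"]
    c("front_door", _Obj(), "front_door1731700000.000000")  # placeholders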