Refactor event cleanup to consider review severity (#15415)

* Keep track of objects max review severity

* Refactor cleanup to split snapshots and clips

* Cleanup events based on review severity

* Cleanup review imports

* Don't catch detections
This commit is contained in:
Nicolas Mowen 2024-12-09 09:25:45 -06:00 committed by GitHub
parent d0cc8cb64b
commit 0b9c4c18dd
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
11 changed files with 187 additions and 84 deletions

View File

@ -3,7 +3,7 @@ from typing import Union
from pydantic import BaseModel from pydantic import BaseModel
from pydantic.json_schema import SkipJsonSchema from pydantic.json_schema import SkipJsonSchema
from frigate.review.maintainer import SeverityEnum from frigate.review.types import SeverityEnum
class ReviewQueryParams(BaseModel): class ReviewQueryParams(BaseModel):

View File

@ -3,7 +3,7 @@ from typing import Dict
from pydantic import BaseModel, Json from pydantic import BaseModel, Json
from frigate.review.maintainer import SeverityEnum from frigate.review.types import SeverityEnum
class ReviewSegmentResponse(BaseModel): class ReviewSegmentResponse(BaseModel):

View File

@ -26,7 +26,7 @@ from frigate.api.defs.response.review_response import (
) )
from frigate.api.defs.tags import Tags from frigate.api.defs.tags import Tags
from frigate.models import Recordings, ReviewSegment from frigate.models import Recordings, ReviewSegment
from frigate.review.maintainer import SeverityEnum from frigate.review.types import SeverityEnum
from frigate.util.builtin import get_tz_modifiers from frigate.util.builtin import get_tz_modifiers
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View File

@ -4,7 +4,6 @@ import datetime
import logging import logging
import os import os
import threading import threading
from enum import Enum
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path from pathlib import Path
@ -16,11 +15,6 @@ from frigate.models import Event, Timeline
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class EventCleanupType(str, Enum):
clips = "clips"
snapshots = "snapshots"
CHUNK_SIZE = 50 CHUNK_SIZE = 50
@ -67,19 +61,11 @@ class EventCleanup(threading.Thread):
return self.camera_labels[camera]["labels"] return self.camera_labels[camera]["labels"]
def expire(self, media_type: EventCleanupType) -> list[str]: def expire_snapshots(self) -> list[str]:
## Expire events from unlisted cameras based on the global config ## Expire events from unlisted cameras based on the global config
if media_type == EventCleanupType.clips: retain_config = self.config.snapshots.retain
expire_days = max( file_extension = "jpg"
self.config.record.alerts.retain.days, update_params = {"has_snapshot": False}
self.config.record.detections.retain.days,
)
file_extension = None # mp4 clips are no longer stored in /clips
update_params = {"has_clip": False}
else:
retain_config = self.config.snapshots.retain
file_extension = "jpg"
update_params = {"has_snapshot": False}
distinct_labels = self.get_removed_camera_labels() distinct_labels = self.get_removed_camera_labels()
@ -87,10 +73,7 @@ class EventCleanup(threading.Thread):
# loop over object types in db # loop over object types in db
for event in distinct_labels: for event in distinct_labels:
# get expiration time for this label # get expiration time for this label
if media_type == EventCleanupType.snapshots: expire_days = retain_config.objects.get(event.label, retain_config.default)
expire_days = retain_config.objects.get(
event.label, retain_config.default
)
expire_after = ( expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days) datetime.datetime.now() - datetime.timedelta(days=expire_days)
@ -162,13 +145,7 @@ class EventCleanup(threading.Thread):
## Expire events from cameras based on the camera config ## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items(): for name, camera in self.config.cameras.items():
if media_type == EventCleanupType.clips: retain_config = camera.snapshots.retain
expire_days = max(
camera.record.alerts.retain.days,
camera.record.detections.retain.days,
)
else:
retain_config = camera.snapshots.retain
# get distinct objects in database for this camera # get distinct objects in database for this camera
distinct_labels = self.get_camera_labels(name) distinct_labels = self.get_camera_labels(name)
@ -176,10 +153,9 @@ class EventCleanup(threading.Thread):
# loop over object types in db # loop over object types in db
for event in distinct_labels: for event in distinct_labels:
# get expiration time for this label # get expiration time for this label
if media_type == EventCleanupType.snapshots: expire_days = retain_config.objects.get(
expire_days = retain_config.objects.get( event.label, retain_config.default
event.label, retain_config.default )
)
expire_after = ( expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days) datetime.datetime.now() - datetime.timedelta(days=expire_days)
@ -206,19 +182,143 @@ class EventCleanup(threading.Thread):
for event in expired_events: for event in expired_events:
events_to_update.append(event.id) events_to_update.append(event.id)
if media_type == EventCleanupType.snapshots: try:
try: media_name = f"{event.camera}-{event.id}"
media_name = f"{event.camera}-{event.id}" media_path = Path(
media_path = Path( f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}" )
) media_path.unlink(missing_ok=True)
media_path.unlink(missing_ok=True) media_path = Path(
media_path = Path( f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
f"{os.path.join(CLIPS_DIR, media_name)}-clean.png" )
) media_path.unlink(missing_ok=True)
media_path.unlink(missing_ok=True) except OSError as e:
except OSError as e: logger.warning(f"Unable to delete event images: {e}")
logger.warning(f"Unable to delete event images: {e}")
# update the clips attribute for the db entry
for i in range(0, len(events_to_update), CHUNK_SIZE):
batch = events_to_update[i : i + CHUNK_SIZE]
logger.debug(f"Updating {update_params} for {len(batch)} events")
Event.update(update_params).where(Event.id << batch).execute()
return events_to_update
def expire_clips(self) -> list[str]:
## Expire events from unlisted cameras based on the global config
expire_days = max(
self.config.record.alerts.retain.days,
self.config.record.detections.retain.days,
)
file_extension = None # mp4 clips are no longer stored in /clips
update_params = {"has_clip": False}
# get expiration time for this label
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# grab all events after specific time
expired_events: list[Event] = (
Event.select(
Event.id,
Event.camera,
)
.where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.retain_indefinitely == False,
)
.namedtuples()
.iterator()
)
logger.debug(f"{len(list(expired_events))} events can be expired")
# delete the media from disk
for expired in expired_events:
media_name = f"{expired.camera}-{expired.id}"
media_path = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
try:
media_path.unlink(missing_ok=True)
if file_extension == "jpg":
media_path = Path(
f"{os.path.join(CLIPS_DIR, media_name)}-clean.png"
)
media_path.unlink(missing_ok=True)
except OSError as e:
logger.warning(f"Unable to delete event images: {e}")
# update the clips attribute for the db entry
query = Event.select(Event.id).where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.retain_indefinitely == False,
)
events_to_update = []
for batch in query.iterator():
events_to_update.extend([event.id for event in batch])
if len(events_to_update) >= CHUNK_SIZE:
logger.debug(
f"Updating {update_params} for {len(events_to_update)} events"
)
Event.update(update_params).where(
Event.id << events_to_update
).execute()
events_to_update = []
# Update any remaining events
if events_to_update:
logger.debug(
f"Updating clips/snapshots attribute for {len(events_to_update)} events"
)
Event.update(update_params).where(Event.id << events_to_update).execute()
events_to_update = []
now = datetime.datetime.now()
## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items():
expire_days = max(
camera.record.alerts.retain.days,
camera.record.detections.retain.days,
)
alert_expire_date = (
now - datetime.timedelta(days=camera.record.alerts.retain.days)
).timestamp()
detection_expire_date = (
now - datetime.timedelta(days=camera.record.detections.retain.days)
).timestamp()
# grab all events after specific time
expired_events = (
Event.select(
Event.id,
Event.camera,
)
.where(
Event.camera == name,
Event.retain_indefinitely == False,
(
(
(Event.data["max_severity"] != "detection")
| (Event.data["max_severity"].is_null())
)
& (Event.end_time < alert_expire_date)
)
| (
(Event.data["max_severity"] == "detection")
& (Event.end_time < detection_expire_date)
),
)
.namedtuples()
.iterator()
)
# delete the grabbed clips from disk
# only snapshots are stored in /clips
# so no need to delete mp4 files
for event in expired_events:
events_to_update.append(event.id)
# update the clips attribute for the db entry # update the clips attribute for the db entry
for i in range(0, len(events_to_update), CHUNK_SIZE): for i in range(0, len(events_to_update), CHUNK_SIZE):
@ -230,8 +330,9 @@ class EventCleanup(threading.Thread):
def run(self) -> None: def run(self) -> None:
# only expire events every 5 minutes # only expire events every 5 minutes
while not self.stop_event.wait(300): while not self.stop_event.wait(1):
events_with_expired_clips = self.expire(EventCleanupType.clips) events_with_expired_clips = self.expire_clips()
return
# delete timeline entries for events that have expired recordings # delete timeline entries for events that have expired recordings
# delete up to 100,000 at a time # delete up to 100,000 at a time
@ -242,7 +343,7 @@ class EventCleanup(threading.Thread):
Timeline.source_id << deleted_events_list[i : i + max_deletes] Timeline.source_id << deleted_events_list[i : i + max_deletes]
).execute() ).execute()
self.expire(EventCleanupType.snapshots) self.expire_snapshots()
# drop events from db where has_clip and has_snapshot are false # drop events from db where has_clip and has_snapshot are false
events = ( events = (

View File

@ -210,6 +210,7 @@ class EventProcessor(threading.Thread):
"top_score": event_data["top_score"], "top_score": event_data["top_score"],
"attributes": attributes, "attributes": attributes,
"type": "object", "type": "object",
"max_severity": event_data.get("max_severity"),
}, },
} }

View File

@ -702,30 +702,7 @@ class TrackedObjectProcessor(threading.Thread):
return False return False
# If the object is not considered an alert or detection # If the object is not considered an alert or detection
review_config = self.config.cameras[camera].review if obj.max_severity is None:
if not (
(
obj.obj_data["label"] in review_config.alerts.labels
and (
not review_config.alerts.required_zones
or set(obj.entered_zones) & set(review_config.alerts.required_zones)
)
)
or (
(
not review_config.detections.labels
or obj.obj_data["label"] in review_config.detections.labels
)
and (
not review_config.detections.required_zones
or set(obj.entered_zones)
& set(review_config.detections.required_zones)
)
)
):
logger.debug(
f"Not creating clip for {obj.obj_data['id']} because it did not qualify as an alert or detection"
)
return False return False
return True return True

View File

@ -7,7 +7,6 @@ import random
import string import string
import sys import sys
import threading import threading
from enum import Enum
from multiprocessing.synchronize import Event as MpEvent from multiprocessing.synchronize import Event as MpEvent
from pathlib import Path from pathlib import Path
from typing import Optional from typing import Optional
@ -27,6 +26,7 @@ from frigate.const import (
from frigate.events.external import ManualEventState from frigate.events.external import ManualEventState
from frigate.models import ReviewSegment from frigate.models import ReviewSegment
from frigate.object_processing import TrackedObject from frigate.object_processing import TrackedObject
from frigate.review.types import SeverityEnum
from frigate.util.image import SharedMemoryFrameManager, calculate_16_9_crop from frigate.util.image import SharedMemoryFrameManager, calculate_16_9_crop
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -39,11 +39,6 @@ THRESHOLD_ALERT_ACTIVITY = 120
THRESHOLD_DETECTION_ACTIVITY = 30 THRESHOLD_DETECTION_ACTIVITY = 30
class SeverityEnum(str, Enum):
alert = "alert"
detection = "detection"
class PendingReviewSegment: class PendingReviewSegment:
def __init__( def __init__(
self, self,

6
frigate/review/types.py Normal file
View File

@ -0,0 +1,6 @@
from enum import Enum
class SeverityEnum(str, Enum):
    """Review-item severity classification.

    Str-mixin enum so members compare equal to their plain string values
    (e.g. in database queries and JSON payloads).
    """

    alert = "alert"
    detection = "detection"

View File

@ -10,7 +10,7 @@ from playhouse.sqliteq import SqliteQueueDatabase
from frigate.api.fastapi_app import create_fastapi_app from frigate.api.fastapi_app import create_fastapi_app
from frigate.config import FrigateConfig from frigate.config import FrigateConfig
from frigate.models import Event, Recordings, ReviewSegment from frigate.models import Event, Recordings, ReviewSegment
from frigate.review.maintainer import SeverityEnum from frigate.review.types import SeverityEnum
from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS from frigate.test.const import TEST_DB, TEST_DB_CLEANUPS

View File

@ -3,7 +3,7 @@ from datetime import datetime, timedelta
from fastapi.testclient import TestClient from fastapi.testclient import TestClient
from frigate.models import Event, Recordings, ReviewSegment from frigate.models import Event, Recordings, ReviewSegment
from frigate.review.maintainer import SeverityEnum from frigate.review.types import SeverityEnum
from frigate.test.http_api.base_http_test import BaseTestHttp from frigate.test.http_api.base_http_test import BaseTestHttp

View File

@ -13,6 +13,7 @@ from frigate.config import (
CameraConfig, CameraConfig,
ModelConfig, ModelConfig,
) )
from frigate.review.types import SeverityEnum
from frigate.util.image import ( from frigate.util.image import (
area, area,
calculate_region, calculate_region,
@ -59,6 +60,27 @@ class TrackedObject:
self.pending_loitering = False self.pending_loitering = False
self.previous = self.to_dict() self.previous = self.to_dict()
@property
def max_severity(self) -> Optional[str]:
    """Return the highest review severity this tracked object qualifies for.

    Evaluates the camera's review configuration:
    - ``SeverityEnum.alert`` if the object's label is in the alert labels
      and, when alert required_zones are configured, the object entered
      at least one of them.
    - ``SeverityEnum.detection`` if the label matches the detection labels
      (an empty label list means any label qualifies) and, when detection
      required_zones are configured, the object entered one of them.
    - ``None`` when neither rule matches.
    """
    review_config = self.camera_config.review

    # Alerts are checked first and take precedence over detections.
    if self.obj_data["label"] in review_config.alerts.labels and (
        not review_config.alerts.required_zones
        or set(self.entered_zones) & set(review_config.alerts.required_zones)
    ):
        return SeverityEnum.alert

    # For detections, an empty labels list means "any label" qualifies.
    if (
        not review_config.detections.labels
        or self.obj_data["label"] in review_config.detections.labels
    ) and (
        not review_config.detections.required_zones
        or set(self.entered_zones) & set(review_config.detections.required_zones)
    ):
        return SeverityEnum.detection

    return None
def _is_false_positive(self): def _is_false_positive(self):
# once a true positive, always a true positive # once a true positive, always a true positive
if not self.false_positive: if not self.false_positive:
@ -232,6 +254,7 @@ class TrackedObject:
"attributes": self.attributes, "attributes": self.attributes,
"current_attributes": self.obj_data["attributes"], "current_attributes": self.obj_data["attributes"],
"pending_loitering": self.pending_loitering, "pending_loitering": self.pending_loitering,
"max_severity": self.max_severity,
} }
if include_thumbnail: if include_thumbnail: