Add retain.max_size to clean up storage once the specified limit has been reached

This commit is contained in:
Timothy 2025-08-01 15:07:30 +10:00
parent c3410cd13e
commit 189adf9d30
6 changed files with 151 additions and 0 deletions

View File

@ -70,6 +70,8 @@ record:
As of Frigate 0.12 if there is less than an hour left of storage, the oldest 2 hours of recordings will be deleted.
Additionally, you can configure a maximum storage size limit for recordings using the `max_size` option. When the total size of recordings exceeds this limit, Frigate will automatically clean up the oldest recordings to stay within the specified size.
## Configuring Recording Retention
Frigate supports both continuous and tracked object based recordings with separate retention modes and retention periods.
@ -93,6 +95,20 @@ record:
Continuous recording supports different retention modes [which are described below](#what-do-the-different-retain-modes-mean)
### Storage Size Limit
You can configure a maximum storage size limit for recordings. When the total size of recordings exceeds this limit, Frigate will automatically clean up the oldest recordings first.
```yaml
record:
enabled: True
retain:
days: 7
max_size: 5000 # <- maximum storage size in MB (5GB in this example)
```
The `max_size` parameter specifies the maximum total storage size in megabytes (MB) that recordings should consume. This works in addition to the time-based retention settings - recordings will be deleted when either the time limit OR the size limit is exceeded.
### Object Recording
The number of days to record review items can be specified for review items classified as alerts as well as tracked objects.

View File

@ -452,6 +452,9 @@ record:
# active_objects - save all recording segments with active/moving objects
# NOTE: this mode only applies when the days setting above is greater than 0
mode: all
# Optional: Maximum storage size in MB for recordings (default: no limit)
# When total recording storage exceeds this limit, oldest recordings will be deleted
max_size: 5000
# Optional: Recording Export Settings
export:
# Optional: Timelapse Output Args (default: shown below).

View File

@ -31,6 +31,7 @@ class RetainModeEnum(str, Enum):
class RecordRetainConfig(FrigateBaseModel):
days: float = Field(default=0, title="Default retention period.")
mode: RetainModeEnum = Field(default=RetainModeEnum.all, title="Retain mode.")
max_size: Optional[int] = Field(default=None, title="Maximum storage size in MB.")
class ReviewRetainConfig(FrigateBaseModel):

View File

@ -94,6 +94,22 @@ class StorageMaintainer(threading.Thread):
[b["bandwidth"] for b in self.camera_storage_stats.values()]
)
remaining_storage = round(shutil.disk_usage(RECORD_DIR).free / pow(2, 20), 1)
# Check if max_size is configured and total usage exceeds it
max_size = self.config.record.retain.max_size
if max_size is not None:
total_usage = (
Recordings.select(fn.SUM(Recordings.segment_size))
.where(Recordings.segment_size != 0)
.scalar() or 0
) / pow(2, 20) # Convert to MB
if total_usage > max_size:
logger.debug(
f"Storage cleanup check: total usage {total_usage} MB exceeds max_size {max_size} MB."
)
return True
logger.debug(
f"Storage cleanup check: {hourly_bandwidth} hourly with remaining storage: {remaining_storage}."
)

View File

@ -1531,6 +1531,49 @@ class TestConfig(unittest.TestCase):
self.assertRaises(ValueError, lambda: FrigateConfig(**config))
def test_record_retain_max_size(self):
config = {
"mqtt": {"host": "mqtt"},
"record": {"retain": {"max_size": 1000}},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.record.retain.max_size == 1000
def test_record_retain_max_size_default(self):
config = {
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
"detect": {
"height": 1080,
"width": 1920,
"fps": 5,
},
}
},
}
frigate_config = FrigateConfig(**config)
assert frigate_config.record.retain.max_size is None
if __name__ == "__main__":
unittest.main(verbosity=2)

View File

@ -260,6 +260,78 @@ class TestHttp(unittest.TestCase):
assert Recordings.get(Recordings.id == rec_k2_id)
assert Recordings.get(Recordings.id == rec_k3_id)
def test_storage_cleanup_with_max_size(self):
    """Test that storage cleanup is triggered when max_size is exceeded.

    The maintainer compares total recording size against max_size with a
    strict greater-than (`total_usage > max_size`), so the inserted
    segments must sum to strictly MORE than the 1 MB limit — two 0.5 MB
    segments would only *equal* the limit and the check would not fire.
    """
    config_with_max_size = {
        "mqtt": {"host": "mqtt"},
        "record": {"retain": {"max_size": 1}},  # 1 MB max size
        "cameras": {
            "front_door": {
                "ffmpeg": {
                    "inputs": [
                        {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                    ]
                },
                "detect": {
                    "height": 1080,
                    "width": 1920,
                    "fps": 5,
                },
            }
        },
    }
    config = FrigateConfig(**config_with_max_size)
    storage = StorageMaintainer(config, MagicMock())

    time_now = datetime.datetime.now().timestamp()
    rec_id1 = "test1.recording"
    rec_id2 = "test2.recording"

    # Two 0.75 MB segments -> 1.5 MB total, strictly above the 1 MB limit.
    _insert_mock_recording(
        rec_id1,
        os.path.join(self.test_dir, f"{rec_id1}.tmp"),
        time_now - 3600,
        time_now - 3590,
        seg_size=786432,  # 0.75 MB
    )
    _insert_mock_recording(
        rec_id2,
        os.path.join(self.test_dir, f"{rec_id2}.tmp"),
        time_now - 1800,
        time_now - 1790,
        seg_size=786432,  # 0.75 MB (total 1.5 MB)
    )
    storage.calculate_camera_bandwidth()

    # Cleanup must trigger: total size (1.5 MB) exceeds max_size (1 MB).
    assert storage.check_storage_needs_cleanup()
def test_storage_cleanup_without_max_size(self):
    """Test that the max_size check is skipped when the option is absent."""
    frigate_config = FrigateConfig(**self.minimal_config)
    maintainer = StorageMaintainer(frigate_config, MagicMock())

    now = datetime.datetime.now().timestamp()
    recording_id = "test.recording"

    # Insert a single large (1 MB) recording; with no max_size configured,
    # the size-based cleanup path must not be in play for it.
    _insert_mock_recording(
        recording_id,
        os.path.join(self.test_dir, f"{recording_id}.tmp"),
        now - 3600,
        now - 3590,
        seg_size=1048576,  # 1 MB
    )
    maintainer.calculate_camera_bandwidth()

    # Should not trigger cleanup based on max_size since it's not configured
    # (may still trigger based on free space, but that's a separate check).
    assert maintainer.config.record.retain.max_size is None
def _insert_mock_event(id: str, start: int, end: int, retain: bool) -> Event:
"""Inserts a basic event model with a given id."""