Mirror of https://github.com/blakeblackshear/frigate.git
more consistent use of iterators in select queries (#8258)
This commit is contained in:
parent e13a176820, commit 9fc93c72a0
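Every hunk below applies the same change: a peewee select query that was written as one flat chain is rewritten as a parenthesized fluent chain that declares its row type (.namedtuples() or .dicts()) and ends in .iterator(), so rows stream off the cursor instead of being cached on the query as model instances. A minimal, runnable sketch of that pattern, assuming a stand-in Event model with only a few fields rather than Frigate's real one:

# A minimal sketch of the pattern (not Frigate code): an in-memory SQLite
# database and a stand-in Event model with only the fields used below.
from peewee import BooleanField, CharField, FloatField, Model, SqliteDatabase

db = SqliteDatabase(":memory:")


class Event(Model):
    camera = CharField()
    label = CharField()
    start_time = FloatField()
    retain_indefinitely = BooleanField(default=False)

    class Meta:
        database = db


db.connect()
db.create_tables([Event])
Event.insert_many(
    [
        {"camera": "front", "label": "person", "start_time": 1.0},
        {"camera": "back", "label": "car", "start_time": 2.0},
    ]
).execute()

# Old style: iterating caches every returned row on the query as a model instance.
expired_events = Event.select(Event.id, Event.camera).where(Event.label == "person")

# New style: rows stream straight off the cursor as lightweight namedtuples.
expired_events = (
    Event.select(Event.id, Event.camera)
    .where(Event.label == "person")
    .namedtuples()
    .iterator()
)
for row in expired_events:
    print(row.id, row.camera)

The .iterator() call matters most for the cleanup paths below, which can walk very large result sets; without it peewee keeps every returned row alive on the query object for the duration of the loop.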
@@ -83,14 +83,19 @@ class EventCleanup(threading.Thread):
                 datetime.datetime.now() - datetime.timedelta(days=expire_days)
             ).timestamp()
             # grab all events after specific time
-            expired_events = Event.select(
-                Event.id,
-                Event.camera,
-            ).where(
-                Event.camera.not_in(self.camera_keys),
-                Event.start_time < expire_after,
-                Event.label == event.label,
-                Event.retain_indefinitely == False,
+            expired_events = (
+                Event.select(
+                    Event.id,
+                    Event.camera,
+                )
+                .where(
+                    Event.camera.not_in(self.camera_keys),
+                    Event.start_time < expire_after,
+                    Event.label == event.label,
+                    Event.retain_indefinitely == False,
+                )
+                .namedtuples()
+                .iterator()
             )
             # delete the media from disk
             for event in expired_events:

@@ -136,14 +141,19 @@ class EventCleanup(threading.Thread):
                 datetime.datetime.now() - datetime.timedelta(days=expire_days)
             ).timestamp()
             # grab all events after specific time
-            expired_events = Event.select(
-                Event.id,
-                Event.camera,
-            ).where(
-                Event.camera == name,
-                Event.start_time < expire_after,
-                Event.label == event.label,
-                Event.retain_indefinitely == False,
+            expired_events = (
+                Event.select(
+                    Event.id,
+                    Event.camera,
+                )
+                .where(
+                    Event.camera == name,
+                    Event.start_time < expire_after,
+                    Event.label == event.label,
+                    Event.retain_indefinitely == False,
+                )
+                .namedtuples()
+                .iterator()
             )

             # delete the grabbed clips from disk

@@ -958,9 +958,10 @@ def events():
         .order_by(Event.start_time.desc())
         .limit(limit)
         .dicts()
+        .iterator()
     )

-    return jsonify([e for e in events])
+    return jsonify(list(events))


 @bp.route("/events/<camera_name>/<label>/create", methods=["POST"])

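Because .iterator() disables peewee's result caching, the events query above is single-pass, which is why the handler materializes it exactly once with list(events) before handing it to jsonify. A small sketch of that behavior, reusing the stand-in Event model from the first sketch:

# .iterator() results are single-pass: rows are not cached on the query,
# so a second walk over the same cursor comes back empty.
rows = Event.select(Event.camera).namedtuples().iterator()
first = list(rows)   # consumes the cursor (the two rows inserted above)
second = list(rows)  # nothing left, and nothing was cached
assert first and second == []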
@@ -1490,6 +1491,7 @@ def recordings_summary(camera_name):
                 ),
             ).desc()
         )
+        .namedtuples()
     )

     event_groups = (

@@ -1511,14 +1513,14 @@ def recordings_summary(camera_name):
                 ),
             ),
         )
-        .objects()
+        .namedtuples()
     )

     event_map = {g.hour: g.count for g in event_groups}

     days = {}

-    for recording_group in recording_groups.objects():
+    for recording_group in recording_groups:
         parts = recording_group.hour.split()
         hour = parts[1]
         day = parts[0]

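This hunk, and the matching ones in RecordingCleanup and StorageMaintainer further down, also swaps .objects() for .namedtuples() and drops the per-loop .objects().iterator() call, so the row type and the streaming are both declared where the query is built. A rough sketch of how the peewee row-type modifiers compare, again on the stand-in Event model:

# Row-type modifiers on the sketch model: .objects() yields model instances,
# .dicts() plain dicts, .namedtuples() lightweight namedtuples. All of them
# accept the streaming .iterator() modifier.
for row in Event.select(Event.camera, Event.label).namedtuples().iterator():
    print(row.camera, row.label)        # attribute access without model overhead

for row in Event.select(Event.camera, Event.label).dicts().iterator():
    print(row["camera"], row["label"])  # key access, handy for jsonify()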
@@ -1562,9 +1564,11 @@ def recordings(camera_name):
             Recordings.start_time <= before,
         )
         .order_by(Recordings.start_time)
+        .dicts()
+        .iterator()
     )

-    return jsonify([e for e in recordings.dicts()])
+    return jsonify(list(recordings))


 @bp.route("/<camera_name>/start/<int:start_ts>/end/<int:end_ts>/clip.mp4")

@@ -1669,6 +1673,7 @@ def vod_ts(camera_name, start_ts, end_ts):
         )
         .where(Recordings.camera == camera_name)
         .order_by(Recordings.start_time.asc())
+        .iterator()
     )

     clips = []

@@ -48,12 +48,17 @@ class RecordingCleanup(threading.Thread):
         expire_before = (
             datetime.datetime.now() - datetime.timedelta(days=expire_days)
         ).timestamp()
-        no_camera_recordings: Recordings = Recordings.select(
-            Recordings.id,
-            Recordings.path,
-        ).where(
-            Recordings.camera.not_in(list(self.config.cameras.keys())),
-            Recordings.end_time < expire_before,
+        no_camera_recordings: Recordings = (
+            Recordings.select(
+                Recordings.id,
+                Recordings.path,
+            )
+            .where(
+                Recordings.camera.not_in(list(self.config.cameras.keys())),
+                Recordings.end_time < expire_before,
+            )
+            .namedtuples()
+            .iterator()
         )

         deleted_recordings = set()

@@ -95,6 +100,8 @@ class RecordingCleanup(threading.Thread):
                     Recordings.end_time < expire_date,
                 )
                 .order_by(Recordings.start_time)
+                .namedtuples()
+                .iterator()
             )

             # Get all the events to check against

@@ -111,14 +118,14 @@ class RecordingCleanup(threading.Thread):
                     Event.has_clip,
                 )
                 .order_by(Event.start_time)
-                .objects()
+                .namedtuples()
             )

             # loop over recordings and see if they overlap with any non-expired events
             # TODO: expire segments based on segment stats according to config
             event_start = 0
             deleted_recordings = set()
-            for recording in recordings.objects().iterator():
+            for recording in recordings:
                 keep = False
                 # Now look for a reason to keep this recording segment
                 for idx in range(event_start, len(events)):

@@ -163,6 +163,8 @@ class RecordingMaintainer(threading.Thread):
                 Event.has_clip,
             )
             .order_by(Event.start_time)
+            .namedtuples()
+            .iterator()
         )

         tasks.extend(

@@ -99,13 +99,19 @@ class StorageMaintainer(threading.Thread):
             [b["bandwidth"] for b in self.camera_storage_stats.values()]
         )

-        recordings: Recordings = Recordings.select(
-            Recordings.id,
-            Recordings.start_time,
-            Recordings.end_time,
-            Recordings.segment_size,
-            Recordings.path,
-        ).order_by(Recordings.start_time.asc())
+        recordings: Recordings = (
+            Recordings.select(
+                Recordings.id,
+                Recordings.start_time,
+                Recordings.end_time,
+                Recordings.segment_size,
+                Recordings.path,
+            )
+            .order_by(Recordings.start_time.asc())
+            .namedtuples()
+            .iterator()
+        )

         retained_events: Event = (
             Event.select(
                 Event.start_time,

@@ -116,12 +122,12 @@ class StorageMaintainer(threading.Thread):
                 Event.has_clip,
             )
             .order_by(Event.start_time.asc())
-            .objects()
+            .namedtuples()
         )

         event_start = 0
         deleted_recordings = set()
-        for recording in recordings.objects().iterator():
+        for recording in recordings:
             # check if 1 hour of storage has been reclaimed
             if deleted_segments_size > hourly_bandwidth:
                 break

@@ -162,13 +168,18 @@ class StorageMaintainer(threading.Thread):
             logger.error(
                 f"Could not clear {hourly_bandwidth} MB, currently {deleted_segments_size} MB have been cleared. Retained recordings must be deleted."
             )
-            recordings = Recordings.select(
-                Recordings.id,
-                Recordings.path,
-                Recordings.segment_size,
-            ).order_by(Recordings.start_time.asc())
+            recordings = (
+                Recordings.select(
+                    Recordings.id,
+                    Recordings.path,
+                    Recordings.segment_size,
+                )
+                .order_by(Recordings.start_time.asc())
+                .namedtuples()
+                .iterator()
+            )

-            for recording in recordings.objects().iterator():
+            for recording in recordings:
                 if deleted_segments_size > hourly_bandwidth:
                     break