DB Optimizations (#6712)
* Enable auto vacuums
* Enable auto vacuum
* Fix separator
* Fix separator and remove incorrect log
* Limit to 1 row since that is all that is used
* Add index on camera + segment_size
* Formatting
* Increase timeout and cache_size
* Set DB mode to NORMAL synchronous level
* Formatting
* Vacuum every 2 weeks
* Remove fstring
* Use string
* Use consts
Parent: 20b52a96bc
Commit: 8bc76d19db
@@ -1,3 +1,4 @@
+import datetime
 import logging
 import multiprocessing as mp
 import os
@@ -167,6 +168,15 @@ class FrigateApp:
         self.timeline_queue: Queue = mp.Queue()
 
     def init_database(self) -> None:
+        def vacuum_db(db: SqliteExtDatabase) -> None:
+            db.execute_sql("VACUUM;")
+
+            try:
+                with open(f"{CONFIG_DIR}/.vacuum", "w") as f:
+                    f.write(str(datetime.datetime.now().timestamp()))
+            except PermissionError:
+                logger.error("Unable to write to /config to save DB state")
+
         # Migrate DB location
         old_db_path = DEFAULT_DB_PATH
         if not os.path.isfile(self.config.database.path) and os.path.isfile(
@@ -182,6 +192,24 @@ class FrigateApp:
         router = Router(migrate_db)
         router.run()
 
+        # check if vacuum needs to be run
+        if os.path.exists(f"{CONFIG_DIR}/.vacuum"):
+            with open(f"{CONFIG_DIR}/.vacuum") as f:
+                try:
+                    timestamp = int(f.readline())
+                except Exception:
+                    timestamp = 0
+
+            if (
+                timestamp
+                < (
+                    datetime.datetime.now() - datetime.timedelta(weeks=2)
+                ).timestamp()
+            ):
+                vacuum_db(migrate_db)
+        else:
+            vacuum_db(migrate_db)
+
         migrate_db.close()
 
     def init_go2rtc(self) -> None:
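The pragmas added later in this commit set auto_vacuum to FULL, which only moves free pages to the end of the file and truncates it; as the inline comment in the diff notes, it does not defragment the database. That is why the code above still runs a full VACUUM and records the time in {CONFIG_DIR}/.vacuum so the rewrite repeats roughly every two weeks. A standalone sketch (not part of this commit; the ./example.db path is illustrative) showing what a manual VACUUM reclaims:

# Standalone sketch, not Frigate code: inspect free pages, then rebuild the file.
# Point ./example.db at any existing SQLite database.
import sqlite3

# autocommit mode so VACUUM does not run inside an open transaction
con = sqlite3.connect("./example.db", isolation_level=None)

page_count = con.execute("PRAGMA page_count").fetchone()[0]
freelist = con.execute("PRAGMA freelist_count").fetchone()[0]
print(f"{freelist} of {page_count} pages are unused")

# VACUUM rewrites the database into a new file, dropping free pages and
# defragmenting tables and indexes; auto_vacuum=FULL only truncates free pages.
con.execute("VACUUM")
con.close()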
@@ -205,7 +233,15 @@ class FrigateApp:
     def bind_database(self) -> None:
         """Bind db to the main process."""
         # NOTE: all db accessing processes need to be created before the db can be bound to the main process
-        self.db = SqliteQueueDatabase(self.config.database.path)
+        self.db = SqliteQueueDatabase(
+            self.config.database.path,
+            pragmas={
+                "auto_vacuum": "FULL",  # Does not defragment database
+                "cache_size": -512 * 1000,  # 512MB of cache,
+                "synchronous": "NORMAL",  # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
+            },
+            timeout=60,
+        )
         models = [Event, Recordings, Timeline]
         self.db.bind(models)
 
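For reference on the values above (this note is not part of the commit): SQLite treats a negative cache_size as a size in KiB, so -512 * 1000 requests roughly 512 MB of page cache per connection; synchronous = NORMAL skips the fsync on every commit, which the linked SQLite documentation describes as safe when the database is in WAL mode; timeout=60 is the busy timeout, in seconds, that peewee passes to the underlying sqlite3 connection; and per the SQLite docs, changing auto_vacuum on an existing non-empty database only takes effect after a VACUUM, which the vacuum logic added above provides. A minimal peewee sketch (illustrative path and values) confirming that the pragmas are applied when a connection is opened:

# Hedged sketch, not Frigate code: verify pragmas are applied on connect.
from peewee import SqliteDatabase

db = SqliteDatabase(
    "/tmp/pragma_demo.db",  # illustrative path only
    pragmas={"cache_size": -512 * 1000, "synchronous": "NORMAL"},
    timeout=60,
)
db.connect()
print(db.execute_sql("PRAGMA cache_size").fetchone())   # (-512000,) -> ~512 MB
print(db.execute_sql("PRAGMA synchronous").fetchone())  # (1,) -> NORMAL
db.close()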
@@ -180,7 +180,9 @@ class RecordingCleanup(threading.Thread):
 
         # find all the recordings older than the oldest recording in the db
         try:
-            oldest_recording = Recordings.select().order_by(Recordings.start_time).get()
+            oldest_recording = (
+                Recordings.select().order_by(Recordings.start_time).limit(1).get()
+            )
 
             p = Path(oldest_recording.path)
             oldest_timestamp = p.stat().st_mtime - 1
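The rewritten query makes the LIMIT explicit in the generated SQL; per the commit message, only one row is ever used, so there is no reason to let the query return more. A minimal sketch (not part of this commit) using a stand-in model, purely to show the SQL peewee emits for this pattern:

# Hedged sketch: a stand-in model, not Frigate's actual Recordings schema,
# declaring only the fields needed to illustrate the generated SQL.
import peewee as pw

db = pw.SqliteDatabase(":memory:")

class Recordings(pw.Model):
    camera = pw.CharField()
    path = pw.CharField()
    start_time = pw.FloatField()
    segment_size = pw.FloatField(default=0)

    class Meta:
        database = db

db.create_tables([Recordings])

query = Recordings.select().order_by(Recordings.start_time).limit(1)
print(query.sql())
# e.g. ('SELECT ... FROM "recordings" AS "t1" ORDER BY "t1"."start_time" LIMIT ?', [1])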
@@ -37,7 +37,15 @@ def manage_recordings(
     setproctitle("frigate.recording_manager")
     listen()
 
-    db = SqliteQueueDatabase(config.database.path)
+    db = SqliteQueueDatabase(
+        config.database.path,
+        pragmas={
+            "auto_vacuum": "FULL",  # Does not defragment database
+            "cache_size": -512 * 1000,  # 512MB of cache
+            "synchronous": "NORMAL",  # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous
+        },
+        timeout=60,
+    )
     models = [Event, Recordings, Timeline]
     db.bind(models)
 
@@ -48,5 +56,3 @@ def manage_recordings(
 
     cleanup = RecordingCleanup(config, stop_event)
     cleanup.start()
-
-    logger.info("recording_manager: exiting subprocess")
@@ -36,9 +36,7 @@ class StorageMaintainer(threading.Thread):
             self.camera_storage_stats[camera] = {
                 "needs_refresh": (
                     Recordings.select(fn.COUNT(Recordings.id))
-                    .where(
-                        Recordings.camera == camera, Recordings.segment_size != 0
-                    )
+                    .where(Recordings.camera == camera, Recordings.segment_size > 0)
                     .scalar()
                     < 50
                 )
@@ -48,7 +46,7 @@ class StorageMaintainer(threading.Thread):
             try:
                 bandwidth = round(
                     Recordings.select(fn.AVG(bandwidth_equation))
-                    .where(Recordings.camera == camera, Recordings.segment_size != 0)
+                    .where(Recordings.camera == camera, Recordings.segment_size > 0)
                     .limit(100)
                     .scalar()
                     * 3600,
@@ -178,6 +176,7 @@ class StorageMaintainer(threading.Thread):
 
     def run(self):
         """Check every 5 minutes if storage needs to be cleaned up."""
+        self.calculate_camera_bandwidth()
         while not self.stop_event.wait(300):
             if not self.camera_storage_stats or True in [
                 r["needs_refresh"] for r in self.camera_storage_stats.values()
migrations/017_update_indexes.py (new file, 35 lines)
@@ -0,0 +1,35 @@
+"""Peewee migrations -- 017_update_indexes.py.
+
+Some examples (model - class or model name)::
+
+    > Model = migrator.orm['model_name']            # Return model in current state by name
+
+    > migrator.sql(sql)                             # Run custom SQL
+    > migrator.python(func, *args, **kwargs)        # Run python code
+    > migrator.create_model(Model)                  # Create a model (could be used as decorator)
+    > migrator.remove_model(model, cascade=True)    # Remove a model
+    > migrator.add_fields(model, **fields)          # Add fields to a model
+    > migrator.change_fields(model, **fields)       # Change fields
+    > migrator.remove_fields(model, *field_names, cascade=True)
+    > migrator.rename_field(model, old_field_name, new_field_name)
+    > migrator.rename_table(model, new_table_name)
+    > migrator.add_index(model, *col_names, unique=False)
+    > migrator.drop_index(model, *col_names)
+    > migrator.add_not_null(model, *field_names)
+    > migrator.drop_not_null(model, *field_names)
+    > migrator.add_default(model, field_name, default)
+
+"""
+import peewee as pw
+
+SQL = pw.SQL
+
+
+def migrate(migrator, database, fake=False, **kwargs):
+    migrator.sql(
+        'CREATE INDEX "recordings_camera_segment_size" ON "recordings" ("camera", "segment_size")'
+    )
+
+
+def rollback(migrator, database, fake=False, **kwargs):
+    pass
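This index pairs with the predicates changed above in StorageMaintainer: with an index on ("camera", "segment_size"), a filter on an exact camera plus segment_size > 0 can be answered by a single index range scan instead of a full table scan. A small sqlite3 sketch (simplified table, not Frigate's full schema) showing the resulting query plan:

# Hedged sketch: a simplified recordings table used only to inspect how SQLite
# plans the camera + segment_size > 0 queries once the new index exists.
import sqlite3

con = sqlite3.connect(":memory:")
con.executescript(
    """
    CREATE TABLE recordings (id TEXT PRIMARY KEY, camera TEXT,
                             path TEXT, start_time REAL, segment_size REAL);
    CREATE INDEX recordings_camera_segment_size
        ON recordings (camera, segment_size);
    """
)

plan = con.execute(
    "EXPLAIN QUERY PLAN "
    "SELECT COUNT(id) FROM recordings WHERE camera = ? AND segment_size > 0",
    ("front_door",),
).fetchall()
for row in plan:
    print(row)
# Expected detail: SEARCH recordings USING INDEX recordings_camera_segment_size
#                  (camera=? AND segment_size>?)
con.close()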