diff --git a/frigate/app.py b/frigate/app.py index 2db8728b2..840b80710 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -1,3 +1,4 @@ +import datetime import logging import multiprocessing as mp import os @@ -167,6 +168,15 @@ class FrigateApp: self.timeline_queue: Queue = mp.Queue() def init_database(self) -> None: + def vacuum_db(db: SqliteExtDatabase) -> None: + db.execute_sql("VACUUM;") + + try: + with open(f"{CONFIG_DIR}/.vacuum", "w") as f: + f.write(str(datetime.datetime.now().timestamp())) + except PermissionError: + logger.error("Unable to write to /config to save DB state") + # Migrate DB location old_db_path = DEFAULT_DB_PATH if not os.path.isfile(self.config.database.path) and os.path.isfile( @@ -182,6 +192,24 @@ class FrigateApp: router = Router(migrate_db) router.run() + # check if vacuum needs to be run + if os.path.exists(f"{CONFIG_DIR}/.vacuum"): + with open(f"{CONFIG_DIR}/.vacuum") as f: + try: + timestamp = float(f.readline()) + except Exception: + timestamp = 0 + + if ( + timestamp + < ( + datetime.datetime.now() - datetime.timedelta(weeks=2) + ).timestamp() + ): + vacuum_db(migrate_db) + else: + vacuum_db(migrate_db) + migrate_db.close() def init_go2rtc(self) -> None: @@ -205,7 +233,15 @@ class FrigateApp: def bind_database(self) -> None: """Bind db to the main process.""" # NOTE: all db accessing processes need to be created before the db can be bound to the main process - self.db = SqliteQueueDatabase(self.config.database.path) + self.db = SqliteQueueDatabase( + self.config.database.path, + pragmas={ + "auto_vacuum": "FULL", # Does not defragment database + "cache_size": -512 * 1000, # 512MB of cache + "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous + }, + timeout=60, + ) models = [Event, Recordings, Timeline] self.db.bind(models) diff --git a/frigate/record/cleanup.py b/frigate/record/cleanup.py index bb54d8b86..f2e9a4fb5 100644 --- a/frigate/record/cleanup.py +++ 
b/frigate/record/cleanup.py @@ -180,7 +180,9 @@ class RecordingCleanup(threading.Thread): # find all the recordings older than the oldest recording in the db try: - oldest_recording = Recordings.select().order_by(Recordings.start_time).get() + oldest_recording = ( + Recordings.select().order_by(Recordings.start_time).limit(1).get() + ) p = Path(oldest_recording.path) oldest_timestamp = p.stat().st_mtime - 1 diff --git a/frigate/record/record.py b/frigate/record/record.py index 3aaf56476..9d3106d0f 100644 --- a/frigate/record/record.py +++ b/frigate/record/record.py @@ -37,7 +37,15 @@ def manage_recordings( setproctitle("frigate.recording_manager") listen() - db = SqliteQueueDatabase(config.database.path) + db = SqliteQueueDatabase( + config.database.path, + pragmas={ + "auto_vacuum": "FULL", # Does not defragment database + "cache_size": -512 * 1000, # 512MB of cache + "synchronous": "NORMAL", # Safe when using WAL https://www.sqlite.org/pragma.html#pragma_synchronous + }, + timeout=60, + ) models = [Event, Recordings, Timeline] db.bind(models) @@ -48,5 +56,3 @@ def manage_recordings( cleanup = RecordingCleanup(config, stop_event) cleanup.start() - - logger.info("recording_manager: exiting subprocess") diff --git a/frigate/storage.py b/frigate/storage.py index a7986752b..d2cab553a 100644 --- a/frigate/storage.py +++ b/frigate/storage.py @@ -36,9 +36,7 @@ class StorageMaintainer(threading.Thread): self.camera_storage_stats[camera] = { "needs_refresh": ( Recordings.select(fn.COUNT(Recordings.id)) - .where( - Recordings.camera == camera, Recordings.segment_size != 0 - ) + .where(Recordings.camera == camera, Recordings.segment_size > 0) .scalar() < 50 ) @@ -48,7 +46,7 @@ class StorageMaintainer(threading.Thread): try: bandwidth = round( Recordings.select(fn.AVG(bandwidth_equation)) - .where(Recordings.camera == camera, Recordings.segment_size != 0) + .where(Recordings.camera == camera, Recordings.segment_size > 0) .limit(100) .scalar() * 3600, @@ -178,6 +176,7 @@ class 
StorageMaintainer(threading.Thread): def run(self): """Check every 5 minutes if storage needs to be cleaned up.""" + self.calculate_camera_bandwidth() while not self.stop_event.wait(300): if not self.camera_storage_stats or True in [ r["needs_refresh"] for r in self.camera_storage_stats.values() diff --git a/migrations/017_update_indexes.py b/migrations/017_update_indexes.py new file mode 100644 index 000000000..8aa53f8ee --- /dev/null +++ b/migrations/017_update_indexes.py @@ -0,0 +1,35 @@ +"""Peewee migrations -- 017_update_indexes.py. + +Some examples (model - class or model name):: + + > Model = migrator.orm['model_name'] # Return model in current state by name + + > migrator.sql(sql) # Run custom SQL + > migrator.python(func, *args, **kwargs) # Run python code + > migrator.create_model(Model) # Create a model (could be used as decorator) + > migrator.remove_model(model, cascade=True) # Remove a model + > migrator.add_fields(model, **fields) # Add fields to a model + > migrator.change_fields(model, **fields) # Change fields + > migrator.remove_fields(model, *field_names, cascade=True) + > migrator.rename_field(model, old_field_name, new_field_name) + > migrator.rename_table(model, new_table_name) + > migrator.add_index(model, *col_names, unique=False) + > migrator.drop_index(model, *col_names) + > migrator.add_not_null(model, *field_names) + > migrator.drop_not_null(model, *field_names) + > migrator.add_default(model, field_name, default) + +""" +import peewee as pw + +SQL = pw.SQL + + +def migrate(migrator, database, fake=False, **kwargs): + migrator.sql( + 'CREATE INDEX "recordings_camera_segment_size" ON "recordings" ("camera", "segment_size")' + ) + + +def rollback(migrator, database, fake=False, **kwargs): + migrator.sql('DROP INDEX IF EXISTS "recordings_camera_segment_size"')