Rework motion data calculation (#10459)

* Store motion data as a percent of total area

* Exclude historical data

* Use max so cameras without motion don't invalidate good data:
This commit is contained in:
Nicolas Mowen 2024-03-14 13:57:14 -06:00 committed by GitHub
parent 6a02e65fc2
commit 61c4ed9f12
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 34 additions and 34 deletions

View File

@ -5,12 +5,7 @@ from datetime import datetime, timedelta
from functools import reduce from functools import reduce
import pandas as pd import pandas as pd
from flask import ( from flask import Blueprint, jsonify, make_response, request
Blueprint,
jsonify,
make_response,
request,
)
from peewee import Case, DoesNotExist, fn, operator from peewee import Case, DoesNotExist, fn, operator
from frigate.models import Recordings, ReviewSegment from frigate.models import Recordings, ReviewSegment
@ -363,35 +358,23 @@ def motion_activity():
) )
clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)] clauses = [(Recordings.start_time > after) & (Recordings.end_time < before)]
clauses.append((Recordings.motion <= 100))
if cameras != "all": if cameras != "all":
camera_list = cameras.split(",") camera_list = cameras.split(",")
clauses.append((Recordings.camera << camera_list)) clauses.append((Recordings.camera << camera_list))
all_recordings: list[Recordings] = ( data: list[Recordings] = (
Recordings.select( Recordings.select(
Recordings.start_time, Recordings.start_time,
Recordings.duration,
Recordings.objects,
Recordings.motion, Recordings.motion,
) )
.where(reduce(operator.and_, clauses)) .where(reduce(operator.and_, clauses))
.order_by(Recordings.start_time.asc()) .order_by(Recordings.start_time.asc())
.dicts()
.iterator() .iterator()
) )
# format is: { timestamp: segment_start_ts, motion: [0-100], audio: [0 - -100] }
# periods where active objects / audio was detected will cause motion to be scaled down
data: list[dict[str, float]] = []
for rec in all_recordings:
data.append(
{
"start_time": rec.start_time,
"motion": rec.motion if rec.objects == 0 else 0,
}
)
# get scale in seconds # get scale in seconds
scale = request.args.get("scale", type=int, default=30) scale = request.args.get("scale", type=int, default=30)
@ -403,16 +386,10 @@ def motion_activity():
df.set_index(["start_time"], inplace=True) df.set_index(["start_time"], inplace=True)
# normalize data # normalize data
df = df.resample(f"{scale}S").sum().fillna(0.0) df = (
mean = df["motion"].mean() df.resample(f"{scale}S")
std = df["motion"].std() .apply(lambda x: max(x, key=abs, default=0.0))
df["motion"] = (df["motion"] - mean) / std .fillna(0.0)
outliers = df.quantile(0.999)["motion"]
df[df > outliers] = outliers
df["motion"] = (
(df["motion"] - df["motion"].min())
/ (df["motion"].max() - df["motion"].min())
* 100
) )
# change types for output # change types for output

View File

@ -71,6 +71,13 @@ class RecordingMaintainer(threading.Thread):
self.audio_recordings_info: dict[str, list] = defaultdict(list) self.audio_recordings_info: dict[str, list] = defaultdict(list)
self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {} self.end_time_cache: dict[str, Tuple[datetime.datetime, float]] = {}
self.camera_frame_area: dict[str, int] = {}
for camera in self.config.cameras.values():
self.camera_frame_area[camera.name] = (
camera.detect.width * camera.detect.height * 0.1
)
async def move_files(self) -> None: async def move_files(self) -> None:
cache_files = [ cache_files = [
d d
@ -289,8 +296,9 @@ class RecordingMaintainer(threading.Thread):
def segment_stats( def segment_stats(
self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime self, camera: str, start_time: datetime.datetime, end_time: datetime.datetime
) -> SegmentInfo: ) -> SegmentInfo:
video_frame_count = 0
active_count = 0 active_count = 0
motion_count = 0 total_motion_area = 0
for frame in self.object_recordings_info[camera]: for frame in self.object_recordings_info[camera]:
# frame is after end time of segment # frame is after end time of segment
if frame[0] > end_time.timestamp(): if frame[0] > end_time.timestamp():
@ -299,6 +307,7 @@ class RecordingMaintainer(threading.Thread):
if frame[0] < start_time.timestamp(): if frame[0] < start_time.timestamp():
continue continue
video_frame_count += 1
active_count += len( active_count += len(
[ [
o o
@ -307,7 +316,21 @@ class RecordingMaintainer(threading.Thread):
] ]
) )
motion_count += sum([area(box) for box in frame[2]]) total_motion_area += sum([area(box) for box in frame[2]])
if video_frame_count > 0:
normalized_motion_area = min(
int(
(
total_motion_area
/ (self.camera_frame_area[camera] * video_frame_count)
)
* 100
),
100,
)
else:
normalized_motion_area = 0
audio_values = [] audio_values = []
for frame in self.audio_recordings_info[camera]: for frame in self.audio_recordings_info[camera]:
@ -327,7 +350,7 @@ class RecordingMaintainer(threading.Thread):
average_dBFS = 0 if not audio_values else np.average(audio_values) average_dBFS = 0 if not audio_values else np.average(audio_values)
return SegmentInfo(motion_count, active_count, round(average_dBFS)) return SegmentInfo(normalized_motion_area, active_count, round(average_dBFS))
async def move_segment( async def move_segment(
self, self,