mirror of https://github.com/blakeblackshear/frigate.git
synced 2024-11-26 19:06:11 +01:00
fix process clip
This commit is contained in:
parent 096c21f105
commit 72833686f1
process_clip.py
@@ -1,25 +1,68 @@
-import sys
-import click
-import os
 import datetime
+import json
+import logging
+import multiprocessing as mp
+import os
+import subprocess as sp
+import sys
 from unittest import TestCase, main
-from frigate.video import process_frames, start_or_restart_ffmpeg, capture_frames, get_frame_shape
-from frigate.util import DictFrameManager, SharedMemoryFrameManager, EventsPerSecond, draw_box_with_label
-from frigate.motion import MotionDetector
-from frigate.edgetpu import LocalObjectDetector
-from frigate.objects import ObjectTracker
-import multiprocessing as mp
-import numpy as np
+
+import click
 import cv2
+import numpy as np
+
+from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
+from frigate.edgetpu import LocalObjectDetector
+from frigate.motion import MotionDetector
 from frigate.object_processing import COLOR_MAP, CameraState
+from frigate.objects import ObjectTracker
+from frigate.util import (DictFrameManager, EventsPerSecond,
+                          SharedMemoryFrameManager, draw_box_with_label)
+from frigate.video import (capture_frames, process_frames,
+                           start_or_restart_ffmpeg)
+
+logging.basicConfig()
+logging.root.setLevel(logging.DEBUG)
+
+logger = logging.getLogger(__name__)
+
+def get_frame_shape(source):
+    ffprobe_cmd = " ".join([
+        'ffprobe',
+        '-v',
+        'panic',
+        '-show_error',
+        '-show_streams',
+        '-of',
+        'json',
+        '"'+source+'"'
+    ])
+    p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
+    (output, err) = p.communicate()
+    p_status = p.wait()
+    info = json.loads(output)
+
+    video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
+
+    if video_info['height'] != 0 and video_info['width'] != 0:
+        return (video_info['height'], video_info['width'], 3)
+
+    # fallback to using opencv if ffprobe didnt succeed
+    video = cv2.VideoCapture(source)
+    ret, frame = video.read()
+    frame_shape = frame.shape
+    video.release()
+    return frame_shape
 
 class ProcessClip():
-    def __init__(self, clip_path, frame_shape, config):
+    def __init__(self, clip_path, frame_shape, config: FrigateConfig):
         self.clip_path = clip_path
-        self.frame_shape = frame_shape
         self.camera_name = 'camera'
-        self.frame_manager = DictFrameManager()
-        # self.frame_manager = SharedMemoryFrameManager()
+        self.config = config
+        self.camera_config = self.config.cameras['camera']
+        self.frame_shape = self.camera_config.frame_shape
+        self.ffmpeg_cmd = [c['cmd'] for c in self.camera_config.ffmpeg_cmds if 'detect' in c['roles']][0]
+        self.frame_manager = SharedMemoryFrameManager()
         self.frame_queue = mp.Queue()
         self.detected_objects_queue = mp.Queue()
         self.camera_state = CameraState(self.camera_name, config, self.frame_manager)
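Note on the new get_frame_shape helper: it leans on ffprobe's JSON layout and takes the first stream whose codec_type is video. A minimal sketch of that selection against a canned payload (the sample values below are illustrative, not from this commit):

import json

# Illustrative ffprobe -show_streams -of json output (values are made up).
sample = '''
{
  "streams": [
    {"codec_type": "audio", "channels": 2},
    {"codec_type": "video", "width": 1280, "height": 720}
  ]
}
'''

info = json.loads(sample)
# Same selection logic as get_frame_shape: the first video stream wins.
video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
assert (video_info['height'], video_info['width'], 3) == (720, 1280, 3)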
@@ -27,12 +70,11 @@ class ProcessClip():
     def load_frames(self):
         fps = EventsPerSecond()
         skipped_fps = EventsPerSecond()
-        stop_event = mp.Event()
-        detection_frame = mp.Value('d', datetime.datetime.now().timestamp()+100000)
         current_frame = mp.Value('d', 0.0)
-        ffmpeg_cmd = f"ffmpeg -hide_banner -loglevel panic -i {self.clip_path} -f rawvideo -pix_fmt rgb24 pipe:".split(" ")
-        ffmpeg_process = start_or_restart_ffmpeg(ffmpeg_cmd, self.frame_shape[0]*self.frame_shape[1]*self.frame_shape[2])
-        capture_frames(ffmpeg_process, self.camera_name, self.frame_shape, self.frame_manager, self.frame_queue, 1, fps, skipped_fps, stop_event, detection_frame, current_frame)
+        frame_size = self.camera_config.frame_shape_yuv[0] * self.camera_config.frame_shape_yuv[1]
+        ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size)
+        capture_frames(ffmpeg_process, self.camera_name, self.camera_config.frame_shape_yuv, self.frame_manager,
+            self.frame_queue, fps, skipped_fps, current_frame)
         ffmpeg_process.wait()
         ffmpeg_process.communicate()
 
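Note on frame_size: the detect pipeline now consumes I420/YUV420p, where a frame is a full-resolution Y plane plus two quarter-resolution chroma planes, so frame_shape_yuv is (height * 3 // 2, width) and its product is the number of bytes ffmpeg writes per frame. A quick sanity check (dimensions illustrative):

height, width = 1080, 1920

# I420/YUV420p: Y plane is height*width; U and V are each a quarter of that.
frame_shape_yuv = (height * 3 // 2, width)
frame_size = frame_shape_yuv[0] * frame_shape_yuv[1]

assert frame_size == height * width * 3 // 2  # 3110400 bytes per 1080p frame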
@@ -43,23 +85,28 @@ class ProcessClip():
 
         object_detector = LocalObjectDetector(labels='/labelmap.txt')
         object_tracker = ObjectTracker(10)
-        process_fps = mp.Value('d', 0.0)
-        detection_fps = mp.Value('d', 0.0)
-        current_frame = mp.Value('d', 0.0)
+        process_info = {
+            'process_fps': mp.Value('d', 0.0),
+            'detection_fps': mp.Value('d', 0.0),
+            'detection_frame': mp.Value('d', 0.0)
+        }
         stop_event = mp.Event()
+        model_shape = (self.config.model.height, self.config.model.width)
 
-        process_frames(self.camera_name, self.frame_queue, self.frame_shape, self.frame_manager, motion_detector, object_detector, object_tracker, self.detected_objects_queue,
-            process_fps, detection_fps, current_frame, objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
+        process_frames(self.camera_name, self.frame_queue, self.frame_shape, model_shape,
+            self.frame_manager, motion_detector, object_detector, object_tracker,
+            self.detected_objects_queue, process_info,
+            objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
 
     def objects_found(self, debug_path=None):
         obj_detected = False
         top_computed_score = 0.0
-        def handle_event(name, obj):
+        def handle_event(name, obj, frame_time):
             nonlocal obj_detected
             nonlocal top_computed_score
-            if obj['computed_score'] > top_computed_score:
-                top_computed_score = obj['computed_score']
-            if not obj['false_positive']:
+            if obj.computed_score > top_computed_score:
+                top_computed_score = obj.computed_score
+            if not obj.false_positive:
                 obj_detected = True
         self.camera_state.on('new', handle_event)
         self.camera_state.on('update', handle_event)
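Note on the handle_event change: CameraState callbacks now receive (name, obj, frame_time), and obj is a tracked-object instance rather than a dict, hence attribute access. The closure pattern in isolation (StubObj is a stand-in, not Frigate's API):

class StubObj:
    # Stand-in for a tracked object that exposes attributes, not dict keys.
    def __init__(self, computed_score, false_positive):
        self.computed_score = computed_score
        self.false_positive = false_positive

def objects_found(events):
    obj_detected = False
    top_computed_score = 0.0

    def handle_event(name, obj, frame_time):
        nonlocal obj_detected, top_computed_score
        if obj.computed_score > top_computed_score:
            top_computed_score = obj.computed_score
        if not obj.false_positive:
            obj_detected = True

    for name, obj, ts in events:
        handle_event(name, obj, ts)
    return obj_detected, top_computed_score

print(objects_found([('new', StubObj(0.7, True), 1.0),
                     ('update', StubObj(0.9, False), 2.0)]))
# -> (True, 0.9)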
@@ -71,7 +118,8 @@ class ProcessClip():
 
             self.camera_state.update(frame_time, current_tracked_objects)
             for obj in self.camera_state.tracked_objects.values():
-                print(f"{frame_time}: {obj['id']} - {obj['computed_score']} - {obj['score_history']}")
+                obj_data = obj.to_dict()
+                print(f"{frame_time}: {obj_data['id']} - {obj_data['label']} - {obj_data['score']} - {obj.score_history}")
 
             self.frame_manager.delete(self.camera_state.previous_frame_id)
 
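Note on the print change: tracked objects now expose their serializable fields through to_dict(), while runtime-only state such as score_history stays an attribute. A stand-in sketch of that split (TrackedObject below is illustrative, not Frigate's class):

class TrackedObject:
    # Serializable fields go through to_dict(); runtime state stays on self.
    def __init__(self, id, label, score, score_history):
        self.id, self.label, self.score = id, label, score
        self.score_history = score_history

    def to_dict(self):
        return {'id': self.id, 'label': self.label, 'score': self.score}

obj = TrackedObject('abc123', 'person', 0.87, [0.8, 0.9])
obj_data = obj.to_dict()
print(f"1600000000.0: {obj_data['id']} - {obj_data['label']} - {obj_data['score']} - {obj.score_history}")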
@@ -81,7 +129,7 @@ class ProcessClip():
         }
 
     def save_debug_frame(self, debug_path, frame_time, tracked_objects):
-        current_frame = self.frame_manager.get(f"{self.camera_name}{frame_time}", self.frame_shape)
+        current_frame = cv2.cvtColor(self.frame_manager.get(f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv), cv2.COLOR_YUV2BGR_I420)
         # draw the bounding boxes on the frame
         for obj in tracked_objects:
             thickness = 2
@@ -95,12 +143,12 @@ class ProcessClip():
 
             # draw the bounding boxes on the frame
             box = obj['box']
-            draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
+            draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['id'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
             # draw the regions on the frame
             region = obj['region']
             draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
 
-        cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", cv2.cvtColor(current_frame, cv2.COLOR_RGB2BGR))
+        cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", current_frame)
 
 @click.command()
 @click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
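Note on the debug-frame change: the raw I420 buffer is converted to BGR once via cv2.cvtColor, so the final imwrite no longer needs a COLOR_RGB2BGR pass. A minimal round trip on a synthetic frame (dimensions illustrative):

import cv2
import numpy as np

height, width = 360, 640

# A flat gray I420 frame: a Y plane plus half-height chroma rows.
yuv = np.full((height * 3 // 2, width), 128, dtype=np.uint8)

# Same conversion save_debug_frame now performs before drawing boxes.
bgr = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR_I420)
assert bgr.shape == (height, width, 3)  # ready for draw_box_with_label / imwrite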
@@ -116,29 +164,37 @@ def process(path, label, threshold, debug_path):
     elif os.path.isfile(path):
         clips.append(path)
 
-    config = {
-        'snapshots': {
-            'show_timestamp': False,
-            'draw_zones': False
+    json_config = {
+        'mqtt': {
+            'host': 'mqtt'
         },
-        'zones': {},
-        'objects': {
-            'track': [label],
-            'filters': {
-                'person': {
-                    'threshold': threshold
-                }
+        'cameras': {
+            'camera': {
+                'ffmpeg': {
+                    'inputs': [
+                        { 'path': 'path.mp4', 'global_args': '', 'input_args': '', 'roles': ['detect'] }
+                    ]
+                },
+                'height': 1920,
+                'width': 1080
             }
         }
     }
 
     results = []
     for c in clips:
+        logger.info(c)
         frame_shape = get_frame_shape(c)
-        config['frame_shape'] = frame_shape
+
+        json_config['cameras']['camera']['height'] = frame_shape[0]
+        json_config['cameras']['camera']['width'] = frame_shape[1]
+        json_config['cameras']['camera']['ffmpeg']['inputs'][0]['path'] = c
+
+        config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
+
         process_clip = ProcessClip(c, frame_shape, config)
         process_clip.load_frames()
-        process_clip.process_frames(objects_to_track=config['objects']['track'])
+        process_clip.process_frames(objects_to_track=[label])
 
         results.append((c, process_clip.objects_found(debug_path)))
 
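Note on the config change: the script now patches per-clip values into a plain dict and only then validates it through FRIGATE_CONFIG_SCHEMA (a voluptuous schema) before wrapping it in FrigateConfig. The same validate-after-patching shape with a toy schema (the schema below is illustrative, not Frigate's):

from voluptuous import Required, Schema

# Toy stand-in for FRIGATE_CONFIG_SCHEMA.
CAMERA_SCHEMA = Schema({
    Required('height'): int,
    Required('width'): int,
    Required('path'): str,
})

json_config = {'height': 0, 'width': 0, 'path': 'placeholder.mp4'}

# Patch in per-clip values before validating, as process() now does.
json_config.update(height=720, width=1280, path='/clips/front_door.mp4')
validated = CAMERA_SCHEMA(json_config)  # raises on missing keys or bad types
print(validated)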
@@ -149,4 +205,4 @@ def process(path, label, threshold, debug_path):
     print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
 
 if __name__ == '__main__':
     process()
frigate/video.py
@@ -112,16 +112,15 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
         frame_name = f"{camera_name}{current_frame.value}"
         frame_buffer = frame_manager.create(frame_name, frame_size)
         try:
             frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
-        except:
-            logger.info(f"{camera_name}: ffmpeg sent a broken frame. something is wrong.")
+        except Exception as e:
+            logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")
 
             if ffmpeg_process.poll() != None:
                 logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
                 frame_manager.delete(frame_name)
                 break
+            continue
 
-        continue
-
         frame_rate.update()
 
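Note on the video.py fix: the bare except now surfaces the exception text, and continue moves inside the handler, so a broken frame retries the read instead of falling through to frame_rate.update(). The control flow in isolation (the reader and poll below stand in for the ffmpeg pipe and process handle):

import io

def capture(reader, frame_size, poll):
    # Mirrors the fixed loop: on a bad read, log, exit if the producer died,
    # otherwise continue straight to the next read attempt.
    frames = 0
    while True:
        try:
            buf = reader.read(frame_size)
            if len(buf) < frame_size:
                raise ValueError("short read")
        except Exception as e:
            print(f"broken frame: {e}")
            if poll() is not None:   # stands in for ffmpeg_process.poll()
                break
            continue                 # the fix: skip the frame accounting below
        frames += 1
    return frames

# Two full frames, a short read that retries, then the "process" exits.
polls = iter([None, 0])
print(capture(io.BytesIO(b"\x00" * 10), 4, lambda: next(polls)))  # -> 2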