formatting cleanup

Blake Blackshear 2021-02-17 07:23:32 -06:00
parent b8f72a5bcb
commit 39ff49e054
23 changed files with 2621 additions and 1736 deletions


@@ -1,4 +1,6 @@
import faulthandler; faulthandler.enable()
import faulthandler
faulthandler.enable()
import sys
import threading
@@ -6,10 +8,10 @@ threading.current_thread().name = "frigate"
from frigate.app import FrigateApp
cli = sys.modules['flask.cli']
cli = sys.modules["flask.cli"]
cli.show_server_banner = lambda *x: None
if __name__ == '__main__':
if __name__ == "__main__":
frigate_app = FrigateApp()
frigate_app.start()


@@ -31,7 +31,8 @@ from frigate.zeroconf import broadcast_zeroconf
logger = logging.getLogger(__name__)
class FrigateApp():
class FrigateApp:
def __init__(self):
self.stop_event = mp.Event()
self.config: FrigateConfig = None
@@ -56,60 +57,78 @@ class FrigateApp():
tmpfs_size = self.config.clips.tmpfs_cache_size
if tmpfs_size:
logger.info(f"Creating tmpfs of size {tmpfs_size}")
rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
if rc != 0:
logger.error(f"Failed to create tmpfs, error code: {rc}")
logger.info(f"Creating tmpfs of size {tmpfs_size}")
rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}")
if rc != 0:
logger.error(f"Failed to create tmpfs, error code: {rc}")
def init_logger(self):
self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process')
self.log_process = mp.Process(
target=log_process, args=(self.log_queue,), name="log_process"
)
self.log_process.daemon = True
self.log_process.start()
root_configurer(self.log_queue)
def init_config(self):
config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
config_file = os.environ.get("CONFIG_FILE", "/config/config.yml")
self.config = FrigateConfig(config_file=config_file)
for camera_name in self.config.cameras.keys():
# create camera_metrics
self.camera_metrics[camera_name] = {
'camera_fps': mp.Value('d', 0.0),
'skipped_fps': mp.Value('d', 0.0),
'process_fps': mp.Value('d', 0.0),
'detection_enabled': mp.Value('i', self.config.cameras[camera_name].detect.enabled),
'detection_fps': mp.Value('d', 0.0),
'detection_frame': mp.Value('d', 0.0),
'read_start': mp.Value('d', 0.0),
'ffmpeg_pid': mp.Value('i', 0),
'frame_queue': mp.Queue(maxsize=2),
"camera_fps": mp.Value("d", 0.0),
"skipped_fps": mp.Value("d", 0.0),
"process_fps": mp.Value("d", 0.0),
"detection_enabled": mp.Value(
"i", self.config.cameras[camera_name].detect.enabled
),
"detection_fps": mp.Value("d", 0.0),
"detection_frame": mp.Value("d", 0.0),
"read_start": mp.Value("d", 0.0),
"ffmpeg_pid": mp.Value("i", 0),
"frame_queue": mp.Queue(maxsize=2),
}
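
The metrics block above leans on mp.Value: a ctypes scalar in shared memory ("d" for double, "i" for int) that the capture and detection processes can update and the main process can read without pickling. A minimal sketch of that behavior, with illustrative names:

import multiprocessing as mp

def worker(fps):
    fps.value = 5.0  # the update is visible across the process boundary

if __name__ == "__main__":
    camera_fps = mp.Value("d", 0.0)  # "d" = double-precision float
    p = mp.Process(target=worker, args=(camera_fps,))
    p.start()
    p.join()
    print(camera_fps.value)  # 5.0
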
def check_config(self):
for name, camera in self.config.cameras.items():
assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles]))
if not camera.clips.enabled and 'clips' in assigned_roles:
logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.")
elif camera.clips.enabled and not 'clips' in assigned_roles:
logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.")
assigned_roles = list(
set([r for i in camera.ffmpeg.inputs for r in i.roles])
)
if not camera.clips.enabled and "clips" in assigned_roles:
logger.warning(
f"Camera {name} has clips assigned to an input, but clips is not enabled."
)
elif camera.clips.enabled and not "clips" in assigned_roles:
logger.warning(
f"Camera {name} has clips enabled, but clips is not assigned to an input."
)
if not camera.record.enabled and 'record' in assigned_roles:
logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.")
elif camera.record.enabled and not 'record' in assigned_roles:
logger.warning(f"Camera {name} has record enabled, but record is not assigned to an input.")
if not camera.record.enabled and "record" in assigned_roles:
logger.warning(
f"Camera {name} has record assigned to an input, but record is not enabled."
)
elif camera.record.enabled and not "record" in assigned_roles:
logger.warning(
f"Camera {name} has record enabled, but record is not assigned to an input."
)
if not camera.rtmp.enabled and "rtmp" in assigned_roles:
logger.warning(
f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled."
)
elif camera.rtmp.enabled and not "rtmp" in assigned_roles:
logger.warning(
f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input."
)
if not camera.rtmp.enabled and 'rtmp' in assigned_roles:
logger.warning(f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled.")
elif camera.rtmp.enabled and not 'rtmp' in assigned_roles:
logger.warning(f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input.")
def set_log_levels(self):
logging.getLogger().setLevel(self.config.logger.default)
for log, level in self.config.logger.logs.items():
logging.getLogger(log).setLevel(level)
if not 'geventwebsocket.handler' in self.config.logger.logs:
logging.getLogger('geventwebsocket.handler').setLevel('ERROR')
if not "geventwebsocket.handler" in self.config.logger.logs:
logging.getLogger("geventwebsocket.handler").setLevel("ERROR")
def init_queues(self):
# Queues for clip processing
@@ -117,13 +136,15 @@ class FrigateApp():
self.event_processed_queue = mp.Queue()
# Queue for cameras to push tracked objects to
self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)
self.detected_frames_queue = mp.Queue(
maxsize=len(self.config.cameras.keys()) * 2
)
def init_database(self):
migrate_db = SqliteExtDatabase(self.config.database.path)
# Run migrations
del(logging.getLogger('peewee_migrate').handlers[:])
del logging.getLogger("peewee_migrate").handlers[:]
router = Router(migrate_db)
router.run()
@@ -137,7 +158,13 @@ class FrigateApp():
self.stats_tracking = stats_init(self.camera_metrics, self.detectors)
def init_web_server(self):
self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor, self.mqtt_client)
self.flask_app = create_app(
self.config,
self.db,
self.stats_tracking,
self.detected_frames_processor,
self.mqtt_client,
)
def init_mqtt(self):
self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics)
@@ -146,56 +173,108 @@ class FrigateApp():
model_shape = (self.config.model.height, self.config.model.width)
for name in self.config.cameras.keys():
self.detection_out_events[name] = mp.Event()
shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=self.config.model.height*self.config.model.width*3)
shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
shm_in = mp.shared_memory.SharedMemory(
name=name,
create=True,
size=self.config.model.height * self.config.model.width * 3,
)
shm_out = mp.shared_memory.SharedMemory(
name=f"out-{name}", create=True, size=20 * 6 * 4
)
self.detection_shms.append(shm_in)
self.detection_shms.append(shm_out)
for name, detector in self.config.detectors.items():
if detector.type == 'cpu':
self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads)
if detector.type == 'edgetpu':
self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads)
if detector.type == "cpu":
self.detectors[name] = EdgeTPUProcess(
name,
self.detection_queue,
self.detection_out_events,
model_shape,
"cpu",
detector.num_threads,
)
if detector.type == "edgetpu":
self.detectors[name] = EdgeTPUProcess(
name,
self.detection_queue,
self.detection_out_events,
model_shape,
detector.device,
detector.num_threads,
)
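
The shared memory sizes above follow directly from the tensor shapes: height * width * 3 uint8 bytes for the model input, and 20 detections x 6 float32 values for the output. A sketch of how numpy views map onto such buffers, assuming a hypothetical 320x320 model and illustrative names:

import numpy as np
from multiprocessing import shared_memory

height, width = 320, 320  # hypothetical model input size
shm_in = shared_memory.SharedMemory(name="cam1", create=True, size=height * width * 3)
shm_out = shared_memory.SharedMemory(name="out-cam1", create=True, size=20 * 6 * 4)

# Zero-copy views: writing to these arrays writes into the shared buffers.
frame = np.ndarray((1, height, width, 3), dtype=np.uint8, buffer=shm_in.buf)
detections = np.ndarray((20, 6), dtype=np.float32, buffer=shm_out.buf)

del frame, detections  # release the buffer views before closing
for shm in (shm_in, shm_out):
    shm.close()
    shm.unlink()
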
def start_detected_frames_processor(self):
self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix,
self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event)
self.detected_frames_processor = TrackedObjectProcessor(
self.config,
self.mqtt_client,
self.config.mqtt.topic_prefix,
self.detected_frames_queue,
self.event_queue,
self.event_processed_queue,
self.stop_event,
)
self.detected_frames_processor.start()
def start_camera_processors(self):
model_shape = (self.config.model.height, self.config.model.width)
for name, config in self.config.cameras.items():
camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config, model_shape,
self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
self.camera_metrics[name]))
camera_process = mp.Process(
target=track_camera,
name=f"camera_processor:{name}",
args=(
name,
config,
model_shape,
self.detection_queue,
self.detection_out_events[name],
self.detected_frames_queue,
self.camera_metrics[name],
),
)
camera_process.daemon = True
self.camera_metrics[name]['process'] = camera_process
self.camera_metrics[name]["process"] = camera_process
camera_process.start()
logger.info(f"Camera processor started for {name}: {camera_process.pid}")
def start_camera_capture_processes(self):
for name, config in self.config.cameras.items():
capture_process = mp.Process(target=capture_camera, name=f"camera_capture:{name}", args=(name, config,
self.camera_metrics[name]))
capture_process = mp.Process(
target=capture_camera,
name=f"camera_capture:{name}",
args=(name, config, self.camera_metrics[name]),
)
capture_process.daemon = True
self.camera_metrics[name]['capture_process'] = capture_process
self.camera_metrics[name]["capture_process"] = capture_process
capture_process.start()
logger.info(f"Capture process started for {name}: {capture_process.pid}")
def start_event_processor(self):
self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue, self.event_processed_queue, self.stop_event)
self.event_processor = EventProcessor(
self.config,
self.camera_metrics,
self.event_queue,
self.event_processed_queue,
self.stop_event,
)
self.event_processor.start()
def start_event_cleanup(self):
self.event_cleanup = EventCleanup(self.config, self.stop_event)
self.event_cleanup.start()
def start_recording_maintainer(self):
self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event)
self.recording_maintainer.start()
def start_stats_emitter(self):
self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client, self.config.mqtt.topic_prefix, self.stop_event)
self.stats_emitter = StatsEmitter(
self.config,
self.stats_tracking,
self.mqtt_client,
self.config.mqtt.topic_prefix,
self.stop_event,
)
self.stats_emitter.start()
def start_watchdog(self):
@@ -238,14 +317,16 @@ class FrigateApp():
def receiveSignal(signalNumber, frame):
self.stop()
sys.exit()
signal.signal(signal.SIGTERM, receiveSignal)
server = pywsgi.WSGIServer(('127.0.0.1', 5001), self.flask_app, handler_class=WebSocketHandler)
server = pywsgi.WSGIServer(
("127.0.0.1", 5001), self.flask_app, handler_class=WebSocketHandler
)
server.serve_forever()
self.stop()
def stop(self):
logger.info(f"Stopping...")
self.stop_event.set()

File diff suppressed because it is too large


@@ -1,3 +1,3 @@
CLIPS_DIR = '/media/frigate/clips'
RECORD_DIR = '/media/frigate/recordings'
CACHE_DIR = '/tmp/cache'
CLIPS_DIR = "/media/frigate/clips"
RECORD_DIR = "/media/frigate/recordings"
CACHE_DIR = "/tmp/cache"


@@ -1,48 +1,49 @@
import datetime
import hashlib
import logging
import multiprocessing as mp
import os
import queue
import threading
import signal
import threading
from abc import ABC, abstractmethod
from multiprocessing.connection import Connection
from setproctitle import setproctitle
from typing import Dict
import numpy as np
import tflite_runtime.interpreter as tflite
from setproctitle import setproctitle
from tflite_runtime.interpreter import load_delegate
from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen
logger = logging.getLogger(__name__)
def load_labels(path, encoding='utf-8'):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, 'r', encoding=encoding) as f:
lines = f.readlines()
if not lines:
return {}
if lines[0].split(' ', maxsplit=1)[0].isdigit():
pairs = [line.split(' ', maxsplit=1) for line in lines]
return {int(index): label.strip() for index, label in pairs}
else:
return {index: line.strip() for index, line in enumerate(lines)}
def load_labels(path, encoding="utf-8"):
"""Loads labels from file (with or without index numbers).
Args:
path: path to label file.
encoding: label file encoding.
Returns:
Dictionary mapping indices to labels.
"""
with open(path, "r", encoding=encoding) as f:
lines = f.readlines()
if not lines:
return {}
if lines[0].split(" ", maxsplit=1)[0].isdigit():
pairs = [line.split(" ", maxsplit=1) for line in lines]
return {int(index): label.strip() for index, label in pairs}
else:
return {index: line.strip() for index, line in enumerate(lines)}
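
For reference, a quick sketch of the two labelmap styles load_labels() accepts, written to temp files for illustration (assumes frigate and its tflite_runtime dependency are installed):

from frigate.edgetpu import load_labels

with open("/tmp/labels_indexed.txt", "w") as f:
    f.write("0 person\n1 car\n")  # "<index> <label>" style
print(load_labels("/tmp/labels_indexed.txt"))  # {0: 'person', 1: 'car'}

with open("/tmp/labels_plain.txt", "w") as f:
    f.write("person\ncar\n")  # plain style, enumerated on load
print(load_labels("/tmp/labels_plain.txt"))  # {0: 'person', 1: 'car'}
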
class ObjectDetector(ABC):
@abstractmethod
def detect(self, tensor_input, threshold = .4):
def detect(self, tensor_input, threshold=0.4):
pass
class LocalObjectDetector(ObjectDetector):
def __init__(self, tf_device=None, num_threads=3, labels=None):
self.fps = EventsPerSecond()
@@ -57,27 +58,29 @@ class LocalObjectDetector(ObjectDetector):
edge_tpu_delegate = None
if tf_device != 'cpu':
if tf_device != "cpu":
try:
logger.info(f"Attempting to load TPU as {device_config['device']}")
edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config)
edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
logger.info("TPU found")
self.interpreter = tflite.Interpreter(
model_path='/edgetpu_model.tflite',
experimental_delegates=[edge_tpu_delegate])
model_path="/edgetpu_model.tflite",
experimental_delegates=[edge_tpu_delegate],
)
except ValueError:
logger.info("No EdgeTPU detected.")
raise
else:
self.interpreter = tflite.Interpreter(
model_path='/cpu_model.tflite', num_threads=num_threads)
model_path="/cpu_model.tflite", num_threads=num_threads
)
self.interpreter.allocate_tensors()
self.tensor_input_details = self.interpreter.get_input_details()
self.tensor_output_details = self.interpreter.get_output_details()
def detect(self, tensor_input, threshold=.4):
def detect(self, tensor_input, threshold=0.4):
detections = []
raw_detections = self.detect_raw(tensor_input)
@@ -85,28 +88,49 @@ class LocalObjectDetector(ObjectDetector):
for d in raw_detections:
if d[1] < threshold:
break
detections.append((
self.labels[int(d[0])],
float(d[1]),
(d[2], d[3], d[4], d[5])
))
detections.append(
(self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
)
self.fps.update()
return detections
def detect_raw(self, tensor_input):
self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input)
self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
self.interpreter.invoke()
boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index']))
label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index']))
scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index']))
boxes = np.squeeze(
self.interpreter.get_tensor(self.tensor_output_details[0]["index"])
)
label_codes = np.squeeze(
self.interpreter.get_tensor(self.tensor_output_details[1]["index"])
)
scores = np.squeeze(
self.interpreter.get_tensor(self.tensor_output_details[2]["index"])
)
detections = np.zeros((20,6), np.float32)
detections = np.zeros((20, 6), np.float32)
for i, score in enumerate(scores):
detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]]
detections[i] = [
label_codes[i],
score,
boxes[i][0],
boxes[i][1],
boxes[i][2],
boxes[i][3],
]
return detections
def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device, num_threads):
def run_detector(
name: str,
detection_queue: mp.Queue,
out_events: Dict[str, mp.Event],
avg_speed,
start,
model_shape,
tf_device,
num_threads,
):
threading.current_thread().name = f"detector:{name}"
logger = logging.getLogger(f"detector.{name}")
logger.info(f"Starting detection process: {os.getpid()}")
@@ -114,9 +138,10 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
listen()
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
@@ -126,12 +151,9 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
outputs = {}
for name in out_events.keys():
out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False)
out_np = np.ndarray((20,6), dtype=np.float32, buffer=out_shm.buf)
outputs[name] = {
'shm': out_shm,
'np': out_np
}
out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf)
outputs[name] = {"shm": out_shm, "np": out_np}
while True:
if stop_event.is_set():
break
@@ -140,7 +162,9 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
connection_id = detection_queue.get(timeout=5)
except queue.Empty:
continue
input_frame = frame_manager.get(connection_id, (1,model_shape[0],model_shape[1],3))
input_frame = frame_manager.get(
connection_id, (1, model_shape[0], model_shape[1], 3)
)
if input_frame is None:
continue
@@ -148,26 +172,35 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.
# detect and send the output
start.value = datetime.datetime.now().timestamp()
detections = object_detector.detect_raw(input_frame)
duration = datetime.datetime.now().timestamp()-start.value
outputs[connection_id]['np'][:] = detections[:]
duration = datetime.datetime.now().timestamp() - start.value
outputs[connection_id]["np"][:] = detections[:]
out_events[connection_id].set()
start.value = 0.0
avg_speed.value = (avg_speed.value*9 + duration)/10
class EdgeTPUProcess():
def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None, num_threads=3):
avg_speed.value = (avg_speed.value * 9 + duration) / 10
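
That last line is a simple exponential moving average: each inference contributes a tenth of its duration, so avg_inference_speed settles gradually instead of jumping with every frame. Worked numbers:

avg = 0.01  # initial avg_inference_speed
for duration in (0.05, 0.05, 0.05):
    avg = (avg * 9 + duration) / 10
print(round(avg, 4))  # 0.0208 after three 50 ms inferences
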
class EdgeTPUProcess:
def __init__(
self,
name,
detection_queue,
out_events,
model_shape,
tf_device=None,
num_threads=3,
):
self.name = name
self.out_events = out_events
self.detection_queue = detection_queue
self.avg_inference_speed = mp.Value('d', 0.01)
self.detection_start = mp.Value('d', 0.0)
self.avg_inference_speed = mp.Value("d", 0.01)
self.detection_start = mp.Value("d", 0.0)
self.detect_process = None
self.model_shape = model_shape
self.tf_device = tf_device
self.num_threads = num_threads
self.start_or_restart()
def stop(self):
self.detect_process.terminate()
logging.info("Waiting for detection process to exit gracefully...")
@@ -181,11 +214,25 @@ class EdgeTPUProcess():
self.detection_start.value = 0.0
if (not self.detect_process is None) and self.detect_process.is_alive():
self.stop()
self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device, self.num_threads))
self.detect_process = mp.Process(
target=run_detector,
name=f"detector:{self.name}",
args=(
self.name,
self.detection_queue,
self.out_events,
self.avg_inference_speed,
self.detection_start,
self.model_shape,
self.tf_device,
self.num_threads,
),
)
self.detect_process.daemon = True
self.detect_process.start()
class RemoteObjectDetector():
class RemoteObjectDetector:
def __init__(self, name, labels, detection_queue, event, model_shape):
self.labels = load_labels(labels)
self.name = name
@@ -193,11 +240,15 @@ class RemoteObjectDetector():
self.detection_queue = detection_queue
self.event = event
self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
self.np_shm = np.ndarray((1,model_shape[0],model_shape[1],3), dtype=np.uint8, buffer=self.shm.buf)
self.out_shm = mp.shared_memory.SharedMemory(name=f"out-{self.name}", create=False)
self.out_np_shm = np.ndarray((20,6), dtype=np.float32, buffer=self.out_shm.buf)
def detect(self, tensor_input, threshold=.4):
self.np_shm = np.ndarray(
(1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
)
self.out_shm = mp.shared_memory.SharedMemory(
name=f"out-{self.name}", create=False
)
self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf)
def detect(self, tensor_input, threshold=0.4):
detections = []
# copy input to shared memory
@@ -213,14 +264,12 @@ class RemoteObjectDetector():
for d in self.out_np_shm:
if d[1] < threshold:
break
detections.append((
self.labels[int(d[0])],
float(d[1]),
(d[2], d[3], d[4], d[5])
))
detections.append(
(self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5]))
)
self.fps.update()
return detections
def cleanup(self):
self.shm.unlink()
self.out_shm.unlink()
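
Tying this file together: a camera process writes its input tensor into the named block, announces its id on detection_queue, and waits on its event; the detector fills the matching "out-" block and sets the event. A condensed, self-contained sketch of that round trip (camera name and detection values are illustrative):

import multiprocessing as mp
import numpy as np
from multiprocessing import shared_memory

def detector(queue, event):
    shm = shared_memory.SharedMemory(name="out-cam1")
    out = np.ndarray((20, 6), dtype=np.float32, buffer=shm.buf)
    queue.get()  # wait for a frame announcement
    out[0] = [17.0, 0.91, 0.1, 0.1, 0.5, 0.5]  # label id, score, y1, x1, y2, x2
    event.set()  # wake the waiting camera process
    del out
    shm.close()

if __name__ == "__main__":
    shm = shared_memory.SharedMemory(name="out-cam1", create=True, size=20 * 6 * 4)
    queue, event = mp.Queue(), mp.Event()
    p = mp.Process(target=detector, args=(queue, event))
    p.start()
    queue.put("cam1")  # announce which camera has a frame ready
    event.wait()  # block until the detector publishes results
    result = np.ndarray((20, 6), dtype=np.float32, buffer=shm.buf)
    print(result[0])  # first detection row
    p.join()
    del result
    shm.close()
    shm.unlink()
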


@@ -20,10 +20,13 @@ from peewee import fn
logger = logging.getLogger(__name__)
class EventProcessor(threading.Thread):
def __init__(self, config, camera_processes, event_queue, event_processed_queue, stop_event):
def __init__(
self, config, camera_processes, event_queue, event_processed_queue, stop_event
):
threading.Thread.__init__(self)
self.name = 'event_processor'
self.name = "event_processor"
self.config = config
self.camera_processes = camera_processes
self.cached_clips = {}
@@ -33,31 +36,35 @@ class EventProcessor(threading.Thread):
self.stop_event = stop_event
def should_create_clip(self, camera, event_data):
if event_data['false_positive']:
if event_data["false_positive"]:
return False
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].clips.required_zones
if len(required_zones) > 0 and not set(event_data['entered_zones']) & set(required_zones):
logger.debug(f"Not creating clip for {event_data['id']} because it did not enter required zones")
if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set(
required_zones
):
logger.debug(
f"Not creating clip for {event_data['id']} because it did not enter required zones"
)
return False
return True
def refresh_cache(self):
cached_files = os.listdir(CACHE_DIR)
files_in_use = []
for process in psutil.process_iter():
try:
if process.name() != 'ffmpeg':
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
if nt.path.startswith(CACHE_DIR):
files_in_use.append(nt.path.split('/')[-1])
files_in_use.append(nt.path.split("/")[-1])
except:
continue
@@ -65,119 +72,154 @@ class EventProcessor(threading.Thread):
if f in files_in_use or f in self.cached_clips:
continue
camera = '-'.join(f.split('-')[:-1])
start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
ffprobe_cmd = " ".join([
'ffprobe',
'-v',
'error',
'-show_entries',
'format=duration',
'-of',
'default=noprint_wrappers=1:nokey=1',
f"{os.path.join(CACHE_DIR,f)}"
])
camera = "-".join(f.split("-")[:-1])
start_time = datetime.datetime.strptime(
f.split("-")[-1].split(".")[0], "%Y%m%d%H%M%S"
)
ffprobe_cmd = " ".join(
[
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{os.path.join(CACHE_DIR,f)}",
]
)
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
if p_status == 0:
duration = float(output.decode('utf-8').strip())
duration = float(output.decode("utf-8").strip())
else:
logger.info(f"bad file: {f}")
os.remove(os.path.join(CACHE_DIR,f))
os.remove(os.path.join(CACHE_DIR, f))
continue
self.cached_clips[f] = {
'path': f,
'camera': camera,
'start_time': start_time.timestamp(),
'duration': duration
"path": f,
"camera": camera,
"start_time": start_time.timestamp(),
"duration": duration,
}
if len(self.events_in_process) > 0:
earliest_event = min(self.events_in_process.values(), key=lambda x:x['start_time'])['start_time']
earliest_event = min(
self.events_in_process.values(), key=lambda x: x["start_time"]
)["start_time"]
else:
earliest_event = datetime.datetime.now().timestamp()
# if the earliest event exceeds the max seconds, cap it
max_seconds = self.config.clips.max_seconds
if datetime.datetime.now().timestamp()-earliest_event > max_seconds:
earliest_event = datetime.datetime.now().timestamp()-max_seconds
if datetime.datetime.now().timestamp() - earliest_event > max_seconds:
earliest_event = datetime.datetime.now().timestamp() - max_seconds
for f, data in list(self.cached_clips.items()):
if earliest_event-90 > data['start_time']+data['duration']:
if earliest_event - 90 > data["start_time"] + data["duration"]:
del self.cached_clips[f]
logger.debug(f"Cleaning up cached file {f}")
os.remove(os.path.join(CACHE_DIR,f))
os.remove(os.path.join(CACHE_DIR, f))
# if we are still using more than 90% of the cache, proactively cleanup
cache_usage = shutil.disk_usage("/tmp/cache")
if cache_usage.used/cache_usage.total > .9 and cache_usage.free < 200000000 and len(self.cached_clips) > 0:
if (
cache_usage.used / cache_usage.total > 0.9
and cache_usage.free < 200000000
and len(self.cached_clips) > 0
):
logger.warning("More than 90% of the cache is used.")
logger.warning("Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config.")
logger.warning(
"Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config."
)
logger.warning("Proactively cleaning up the cache...")
while cache_usage.used/cache_usage.total > .9:
oldest_clip = min(self.cached_clips.values(), key=lambda x:x['start_time'])
del self.cached_clips[oldest_clip['path']]
os.remove(os.path.join(CACHE_DIR,oldest_clip['path']))
while cache_usage.used / cache_usage.total > 0.9:
oldest_clip = min(
self.cached_clips.values(), key=lambda x: x["start_time"]
)
del self.cached_clips[oldest_clip["path"]]
os.remove(os.path.join(CACHE_DIR, oldest_clip["path"]))
cache_usage = shutil.disk_usage("/tmp/cache")
def create_clip(self, camera, event_data, pre_capture, post_capture):
# get all clips from the camera with the event sorted
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
# if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds
wait_count = 0
while len(sorted_clips) == 0 or sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']+post_capture:
while (
len(sorted_clips) == 0
or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"]
< event_data["end_time"] + post_capture
):
if wait_count > 4:
logger.warning(f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event.")
logger.warning(
f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event."
)
return False
logger.debug(f"No cache clips for {camera}. Waiting...")
time.sleep(5)
self.refresh_cache()
# get all clips from the camera with the event sorted
sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time'])
sorted_clips = sorted(
[c for c in self.cached_clips.values() if c["camera"] == camera],
key=lambda i: i["start_time"],
)
wait_count += 1
playlist_start = event_data['start_time']-pre_capture
playlist_end = event_data['end_time']+post_capture
playlist_start = event_data["start_time"] - pre_capture
playlist_end = event_data["end_time"] + post_capture
playlist_lines = []
for clip in sorted_clips:
# clip ends before playlist start time, skip
if clip['start_time']+clip['duration'] < playlist_start:
if clip["start_time"] + clip["duration"] < playlist_start:
continue
# clip starts after playlist ends, finish
if clip['start_time'] > playlist_end:
if clip["start_time"] > playlist_end:
break
playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'")
# if this is the starting clip, add an inpoint
if clip['start_time'] < playlist_start:
playlist_lines.append(f"inpoint {int(playlist_start-clip['start_time'])}")
if clip["start_time"] < playlist_start:
playlist_lines.append(
f"inpoint {int(playlist_start-clip['start_time'])}"
)
# if this is the ending clip, add an outpoint
if clip['start_time']+clip['duration'] > playlist_end:
playlist_lines.append(f"outpoint {int(playlist_end-clip['start_time'])}")
if clip["start_time"] + clip["duration"] > playlist_end:
playlist_lines.append(
f"outpoint {int(playlist_end-clip['start_time'])}"
)
clip_name = f"{camera}-{event_data['id']}"
ffmpeg_cmd = [
'ffmpeg',
'-y',
'-protocol_whitelist',
'pipe,file',
'-f',
'concat',
'-safe',
'0',
'-i',
'-',
'-c',
'copy',
'-movflags',
'+faststart',
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4"
"ffmpeg",
"-y",
"-protocol_whitelist",
"pipe,file",
"-f",
"concat",
"-safe",
"0",
"-i",
"-",
"-c",
"copy",
"-movflags",
"+faststart",
f"{os.path.join(CLIPS_DIR, clip_name)}.mp4",
]
p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True)
p = sp.run(
ffmpeg_cmd,
input="\n".join(playlist_lines),
encoding="ascii",
capture_output=True,
)
if p.returncode != 0:
logger.error(p.stderr)
return False
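
For context, the playlist piped to ffmpeg's concat demuxer above is plain text: one file entry per cached segment, with inpoint/outpoint trimming the first and last. A sketch with hypothetical segments and a 4s-16s event window:

import os

CACHE_DIR = "/tmp/cache"
clips = [  # hypothetical cached segments, times in seconds
    {"path": "front-20210217120000.mp4", "start_time": 0.0, "duration": 10.0},
    {"path": "front-20210217120010.mp4", "start_time": 10.0, "duration": 10.0},
]
playlist_start, playlist_end = 4.0, 16.0

lines = []
for clip in clips:
    lines.append(f"file '{os.path.join(CACHE_DIR, clip['path'])}'")
    if clip["start_time"] < playlist_start:  # first segment: skip the head
        lines.append(f"inpoint {int(playlist_start - clip['start_time'])}")
    if clip["start_time"] + clip["duration"] > playlist_end:  # last segment: cut the tail
        lines.append(f"outpoint {int(playlist_end - clip['start_time'])}")

print("\n".join(lines))
# file '/tmp/cache/front-20210217120000.mp4'
# inpoint 4
# file '/tmp/cache/front-20210217120010.mp4'
# outpoint 6
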
@@ -199,68 +241,80 @@ class EventProcessor(threading.Thread):
logger.debug(f"Event received: {event_type} {camera} {event_data['id']}")
self.refresh_cache()
if event_type == 'start':
self.events_in_process[event_data['id']] = event_data
if event_type == "start":
self.events_in_process[event_data["id"]] = event_data
if event_type == 'end':
if event_type == "end":
clips_config = self.config.cameras[camera].clips
clip_created = False
if self.should_create_clip(camera, event_data):
if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects):
clip_created = self.create_clip(camera, event_data, clips_config.pre_capture, clips_config.post_capture)
if clip_created or event_data['has_snapshot']:
if clips_config.enabled and (
clips_config.objects is None
or event_data["label"] in clips_config.objects
):
clip_created = self.create_clip(
camera,
event_data,
clips_config.pre_capture,
clips_config.post_capture,
)
if clip_created or event_data["has_snapshot"]:
Event.create(
id=event_data['id'],
label=event_data['label'],
id=event_data["id"],
label=event_data["label"],
camera=camera,
start_time=event_data['start_time'],
end_time=event_data['end_time'],
top_score=event_data['top_score'],
false_positive=event_data['false_positive'],
zones=list(event_data['entered_zones']),
thumbnail=event_data['thumbnail'],
start_time=event_data["start_time"],
end_time=event_data["end_time"],
top_score=event_data["top_score"],
false_positive=event_data["false_positive"],
zones=list(event_data["entered_zones"]),
thumbnail=event_data["thumbnail"],
has_clip=clip_created,
has_snapshot=event_data['has_snapshot'],
has_snapshot=event_data["has_snapshot"],
)
del self.events_in_process[event_data['id']]
self.event_processed_queue.put((event_data['id'], camera))
del self.events_in_process[event_data["id"]]
self.event_processed_queue.put((event_data["id"], camera))
class EventCleanup(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
self.name = 'event_cleanup'
self.name = "event_cleanup"
self.config = config
self.stop_event = stop_event
self.camera_keys = list(self.config.cameras.keys())
def expire(self, media):
## Expire events from unlisted cameras based on the global config
if media == 'clips':
if media == "clips":
retain_config = self.config.clips.retain
file_extension = 'mp4'
update_params = {'has_clip': False}
file_extension = "mp4"
update_params = {"has_clip": False}
else:
retain_config = self.config.snapshots.retain
file_extension = 'jpg'
update_params = {'has_snapshot': False}
distinct_labels = (Event.select(Event.label)
.where(Event.camera.not_in(self.camera_keys))
.distinct())
file_extension = "jpg"
update_params = {"has_snapshot": False}
distinct_labels = (
Event.select(Event.label)
.where(Event.camera.not_in(self.camera_keys))
.distinct()
)
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# grab all events after specific time
expired_events = (
Event.select()
.where(Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label)
expired_events = Event.select().where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
# delete the media from disk
for event in expired_events:
@@ -268,56 +322,57 @@ class EventCleanup(threading.Thread):
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media.unlink(missing_ok=True)
# update the clips attribute for the db entry
update_query = (
Event.update(update_params)
.where(Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label)
update_query = Event.update(update_params).where(
Event.camera.not_in(self.camera_keys),
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
## Expire events from cameras based on the camera config
for name, camera in self.config.cameras.items():
if media == 'clips':
if media == "clips":
retain_config = camera.clips.retain
else:
retain_config = camera.snapshots.retain
# get distinct objects in database for this camera
distinct_labels = (Event.select(Event.label)
.where(Event.camera == name)
.distinct())
distinct_labels = (
Event.select(Event.label).where(Event.camera == name).distinct()
)
# loop over object types in db
for l in distinct_labels:
# get expiration time for this label
expire_days = retain_config.objects.get(l.label, retain_config.default)
expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp()
expire_after = (
datetime.datetime.now() - datetime.timedelta(days=expire_days)
).timestamp()
# grab all events after specific time
expired_events = (
Event.select()
.where(Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label)
expired_events = Event.select().where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
# delete the grabbed clips from disk
for event in expired_events:
media_name = f"{event.camera}-{event.id}"
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}")
media = Path(
f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}"
)
media.unlink(missing_ok=True)
# update the clips attribute for the db entry
update_query = (
Event.update(update_params)
.where( Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label)
update_query = Event.update(update_params).where(
Event.camera == name,
Event.start_time < expire_after,
Event.label == l.label,
)
update_query.execute()
def purge_duplicates(self):
duplicate_query = """with grouped_events as (
select id,
label,
camera,
has_snapshot,
has_clip,
row_number() over (
@@ -327,7 +382,7 @@ class EventCleanup(threading.Thread):
from event
)
select distinct id, camera, has_snapshot, has_clip from grouped_events
where copy_number > 1;"""
duplicate_events = Event.raw(duplicate_query)
@@ -341,13 +396,15 @@ class EventCleanup(threading.Thread):
media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4")
media.unlink(missing_ok=True)
(Event.delete()
.where( Event.id << [event.id for event in duplicate_events] )
.execute())
(
Event.delete()
.where(Event.id << [event.id for event in duplicate_events])
.execute()
)
def run(self):
counter = 0
while(True):
while True:
if self.stop_event.is_set():
logger.info(f"Exiting event cleanup...")
break
@@ -359,14 +416,12 @@ class EventCleanup(threading.Thread):
continue
counter = 0
self.expire('clips')
self.expire('snapshots')
self.expire("clips")
self.expire("snapshots")
self.purge_duplicates()
# drop events from db where has_clip and has_snapshot are false
delete_query = (
Event.delete()
.where( Event.has_clip == False,
Event.has_snapshot == False)
delete_query = Event.delete().where(
Event.has_clip == False, Event.has_snapshot == False
)
delete_query.execute()


@@ -9,8 +9,15 @@ from functools import reduce
import cv2
import gevent
import numpy as np
from flask import (Blueprint, Flask, Response, current_app, jsonify,
make_response, request)
from flask import (
Blueprint,
Flask,
Response,
current_app,
jsonify,
make_response,
request,
)
from flask_sockets import Sockets
from peewee import SqliteDatabase, operator, fn, DoesNotExist
from playhouse.shortcuts import model_to_dict
@@ -23,10 +30,11 @@ from frigate.version import VERSION
logger = logging.getLogger(__name__)
bp = Blueprint('frigate', __name__)
ws = Blueprint('ws', __name__)
bp = Blueprint("frigate", __name__)
ws = Blueprint("ws", __name__)
class MqttBackend():
class MqttBackend:
"""Interface for registering and updating WebSocket clients."""
def __init__(self, mqtt_client, topic_prefix):
@@ -42,36 +50,48 @@ class MqttBackend():
try:
json_message = json.loads(message)
json_message = {
'topic': f"{self.topic_prefix}/{json_message['topic']}",
'payload': json_message['payload'],
'retain': json_message.get('retain', False)
"topic": f"{self.topic_prefix}/{json_message['topic']}",
"payload": json_message.get["payload"],
"retain": json_message.get("retain", False),
}
except:
logger.warning("Unable to parse websocket message as valid json.")
return
logger.debug(f"Publishing mqtt message from websockets at {json_message['topic']}.")
self.mqtt_client.publish(json_message['topic'], json_message['payload'], retain=json_message['retain'])
logger.debug(
f"Publishing mqtt message from websockets at {json_message['topic']}."
)
self.mqtt_client.publish(
json_message["topic"],
json_message["payload"],
retain=json_message["retain"],
)
def run(self):
def send(client, userdata, message):
"""Sends mqtt messages to clients."""
try:
logger.debug(f"Received mqtt message on {message.topic}.")
ws_message = json.dumps({
'topic': message.topic.replace(f"{self.topic_prefix}/",""),
'payload': message.payload.decode()
})
ws_message = json.dumps(
{
"topic": message.topic.replace(f"{self.topic_prefix}/", ""),
"payload": message.payload.decode(),
}
)
except:
# if the payload can't be decoded don't relay to clients
logger.debug(f"MQTT payload for {message.topic} wasn't text. Skipping...")
logger.debug(
f"MQTT payload for {message.topic} wasn't text. Skipping..."
)
return
for client in self.clients:
try:
client.send(ws_message)
except:
logger.debug("Removing websocket client due to a closed connection.")
logger.debug(
"Removing websocket client due to a closed connection."
)
self.clients.remove(client)
self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send)
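
Both directions of this bridge speak small JSON envelopes: clients send {"topic", "payload", "retain"} and the backend prefixes the topic before publishing, while relayed MQTT messages arrive with the prefix already stripped. A sketch of both shapes, with illustrative topics:

import json

# client -> backend: publish a command through the websocket
outgoing = json.dumps({"topic": "front/detect/set", "payload": "ON", "retain": False})

# backend -> clients: relayed MQTT state, prefix already stripped
incoming = json.loads('{"topic": "front/detect/state", "payload": "ON"}')
print(outgoing, incoming["topic"], incoming["payload"])
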
@@ -80,7 +100,14 @@ class MqttBackend():
"""Maintains mqtt subscription in the background."""
gevent.spawn(self.run)
def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor, mqtt_client):
def create_app(
frigate_config,
database: SqliteDatabase,
stats_tracking,
detected_frames_processor,
mqtt_client,
):
app = Flask(__name__)
sockets = Sockets(app)
@@ -105,14 +132,16 @@ def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detecte
return app
@bp.route('/')
@bp.route("/")
def is_healthy():
return "Frigate is running. Alive and healthy!"
@bp.route('/events/summary')
@bp.route("/events/summary")
def events_summary():
has_clip = request.args.get('has_clip', type=int)
has_snapshot = request.args.get('has_snapshot', type=int)
has_clip = request.args.get("has_clip", type=int)
has_snapshot = request.args.get("has_snapshot", type=int)
clauses = []
@@ -126,35 +155,40 @@ def events_summary():
clauses.append((1 == 1))
groups = (
Event
.select(
Event.camera,
Event.label,
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'),
Event.zones,
fn.COUNT(Event.id).alias('count')
)
.where(reduce(operator.and_, clauses))
.group_by(
Event.camera,
Event.label,
fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')),
Event.zones
)
Event.select(
Event.camera,
Event.label,
fn.strftime(
"%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
).alias("day"),
Event.zones,
fn.COUNT(Event.id).alias("count"),
)
.where(reduce(operator.and_, clauses))
.group_by(
Event.camera,
Event.label,
fn.strftime(
"%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime")
),
Event.zones,
)
)
return jsonify([e for e in groups.dicts()])
@bp.route('/events/<id>')
@bp.route("/events/<id>")
def event(id):
try:
return model_to_dict(Event.get(Event.id == id))
except DoesNotExist:
return "Event not found", 404
@bp.route('/events/<id>/thumbnail.jpg')
@bp.route("/events/<id>/thumbnail.jpg")
def event_thumbnail(id):
format = request.args.get('format', 'ios')
format = request.args.get("format", "ios")
thumbnail_bytes = None
try:
event = Event.get(Event.id == id)
@@ -162,7 +196,9 @@ def event_thumbnail(id):
except DoesNotExist:
# see if the object is currently being tracked
try:
for camera_state in current_app.detected_frames_processor.camera_states.values():
for (
camera_state
) in current_app.detected_frames_processor.camera_states.values():
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
@@ -174,18 +210,27 @@ def event_thumbnail(id):
return "Event not found", 404
# android notifications prefer a 2:1 ratio
if format == 'android':
if format == "android":
jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8)
img = cv2.imdecode(jpg_as_np, flags=1)
thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0))
ret, jpg = cv2.imencode('.jpg', thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
thumbnail = cv2.copyMakeBorder(
img,
0,
0,
int(img.shape[1] * 0.5),
int(img.shape[1] * 0.5),
cv2.BORDER_CONSTANT,
(0, 0, 0),
)
ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
thumbnail_bytes = jpg.tobytes()
response = make_response(thumbnail_bytes)
response.headers['Content-Type'] = 'image/jpg'
response.headers["Content-Type"] = "image/jpg"
return response
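
The android branch above pads a roughly square thumbnail with half its width of black on each side, producing the 2:1 aspect android notifications prefer. A worked example with a hypothetical 175x175 thumbnail:

import cv2
import numpy as np

img = np.zeros((175, 175, 3), np.uint8)  # hypothetical square thumbnail
pad = int(img.shape[1] * 0.5)  # 87 px added to each side
padded = cv2.copyMakeBorder(img, 0, 0, pad, pad, cv2.BORDER_CONSTANT, value=(0, 0, 0))
print(padded.shape)  # (175, 349, 3), roughly 2:1
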
@bp.route('/events/<id>/snapshot.jpg')
@bp.route("/events/<id>/snapshot.jpg")
def event_snapshot(id):
jpg_bytes = None
try:
@@ -193,20 +238,24 @@ def event_snapshot(id):
if not event.has_snapshot:
return "Snapshot not available", 404
# read snapshot from disk
with open(os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), 'rb') as image_file:
with open(
os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), "rb"
) as image_file:
jpg_bytes = image_file.read()
except DoesNotExist:
# see if the object is currently being tracked
try:
for camera_state in current_app.detected_frames_processor.camera_states.values():
for (
camera_state
) in current_app.detected_frames_processor.camera_states.values():
if id in camera_state.tracked_objects:
tracked_obj = camera_state.tracked_objects.get(id)
if not tracked_obj is None:
jpg_bytes = tracked_obj.get_jpg_bytes(
timestamp=request.args.get('timestamp', type=int),
bounding_box=request.args.get('bbox', type=int),
crop=request.args.get('crop', type=int),
height=request.args.get('h', type=int)
timestamp=request.args.get("timestamp", type=int),
bounding_box=request.args.get("bbox", type=int),
crop=request.args.get("crop", type=int),
height=request.args.get("h", type=int),
)
except:
return "Event not found", 404
@@ -214,20 +263,21 @@ def event_snapshot(id):
return "Event not found", 404
response = make_response(jpg_bytes)
response.headers['Content-Type'] = 'image/jpg'
response.headers["Content-Type"] = "image/jpg"
return response
@bp.route('/events')
@bp.route("/events")
def events():
limit = request.args.get('limit', 100)
camera = request.args.get('camera')
label = request.args.get('label')
zone = request.args.get('zone')
after = request.args.get('after', type=float)
before = request.args.get('before', type=float)
has_clip = request.args.get('has_clip', type=int)
has_snapshot = request.args.get('has_snapshot', type=int)
include_thumbnails = request.args.get('include_thumbnails', default=1, type=int)
limit = request.args.get("limit", 100)
camera = request.args.get("camera")
label = request.args.get("label")
zone = request.args.get("zone")
after = request.args.get("after", type=float)
before = request.args.get("before", type=float)
has_clip = request.args.get("has_clip", type=int)
has_snapshot = request.args.get("has_snapshot", type=int)
include_thumbnails = request.args.get("include_thumbnails", default=1, type=int)
clauses = []
excluded_fields = []
@@ -239,7 +289,7 @@ def events():
clauses.append((Event.label == label))
if zone:
clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*"))
clauses.append((Event.zones.cast("text") % f'*"{zone}"*'))
if after:
clauses.append((Event.start_time >= after))
@@ -259,116 +309,142 @@ def events():
if len(clauses) == 0:
clauses.append((1 == 1))
events = (Event.select()
.where(reduce(operator.and_, clauses))
.order_by(Event.start_time.desc())
.limit(limit))
events = (
Event.select()
.where(reduce(operator.and_, clauses))
.order_by(Event.start_time.desc())
.limit(limit)
)
return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events])
@bp.route('/config')
@bp.route("/config")
def config():
return jsonify(current_app.frigate_config.to_dict())
@bp.route('/version')
@bp.route("/version")
def version():
return VERSION
@bp.route('/stats')
@bp.route("/stats")
def stats():
stats = stats_snapshot(current_app.stats_tracking)
return jsonify(stats)
@bp.route('/<camera_name>/<label>/best.jpg')
@bp.route("/<camera_name>/<label>/best.jpg")
def best(camera_name, label):
if camera_name in current_app.frigate_config.cameras:
best_object = current_app.detected_frames_processor.get_best(camera_name, label)
best_frame = best_object.get('frame')
best_frame = best_object.get("frame")
if best_frame is None:
best_frame = np.zeros((720,1280,3), np.uint8)
best_frame = np.zeros((720, 1280, 3), np.uint8)
else:
best_frame = cv2.cvtColor(best_frame, cv2.COLOR_YUV2BGR_I420)
crop = bool(request.args.get('crop', 0, type=int))
crop = bool(request.args.get("crop", 0, type=int))
if crop:
box = best_object.get('box', (0,0,300,300))
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
box = best_object.get("box", (0, 0, 300, 300))
region = calculate_region(
best_frame.shape, box[0], box[1], box[2], box[3], 1.1
)
best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
height = int(request.args.get('h', str(best_frame.shape[0])))
width = int(height*best_frame.shape[1]/best_frame.shape[0])
height = int(request.args.get("h", str(best_frame.shape[0])))
width = int(height * best_frame.shape[1] / best_frame.shape[0])
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
best_frame = cv2.resize(
best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
)
ret, jpg = cv2.imencode(".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
response.headers["Content-Type"] = "image/jpg"
return response
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/<camera_name>')
@bp.route("/<camera_name>")
def mjpeg_feed(camera_name):
fps = int(request.args.get('fps', '3'))
height = int(request.args.get('h', '360'))
fps = int(request.args.get("fps", "3"))
height = int(request.args.get("h", "360"))
draw_options = {
'bounding_boxes': request.args.get('bbox', type=int),
'timestamp': request.args.get('timestamp', type=int),
'zones': request.args.get('zones', type=int),
'mask': request.args.get('mask', type=int),
'motion_boxes': request.args.get('motion', type=int),
'regions': request.args.get('regions', type=int),
"bounding_boxes": request.args.get("bbox", type=int),
"timestamp": request.args.get("timestamp", type=int),
"zones": request.args.get("zones", type=int),
"mask": request.args.get("mask", type=int),
"motion_boxes": request.args.get("motion", type=int),
"regions": request.args.get("regions", type=int),
}
if camera_name in current_app.frigate_config.cameras:
# return a multipart response
return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
mimetype='multipart/x-mixed-replace; boundary=frame')
return Response(
imagestream(
current_app.detected_frames_processor,
camera_name,
fps,
height,
draw_options,
),
mimetype="multipart/x-mixed-replace; boundary=frame",
)
else:
return "Camera named {} not found".format(camera_name), 404
@bp.route('/<camera_name>/latest.jpg')
@bp.route("/<camera_name>/latest.jpg")
def latest_frame(camera_name):
draw_options = {
'bounding_boxes': request.args.get('bbox', type=int),
'timestamp': request.args.get('timestamp', type=int),
'zones': request.args.get('zones', type=int),
'mask': request.args.get('mask', type=int),
'motion_boxes': request.args.get('motion', type=int),
'regions': request.args.get('regions', type=int),
"bounding_boxes": request.args.get("bbox", type=int),
"timestamp": request.args.get("timestamp", type=int),
"zones": request.args.get("zones", type=int),
"mask": request.args.get("mask", type=int),
"motion_boxes": request.args.get("motion", type=int),
"regions": request.args.get("regions", type=int),
}
if camera_name in current_app.frigate_config.cameras:
# max out at specified FPS
frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
frame = current_app.detected_frames_processor.get_current_frame(
camera_name, draw_options
)
if frame is None:
frame = np.zeros((720,1280,3), np.uint8)
frame = np.zeros((720, 1280, 3), np.uint8)
height = int(request.args.get('h', str(frame.shape[0])))
width = int(height*frame.shape[1]/frame.shape[0])
height = int(request.args.get("h", str(frame.shape[0])))
width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
response = make_response(jpg.tobytes())
response.headers['Content-Type'] = 'image/jpg'
response.headers["Content-Type"] = "image/jpg"
return response
else:
return "Camera named {} not found".format(camera_name), 404
def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
while True:
# max out at specified FPS
gevent.sleep(1/fps)
frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
if frame is None:
frame = np.zeros((height,int(height*16/9),3), np.uint8)
frame = np.zeros((height, int(height * 16 / 9), 3), np.uint8)
width = int(height*frame.shape[1]/frame.shape[0])
width = int(height * frame.shape[1] / frame.shape[0])
frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_LINEAR)
ret, jpg = cv2.imencode('.jpg', frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
ret, jpg = cv2.imencode(".jpg", frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n" + jpg.tobytes() + b"\r\n\r\n"
)
@ws.route('/ws')
@ws.route("/ws")
def echo_socket(socket):
current_app.mqtt_backend.register(socket)


@@ -13,22 +13,25 @@ from collections import deque
def listener_configurer():
root = logging.getLogger()
console_handler = logging.StreamHandler()
formatter = logging.Formatter('%(name)-30s %(levelname)-8s: %(message)s')
formatter = logging.Formatter("%(name)-30s %(levelname)-8s: %(message)s")
console_handler.setFormatter(formatter)
root.addHandler(console_handler)
root.setLevel(logging.INFO)
def root_configurer(queue):
h = handlers.QueueHandler(queue)
root = logging.getLogger()
root.addHandler(h)
root.setLevel(logging.INFO)
def log_process(log_queue):
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
@@ -45,6 +48,7 @@ def log_process(log_queue):
logger = logging.getLogger(record.name)
logger.handle(record)
# based on https://codereview.stackexchange.com/a/17959
class LogPipe(threading.Thread):
def __init__(self, log_name, level):
@@ -61,23 +65,20 @@ class LogPipe(threading.Thread):
self.start()
def fileno(self):
"""Return the write file descriptor of the pipe
"""
"""Return the write file descriptor of the pipe"""
return self.fdWrite
def run(self):
"""Run the thread, logging everything.
"""
for line in iter(self.pipeReader.readline, ''):
self.deque.append(line.strip('\n'))
"""Run the thread, logging everything."""
for line in iter(self.pipeReader.readline, ""):
self.deque.append(line.strip("\n"))
self.pipeReader.close()
def dump(self):
while len(self.deque) > 0:
self.logger.log(self.level, self.deque.popleft())
def close(self):
"""Close the write end of the pipe.
"""
"""Close the write end of the pipe."""
os.close(self.fdWrite)
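
Usage-wise, LogPipe can stand in for a file anywhere a file descriptor is accepted because it implements fileno(); ffmpeg's stderr can be pointed at it and flushed into the logging system later. A sketch, assuming frigate.log is importable and ffmpeg is on PATH:

import logging
import subprocess as sp
from frigate.log import LogPipe

logpipe = LogPipe("ffmpeg.front", logging.ERROR)  # logger name is illustrative
p = sp.Popen(["ffmpeg", "-version"], stdout=sp.DEVNULL, stderr=logpipe)
p.wait()
logpipe.dump()  # re-emit buffered stderr lines through the logger
logpipe.close()  # close the write end of the pipe
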


@@ -4,26 +4,37 @@ import numpy as np
from frigate.config import MotionConfig
class MotionDetector():
class MotionDetector:
def __init__(self, frame_shape, config: MotionConfig):
self.config = config
self.frame_shape = frame_shape
self.resize_factor = frame_shape[0]/config.frame_height
self.motion_frame_size = (config.frame_height, config.frame_height*frame_shape[1]//frame_shape[0])
self.resize_factor = frame_shape[0] / config.frame_height
self.motion_frame_size = (
config.frame_height,
config.frame_height * frame_shape[1] // frame_shape[0],
)
self.avg_frame = np.zeros(self.motion_frame_size, np.float)
self.avg_delta = np.zeros(self.motion_frame_size, np.float)
self.motion_frame_count = 0
self.frame_counter = 0
resized_mask = cv2.resize(config.mask, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
self.mask = np.where(resized_mask==[0])
resized_mask = cv2.resize(
config.mask,
dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
interpolation=cv2.INTER_LINEAR,
)
self.mask = np.where(resized_mask == [0])
def detect(self, frame):
motion_boxes = []
gray = frame[0:self.frame_shape[0], 0:self.frame_shape[1]]
gray = frame[0 : self.frame_shape[0], 0 : self.frame_shape[1]]
# resize frame
resized_frame = cv2.resize(gray, dsize=(self.motion_frame_size[1], self.motion_frame_size[0]), interpolation=cv2.INTER_LINEAR)
resized_frame = cv2.resize(
gray,
dsize=(self.motion_frame_size[1], self.motion_frame_size[0]),
interpolation=cv2.INTER_LINEAR,
)
# TODO: can I improve the contrast of the grayscale image here?
@@ -48,7 +59,9 @@ class MotionDetector():
# compute the threshold image for the current frame
# TODO: threshold
current_thresh = cv2.threshold(frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
current_thresh = cv2.threshold(
frameDelta, self.config.threshold, 255, cv2.THRESH_BINARY
)[1]
# black out everything in the avg_delta where there isnt motion in the current frame
avg_delta_image = cv2.convertScaleAbs(self.avg_delta)
@@ -56,7 +69,9 @@ class MotionDetector():
# then look for deltas above the threshold, but only in areas where there is a delta
# in the current frame. this prevents deltas from previous frames from being included
thresh = cv2.threshold(avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.threshold(
avg_delta_image, self.config.threshold, 255, cv2.THRESH_BINARY
)[1]
# dilate the thresholded image to fill in holes, then find contours
# on thresholded image
@@ -70,16 +85,27 @@ class MotionDetector():
contour_area = cv2.contourArea(c)
if contour_area > self.config.contour_area:
x, y, w, h = cv2.boundingRect(c)
motion_boxes.append((int(x*self.resize_factor), int(y*self.resize_factor), int((x+w)*self.resize_factor), int((y+h)*self.resize_factor)))
motion_boxes.append(
(
int(x * self.resize_factor),
int(y * self.resize_factor),
int((x + w) * self.resize_factor),
int((y + h) * self.resize_factor),
)
)
if len(motion_boxes) > 0:
self.motion_frame_count += 1
if self.motion_frame_count >= 10:
# only average in the current frame if the difference persists for a bit
cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
cv2.accumulateWeighted(
resized_frame, self.avg_frame, self.config.frame_alpha
)
else:
# when no motion, just keep averaging the frames together
cv2.accumulateWeighted(resized_frame, self.avg_frame, self.config.frame_alpha)
cv2.accumulateWeighted(
resized_frame, self.avg_frame, self.config.frame_alpha
)
self.motion_frame_count = 0
return motion_boxes
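The detect() pipeline reformatted above is the textbook running-average scheme: downscale, diff against an accumulated average, threshold, dilate, take contours, then scale the boxes back up by resize_factor. A minimal, self-contained sketch of that technique (function and parameter names here are illustrative, not Frigate's API; assumes gray is a uint8 frame and avg_frame a float32 array of the same shape):

    import cv2

    def detect_motion(gray, avg_frame, threshold=25, alpha=0.2, min_area=100):
        # diff the current frame against the running average
        frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
        thresh = cv2.threshold(frame_delta, threshold, 255, cv2.THRESH_BINARY)[1]
        # dilate to fill holes, then find contours (OpenCV 4 return signature)
        thresh = cv2.dilate(thresh, None, iterations=2)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        boxes = [cv2.boundingRect(c) for c in contours if cv2.contourArea(c) > min_area]
        # fold the current frame into the running average
        cv2.accumulateWeighted(gray, avg_frame, alpha)
        return boxes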

View File

@@ -7,6 +7,7 @@ from frigate.config import FrigateConfig
logger = logging.getLogger(__name__)
def create_mqtt_client(config: FrigateConfig, camera_metrics):
mqtt_config = config.mqtt
@@ -14,15 +15,15 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
payload = message.payload.decode()
logger.debug(f"on_clips_toggle: {message.topic} {payload}")
camera_name = message.topic.split('/')[-3]
camera_name = message.topic.split("/")[-3]
clips_settings = config.cameras[camera_name].clips
if payload == 'ON':
if payload == "ON":
if not clips_settings.enabled:
logger.info(f"Turning on clips for {camera_name} via mqtt")
clips_settings._enabled = True
elif payload == 'OFF':
elif payload == "OFF":
if clips_settings.enabled:
logger.info(f"Turning off clips for {camera_name} via mqtt")
clips_settings._enabled = False
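A note on the split("/")[-3] above: indexing from the end picks out the camera segment no matter how many levels the configured topic prefix contains, which is presumably why a negative index is used (example topics illustrative):

    "frigate/back/clips/set".split("/")[-3]        # -> "back"
    "home/frigate/back/clips/set".split("/")[-3]   # -> "back"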
@@ -36,15 +37,15 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
payload = message.payload.decode()
logger.debug(f"on_snapshots_toggle: {message.topic} {payload}")
camera_name = message.topic.split('/')[-3]
camera_name = message.topic.split("/")[-3]
snapshots_settings = config.cameras[camera_name].snapshots
if payload == 'ON':
if payload == "ON":
if not snapshots_settings.enabled:
logger.info(f"Turning on snapshots for {camera_name} via mqtt")
snapshots_settings._enabled = True
elif payload == 'OFF':
elif payload == "OFF":
if snapshots_settings.enabled:
logger.info(f"Turning off snapshots for {camera_name} via mqtt")
snapshots_settings._enabled = False
@@ -53,21 +54,21 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
state_topic = f"{message.topic[:-4]}/state"
client.publish(state_topic, payload, retain=True)
def on_detect_command(client, userdata, message):
payload = message.payload.decode()
logger.debug(f"on_detect_toggle: {message.topic} {payload}")
camera_name = message.topic.split('/')[-3]
camera_name = message.topic.split("/")[-3]
detect_settings = config.cameras[camera_name].detect
if payload == 'ON':
if payload == "ON":
if not camera_metrics[camera_name]["detection_enabled"].value:
logger.info(f"Turning on detection for {camera_name} via mqtt")
camera_metrics[camera_name]["detection_enabled"].value = True
detect_settings._enabled = True
elif payload == 'OFF':
elif payload == "OFF":
if camera_metrics[camera_name]["detection_enabled"].value:
logger.info(f"Turning off detection for {camera_name} via mqtt")
camera_metrics[camera_name]["detection_enabled"].value = False
@@ -88,21 +89,32 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
elif rc == 5:
logger.error("MQTT Not authorized")
else:
logger.error("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
logger.error(
"Unable to connect to MQTT: Connection refused. Error code: "
+ str(rc)
)
logger.info("MQTT connected")
client.subscribe(f"{mqtt_config.topic_prefix}/#")
client.publish(mqtt_config.topic_prefix+'/available', 'online', retain=True)
client.publish(mqtt_config.topic_prefix + "/available", "online", retain=True)
client = mqtt.Client(client_id=mqtt_config.client_id)
client.on_connect = on_connect
client.will_set(mqtt_config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
client.will_set(
mqtt_config.topic_prefix + "/available", payload="offline", qos=1, retain=True
)
# register callbacks
for name in config.cameras.keys():
client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/clips/set", on_clips_command)
client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command)
client.message_callback_add(f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command)
client.message_callback_add(
f"{mqtt_config.topic_prefix}/{name}/clips/set", on_clips_command
)
client.message_callback_add(
f"{mqtt_config.topic_prefix}/{name}/snapshots/set", on_snapshots_command
)
client.message_callback_add(
f"{mqtt_config.topic_prefix}/{name}/detect/set", on_detect_command
)
if not mqtt_config.user is None:
client.username_pw_set(mqtt_config.user, password=mqtt_config.password)
@@ -115,10 +127,20 @@ def create_mqtt_client(config: FrigateConfig, camera_metrics):
client.loop_start()
for name in config.cameras.keys():
client.publish(f"{mqtt_config.topic_prefix}/{name}/clips/state", 'ON' if config.cameras[name].clips.enabled else 'OFF', retain=True)
client.publish(f"{mqtt_config.topic_prefix}/{name}/snapshots/state", 'ON' if config.cameras[name].snapshots.enabled else 'OFF', retain=True)
client.publish(f"{mqtt_config.topic_prefix}/{name}/detect/state", 'ON' if config.cameras[name].detect.enabled else 'OFF', retain=True)
client.subscribe(f"{mqtt_config.topic_prefix}/#")
client.publish(
f"{mqtt_config.topic_prefix}/{name}/clips/state",
"ON" if config.cameras[name].clips.enabled else "OFF",
retain=True,
)
client.publish(
f"{mqtt_config.topic_prefix}/{name}/snapshots/state",
"ON" if config.cameras[name].snapshots.enabled else "OFF",
retain=True,
)
client.publish(
f"{mqtt_config.topic_prefix}/{name}/detect/state",
"ON" if config.cameras[name].detect.enabled else "OFF",
retain=True,
)
return client
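Together these callbacks give every camera three retained switches (clips, snapshots, detect), each with a .../set command topic and a .../state feedback topic. A hedged usage sketch from the far side of the broker (broker host, port, camera name, and the default topic prefix are assumptions here):

    import paho.mqtt.client as mqtt

    client = mqtt.Client()
    client.connect("mqtt", 1883)
    # ask Frigate to enable detection for camera "back"; the new state is
    # echoed, retained, on frigate/back/detect/state
    client.publish("frigate/back/detect/set", "ON")
    client.disconnect()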

View File

@@ -24,44 +24,49 @@ from frigate.util import SharedMemoryFrameManager, draw_box_with_label, calculat
logger = logging.getLogger(__name__)
PATH_TO_LABELS = '/labelmap.txt'
PATH_TO_LABELS = "/labelmap.txt"
LABELS = load_labels(PATH_TO_LABELS)
cmap = plt.cm.get_cmap('tab10', len(LABELS.keys()))
cmap = plt.cm.get_cmap("tab10", len(LABELS.keys()))
COLOR_MAP = {}
for key, val in LABELS.items():
COLOR_MAP[val] = tuple(int(round(255 * c)) for c in cmap(key)[:3])
def on_edge(box, frame_shape):
if (
box[0] == 0 or
box[1] == 0 or
box[2] == frame_shape[1]-1 or
box[3] == frame_shape[0]-1
box[0] == 0
or box[1] == 0
or box[2] == frame_shape[1] - 1
or box[3] == frame_shape[0] - 1
):
return True
def is_better_thumbnail(current_thumb, new_obj, frame_shape) -> bool:
# larger is better
# cutoff images are less ideal, but they should also be smaller?
# better scores are obviously better too
# if the new_thumb is on an edge, and the current thumb is not
if on_edge(new_obj['box'], frame_shape) and not on_edge(current_thumb['box'], frame_shape):
if on_edge(new_obj["box"], frame_shape) and not on_edge(
current_thumb["box"], frame_shape
):
return False
# if the score is better by more than 5%
if new_obj['score'] > current_thumb['score']+.05:
if new_obj["score"] > current_thumb["score"] + 0.05:
return True
# if the area is 10% larger
if new_obj['area'] > current_thumb['area']*1.1:
if new_obj["area"] > current_thumb["area"] * 1.1:
return True
return False
class TrackedObject():
class TrackedObject:
def __init__(self, camera, camera_config: CameraConfig, frame_cache, obj_data):
self.obj_data = obj_data
self.camera = camera
@@ -78,14 +83,14 @@ class TrackedObject():
self.previous = self.to_dict()
# start the score history
self.score_history = [self.obj_data['score']]
self.score_history = [self.obj_data["score"]]
def _is_false_positive(self):
# once a true positive, always a true positive
if not self.false_positive:
return False
threshold = self.camera_config.objects.filters[self.obj_data['label']].threshold
threshold = self.camera_config.objects.filters[self.obj_data["label"]].threshold
if self.computed_score < threshold:
return True
return False
@@ -94,17 +99,17 @@ class TrackedObject():
scores = self.score_history[:]
# pad with zeros if you don't have at least 3 scores
if len(scores) < 3:
scores += [0.0]*(3 - len(scores))
scores += [0.0] * (3 - len(scores))
return median(scores)
def update(self, current_frame_time, obj_data):
significant_update = False
self.obj_data.update(obj_data)
# if the object is not in the current frame, add a 0.0 to the score history
if self.obj_data['frame_time'] != current_frame_time:
if self.obj_data["frame_time"] != current_frame_time:
self.score_history.append(0.0)
else:
self.score_history.append(self.obj_data['score'])
self.score_history.append(self.obj_data["score"])
# only keep the last 10 scores
if len(self.score_history) > 10:
self.score_history = self.score_history[-10:]
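The zero-padding in computed_score is what keeps a single lucky detection from promoting a brand-new object. A worked example (score invented):

    from statistics import median

    scores = [0.92]                              # one strong hit so far
    padded = scores + [0.0] * (3 - len(scores))  # [0.92, 0.0, 0.0]
    median(padded)                               # 0.0, still a false positive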
@@ -117,27 +122,26 @@ class TrackedObject():
if not self.false_positive:
# determine if this frame is a better thumbnail
if (
self.thumbnail_data is None
or is_better_thumbnail(self.thumbnail_data, self.obj_data, self.camera_config.frame_shape)
if self.thumbnail_data is None or is_better_thumbnail(
self.thumbnail_data, self.obj_data, self.camera_config.frame_shape
):
self.thumbnail_data = {
'frame_time': self.obj_data['frame_time'],
'box': self.obj_data['box'],
'area': self.obj_data['area'],
'region': self.obj_data['region'],
'score': self.obj_data['score']
"frame_time": self.obj_data["frame_time"],
"box": self.obj_data["box"],
"area": self.obj_data["area"],
"region": self.obj_data["region"],
"score": self.obj_data["score"],
}
significant_update = True
# check zones
current_zones = []
bottom_center = (self.obj_data['centroid'][0], self.obj_data['box'][3])
bottom_center = (self.obj_data["centroid"][0], self.obj_data["box"][3])
# check each zone
for name, zone in self.camera_config.zones.items():
contour = zone.contour
# check if the object is in the zone
if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
if cv2.pointPolygonTest(contour, bottom_center, False) >= 0:
# if the object passed the filters once, don't apply again
if name in self.current_zones or not zone_filtered(self, zone.filters):
current_zones.append(name)
@@ -152,91 +156,131 @@ class TrackedObject():
def to_dict(self, include_thumbnail: bool = False):
return {
'id': self.obj_data['id'],
'camera': self.camera,
'frame_time': self.obj_data['frame_time'],
'label': self.obj_data['label'],
'top_score': self.top_score,
'false_positive': self.false_positive,
'start_time': self.obj_data['start_time'],
'end_time': self.obj_data.get('end_time', None),
'score': self.obj_data['score'],
'box': self.obj_data['box'],
'area': self.obj_data['area'],
'region': self.obj_data['region'],
'current_zones': self.current_zones.copy(),
'entered_zones': list(self.entered_zones).copy(),
'thumbnail': base64.b64encode(self.get_thumbnail()).decode('utf-8') if include_thumbnail else None
"id": self.obj_data["id"],
"camera": self.camera,
"frame_time": self.obj_data["frame_time"],
"label": self.obj_data["label"],
"top_score": self.top_score,
"false_positive": self.false_positive,
"start_time": self.obj_data["start_time"],
"end_time": self.obj_data.get("end_time", None),
"score": self.obj_data["score"],
"box": self.obj_data["box"],
"area": self.obj_data["area"],
"region": self.obj_data["region"],
"current_zones": self.current_zones.copy(),
"entered_zones": list(self.entered_zones).copy(),
"thumbnail": base64.b64encode(self.get_thumbnail()).decode("utf-8")
if include_thumbnail
else None,
}
def get_thumbnail(self):
if self.thumbnail_data is None or not self.thumbnail_data['frame_time'] in self.frame_cache:
ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
if (
self.thumbnail_data is None
or not self.thumbnail_data["frame_time"] in self.frame_cache
):
ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
jpg_bytes = self.get_jpg_bytes(timestamp=False, bounding_box=False, crop=True, height=175)
jpg_bytes = self.get_jpg_bytes(
timestamp=False, bounding_box=False, crop=True, height=175
)
if jpg_bytes:
return jpg_bytes
else:
ret, jpg = cv2.imencode('.jpg', np.zeros((175,175,3), np.uint8))
ret, jpg = cv2.imencode(".jpg", np.zeros((175, 175, 3), np.uint8))
return jpg.tobytes()
def get_jpg_bytes(self, timestamp=False, bounding_box=False, crop=False, height=None):
def get_jpg_bytes(
self, timestamp=False, bounding_box=False, crop=False, height=None
):
if self.thumbnail_data is None:
return None
try:
best_frame = cv2.cvtColor(self.frame_cache[self.thumbnail_data['frame_time']], cv2.COLOR_YUV2BGR_I420)
best_frame = cv2.cvtColor(
self.frame_cache[self.thumbnail_data["frame_time"]],
cv2.COLOR_YUV2BGR_I420,
)
except KeyError:
logger.warning(f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache")
logger.warning(
f"Unable to create jpg because frame {self.thumbnail_data['frame_time']} is not in the cache"
)
return None
if bounding_box:
thickness = 2
color = COLOR_MAP[self.obj_data['label']]
color = COLOR_MAP[self.obj_data["label"]]
# draw the bounding boxes on the frame
box = self.thumbnail_data['box']
draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], self.obj_data['label'], f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}", thickness=thickness, color=color)
box = self.thumbnail_data["box"]
draw_box_with_label(
best_frame,
box[0],
box[1],
box[2],
box[3],
self.obj_data["label"],
f"{int(self.thumbnail_data['score']*100)}% {int(self.thumbnail_data['area'])}",
thickness=thickness,
color=color,
)
if crop:
box = self.thumbnail_data['box']
region = calculate_region(best_frame.shape, box[0], box[1], box[2], box[3], 1.1)
best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
box = self.thumbnail_data["box"]
region = calculate_region(
best_frame.shape, box[0], box[1], box[2], box[3], 1.1
)
best_frame = best_frame[region[1] : region[3], region[0] : region[2]]
if height:
width = int(height*best_frame.shape[1]/best_frame.shape[0])
best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)
width = int(height * best_frame.shape[1] / best_frame.shape[0])
best_frame = cv2.resize(
best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA
)
if timestamp:
time_to_show = datetime.datetime.fromtimestamp(self.thumbnail_data['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
time_to_show = datetime.datetime.fromtimestamp(
self.thumbnail_data["frame_time"]
).strftime("%m/%d/%Y %H:%M:%S")
size = cv2.getTextSize(
time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2
)
text_width = size[0][0]
desired_size = max(150, 0.33*best_frame.shape[1])
font_scale = desired_size/text_width
cv2.putText(best_frame, time_to_show, (5, best_frame.shape[0]-7), cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale, color=(255, 255, 255), thickness=2)
desired_size = max(150, 0.33 * best_frame.shape[1])
font_scale = desired_size / text_width
cv2.putText(
best_frame,
time_to_show,
(5, best_frame.shape[0] - 7),
cv2.FONT_HERSHEY_SIMPLEX,
fontScale=font_scale,
color=(255, 255, 255),
thickness=2,
)
ret, jpg = cv2.imencode('.jpg', best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
ret, jpg = cv2.imencode(".jpg", best_frame, [int(cv2.IMWRITE_JPEG_QUALITY), 70])
if ret:
return jpg.tobytes()
else:
return None
def zone_filtered(obj: TrackedObject, object_config):
object_name = obj.obj_data['label']
object_name = obj.obj_data["label"]
if object_name in object_config:
obj_settings = object_config[object_name]
# if the min area is larger than the
# detected object, don't add it to detected objects
if obj_settings.min_area > obj.obj_data['area']:
if obj_settings.min_area > obj.obj_data["area"]:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.max_area < obj.obj_data['area']:
if obj_settings.max_area < obj.obj_data["area"]:
return True
# if the score is lower than the threshold, skip
@@ -245,8 +289,9 @@ def zone_filtered(obj: TrackedObject, object_config):
return False
# Maintains the state of a camera
class CameraState():
class CameraState:
def __init__(self, name, config, frame_manager):
self.name = name
self.config = config
@@ -269,46 +314,87 @@ class CameraState():
with self.current_frame_lock:
frame_copy = np.copy(self._current_frame)
frame_time = self.current_frame_time
tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}
tracked_objects = {k: v.to_dict() for k, v in self.tracked_objects.items()}
motion_boxes = self.motion_boxes.copy()
regions = self.regions.copy()
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
# draw on the frame
if draw_options.get('bounding_boxes'):
if draw_options.get("bounding_boxes"):
# draw the bounding boxes on the frame
for obj in tracked_objects.values():
thickness = 2
color = COLOR_MAP[obj['label']]
color = COLOR_MAP[obj["label"]]
if obj['frame_time'] != frame_time:
if obj["frame_time"] != frame_time:
thickness = 1
color = (255,0,0)
color = (255, 0, 0)
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(frame_copy, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
box = obj["box"]
draw_box_with_label(
frame_copy,
box[0],
box[1],
box[2],
box[3],
obj["label"],
f"{int(obj['score']*100)}% {int(obj['area'])}",
thickness=thickness,
color=color,
)
if draw_options.get('regions'):
if draw_options.get("regions"):
for region in regions:
cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
cv2.rectangle(
frame_copy,
(region[0], region[1]),
(region[2], region[3]),
(0, 255, 0),
2,
)
if draw_options.get('zones'):
if draw_options.get("zones"):
for name, zone in self.camera_config.zones.items():
thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
thickness = (
8
if any(
[
name in obj["current_zones"]
for obj in tracked_objects.values()
]
)
else 2
)
cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
if draw_options.get('mask'):
mask_overlay = np.where(self.camera_config.motion.mask==[0])
frame_copy[mask_overlay] = [0,0,0]
if draw_options.get("mask"):
mask_overlay = np.where(self.camera_config.motion.mask == [0])
frame_copy[mask_overlay] = [0, 0, 0]
if draw_options.get('motion_boxes'):
if draw_options.get("motion_boxes"):
for m_box in motion_boxes:
cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0,0,255), 2)
cv2.rectangle(
frame_copy,
(m_box[0], m_box[1]),
(m_box[2], m_box[3]),
(0, 0, 255),
2,
)
if draw_options.get('timestamp'):
time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
if draw_options.get("timestamp"):
time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime(
"%m/%d/%Y %H:%M:%S"
)
cv2.putText(
frame_copy,
time_to_show,
(10, 30),
cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.8,
color=(255, 255, 255),
thickness=2,
)
return frame_copy
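Every branch above keys off the same options dict; collected in one place, the recognized keys are (values illustrative):

    draw_options = {
        "bounding_boxes": True,
        "regions": False,
        "zones": True,
        "mask": False,
        "motion_boxes": True,
        "timestamp": True,
    }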
@@ -324,7 +410,9 @@ class CameraState():
self.regions = regions
# get the new frame
frame_id = f"{self.name}{frame_time}"
current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)
current_frame = self.frame_manager.get(
frame_id, self.camera_config.frame_shape_yuv
)
current_ids = current_detections.keys()
previous_ids = self.tracked_objects.keys()
@@ -333,10 +421,12 @@ class CameraState():
updated_ids = list(set(current_ids).intersection(previous_ids))
for id in new_ids:
new_obj = self.tracked_objects[id] = TrackedObject(self.name, self.camera_config, self.frame_cache, current_detections[id])
new_obj = self.tracked_objects[id] = TrackedObject(
self.name, self.camera_config, self.frame_cache, current_detections[id]
)
# call event handlers
for c in self.callbacks['start']:
for c in self.callbacks["start"]:
c(self.name, new_obj, frame_time)
for id in updated_ids:
@@ -345,75 +435,107 @@ class CameraState():
if significant_update:
# ensure this frame is stored in the cache
if updated_obj.thumbnail_data['frame_time'] == frame_time and frame_time not in self.frame_cache:
if (
updated_obj.thumbnail_data["frame_time"] == frame_time
and frame_time not in self.frame_cache
):
self.frame_cache[frame_time] = np.copy(current_frame)
updated_obj.last_updated = frame_time
# if it has been more than 5 seconds since the last publish
# and the last update is greater than the last publish
if frame_time - updated_obj.last_published > 5 and updated_obj.last_updated > updated_obj.last_published:
if (
frame_time - updated_obj.last_published > 5
and updated_obj.last_updated > updated_obj.last_published
):
# call event handlers
for c in self.callbacks['update']:
for c in self.callbacks["update"]:
c(self.name, updated_obj, frame_time)
updated_obj.last_published = frame_time
for id in removed_ids:
# publish events to mqtt
removed_obj = self.tracked_objects[id]
if not 'end_time' in removed_obj.obj_data:
removed_obj.obj_data['end_time'] = frame_time
for c in self.callbacks['end']:
if not "end_time" in removed_obj.obj_data:
removed_obj.obj_data["end_time"] = frame_time
for c in self.callbacks["end"]:
c(self.name, removed_obj, frame_time)
# TODO: can i switch to looking this up and only changing when an event ends?
# maintain best objects
for obj in self.tracked_objects.values():
object_type = obj.obj_data['label']
object_type = obj.obj_data["label"]
# if the object's thumbnail is not from the current frame
if obj.false_positive or obj.thumbnail_data['frame_time'] != self.current_frame_time:
if (
obj.false_positive
or obj.thumbnail_data["frame_time"] != self.current_frame_time
):
continue
if object_type in self.best_objects:
current_best = self.best_objects[object_type]
now = datetime.datetime.now().timestamp()
# if the object is a higher score than the current best score
# or the current object is older than desired, use the new object
if (is_better_thumbnail(current_best.thumbnail_data, obj.thumbnail_data, self.camera_config.frame_shape)
or (now - current_best.thumbnail_data['frame_time']) > self.camera_config.best_image_timeout):
if (
is_better_thumbnail(
current_best.thumbnail_data,
obj.thumbnail_data,
self.camera_config.frame_shape,
)
or (now - current_best.thumbnail_data["frame_time"])
> self.camera_config.best_image_timeout
):
self.best_objects[object_type] = obj
for c in self.callbacks['snapshot']:
for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[object_type], frame_time)
else:
self.best_objects[object_type] = obj
for c in self.callbacks['snapshot']:
for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[object_type], frame_time)
# update overall camera state for each object type
obj_counter = Counter()
for obj in self.tracked_objects.values():
if not obj.false_positive:
obj_counter[obj.obj_data['label']] += 1
obj_counter[obj.obj_data["label"]] += 1
# report on detected objects
for obj_name, count in obj_counter.items():
if count != self.object_counts[obj_name]:
self.object_counts[obj_name] = count
for c in self.callbacks['object_status']:
for c in self.callbacks["object_status"]:
c(self.name, obj_name, count)
# expire any objects that are >0 and no longer detected
expired_objects = [obj_name for obj_name, count in self.object_counts.items() if count > 0 and not obj_name in obj_counter]
expired_objects = [
obj_name
for obj_name, count in self.object_counts.items()
if count > 0 and not obj_name in obj_counter
]
for obj_name in expired_objects:
self.object_counts[obj_name] = 0
for c in self.callbacks['object_status']:
for c in self.callbacks["object_status"]:
c(self.name, obj_name, 0)
for c in self.callbacks['snapshot']:
for c in self.callbacks["snapshot"]:
c(self.name, self.best_objects[obj_name], frame_time)
# cleanup thumbnail frame cache
current_thumb_frames = set([obj.thumbnail_data['frame_time'] for obj in self.tracked_objects.values() if not obj.false_positive])
current_best_frames = set([obj.thumbnail_data['frame_time'] for obj in self.best_objects.values()])
thumb_frames_to_delete = [t for t in self.frame_cache.keys() if not t in current_thumb_frames and not t in current_best_frames]
current_thumb_frames = set(
[
obj.thumbnail_data["frame_time"]
for obj in self.tracked_objects.values()
if not obj.false_positive
]
)
current_best_frames = set(
[obj.thumbnail_data["frame_time"] for obj in self.best_objects.values()]
)
thumb_frames_to_delete = [
t
for t in self.frame_cache.keys()
if not t in current_thumb_frames and not t in current_best_frames
]
for t in thumb_frames_to_delete:
del self.frame_cache[t]
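The retention rule above reduces to a set union: a cached frame survives only while some live object's thumbnail or some best-object snapshot still references its frame_time. In miniature (timestamps invented):

    frame_cache = {10.0: "yuv0", 11.0: "yuv1", 12.0: "yuv2"}
    keep = {11.0} | {12.0}  # thumbnail frame_times | best-object frame_times
    for t in [t for t in frame_cache if t not in keep]:
        del frame_cache[t]  # only 11.0 and 12.0 remain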
@@ -423,8 +545,18 @@ class CameraState():
self.frame_manager.delete(self.previous_frame_id)
self.previous_frame_id = frame_id
class TrackedObjectProcessor(threading.Thread):
def __init__(self, config: FrigateConfig, client, topic_prefix, tracked_objects_queue, event_queue, event_processed_queue, stop_event):
def __init__(
self,
config: FrigateConfig,
client,
topic_prefix,
tracked_objects_queue,
event_queue,
event_processed_queue,
stop_event,
):
threading.Thread.__init__(self)
self.name = "detected_frames_processor"
self.config = config
@@ -438,37 +570,56 @@ class TrackedObjectProcessor(threading.Thread):
self.frame_manager = SharedMemoryFrameManager()
def start(camera, obj: TrackedObject, current_frame_time):
self.event_queue.put(('start', camera, obj.to_dict()))
self.event_queue.put(("start", camera, obj.to_dict()))
def update(camera, obj: TrackedObject, current_frame_time):
after = obj.to_dict()
message = { 'before': obj.previous, 'after': after, 'type': 'new' if obj.previous['false_positive'] else 'update' }
self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
message = {
"before": obj.previous,
"after": after,
"type": "new" if obj.previous["false_positive"] else "update",
}
self.client.publish(
f"{self.topic_prefix}/events", json.dumps(message), retain=False
)
obj.previous = after
def end(camera, obj: TrackedObject, current_frame_time):
snapshot_config = self.config.cameras[camera].snapshots
event_data = obj.to_dict(include_thumbnail=True)
event_data['has_snapshot'] = False
event_data["has_snapshot"] = False
if not obj.false_positive:
message = { 'before': obj.previous, 'after': obj.to_dict(), 'type': 'end' }
self.client.publish(f"{self.topic_prefix}/events", json.dumps(message), retain=False)
message = {
"before": obj.previous,
"after": obj.to_dict(),
"type": "end",
}
self.client.publish(
f"{self.topic_prefix}/events", json.dumps(message), retain=False
)
# write snapshot to disk if enabled
if snapshot_config.enabled and self.should_save_snapshot(camera, obj):
jpg_bytes = obj.get_jpg_bytes(
timestamp=snapshot_config.timestamp,
bounding_box=snapshot_config.bounding_box,
crop=snapshot_config.crop,
height=snapshot_config.height
height=snapshot_config.height,
)
if jpg_bytes is None:
logger.warning(f"Unable to save snapshot for {obj.obj_data['id']}.")
logger.warning(
f"Unable to save snapshot for {obj.obj_data['id']}."
)
else:
with open(os.path.join(CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"), 'wb') as j:
with open(
os.path.join(
CLIPS_DIR, f"{camera}-{obj.obj_data['id']}.jpg"
),
"wb",
) as j:
j.write(jpg_bytes)
event_data['has_snapshot'] = True
self.event_queue.put(('end', camera, event_data))
event_data["has_snapshot"] = True
self.event_queue.put(("end", camera, event_data))
def snapshot(camera, obj: TrackedObject, current_frame_time):
mqtt_config = self.config.cameras[camera].mqtt
if mqtt_config.enabled and self.should_mqtt_snapshot(camera, obj):
@@ -476,24 +627,32 @@ class TrackedObjectProcessor(threading.Thread):
timestamp=mqtt_config.timestamp,
bounding_box=mqtt_config.bounding_box,
crop=mqtt_config.crop,
height=mqtt_config.height
height=mqtt_config.height,
)
if jpg_bytes is None:
logger.warning(f"Unable to send mqtt snapshot for {obj.obj_data['id']}.")
logger.warning(
f"Unable to send mqtt snapshot for {obj.obj_data['id']}."
)
else:
self.client.publish(f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot", jpg_bytes, retain=True)
self.client.publish(
f"{self.topic_prefix}/{camera}/{obj.obj_data['label']}/snapshot",
jpg_bytes,
retain=True,
)
def object_status(camera, object_name, status):
self.client.publish(f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False)
self.client.publish(
f"{self.topic_prefix}/{camera}/{object_name}", status, retain=False
)
for camera in self.config.cameras.keys():
camera_state = CameraState(camera, self.config, self.frame_manager)
camera_state.on('start', start)
camera_state.on('update', update)
camera_state.on('end', end)
camera_state.on('snapshot', snapshot)
camera_state.on('object_status', object_status)
camera_state.on("start", start)
camera_state.on("update", update)
camera_state.on("end", end)
camera_state.on("snapshot", snapshot)
camera_state.on("object_status", object_status)
self.camera_states[camera] = camera_state
# {
@@ -510,7 +669,9 @@ class TrackedObjectProcessor(threading.Thread):
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].snapshots.required_zones
if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
logger.debug(f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones")
logger.debug(
f"Not creating snapshot for {obj.obj_data['id']} because it did not enter required zones"
)
return False
return True
@@ -519,7 +680,9 @@ class TrackedObjectProcessor(threading.Thread):
# if there are required zones and there is no overlap
required_zones = self.config.cameras[camera].mqtt.required_zones
if len(required_zones) > 0 and not obj.entered_zones & set(required_zones):
logger.debug(f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones")
logger.debug(
f"Not sending mqtt for {obj.obj_data['id']} because it did not enter required zones"
)
return False
return True
@@ -530,7 +693,9 @@ class TrackedObjectProcessor(threading.Thread):
if label in camera_state.best_objects:
best_obj = camera_state.best_objects[label]
best = best_obj.thumbnail_data.copy()
best['frame'] = camera_state.frame_cache.get(best_obj.thumbnail_data['frame_time'])
best["frame"] = camera_state.frame_cache.get(
best_obj.thumbnail_data["frame_time"]
)
return best
else:
return {}
@@ -545,13 +710,21 @@ class TrackedObjectProcessor(threading.Thread):
break
try:
camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
(
camera,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = self.tracked_objects_queue.get(True, 10)
except queue.Empty:
continue
camera_state = self.camera_states[camera]
camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
camera_state.update(
frame_time, current_tracked_objects, motion_boxes, regions
)
# update zone counts for each label
# for each zone in the current camera
@@ -560,23 +733,35 @@ class TrackedObjectProcessor(threading.Thread):
obj_counter = Counter()
for obj in camera_state.tracked_objects.values():
if zone in obj.current_zones and not obj.false_positive:
obj_counter[obj.obj_data['label']] += 1
obj_counter[obj.obj_data["label"]] += 1
# update counts and publish status
for label in set(list(self.zone_data[zone].keys()) + list(obj_counter.keys())):
for label in set(
list(self.zone_data[zone].keys()) + list(obj_counter.keys())
):
# if we have previously published a count for this zone/label
zone_label = self.zone_data[zone][label]
if camera in zone_label:
current_count = sum(zone_label.values())
zone_label[camera] = obj_counter[label] if label in obj_counter else 0
zone_label[camera] = (
obj_counter[label] if label in obj_counter else 0
)
new_count = sum(zone_label.values())
if new_count != current_count:
self.client.publish(f"{self.topic_prefix}/{zone}/{label}", new_count, retain=False)
self.client.publish(
f"{self.topic_prefix}/{zone}/{label}",
new_count,
retain=False,
)
# if this is a new zone/label combo for this camera
else:
if label in obj_counter:
zone_label[camera] = obj_counter[label]
self.client.publish(f"{self.topic_prefix}/{zone}/{label}", obj_counter[label], retain=False)
self.client.publish(
f"{self.topic_prefix}/{zone}/{label}",
obj_counter[label],
retain=False,
)
# cleanup event finished queue
while not self.event_processed_queue.empty():

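The zone bookkeeping in the run loop is easier to see with concrete data: zone_data maps zone -> label -> camera -> count, and the published value is the sum across cameras (zone, label, and counts invented):

    zone_data = {"yard": {"person": {}}}
    zone_data["yard"]["person"]["front"] = 1   # front camera: 1 person in yard
    zone_data["yard"]["person"]["back"] = 2    # back camera: 2
    sum(zone_data["yard"]["person"].values())  # 3 -> <topic_prefix>/yard/person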
View File

@@ -16,24 +16,24 @@ from frigate.config import DetectConfig
from frigate.util import draw_box_with_label
class ObjectTracker():
class ObjectTracker:
def __init__(self, config: DetectConfig):
self.tracked_objects = {}
self.disappeared = {}
self.max_disappeared = config.max_disappeared
def register(self, index, obj):
rand_id = ''.join(random.choices(string.ascii_lowercase + string.digits, k=6))
rand_id = "".join(random.choices(string.ascii_lowercase + string.digits, k=6))
id = f"{obj['frame_time']}-{rand_id}"
obj['id'] = id
obj['start_time'] = obj['frame_time']
obj["id"] = id
obj["start_time"] = obj["frame_time"]
self.tracked_objects[id] = obj
self.disappeared[id] = 0
def deregister(self, id):
del self.tracked_objects[id]
del self.disappeared[id]
def update(self, id, new_obj):
self.disappeared[id] = 0
self.tracked_objects[id].update(new_obj)
@@ -42,45 +42,49 @@ class ObjectTracker():
# group by name
new_object_groups = defaultdict(lambda: [])
for obj in new_objects:
new_object_groups[obj[0]].append({
'label': obj[0],
'score': obj[1],
'box': obj[2],
'area': obj[3],
'region': obj[4],
'frame_time': frame_time
})
new_object_groups[obj[0]].append(
{
"label": obj[0],
"score": obj[1],
"box": obj[2],
"area": obj[3],
"region": obj[4],
"frame_time": frame_time,
}
)
# update any tracked objects with labels that are not
# seen in the current objects and deregister if needed
for obj in list(self.tracked_objects.values()):
if not obj['label'] in new_object_groups:
if self.disappeared[obj['id']] >= self.max_disappeared:
self.deregister(obj['id'])
if not obj["label"] in new_object_groups:
if self.disappeared[obj["id"]] >= self.max_disappeared:
self.deregister(obj["id"])
else:
self.disappeared[obj['id']] += 1
self.disappeared[obj["id"]] += 1
if len(new_objects) == 0:
return
# track objects for each label type
for label, group in new_object_groups.items():
current_objects = [o for o in self.tracked_objects.values() if o['label'] == label]
current_ids = [o['id'] for o in current_objects]
current_centroids = np.array([o['centroid'] for o in current_objects])
current_objects = [
o for o in self.tracked_objects.values() if o["label"] == label
]
current_ids = [o["id"] for o in current_objects]
current_centroids = np.array([o["centroid"] for o in current_objects])
# compute centroids of new objects
for obj in group:
centroid_x = int((obj['box'][0]+obj['box'][2]) / 2.0)
centroid_y = int((obj['box'][1]+obj['box'][3]) / 2.0)
obj['centroid'] = (centroid_x, centroid_y)
centroid_x = int((obj["box"][0] + obj["box"][2]) / 2.0)
centroid_y = int((obj["box"][1] + obj["box"][3]) / 2.0)
obj["centroid"] = (centroid_x, centroid_y)
if len(current_objects) == 0:
for index, obj in enumerate(group):
self.register(index, obj)
return
new_centroids = np.array([o['centroid'] for o in group])
new_centroids = np.array([o["centroid"] for o in group])
# compute the distance between each pair of tracked
# centroids and new centroids, respectively -- our
@@ -130,9 +134,9 @@ class ObjectTracker():
unusedCols = set(range(0, D.shape[1])).difference(usedCols)
# in the event that the number of object centroids is
# equal or greater than the number of input centroids
# we need to check and see if some of these objects have
# potentially disappeared
if D.shape[0] >= D.shape[1]:
for row in unusedRows:
id = current_ids[row]
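The matching step elided between these hunks follows the classic centroid-tracker recipe: build a pairwise distance matrix, then greedily pair each tracked object with its nearest new centroid. A sketch of that standard approach (illustrative data, not a verbatim excerpt of Frigate's code):

    import numpy as np
    from scipy.spatial import distance as dist

    current_centroids = np.array([(100, 120), (400, 300)])  # tracked objects
    new_centroids = np.array([(105, 118), (700, 50)])       # this frame

    D = dist.cdist(current_centroids, new_centroids)
    rows = D.min(axis=1).argsort()  # existing objects, closest match first
    cols = D.argmin(axis=1)[rows]   # each one's nearest new centroid
    # pair (row, col) greedily, skipping indices already used; unmatched rows
    # age toward max_disappeared, unmatched cols are registered as new objects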

View File

@@ -16,37 +16,43 @@ from frigate.edgetpu import LocalObjectDetector
from frigate.motion import MotionDetector
from frigate.object_processing import COLOR_MAP, CameraState
from frigate.objects import ObjectTracker
from frigate.util import (DictFrameManager, EventsPerSecond,
SharedMemoryFrameManager, draw_box_with_label)
from frigate.video import (capture_frames, process_frames,
start_or_restart_ffmpeg)
from frigate.util import (
DictFrameManager,
EventsPerSecond,
SharedMemoryFrameManager,
draw_box_with_label,
)
from frigate.video import capture_frames, process_frames, start_or_restart_ffmpeg
logging.basicConfig()
logging.root.setLevel(logging.DEBUG)
logger = logging.getLogger(__name__)
def get_frame_shape(source):
ffprobe_cmd = " ".join([
'ffprobe',
'-v',
'panic',
'-show_error',
'-show_streams',
'-of',
'json',
'"'+source+'"'
])
ffprobe_cmd = " ".join(
[
"ffprobe",
"-v",
"panic",
"-show_error",
"-show_streams",
"-of",
"json",
'"' + source + '"',
]
)
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
info = json.loads(output)
video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
video_info = [s for s in info["streams"] if s["codec_type"] == "video"][0]
if video_info["height"] != 0 and video_info["width"] != 0:
return (video_info["height"], video_info["width"], 3)
if video_info['height'] != 0 and video_info['width'] != 0:
return (video_info['height'], video_info['width'], 3)
# fallback to using opencv if ffprobe didn't succeed
video = cv2.VideoCapture(source)
ret, frame = video.read()
@@ -54,14 +60,17 @@ def get_frame_shape(source):
video.release()
return frame_shape
class ProcessClip():
class ProcessClip:
def __init__(self, clip_path, frame_shape, config: FrigateConfig):
self.clip_path = clip_path
self.camera_name = 'camera'
self.camera_name = "camera"
self.config = config
self.camera_config = self.config.cameras['camera']
self.camera_config = self.config.cameras["camera"]
self.frame_shape = self.camera_config.frame_shape
self.ffmpeg_cmd = [c['cmd'] for c in self.camera_config.ffmpeg_cmds if 'detect' in c['roles']][0]
self.ffmpeg_cmd = [
c["cmd"] for c in self.camera_config.ffmpeg_cmds if "detect" in c["roles"]
][0]
self.frame_manager = SharedMemoryFrameManager()
self.frame_queue = mp.Queue()
self.detected_objects_queue = mp.Queue()
@@ -70,37 +79,66 @@ class ProcessClip():
def load_frames(self):
fps = EventsPerSecond()
skipped_fps = EventsPerSecond()
current_frame = mp.Value('d', 0.0)
frame_size = self.camera_config.frame_shape_yuv[0] * self.camera_config.frame_shape_yuv[1]
ffmpeg_process = start_or_restart_ffmpeg(self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size)
capture_frames(ffmpeg_process, self.camera_name, self.camera_config.frame_shape_yuv, self.frame_manager,
self.frame_queue, fps, skipped_fps, current_frame)
current_frame = mp.Value("d", 0.0)
frame_size = (
self.camera_config.frame_shape_yuv[0]
* self.camera_config.frame_shape_yuv[1]
)
ffmpeg_process = start_or_restart_ffmpeg(
self.ffmpeg_cmd, logger, sp.DEVNULL, frame_size
)
capture_frames(
ffmpeg_process,
self.camera_name,
self.camera_config.frame_shape_yuv,
self.frame_manager,
self.frame_queue,
fps,
skipped_fps,
current_frame,
)
ffmpeg_process.wait()
ffmpeg_process.communicate()
def process_frames(self, objects_to_track=['person'], object_filters={}):
def process_frames(self, objects_to_track=["person"], object_filters={}):
mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
mask[:] = 255
motion_detector = MotionDetector(self.frame_shape, mask, self.camera_config.motion)
motion_detector = MotionDetector(
self.frame_shape, mask, self.camera_config.motion
)
object_detector = LocalObjectDetector(labels='/labelmap.txt')
object_detector = LocalObjectDetector(labels="/labelmap.txt")
object_tracker = ObjectTracker(self.camera_config.detect)
process_info = {
'process_fps': mp.Value('d', 0.0),
'detection_fps': mp.Value('d', 0.0),
'detection_frame': mp.Value('d', 0.0)
"process_fps": mp.Value("d", 0.0),
"detection_fps": mp.Value("d", 0.0),
"detection_frame": mp.Value("d", 0.0),
}
stop_event = mp.Event()
model_shape = (self.config.model.height, self.config.model.width)
process_frames(self.camera_name, self.frame_queue, self.frame_shape, model_shape,
self.frame_manager, motion_detector, object_detector, object_tracker,
self.detected_objects_queue, process_info,
objects_to_track, object_filters, mask, stop_event, exit_on_empty=True)
process_frames(
self.camera_name,
self.frame_queue,
self.frame_shape,
model_shape,
self.frame_manager,
motion_detector,
object_detector,
object_tracker,
self.detected_objects_queue,
process_info,
objects_to_track,
object_filters,
mask,
stop_event,
exit_on_empty=True,
)
def top_object(self, debug_path=None):
obj_detected = False
top_computed_score = 0.0
def handle_event(name, obj, frame_time):
nonlocal obj_detected
nonlocal top_computed_score
@@ -108,48 +146,85 @@ class ProcessClip():
top_computed_score = obj.computed_score
if not obj.false_positive:
obj_detected = True
self.camera_state.on('new', handle_event)
self.camera_state.on('update', handle_event)
while(not self.detected_objects_queue.empty()):
camera_name, frame_time, current_tracked_objects, motion_boxes, regions = self.detected_objects_queue.get()
self.camera_state.on("new", handle_event)
self.camera_state.on("update", handle_event)
while not self.detected_objects_queue.empty():
(
camera_name,
frame_time,
current_tracked_objects,
motion_boxes,
regions,
) = self.detected_objects_queue.get()
if not debug_path is None:
self.save_debug_frame(debug_path, frame_time, current_tracked_objects.values())
self.save_debug_frame(
debug_path, frame_time, current_tracked_objects.values()
)
self.camera_state.update(
frame_time, current_tracked_objects, motion_boxes, regions
)
self.camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
self.frame_manager.delete(self.camera_state.previous_frame_id)
return {
'object_detected': obj_detected,
'top_score': top_computed_score
}
return {"object_detected": obj_detected, "top_score": top_computed_score}
def save_debug_frame(self, debug_path, frame_time, tracked_objects):
current_frame = cv2.cvtColor(self.frame_manager.get(f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv), cv2.COLOR_YUV2BGR_I420)
current_frame = cv2.cvtColor(
self.frame_manager.get(
f"{self.camera_name}{frame_time}", self.camera_config.frame_shape_yuv
),
cv2.COLOR_YUV2BGR_I420,
)
# draw the bounding boxes on the frame
for obj in tracked_objects:
thickness = 2
color = (0,0,175)
color = (0, 0, 175)
if obj['frame_time'] != frame_time:
if obj["frame_time"] != frame_time:
thickness = 1
color = (255,0,0)
color = (255, 0, 0)
else:
color = (255,255,0)
color = (255, 255, 0)
# draw the bounding boxes on the frame
box = obj['box']
draw_box_with_label(current_frame, box[0], box[1], box[2], box[3], obj['id'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
box = obj["box"]
draw_box_with_label(
current_frame,
box[0],
box[1],
box[2],
box[3],
obj["id"],
f"{int(obj['score']*100)}% {int(obj['area'])}",
thickness=thickness,
color=color,
)
# draw the regions on the frame
region = obj['region']
draw_box_with_label(current_frame, region[0], region[1], region[2], region[3], 'region', "", thickness=1, color=(0,255,0))
cv2.imwrite(f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg", current_frame)
region = obj["region"]
draw_box_with_label(
current_frame,
region[0],
region[1],
region[2],
region[3],
"region",
"",
thickness=1,
color=(0, 255, 0),
)
cv2.imwrite(
f"{os.path.join(debug_path, os.path.basename(self.clip_path))}.{int(frame_time*1000000)}.jpg",
current_frame,
)
@click.command()
@click.option("-p", "--path", required=True, help="Path to clip or directory to test.")
@click.option("-l", "--label", default='person', help="Label name to detect.")
@click.option("-l", "--label", default="person", help="Label name to detect.")
@click.option("-t", "--threshold", default=0.85, help="Threshold value for objects.")
@click.option("-s", "--scores", default=None, help="File to save csv of top scores")
@click.option("--debug-path", default=None, help="Path to output frames for debugging.")
@@ -159,34 +234,37 @@ def process(path, label, threshold, scores, debug_path):
files = os.listdir(path)
files.sort()
clips = [os.path.join(path, file) for file in files]
elif os.path.isfile(path):
clips.append(path)
json_config = {
'mqtt': {
'host': 'mqtt'
},
'cameras': {
'camera': {
'ffmpeg': {
'inputs': [
{ 'path': 'path.mp4', 'global_args': '', 'input_args': '', 'roles': ['detect'] }
"mqtt": {"host": "mqtt"},
"cameras": {
"camera": {
"ffmpeg": {
"inputs": [
{
"path": "path.mp4",
"global_args": "",
"input_args": "",
"roles": ["detect"],
}
]
},
'height': 1920,
'width': 1080
"height": 1920,
"width": 1080,
}
}
},
}
results = []
for c in clips:
logger.info(c)
frame_shape = get_frame_shape(c)
json_config['cameras']['camera']['height'] = frame_shape[0]
json_config['cameras']['camera']['width'] = frame_shape[1]
json_config['cameras']['camera']['ffmpeg']['inputs'][0]['path'] = c
json_config["cameras"]["camera"]["height"] = frame_shape[0]
json_config["cameras"]["camera"]["width"] = frame_shape[1]
json_config["cameras"]["camera"]["ffmpeg"]["inputs"][0]["path"] = c
config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
@@ -197,12 +275,15 @@ def process(path, label, threshold, scores, debug_path):
results.append((c, process_clip.top_object(debug_path)))
if not scores is None:
with open(scores, 'w') as writer:
with open(scores, "w") as writer:
for result in results:
writer.write(f"{result[0]},{result[1]['top_score']}\n")
positive_count = sum(1 for result in results if result[1]['object_detected'])
print(f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s).")
if __name__ == '__main__':
positive_count = sum(1 for result in results if result[1]["object_detected"])
print(
f"Objects were detected in {positive_count}/{len(results)}({positive_count/len(results)*100:.2f}%) clip(s)."
)
if __name__ == "__main__":
process()
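End to end, the script comes down to a few calls per clip. The same flow in miniature, using the names defined above (clip path invented):

    clip = "/clips/porch.mp4"
    frame_shape = get_frame_shape(clip)  # ffprobe, with an OpenCV fallback
    json_config["cameras"]["camera"]["height"] = frame_shape[0]
    json_config["cameras"]["camera"]["width"] = frame_shape[1]
    json_config["cameras"]["camera"]["ffmpeg"]["inputs"][0]["path"] = clip

    config = FrigateConfig(config=FRIGATE_CONFIG_SCHEMA(json_config))
    process_clip = ProcessClip(clip, frame_shape, config)
    process_clip.load_frames()
    process_clip.process_frames(objects_to_track=["person"])
    process_clip.top_object()  # {"object_detected": ..., "top_score": ...}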

View File

@@ -18,41 +18,47 @@ logger = logging.getLogger(__name__)
SECONDS_IN_DAY = 60 * 60 * 24
def remove_empty_directories(directory):
# list all directories recursively and sort them by path,
# longest first
paths = sorted(
[x[0] for x in os.walk(RECORD_DIR)],
key=lambda p: len(str(p)),
reverse=True,
)
for path in paths:
# don't delete the parent
if path == RECORD_DIR:
continue
if len(os.listdir(path)) == 0:
os.rmdir(path)
class RecordingMaintainer(threading.Thread):
def __init__(self, config: FrigateConfig, stop_event):
threading.Thread.__init__(self)
self.name = 'recording_maint'
self.name = "recording_maint"
self.config = config
self.stop_event = stop_event
def move_files(self):
recordings = [d for d in os.listdir(RECORD_DIR) if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4")]
recordings = [
d
for d in os.listdir(RECORD_DIR)
if os.path.isfile(os.path.join(RECORD_DIR, d)) and d.endswith(".mp4")
]
files_in_use = []
for process in psutil.process_iter():
try:
if process.name() != 'ffmpeg':
if process.name() != "ffmpeg":
continue
flist = process.open_files()
if flist:
for nt in flist:
if nt.path.startswith(RECORD_DIR):
files_in_use.append(nt.path.split('/')[-1])
files_in_use.append(nt.path.split("/")[-1])
except:
continue
@@ -60,44 +66,53 @@ class RecordingMaintainer(threading.Thread):
if f in files_in_use:
continue
camera = '-'.join(f.split('-')[:-1])
start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S')
ffprobe_cmd = " ".join([
'ffprobe',
'-v',
'error',
'-show_entries',
'format=duration',
'-of',
'default=noprint_wrappers=1:nokey=1',
f"{os.path.join(RECORD_DIR,f)}"
])
camera = "-".join(f.split("-")[:-1])
start_time = datetime.datetime.strptime(
f.split("-")[-1].split(".")[0], "%Y%m%d%H%M%S"
)
ffprobe_cmd = " ".join(
[
"ffprobe",
"-v",
"error",
"-show_entries",
"format=duration",
"-of",
"default=noprint_wrappers=1:nokey=1",
f"{os.path.join(RECORD_DIR,f)}",
]
)
p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
if p_status == 0:
duration = float(output.decode('utf-8').strip())
duration = float(output.decode("utf-8").strip())
else:
logger.info(f"bad file: {f}")
os.remove(os.path.join(RECORD_DIR,f))
os.remove(os.path.join(RECORD_DIR, f))
continue
directory = os.path.join(RECORD_DIR, start_time.strftime('%Y-%m/%d/%H'), camera)
directory = os.path.join(
RECORD_DIR, start_time.strftime("%Y-%m/%d/%H"), camera
)
if not os.path.exists(directory):
os.makedirs(directory)
file_name = f"{start_time.strftime('%M.%S.mp4')}"
os.rename(os.path.join(RECORD_DIR,f), os.path.join(directory,file_name))
os.rename(os.path.join(RECORD_DIR, f), os.path.join(directory, file_name))
def expire_files(self):
delete_before = {}
for name, camera in self.config.cameras.items():
delete_before[name] = datetime.datetime.now().timestamp() - SECONDS_IN_DAY*camera.record.retain_days
delete_before[name] = (
datetime.datetime.now().timestamp()
- SECONDS_IN_DAY * camera.record.retain_days
)
for p in Path('/media/frigate/recordings').rglob("*.mp4"):
for p in Path("/media/frigate/recordings").rglob("*.mp4"):
if not p.parent.name in delete_before:
continue
if p.stat().st_mtime < delete_before[p.parent.name]:
@@ -106,7 +121,7 @@ class RecordingMaintainer(threading.Thread):
def run(self):
counter = 0
self.expire_files()
while(True):
while True:
if self.stop_event.is_set():
logger.info(f"Exiting recording maintenance...")
break
@@ -120,6 +135,3 @@ class RecordingMaintainer(threading.Thread):
counter = 0
self.move_files()
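The move_files() naming scheme, worked through with a concrete file (filename invented): a recording named <camera>-<YYYYmmddHHMMSS>.mp4 lands under <year-month>/<day>/<hour>/<camera>/<minute>.<second>.mp4.

    import datetime
    import os

    f = "back-20210217142530.mp4"
    camera = "-".join(f.split("-")[:-1])  # "back"; hyphens in camera names survive
    start_time = datetime.datetime.strptime(
        f.split("-")[-1].split(".")[0], "%Y%m%d%H%M%S"
    )
    directory = os.path.join(
        "/media/frigate/recordings", start_time.strftime("%Y-%m/%d/%H"), camera
    )
    file_name = start_time.strftime("%M.%S.mp4")
    # -> /media/frigate/recordings/2021-02/17/14/back/25.30.mp4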

View File

@@ -11,14 +11,16 @@ from frigate.version import VERSION
logger = logging.getLogger(__name__)
def stats_init(camera_metrics, detectors):
stats_tracking = {
'camera_metrics': camera_metrics,
'detectors': detectors,
'started': int(time.time())
"camera_metrics": camera_metrics,
"detectors": detectors,
"started": int(time.time()),
}
return stats_tracking
def get_fs_type(path):
bestMatch = ""
fsType = ""
@@ -28,53 +30,62 @@ def get_fs_type(path):
bestMatch = part.mountpoint
return fsType
def stats_snapshot(stats_tracking):
camera_metrics = stats_tracking['camera_metrics']
camera_metrics = stats_tracking["camera_metrics"]
stats = {}
total_detection_fps = 0
for name, camera_stats in camera_metrics.items():
total_detection_fps += camera_stats['detection_fps'].value
total_detection_fps += camera_stats["detection_fps"].value
stats[name] = {
'camera_fps': round(camera_stats['camera_fps'].value, 2),
'process_fps': round(camera_stats['process_fps'].value, 2),
'skipped_fps': round(camera_stats['skipped_fps'].value, 2),
'detection_fps': round(camera_stats['detection_fps'].value, 2),
'pid': camera_stats['process'].pid,
'capture_pid': camera_stats['capture_process'].pid
"camera_fps": round(camera_stats["camera_fps"].value, 2),
"process_fps": round(camera_stats["process_fps"].value, 2),
"skipped_fps": round(camera_stats["skipped_fps"].value, 2),
"detection_fps": round(camera_stats["detection_fps"].value, 2),
"pid": camera_stats["process"].pid,
"capture_pid": camera_stats["capture_process"].pid,
}
stats['detectors'] = {}
stats["detectors"] = {}
for name, detector in stats_tracking["detectors"].items():
stats['detectors'][name] = {
'inference_speed': round(detector.avg_inference_speed.value * 1000, 2),
'detection_start': detector.detection_start.value,
'pid': detector.detect_process.pid
stats["detectors"][name] = {
"inference_speed": round(detector.avg_inference_speed.value * 1000, 2),
"detection_start": detector.detection_start.value,
"pid": detector.detect_process.pid,
}
stats['detection_fps'] = round(total_detection_fps, 2)
stats["detection_fps"] = round(total_detection_fps, 2)
stats['service'] = {
'uptime': (int(time.time()) - stats_tracking['started']),
'version': VERSION,
'storage': {}
stats["service"] = {
"uptime": (int(time.time()) - stats_tracking["started"]),
"version": VERSION,
"storage": {},
}
for path in [RECORD_DIR, CLIPS_DIR, CACHE_DIR, "/dev/shm"]:
storage_stats = shutil.disk_usage(path)
stats['service']['storage'][path] = {
'total': round(storage_stats.total/1000000, 1),
'used': round(storage_stats.used/1000000, 1),
'free': round(storage_stats.free/1000000, 1),
'mount_type': get_fs_type(path)
stats["service"]["storage"][path] = {
"total": round(storage_stats.total / 1000000, 1),
"used": round(storage_stats.used / 1000000, 1),
"free": round(storage_stats.free / 1000000, 1),
"mount_type": get_fs_type(path),
}
return stats
class StatsEmitter(threading.Thread):
def __init__(self, config: FrigateConfig, stats_tracking, mqtt_client, topic_prefix, stop_event):
def __init__(
self,
config: FrigateConfig,
stats_tracking,
mqtt_client,
topic_prefix,
stop_event,
):
threading.Thread.__init__(self)
self.name = 'frigate_stats_emitter'
self.name = "frigate_stats_emitter"
self.config = config
self.stats_tracking = stats_tracking
self.mqtt_client = mqtt_client
@@ -88,5 +99,7 @@ class StatsEmitter(threading.Thread):
logger.info(f"Exiting watchdog...")
break
stats = stats_snapshot(self.stats_tracking)
self.mqtt_client.publish(f"{self.topic_prefix}/stats", json.dumps(stats), retain=False)
self.mqtt_client.publish(
f"{self.topic_prefix}/stats", json.dumps(stats), retain=False
)
time.sleep(self.config.mqtt.stats_interval)
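For reference, the payload stats_snapshot() assembles and StatsEmitter publishes to <topic_prefix>/stats has roughly this shape. Every key below comes from the code above; all values, the detector name, and the storage path entry are invented, and the storage figures are megabytes (bytes divided by 1,000,000):

    {
        "back": {
            "camera_fps": 5.0,
            "process_fps": 5.0,
            "skipped_fps": 0.0,
            "detection_fps": 1.2,
            "pid": 123,
            "capture_pid": 124,
        },
        "detectors": {
            "coral": {"inference_speed": 8.5, "detection_start": 0.0, "pid": 125},
        },
        "detection_fps": 1.2,
        "service": {
            "uptime": 3600,
            "version": "x.y.z",
            "storage": {
                "/dev/shm": {"total": 67.1, "used": 16.8, "free": 50.3, "mount_type": "tmpfs"},
            },
        },
    }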

View File

@@ -3,431 +3,339 @@ from unittest import TestCase, main
import voluptuous as vol
from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig
class TestConfig(TestCase):
def setUp(self):
self.minimal = {
'mqtt': {
'host': 'mqtt'
},
'cameras': {
'back': {
'ffmpeg': {
'inputs': [
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
"mqtt": {"host": "mqtt"},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
'height': 1080,
'width': 1920
"height": 1080,
"width": 1920,
}
}
},
}
def test_empty(self):
FRIGATE_CONFIG_SCHEMA({})
def test_minimal(self):
FRIGATE_CONFIG_SCHEMA(self.minimal)
def test_config_class(self):
FrigateConfig(config=self.minimal)
def test_inherit_tracked_objects(self):
config = {
'mqtt': {
'host': 'mqtt'
},
'objects': {
'track': ['person', 'dog']
},
'cameras': {
'back': {
'ffmpeg': {
'inputs': [
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
"mqtt": {"host": "mqtt"},
"objects": {"track": ["person", "dog"]},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
'height': 1080,
'width': 1920
"height": 1080,
"width": 1920,
}
}
},
}
frigate_config = FrigateConfig(config=config)
assert('dog' in frigate_config.cameras['back'].objects.track)
assert "dog" in frigate_config.cameras["back"].objects.track
def test_override_tracked_objects(self):
config = {
'mqtt': {
'host': 'mqtt'
},
'objects': {
'track': ['person', 'dog']
},
'cameras': {
'back': {
'ffmpeg': {
'inputs': [
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
"mqtt": {"host": "mqtt"},
"objects": {"track": ["person", "dog"]},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
'height': 1080,
'width': 1920,
'objects': {
'track': ['cat']
}
"height": 1080,
"width": 1920,
"objects": {"track": ["cat"]},
}
}
},
}
frigate_config = FrigateConfig(config=config)
assert('cat' in frigate_config.cameras['back'].objects.track)
assert "cat" in frigate_config.cameras["back"].objects.track
def test_default_object_filters(self):
config = {
'mqtt': {
'host': 'mqtt'
},
'objects': {
'track': ['person', 'dog']
},
'cameras': {
'back': {
'ffmpeg': {
'inputs': [
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
"mqtt": {"host": "mqtt"},
"objects": {"track": ["person", "dog"]},
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
'height': 1080,
'width': 1920
"height": 1080,
"width": 1920,
}
}
},
}
frigate_config = FrigateConfig(config=config)
assert('dog' in frigate_config.cameras['back'].objects.filters)
assert "dog" in frigate_config.cameras["back"].objects.filters
def test_inherit_object_filters(self):
config = {
'mqtt': {
'host': 'mqtt'
"mqtt": {"host": "mqtt"},
"objects": {
"track": ["person", "dog"],
"filters": {"dog": {"threshold": 0.7}},
},
'objects': {
'track': ['person', 'dog'],
'filters': {
'dog': {
'threshold': 0.7
}
}
},
'cameras': {
'back': {
'ffmpeg': {
'inputs': [
{ 'path': 'rtsp://10.0.0.1:554/video', 'roles': ['detect'] }
"cameras": {
"back": {
"ffmpeg": {
"inputs": [
{"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
]
},
'height': 1080,
'width': 1920
"height": 1080,
"width": 1920,
}
}
},
}
frigate_config = FrigateConfig(config=config)
assert('dog' in frigate_config.cameras['back'].objects.filters)
assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
assert "dog" in frigate_config.cameras["back"].objects.filters
assert frigate_config.cameras["back"].objects.filters["dog"].threshold == 0.7
    def test_override_object_filters(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                    "objects": {
                        "track": ["person", "dog"],
                        "filters": {"dog": {"threshold": 0.7}},
                    },
                }
            },
        }
        frigate_config = FrigateConfig(config=config)
        assert "dog" in frigate_config.cameras["back"].objects.filters
        assert frigate_config.cameras["back"].objects.filters["dog"].threshold == 0.7

    def test_global_object_mask(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "objects": {"track": ["person", "dog"]},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                    "objects": {
                        "mask": "0,0,1,1,0,1",
                        "filters": {"dog": {"mask": "1,1,1,1,1,1"}},
                    },
                }
            },
        }
        frigate_config = FrigateConfig(config=config)
        assert "dog" in frigate_config.cameras["back"].objects.filters
        assert len(frigate_config.cameras["back"].objects.filters["dog"]._raw_mask) == 2
        assert (
            len(frigate_config.cameras["back"].objects.filters["person"]._raw_mask) == 1
        )

    def test_ffmpeg_params_global(self):
        config = {
            "ffmpeg": {"input_args": ["-re"]},
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                    "objects": {
                        "track": ["person", "dog"],
                        "filters": {"dog": {"threshold": 0.7}},
                    },
                }
            },
        }
        frigate_config = FrigateConfig(config=config)
        assert "-re" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]

    def test_ffmpeg_params_camera(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ],
                        "input_args": ["-re"],
                    },
                    "height": 1080,
                    "width": 1920,
                    "objects": {
                        "track": ["person", "dog"],
                        "filters": {"dog": {"threshold": 0.7}},
                    },
                }
            },
        }
        frigate_config = FrigateConfig(config=config)
        assert "-re" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]

    def test_ffmpeg_params_input(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect"],
                                "input_args": ["-re"],
                            }
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                    "objects": {
                        "track": ["person", "dog"],
                        "filters": {"dog": {"threshold": 0.7}},
                    },
                }
            },
        }
        frigate_config = FrigateConfig(config=config)
        assert "-re" in frigate_config.cameras["back"].ffmpeg_cmds[0]["cmd"]

    def test_inherit_clips_retention(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                }
            },
        }
        frigate_config = FrigateConfig(config=config)
        assert frigate_config.cameras["back"].clips.retain.objects["person"] == 30

    def test_roles_listed_twice_throws_error(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]},
                            {"path": "rtsp://10.0.0.1:554/video2", "roles": ["detect"]},
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                }
            },
        }
        self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))

    def test_zone_matching_camera_name_throws_error(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                    "zones": {"back": {"coordinates": "1,1,1,1,1,1"}},
                }
            },
        }
        self.assertRaises(vol.MultipleInvalid, lambda: FrigateConfig(config=config))

    def test_clips_should_default_to_global_objects(self):
        config = {
            "mqtt": {"host": "mqtt"},
            "clips": {"retain": {"default": 20, "objects": {"person": 30}}},
            "objects": {"track": ["person", "dog"]},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                    "clips": {"enabled": True},
                }
            },
        }
        config = FrigateConfig(config=config)
        assert config.cameras["back"].clips.objects is None

    def test_role_assigned_but_not_enabled(self):
        json_config = {
            "mqtt": {"host": "mqtt"},
            "cameras": {
                "back": {
                    "ffmpeg": {
                        "inputs": [
                            {
                                "path": "rtsp://10.0.0.1:554/video",
                                "roles": ["detect", "rtmp"],
                            },
                            {"path": "rtsp://10.0.0.1:554/record", "roles": ["record"]},
                        ]
                    },
                    "height": 1080,
                    "width": 1920,
                }
            },
        }
        config = FrigateConfig(config=json_config)
        ffmpeg_cmds = config.cameras["back"].ffmpeg_cmds
        assert len(ffmpeg_cmds) == 1
        assert not "clips" in ffmpeg_cmds[0]["roles"]

if __name__ == "__main__":
main(verbosity=2)
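
Every test above follows the same shape: build a plain config dict, feed it to FrigateConfig, then assert on the merged camera-level result. A minimal sketch of that pattern, assuming the frigate package is importable; all field names mirror the test configs above, nothing new:

from frigate.config import FrigateConfig

config = {
    "mqtt": {"host": "mqtt"},
    "objects": {"track": ["person"]},
    "cameras": {
        "back": {
            "ffmpeg": {
                "inputs": [
                    {"path": "rtsp://10.0.0.1:554/video", "roles": ["detect"]}
                ]
            },
            "height": 1080,
            "width": 1920,
        }
    },
}

frigate_config = FrigateConfig(config=config)
# the camera inherits the globally tracked objects
print(frigate_config.cameras["back"].objects.track)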

View File

@ -3,37 +3,39 @@ import numpy as np
from unittest import TestCase, main
from frigate.util import yuv_region_2_rgb
class TestYuvRegion2RGB(TestCase):
    def setUp(self):
        self.bgr_frame = np.zeros((100, 200, 3), np.uint8)
        self.bgr_frame[:] = (0, 0, 255)
        self.bgr_frame[5:55, 5:55] = (255, 0, 0)
        # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
        self.yuv_frame = cv2.cvtColor(self.bgr_frame, cv2.COLOR_BGR2YUV_I420)

    def test_crop_yuv(self):
        cropped = yuv_region_2_rgb(self.yuv_frame, (10, 10, 50, 50))
        # ensure the upper left pixel is blue
        assert np.all(cropped[0, 0] == [0, 0, 255])

    def test_crop_yuv_out_of_bounds(self):
        cropped = yuv_region_2_rgb(self.yuv_frame, (0, 0, 200, 200))
        # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
        # ensure the upper left pixel is red
        # the yuv conversion has some noise
        assert np.all(cropped[0, 0] == [255, 1, 0])
        # ensure the bottom right is black
        assert np.all(cropped[199, 199] == [0, 0, 0])

    def test_crop_yuv_portrait(self):
        bgr_frame = np.zeros((1920, 1080, 3), np.uint8)
        bgr_frame[:] = (0, 0, 255)
        bgr_frame[5:55, 5:55] = (255, 0, 0)
        # cv2.imwrite(f"bgr_frame.jpg", self.bgr_frame)
        yuv_frame = cv2.cvtColor(bgr_frame, cv2.COLOR_BGR2YUV_I420)

        cropped = yuv_region_2_rgb(yuv_frame, (0, 852, 648, 1500))
        # cv2.imwrite(f"cropped.jpg", cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))


if __name__ == "__main__":
    main(verbosity=2)
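
These tests lean on the I420 layout produced by cv2.COLOR_BGR2YUV_I420: a WxH BGR frame becomes a single-channel array of shape (H * 3 // 2, W), with the Y plane on top and the quarter-size U and V planes packed below it. A quick self-contained check of that shape assumption:

import cv2
import numpy as np

bgr = np.zeros((100, 200, 3), np.uint8)
yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV_I420)
# the Y plane is 100x200; the packed U and V planes add another 50 rows
assert yuv.shape == (150, 200)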

View File

@ -19,9 +19,20 @@ import numpy as np
logger = logging.getLogger(__name__)
def draw_box_with_label(
    frame,
    x_min,
    y_min,
    x_max,
    y_max,
    label,
    info,
    thickness=2,
    color=None,
    position="ul",
):
    if color is None:
        color = (0, 0, 255)
display_text = "{}: {}".format(label, info)
cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, thickness)
font_scale = 0.5
@ -32,113 +43,122 @@ def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thicknes
text_height = size[0][1]
line_height = text_height + size[1]
# set the text start position
    if position == "ul":
        text_offset_x = x_min
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
    elif position == "ur":
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = 0 if y_min < line_height else y_min - (line_height + 8)
    elif position == "bl":
        text_offset_x = x_min
        text_offset_y = y_max
    elif position == "br":
        text_offset_x = x_max - (text_width + 8)
        text_offset_y = y_max
    # make the coords of the box with a small padding of two pixels
    textbox_coords = (
        (text_offset_x, text_offset_y),
        (text_offset_x + text_width + 2, text_offset_y + line_height),
    )
    cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
    cv2.putText(
        frame,
        display_text,
        (text_offset_x, text_offset_y + line_height - 3),
        font,
        fontScale=font_scale,
        color=(0, 0, 0),
        thickness=2,
    )
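
A hypothetical call for reference; the frame and box values are made up, and the label/info strings only mimic how the object-processing code renders score text:

import numpy as np
from frigate.util import draw_box_with_label

frame = np.zeros((1080, 1920, 3), np.uint8)
# draws the rectangle plus a filled label box above its upper-left corner
draw_box_with_label(frame, 100, 100, 300, 400, "person", "87%", position="ul")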
def calculate_region(frame_shape, xmin, ymin, xmax, ymax, multiplier=2):
    # size is the longest edge and divisible by 4
    size = int(max(xmax - xmin, ymax - ymin) // 4 * 4 * multiplier)
    # dont go any smaller than 300
    if size < 300:
        size = 300

    # x_offset is midpoint of bounding box minus half the size
    x_offset = int((xmax - xmin) / 2.0 + xmin - size / 2.0)
    # if outside the image
    if x_offset < 0:
        x_offset = 0
    elif x_offset > (frame_shape[1] - size):
        x_offset = max(0, (frame_shape[1] - size))

    # y_offset is midpoint of bounding box minus half the size
    y_offset = int((ymax - ymin) / 2.0 + ymin - size / 2.0)
    # if outside the image
    if y_offset < 0:
        y_offset = 0
    elif y_offset > (frame_shape[0] - size):
        y_offset = max(0, (frame_shape[0] - size))

    return (x_offset, y_offset, x_offset + size, y_offset + size)
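
A worked example of the sizing rules above, using made-up numbers: the longest edge of a 100x80 box doubles to 200, gets bumped to the 300 px minimum, and the square is centered on the box midpoint:

from frigate.util import calculate_region

# a 100x80 box at (500, 300) in a 1080p frame, default multiplier of 2
region = calculate_region((1080, 1920), 500, 300, 600, 380)
print(region)  # (400, 190, 700, 490)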
def get_yuv_crop(frame_shape, crop):
    # crop should be (x1,y1,x2,y2)
    frame_height = frame_shape[0] // 3 * 2
    frame_width = frame_shape[1]

    # compute the width/height of the uv channels
    uv_width = frame_width // 2  # width of the uv channels
    uv_height = frame_height // 4  # height of the uv channels

    # compute the offset for upper left corner of the uv channels
    uv_x_offset = crop[0] // 2  # x offset of the uv channels
    uv_y_offset = crop[1] // 4  # y offset of the uv channels

    # compute the width/height of the uv crops
    uv_crop_width = (crop[2] - crop[0]) // 2  # width of the cropped uv channels
    uv_crop_height = (crop[3] - crop[1]) // 4  # height of the cropped uv channels

    # ensure crop dimensions are multiples of 2 and 4
    y = (crop[0], crop[1], crop[0] + uv_crop_width * 2, crop[1] + uv_crop_height * 4)

    u1 = (
        0 + uv_x_offset,
        frame_height + uv_y_offset,
        0 + uv_x_offset + uv_crop_width,
        frame_height + uv_y_offset + uv_crop_height,
    )

    u2 = (
        uv_width + uv_x_offset,
        frame_height + uv_y_offset,
        uv_width + uv_x_offset + uv_crop_width,
        frame_height + uv_y_offset + uv_crop_height,
    )

    v1 = (
        0 + uv_x_offset,
        frame_height + uv_height + uv_y_offset,
        0 + uv_x_offset + uv_crop_width,
        frame_height + uv_height + uv_y_offset + uv_crop_height,
    )

    v2 = (
        uv_width + uv_x_offset,
        frame_height + uv_height + uv_y_offset,
        uv_width + uv_x_offset + uv_crop_width,
        frame_height + uv_height + uv_y_offset + uv_crop_height,
    )

    return y, u1, u2, v1, v2
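
A sanity check with concrete numbers, assuming a 1920x1080 source whose packed I420 array shape is (1620, 1920): the Y plane ends at row 1080, so every chroma crop starts there:

from frigate.util import get_yuv_crop

y, u1, u2, v1, v2 = get_yuv_crop((1620, 1920), (0, 0, 300, 300))
print(y)   # (0, 0, 300, 300)
print(u1)  # (0, 1080, 150, 1155) -- a quarter-size block below the Y plane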
def yuv_region_2_rgb(frame, region):
    try:
        height = frame.shape[0] // 3 * 2
        width = frame.shape[1]

        # get the crop box if the region extends beyond the frame
        crop_x1 = max(0, region[0])
        crop_y1 = max(0, region[1])
        # ensure these are a multiple of 4
        crop_x2 = min(width, region[2])
        crop_y2 = min(height, region[3])
        crop_box = (crop_x1, crop_y1, crop_x2, crop_y2)
@ -148,64 +168,65 @@ def yuv_region_2_rgb(frame, region):
        y_channel_x_offset = abs(min(0, region[0]))
        y_channel_y_offset = abs(min(0, region[1]))

        uv_channel_x_offset = y_channel_x_offset // 2
        uv_channel_y_offset = y_channel_y_offset // 4

        # create the yuv region frame
        # make sure the size is a multiple of 4
        size = (region[3] - region[1]) // 4 * 4
        yuv_cropped_frame = np.zeros((size + size // 2, size), np.uint8)

        # fill in black
        yuv_cropped_frame[:] = 128
        yuv_cropped_frame[0:size, 0:size] = 16

        # copy the y channel
        yuv_cropped_frame[
            y_channel_y_offset : y_channel_y_offset + y[3] - y[1],
            y_channel_x_offset : y_channel_x_offset + y[2] - y[0],
        ] = frame[y[1] : y[3], y[0] : y[2]]

        uv_crop_width = u1[2] - u1[0]
        uv_crop_height = u1[3] - u1[1]

        # copy u1
        yuv_cropped_frame[
            size + uv_channel_y_offset : size + uv_channel_y_offset + uv_crop_height,
            0 + uv_channel_x_offset : 0 + uv_channel_x_offset + uv_crop_width,
        ] = frame[u1[1] : u1[3], u1[0] : u1[2]]

        # copy u2
        yuv_cropped_frame[
            size + uv_channel_y_offset : size + uv_channel_y_offset + uv_crop_height,
            size // 2
            + uv_channel_x_offset : size // 2
            + uv_channel_x_offset
            + uv_crop_width,
        ] = frame[u2[1] : u2[3], u2[0] : u2[2]]

        # copy v1
        yuv_cropped_frame[
            size
            + size // 4
            + uv_channel_y_offset : size
            + size // 4
            + uv_channel_y_offset
            + uv_crop_height,
            0 + uv_channel_x_offset : 0 + uv_channel_x_offset + uv_crop_width,
        ] = frame[v1[1] : v1[3], v1[0] : v1[2]]

        # copy v2
        yuv_cropped_frame[
            size
            + size // 4
            + uv_channel_y_offset : size
            + size // 4
            + uv_channel_y_offset
            + uv_crop_height,
            size // 2
            + uv_channel_x_offset : size // 2
            + uv_channel_x_offset
            + uv_crop_width,
        ] = frame[v2[1] : v2[3], v2[0] : v2[2]]

        return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2RGB_I420)
except:
@ -213,27 +234,32 @@ def yuv_region_2_rgb(frame, region):
print(f"region: {region}")
raise
def intersection(box_a, box_b):
    return (
        max(box_a[0], box_b[0]),
        max(box_a[1], box_b[1]),
        min(box_a[2], box_b[2]),
        min(box_a[3], box_b[3]),
    )


def area(box):
    return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
def intersection_over_union(box_a, box_b):
# determine the (x, y)-coordinates of the intersection rectangle
intersect = intersection(box_a, box_b)
# compute the area of intersection rectangle
    inter_area = max(0, intersect[2] - intersect[0] + 1) * max(
        0, intersect[3] - intersect[1] + 1
    )
if inter_area == 0:
return 0.0
# compute the area of both the prediction and ground-truth
# rectangles
box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
@ -247,25 +273,29 @@ def intersection_over_union(box_a, box_b):
# return the intersection over union value
return iou
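
A worked example; note the +1 terms make the edges inclusive, so a 0..10 box counts as 11 pixels wide:

from frigate.util import intersection_over_union

box_a = (0, 0, 10, 10)
box_b = (5, 5, 15, 15)
# intersection is 6x6 = 36, union is 121 + 121 - 36 = 206
print(intersection_over_union(box_a, box_b))  # ~0.1748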
def clipped(obj, frame_shape):
# if the object is within 5 pixels of the region border, and the region is not on the edge
# consider the object to be clipped
box = obj[2]
region = obj[4]
    if (
        (region[0] > 5 and box[0] - region[0] <= 5)
        or (region[1] > 5 and box[1] - region[1] <= 5)
        or (frame_shape[1] - region[2] > 5 and region[2] - box[2] <= 5)
        or (frame_shape[0] - region[3] > 5 and region[3] - box[3] <= 5)
    ):
return True
else:
return False
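
clipped() consumes the (label, score, box, area, region) tuples used throughout the detection code; the values below are illustrative:

from frigate.util import clipped

obj = ("person", 0.9, (105, 200, 300, 400), 39000, (100, 100, 500, 500))
# the box's left edge sits within 5 px of the region's left edge, and that
# region edge is not the frame edge, so the object counts as clipped
print(clipped(obj, (1080, 1920)))  # True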
class EventsPerSecond:
def __init__(self, max_events=1000):
self._start = None
self._max_events = max_events
self._timestamps = []
def start(self):
self._start = datetime.datetime.now().timestamp()
@ -274,23 +304,28 @@ class EventsPerSecond:
self.start()
self._timestamps.append(datetime.datetime.now().timestamp())
# truncate the list when it goes 100 over the max_size
        if len(self._timestamps) > self._max_events + 100:
            self._timestamps = self._timestamps[(1 - self._max_events) :]

    def eps(self, last_n_seconds=10):
        if self._start is None:
            self.start()
        # compute the (approximate) events in the last n seconds
        now = datetime.datetime.now().timestamp()
        seconds = min(now - self._start, last_n_seconds)
        return (
            len([t for t in self._timestamps if t > (now - last_n_seconds)]) / seconds
        )
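
Typical usage, mirroring how the camera loops use this class (the loop and sleep interval are arbitrary):

import time
from frigate.util import EventsPerSecond

fps_tracker = EventsPerSecond()
fps_tracker.start()
for _ in range(30):
    fps_tracker.update()
    time.sleep(0.01)
print(fps_tracker.eps())  # roughly 30 events / 0.3 s, i.e. about 100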
def print_stack(sig, frame):
traceback.print_stack(frame)
def listen():
signal.signal(signal.SIGUSR1, print_stack)
def create_mask(frame_shape, mask):
mask_img = np.zeros(frame_shape, np.uint8)
mask_img[:] = 255
@ -304,11 +339,15 @@ def create_mask(frame_shape, mask):
return mask_img
def add_mask(mask, mask_img):
    points = mask.split(",")
    contour = np.array(
        [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)]
    )
    cv2.fillPoly(mask_img, pts=[contour], color=(0))
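
The mask strings from the config are flat "x1,y1,x2,y2,..." coordinate lists; each becomes a polygon zeroed out of the mask image. A hypothetical triangle in a 1080p mask:

import numpy as np
from frigate.util import add_mask

mask_img = np.zeros((1080, 1920), np.uint8)
mask_img[:] = 255
add_mask("0,0,1920,0,960,540", mask_img)
# pixels inside the triangle are now 0 and will be treated as masked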
class FrameManager(ABC):
@abstractmethod
def create(self, name, size) -> AnyStr:
@ -326,29 +365,31 @@ class FrameManager(ABC):
def delete(self, name):
pass
class DictFrameManager(FrameManager):
def __init__(self):
self.frames = {}
def create(self, name, size) -> AnyStr:
mem = bytearray(size)
self.frames[name] = mem
return mem
def get(self, name, shape):
mem = self.frames[name]
return np.ndarray(shape, dtype=np.uint8, buffer=mem)
def close(self, name):
pass
def delete(self, name):
del self.frames[name]
class SharedMemoryFrameManager(FrameManager):
def __init__(self):
self.shm_store = {}
def create(self, name, size) -> AnyStr:
shm = shared_memory.SharedMemory(name=name, create=True, size=size)
self.shm_store[name] = shm

View File

@ -1,12 +1,7 @@
import datetime
import itertools
import logging
import multiprocessing as mp
import queue
import subprocess as sp
import signal
@ -16,7 +11,7 @@ from collections import defaultdict
from setproctitle import setproctitle
from typing import Dict, List
from cv2 import cv2
import numpy as np
from frigate.config import CameraConfig
@ -24,19 +19,25 @@ from frigate.edgetpu import RemoteObjectDetector
from frigate.log import LogPipe
from frigate.motion import MotionDetector
from frigate.objects import ObjectTracker
from frigate.util import (
    EventsPerSecond,
    FrameManager,
    SharedMemoryFrameManager,
    calculate_region,
    clipped,
    listen,
    yuv_region_2_rgb,
)
logger = logging.getLogger(__name__)
def filtered(obj, objects_to_track, object_filters):
object_name = obj[0]
if not object_name in objects_to_track:
return True
if object_name in object_filters:
obj_settings = object_filters[object_name]
@ -44,7 +45,7 @@ def filtered(obj, objects_to_track, object_filters):
# detected object, don't add it to detected objects
if obj_settings.min_area > obj[3]:
return True
# if the detected object is larger than the
# max area, don't add it to detected objects
if obj_settings.max_area < obj[3]:
@ -53,29 +54,36 @@ def filtered(obj, objects_to_track, object_filters):
# if the score is lower than the min_score, skip
if obj_settings.min_score > obj[1]:
return True
if not obj_settings.mask is None:
            # compute the coordinates of the object and make sure
            # the location isn't outside the bounds of the image (can happen from rounding)
            y_location = min(int(obj[2][3]), len(obj_settings.mask) - 1)
            x_location = min(
                int((obj[2][2] - obj[2][0]) / 2.0) + obj[2][0],
                len(obj_settings.mask[0]) - 1,
            )
# if the object is in a masked location, don't add it to detected objects
if obj_settings.mask[y_location][x_location] == 0:
return True
return False
def create_tensor_input(frame, model_shape, region):
cropped_frame = yuv_region_2_rgb(frame, region)
# Resize to 300x300 if needed
if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
        cropped_frame = cv2.resize(
            cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR
        )
# Expand dimensions since the model expects images to have shape: [1, height, width, 3]
return np.expand_dims(cropped_frame, axis=0)
def stop_ffmpeg(ffmpeg_process, logger):
logger.info("Terminating the existing ffmpeg process...")
ffmpeg_process.terminate()
@ -88,18 +96,43 @@ def stop_ffmpeg(ffmpeg_process, logger):
ffmpeg_process.communicate()
ffmpeg_process = None
def start_or_restart_ffmpeg(
    ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
):
if not ffmpeg_process is None:
stop_ffmpeg(ffmpeg_process, logger)
if frame_size is None:
        process = sp.Popen(
            ffmpeg_cmd,
            stdout=sp.DEVNULL,
            stderr=logpipe,
            stdin=sp.DEVNULL,
            start_new_session=True,
        )
else:
        process = sp.Popen(
            ffmpeg_cmd,
            stdout=sp.PIPE,
            stderr=logpipe,
            stdin=sp.DEVNULL,
            bufsize=frame_size * 10,
            start_new_session=True,
        )
return process
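
A hypothetical invocation; the command, logger, and logpipe are placeholders, and frame_size matches a 1080p yuv420p frame so stdout is piped with a generous buffer:

frame_size = 1920 * 1080 * 3 // 2
ffmpeg_cmd = ["ffmpeg", "-i", "rtsp://example/stream", "-f", "rawvideo", "pipe:"]
process = start_or_restart_ffmpeg(
    ffmpeg_cmd,
    logger,   # module logger
    logpipe,  # a frigate.log.LogPipe instance
    frame_size=frame_size,
)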
def capture_frames(
    ffmpeg_process,
    camera_name,
    frame_shape,
    frame_manager: FrameManager,
    frame_queue,
    fps: mp.Value,
    skipped_fps: mp.Value,
    current_frame: mp.Value,
):
frame_size = frame_shape[0] * frame_shape[1]
frame_rate = EventsPerSecond()
@ -119,7 +152,9 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
logger.info(f"{camera_name}: ffmpeg sent a broken frame. {e}")
if ffmpeg_process.poll() != None:
logger.info(f"{camera_name}: ffmpeg process is not running. exiting capture thread...")
logger.info(
f"{camera_name}: ffmpeg process is not running. exiting capture thread..."
)
frame_manager.delete(frame_name)
break
continue
@ -138,8 +173,11 @@ def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: Fram
# add to the queue
frame_queue.put(current_frame.value)
class CameraWatchdog(threading.Thread):
    def __init__(
        self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event
    ):
threading.Thread.__init__(self)
self.logger = logging.getLogger(f"watchdog.{camera_name}")
self.camera_name = camera_name
@ -159,22 +197,27 @@ class CameraWatchdog(threading.Thread):
self.start_ffmpeg_detect()
for c in self.config.ffmpeg_cmds:
            if "detect" in c["roles"]:
continue
logpipe = LogPipe(f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}", logging.ERROR)
self.ffmpeg_other_processes.append({
'cmd': c['cmd'],
'logpipe': logpipe,
'process': start_or_restart_ffmpeg(c['cmd'], self.logger, logpipe)
})
logpipe = LogPipe(
f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}",
logging.ERROR,
)
self.ffmpeg_other_processes.append(
{
"cmd": c["cmd"],
"logpipe": logpipe,
"process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe),
}
)
time.sleep(10)
while True:
if self.stop_event.is_set():
stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
for p in self.ffmpeg_other_processes:
                    stop_ffmpeg(p["process"], self.logger)
                    p["logpipe"].close()
self.logpipe.close()
break
@ -184,7 +227,9 @@ class CameraWatchdog(threading.Thread):
self.logpipe.dump()
self.start_ffmpeg_detect()
elif now - self.capture_thread.current_frame.value > 20:
self.logger.info(f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg...")
self.logger.info(
f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg..."
)
self.ffmpeg_detect_process.terminate()
try:
self.logger.info("Waiting for ffmpeg to exit gracefully...")
@ -193,25 +238,37 @@ class CameraWatchdog(threading.Thread):
self.logger.info("FFmpeg didnt exit. Force killing...")
self.ffmpeg_detect_process.kill()
self.ffmpeg_detect_process.communicate()
for p in self.ffmpeg_other_processes:
                poll = p["process"].poll()
if poll == None:
continue
                p["logpipe"].dump()
                p["process"] = start_or_restart_ffmpeg(
                    p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
                )
# wait a bit before checking again
time.sleep(10)
def start_ffmpeg_detect(self):
        ffmpeg_cmd = [
            c["cmd"] for c in self.config.ffmpeg_cmds if "detect" in c["roles"]
        ][0]
        self.ffmpeg_detect_process = start_or_restart_ffmpeg(
            ffmpeg_cmd, self.logger, self.logpipe, self.frame_size
        )
self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid
        self.capture_thread = CameraCapture(
            self.camera_name,
            self.ffmpeg_detect_process,
            self.frame_shape,
            self.frame_queue,
            self.camera_fps,
        )
self.capture_thread.start()
class CameraCapture(threading.Thread):
def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
threading.Thread.__init__(self)
@ -223,32 +280,59 @@ class CameraCapture(threading.Thread):
self.skipped_fps = EventsPerSecond()
self.frame_manager = SharedMemoryFrameManager()
self.ffmpeg_process = ffmpeg_process
        self.current_frame = mp.Value("d", 0.0)
self.last_frame = 0
def run(self):
self.skipped_fps.start()
        capture_frames(
            self.ffmpeg_process,
            self.camera_name,
            self.frame_shape,
            self.frame_manager,
            self.frame_queue,
            self.fps,
            self.skipped_fps,
            self.current_frame,
        )
def capture_camera(name, config: CameraConfig, process_info):
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
    frame_queue = process_info["frame_queue"]
    camera_watchdog = CameraWatchdog(
        name,
        config,
        frame_queue,
        process_info["camera_fps"],
        process_info["ffmpeg_pid"],
        stop_event,
    )
camera_watchdog.start()
camera_watchdog.join()
def track_camera(
    name,
    config: CameraConfig,
    model_shape,
    detection_queue,
    result_connection,
    detected_objects_queue,
    process_info,
):
stop_event = mp.Event()
def receiveSignal(signalNumber, frame):
stop_event.set()
signal.signal(signal.SIGTERM, receiveSignal)
signal.signal(signal.SIGINT, receiveSignal)
@ -256,71 +340,113 @@ def track_camera(name, config: CameraConfig, model_shape, detection_queue, resul
setproctitle(f"frigate.process:{name}")
listen()
    frame_queue = process_info["frame_queue"]
    detection_enabled = process_info["detection_enabled"]
frame_shape = config.frame_shape
objects_to_track = config.objects.track
object_filters = config.objects.filters
motion_detector = MotionDetector(frame_shape, config.motion)
    object_detector = RemoteObjectDetector(
        name, "/labelmap.txt", detection_queue, result_connection, model_shape
    )
object_tracker = ObjectTracker(config.detect)
frame_manager = SharedMemoryFrameManager()
    process_frames(
        name,
        frame_queue,
        frame_shape,
        model_shape,
        frame_manager,
        motion_detector,
        object_detector,
        object_tracker,
        detected_objects_queue,
        process_info,
        objects_to_track,
        object_filters,
        detection_enabled,
        stop_event,
    )
logger.info(f"{name}: exiting subprocess")
def reduce_boxes(boxes):
if len(boxes) == 0:
return []
    reduced_boxes = cv2.groupRectangles(
        [list(b) for b in itertools.chain(boxes, boxes)], 1, 0.2
    )[0]
return [tuple(b) for b in reduced_boxes]
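
cv2.groupRectangles discards any rectangle without at least groupThreshold neighbors, so doubling the list via itertools.chain(boxes, boxes) guarantees every box is its own neighbor: lone boxes survive while overlapping ones are merged. For example:

# two overlapping boxes collapse into one averaged box
print(reduce_boxes([(0, 0, 100, 100), (10, 10, 110, 110)]))
# a lone box is returned rather than thrown away
print(reduce_boxes([(500, 500, 600, 600)]))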
# modified from https://stackoverflow.com/a/40795835
def intersects_any(box_a, boxes):
for box in boxes:
        if (
            box_a[2] < box[0]
            or box_a[0] > box[2]
            or box_a[1] > box[3]
            or box_a[3] < box[1]
        ):
continue
return True
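
Note the fall-through: when nothing overlaps, the function returns None rather than False, which is still falsy in the boolean contexts where it is used:

print(intersects_any((0, 0, 50, 50), [(40, 40, 100, 100)]))  # True
print(intersects_any((0, 0, 10, 10), [(500, 500, 600, 600)]))  # None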
def detect(
    object_detector, frame, model_shape, region, objects_to_track, object_filters
):
tensor_input = create_tensor_input(frame, model_shape, region)
detections = []
region_detections = object_detector.detect(tensor_input)
for d in region_detections:
box = d[2]
        size = region[2] - region[0]
x_min = int((box[1] * size) + region[0])
y_min = int((box[0] * size) + region[1])
x_max = int((box[3] * size) + region[0])
y_max = int((box[2] * size) + region[1])
        det = (
            d[0],
            d[1],
            (x_min, y_min, x_max, y_max),
            (x_max - x_min) * (y_max - y_min),
            region,
        )
# apply object filters
if filtered(det, objects_to_track, object_filters):
continue
detections.append(det)
return detections
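
The detector returns boxes normalized to the square region in (ymin, xmin, ymax, xmax) order; the mapping back to frame coordinates, pulled out of the loop above with concrete numbers:

region = (400, 190, 700, 490)  # a 300 px square crop
box = (0.1, 0.2, 0.5, 0.6)     # normalized detector output
size = region[2] - region[0]
x_min = int((box[1] * size) + region[0])  # 460
y_min = int((box[0] * size) + region[1])  # 220
x_max = int((box[3] * size) + region[0])  # 580
y_max = int((box[2] * size) + region[1])  # 340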
def process_frames(
    camera_name: str,
    frame_queue: mp.Queue,
    frame_shape,
    model_shape,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
    detected_objects_queue: mp.Queue,
    process_info: Dict,
    objects_to_track: List[str],
    object_filters,
    detection_enabled: mp.Value,
    stop_event,
    exit_on_empty: bool = False,
):
    fps = process_info["process_fps"]
    detection_fps = process_info["detection_fps"]
    current_frame_time = process_info["detection_frame"]
fps_tracker = EventsPerSecond()
fps_tracker.start()
@ -340,7 +466,9 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
current_frame_time.value = frame_time
frame = frame_manager.get(f"{camera_name}{frame_time}", (frame_shape[0]*3//2, frame_shape[1]))
frame = frame_manager.get(
f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
)
if frame is None:
logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
@ -349,7 +477,9 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
if not detection_enabled.value:
fps.value = fps_tracker.eps()
object_tracker.match_and_update(frame_time, [])
            detected_objects_queue.put(
                (camera_name, frame_time, object_tracker.tracked_objects, [], [])
            )
detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{camera_name}{frame_time}")
continue
@ -358,27 +488,44 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
motion_boxes = motion_detector.detect(frame)
# only get the tracked object boxes that intersect with motion
        tracked_object_boxes = [
            obj["box"]
            for obj in object_tracker.tracked_objects.values()
            if intersects_any(obj["box"], motion_boxes)
        ]
# combine motion boxes with known locations of existing objects
combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
# compute regions
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.2)
            for a in combined_boxes
        ]
# combine overlapping regions
combined_regions = reduce_boxes(regions)
# re-compute regions
        regions = [
            calculate_region(frame_shape, a[0], a[1], a[2], a[3], 1.0)
            for a in combined_regions
        ]
# resize regions and detect
detections = []
for region in regions:
            detections.extend(
                detect(
                    object_detector,
                    frame,
                    model_shape,
                    region,
                    objects_to_track,
                    object_filters,
                )
            )
#########
# merge objects, check for clipped objects and look again up to 4 times
#########
@ -396,8 +543,10 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
for group in detected_object_groups.values():
# apply non-maxima suppression to suppress weak, overlapping bounding boxes
                boxes = [
                    (o[2][0], o[2][1], o[2][2] - o[2][0], o[2][3] - o[2][1])
                    for o in group
                ]
confidences = [o[1] for o in group]
idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
@ -406,17 +555,26 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
if clipped(obj, frame_shape):
box = obj[2]
# calculate a new region that will hopefully get the entire object
                        region = calculate_region(
                            frame_shape, box[0], box[1], box[2], box[3]
                        )
regions.append(region)
                        selected_objects.extend(
                            detect(
                                object_detector,
                                frame,
                                model_shape,
                                region,
                                objects_to_track,
                                object_filters,
                            )
                        )
refining = True
else:
                        selected_objects.append(obj)
# set the detections list to only include top, complete objects
# and new detections
detections = selected_objects
@ -426,18 +584,28 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
# Limit to the detections overlapping with motion areas
# to avoid picking up stationary background objects
            detections_with_motion = [
                d for d in detections if intersects_any(d[2], motion_boxes)
            ]
# now that we have refined our detections, we need to track objects
object_tracker.match_and_update(frame_time, detections_with_motion)
# add to the queue if not full
        if detected_objects_queue.full():
frame_manager.delete(f"{camera_name}{frame_time}")
continue
else:
fps_tracker.update()
fps.value = fps_tracker.eps()
            detected_objects_queue.put(
                (
                    camera_name,
                    frame_time,
                    object_tracker.tracked_objects,
                    motion_boxes,
                    regions,
                )
            )
detection_fps.value = object_detector.fps.eps()
frame_manager.close(f"{camera_name}{frame_time}")

View File

@ -7,10 +7,11 @@ import signal
logger = logging.getLogger(__name__)
class FrigateWatchdog(threading.Thread):
def __init__(self, detectors, stop_event):
threading.Thread.__init__(self)
        self.name = "frigate_watchdog"
self.detectors = detectors
self.stop_event = stop_event
@ -29,9 +30,10 @@ class FrigateWatchdog(threading.Thread):
# check the detection processes
for detector in self.detectors.values():
detection_start = detector.detection_start.value
            if detection_start > 0.0 and now - detection_start > 10:
                logger.info(
                    "Detection appears to be stuck. Restarting detection process..."
                )
detector.start_or_restart()
elif not detector.detect_process.is_alive():
logger.info("Detection appears to have stopped. Exiting frigate...")

View File

@ -31,6 +31,7 @@ def get_local_ip() -> str:
finally:
sock.close()
def broadcast_zeroconf(frigate_id):
zeroconf = Zeroconf(interfaces=InterfaceChoice.Default, ip_version=IPVersion.V4Only)

View File

@ -32,10 +32,14 @@ except ImportError:
SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
migrator.sql('CREATE TABLE IF NOT EXISTS "event" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "label" VARCHAR(20) NOT NULL, "camera" VARCHAR(20) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "top_score" REAL NOT NULL, "false_positive" INTEGER NOT NULL, "zones" JSON NOT NULL, "thumbnail" TEXT NOT NULL)')
migrator.sql(
'CREATE TABLE IF NOT EXISTS "event" ("id" VARCHAR(30) NOT NULL PRIMARY KEY, "label" VARCHAR(20) NOT NULL, "camera" VARCHAR(20) NOT NULL, "start_time" DATETIME NOT NULL, "end_time" DATETIME NOT NULL, "top_score" REAL NOT NULL, "false_positive" INTEGER NOT NULL, "zones" JSON NOT NULL, "thumbnail" TEXT NOT NULL)'
)
migrator.sql('CREATE INDEX IF NOT EXISTS "event_label" ON "event" ("label")')
migrator.sql('CREATE INDEX IF NOT EXISTS "event_camera" ON "event" ("camera")')
def rollback(migrator, database, fake=False, **kwargs):
pass

View File

@ -35,7 +35,12 @@ SQL = pw.SQL
def migrate(migrator, database, fake=False, **kwargs):
    migrator.add_fields(
        Event,
        has_clip=pw.BooleanField(default=True),
        has_snapshot=pw.BooleanField(default=True),
    )
def rollback(migrator, database, fake=False, **kwargs):
    migrator.remove_fields(Event, ["has_clip", "has_snapshot"])