import datetime
import itertools
import logging
import multiprocessing as mp
import queue
import random
import signal
import subprocess as sp
import threading
import time
from collections import defaultdict
from typing import Dict, List

import numpy as np
import cv2
from setproctitle import setproctitle

from frigate.config import CameraConfig, DetectConfig
from frigate.edgetpu import RemoteObjectDetector
from frigate.log import LogPipe
from frigate.motion import MotionDetector
from frigate.objects import ObjectTracker
from frigate.util import (
    EventsPerSecond,
    FrameManager,
    SharedMemoryFrameManager,
    area,
    calculate_region,
    clipped,
    intersection,
    intersection_over_union,
    listen,
    yuv_region_2_rgb,
)

logger = logging.getLogger(__name__)


def filtered(obj, objects_to_track, object_filters):
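    """Return True if a raw detection should be discarded.

    A detection is filtered out when its label is not tracked, when it falls
    outside the configured min/max area or below min_score, or when its
    bottom-center point lands on a masked pixel.
    """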
    object_name = obj[0]

    if object_name not in objects_to_track:
        return True

    if object_name in object_filters:
        obj_settings = object_filters[object_name]

        # if the min area is larger than the
        # detected object, don't add it to detected objects
        if obj_settings.min_area > obj[3]:
            return True

        # if the detected object is larger than the
        # max area, don't add it to detected objects
        if obj_settings.max_area < obj[3]:
            return True

        # if the score is lower than the min_score, skip
        if obj_settings.min_score > obj[1]:
            return True

        if obj_settings.mask is not None:
            # compute the coordinates of the object and make sure
            # the location isn't outside the bounds of the image (can happen from rounding)
            y_location = min(int(obj[2][3]), len(obj_settings.mask) - 1)
            x_location = min(
                int((obj[2][2] - obj[2][0]) / 2.0) + obj[2][0],
                len(obj_settings.mask[0]) - 1,
            )

            # if the object is in a masked location, don't add it to detected objects
            if obj_settings.mask[y_location][x_location] == 0:
                return True

    return False


def create_tensor_input(frame, model_shape, region):
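    """Crop the YUV frame to the region, convert it to RGB, and shape it for the model."""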
    cropped_frame = yuv_region_2_rgb(frame, region)

    # Resize if needed to match the model input shape
    if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
        cropped_frame = cv2.resize(
            cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR
        )

    # Expand dimensions since the model expects images to have shape: [1, height, width, 3]
    return np.expand_dims(cropped_frame, axis=0)


def stop_ffmpeg(ffmpeg_process, logger):
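    """Terminate an ffmpeg process, force killing it if it does not exit within 30 seconds."""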
    logger.info("Terminating the existing ffmpeg process...")
    ffmpeg_process.terminate()
    try:
        logger.info("Waiting for ffmpeg to exit gracefully...")
        ffmpeg_process.communicate(timeout=30)
    except sp.TimeoutExpired:
        logger.info("FFmpeg didn't exit. Force killing...")
        ffmpeg_process.kill()
        ffmpeg_process.communicate()
    ffmpeg_process = None


def start_or_restart_ffmpeg(
    ffmpeg_cmd, logger, logpipe: LogPipe, frame_size=None, ffmpeg_process=None
):
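    """Start ffmpeg, stopping any existing process first.

    When frame_size is set, stdout is piped so raw frames can be read;
    otherwise stdout is discarded and only stderr is captured via the logpipe.
    """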
    if ffmpeg_process is not None:
        stop_ffmpeg(ffmpeg_process, logger)

    if frame_size is None:
        process = sp.Popen(
            ffmpeg_cmd,
            stdout=sp.DEVNULL,
            stderr=logpipe,
            stdin=sp.DEVNULL,
            start_new_session=True,
        )
    else:
        process = sp.Popen(
            ffmpeg_cmd,
            stdout=sp.PIPE,
            stderr=logpipe,
            stdin=sp.DEVNULL,
            bufsize=frame_size * 10,
            start_new_session=True,
        )
    return process


def capture_frames(
    ffmpeg_process,
    camera_name,
    frame_shape,
    frame_manager: FrameManager,
    frame_queue,
    fps: mp.Value,
    skipped_fps: mp.Value,
    current_frame: mp.Value,
):
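    """Read raw YUV frames from ffmpeg stdout into shared memory and queue their timestamps.

    Frames are skipped (and counted via skipped_fps) when the frame queue is full.
    """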
    frame_size = frame_shape[0] * frame_shape[1]
    frame_rate = EventsPerSecond()
    frame_rate.start()
    skipped_eps = EventsPerSecond()
    skipped_eps.start()
    while True:
        fps.value = frame_rate.eps()
        skipped_fps.value = skipped_eps.eps()

        current_frame.value = datetime.datetime.now().timestamp()
        frame_name = f"{camera_name}{current_frame.value}"
        frame_buffer = frame_manager.create(frame_name, frame_size)
        try:
            frame_buffer[:] = ffmpeg_process.stdout.read(frame_size)
        except Exception:
            logger.error(f"{camera_name}: Unable to read frames from ffmpeg process.")

            if ffmpeg_process.poll() is not None:
                logger.error(
                    f"{camera_name}: ffmpeg process is not running. exiting capture thread..."
                )
                frame_manager.delete(frame_name)
                break
            continue

        frame_rate.update()

        # if the queue is full, skip this frame
        if frame_queue.full():
            skipped_eps.update()
            frame_manager.delete(frame_name)
            continue

        # close the frame
        frame_manager.close(frame_name)

        # add to the queue
        frame_queue.put(current_frame.value)


class CameraWatchdog(threading.Thread):
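    """Supervises the ffmpeg processes for a single camera.

    Restarts the detect process when the capture thread dies or stops
    producing frames, and restarts any other ffmpeg processes that exit.
    """
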
    def __init__(
        self, camera_name, config, frame_queue, camera_fps, ffmpeg_pid, stop_event
    ):
        threading.Thread.__init__(self)
        self.logger = logging.getLogger(f"watchdog.{camera_name}")
        self.camera_name = camera_name
        self.config = config
        self.capture_thread = None
        self.ffmpeg_detect_process = None
        self.logpipe = LogPipe(f"ffmpeg.{self.camera_name}.detect", logging.ERROR)
        self.ffmpeg_other_processes = []
        self.camera_fps = camera_fps
        self.ffmpeg_pid = ffmpeg_pid
        self.frame_queue = frame_queue
        self.frame_shape = self.config.frame_shape_yuv
        self.frame_size = self.frame_shape[0] * self.frame_shape[1]
        self.stop_event = stop_event

    def run(self):
        self.start_ffmpeg_detect()

        for c in self.config.ffmpeg_cmds:
            if "detect" in c["roles"]:
                continue
            logpipe = LogPipe(
                f"ffmpeg.{self.camera_name}.{'_'.join(sorted(c['roles']))}",
                logging.ERROR,
            )
            self.ffmpeg_other_processes.append(
                {
                    "cmd": c["cmd"],
                    "logpipe": logpipe,
                    "process": start_or_restart_ffmpeg(c["cmd"], self.logger, logpipe),
                }
            )

        time.sleep(10)
        while not self.stop_event.wait(10):
            now = datetime.datetime.now().timestamp()

            if not self.capture_thread.is_alive():
                self.logger.error(
                    f"Ffmpeg process crashed unexpectedly for {self.camera_name}."
                )
                self.logger.error(
                    "The following ffmpeg logs include the last 100 lines prior to exit."
                )
                self.logpipe.dump()
                self.start_ffmpeg_detect()
            elif now - self.capture_thread.current_frame.value > 20:
                self.logger.info(
                    f"No frames received from {self.camera_name} in 20 seconds. Exiting ffmpeg..."
                )
                self.ffmpeg_detect_process.terminate()
                try:
                    self.logger.info("Waiting for ffmpeg to exit gracefully...")
                    self.ffmpeg_detect_process.communicate(timeout=30)
                except sp.TimeoutExpired:
                    self.logger.info("FFmpeg didn't exit. Force killing...")
                    self.ffmpeg_detect_process.kill()
                    self.ffmpeg_detect_process.communicate()

            for p in self.ffmpeg_other_processes:
                poll = p["process"].poll()
                if poll is None:
                    continue
                p["logpipe"].dump()
                p["process"] = start_or_restart_ffmpeg(
                    p["cmd"], self.logger, p["logpipe"], ffmpeg_process=p["process"]
                )

        stop_ffmpeg(self.ffmpeg_detect_process, self.logger)
        for p in self.ffmpeg_other_processes:
            stop_ffmpeg(p["process"], self.logger)
            p["logpipe"].close()
        self.logpipe.close()

    def start_ffmpeg_detect(self):
        ffmpeg_cmd = [
            c["cmd"] for c in self.config.ffmpeg_cmds if "detect" in c["roles"]
        ][0]
        self.ffmpeg_detect_process = start_or_restart_ffmpeg(
            ffmpeg_cmd, self.logger, self.logpipe, self.frame_size
        )
        self.ffmpeg_pid.value = self.ffmpeg_detect_process.pid
        self.capture_thread = CameraCapture(
            self.camera_name,
            self.ffmpeg_detect_process,
            self.frame_shape,
            self.frame_queue,
            self.camera_fps,
        )
        self.capture_thread.start()


2021-02-17 14:23:32 +01:00
|
|
|
|
2020-03-14 21:32:51 +01:00
|
|
|
class CameraCapture(threading.Thread):
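    """Thread that runs capture_frames for a camera's detect ffmpeg process."""
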
    def __init__(self, camera_name, ffmpeg_process, frame_shape, frame_queue, fps):
        threading.Thread.__init__(self)
        self.name = f"capture:{camera_name}"
        self.camera_name = camera_name
        self.frame_shape = frame_shape
        self.frame_queue = frame_queue
        self.fps = fps
        self.skipped_fps = EventsPerSecond()
        self.frame_manager = SharedMemoryFrameManager()
        self.ffmpeg_process = ffmpeg_process
        self.current_frame = mp.Value("d", 0.0)
        self.last_frame = 0

    def run(self):
        self.skipped_fps.start()
        capture_frames(
            self.ffmpeg_process,
            self.camera_name,
            self.frame_shape,
            self.frame_manager,
            self.frame_queue,
            self.fps,
            self.skipped_fps,
            self.current_frame,
        )


def capture_camera(name, config: CameraConfig, process_info):
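    """Entry point for the camera capture process: runs a CameraWatchdog until signaled to stop."""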
    stop_event = mp.Event()

    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    frame_queue = process_info["frame_queue"]
    camera_watchdog = CameraWatchdog(
        name,
        config,
        frame_queue,
        process_info["camera_fps"],
        process_info["ffmpeg_pid"],
        stop_event,
    )
    camera_watchdog.start()
    camera_watchdog.join()


def track_camera(
    name,
    config: CameraConfig,
    model_shape,
    labelmap,
    detection_queue,
    result_connection,
    detected_objects_queue,
    process_info,
):
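    """Entry point for the object tracking process of a single camera.

    Wires up motion detection, the remote object detector, and the object
    tracker, then runs process_frames until signaled to stop.
    """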
    stop_event = mp.Event()

    def receiveSignal(signalNumber, frame):
        stop_event.set()

    signal.signal(signal.SIGTERM, receiveSignal)
    signal.signal(signal.SIGINT, receiveSignal)

    threading.current_thread().name = f"process:{name}"
    setproctitle(f"frigate.process:{name}")
    listen()

    frame_queue = process_info["frame_queue"]
    detection_enabled = process_info["detection_enabled"]

    frame_shape = config.frame_shape
    objects_to_track = config.objects.track
    object_filters = config.objects.filters

    motion_detector = MotionDetector(frame_shape, config.motion)
    object_detector = RemoteObjectDetector(
        name, labelmap, detection_queue, result_connection, model_shape
    )

    object_tracker = ObjectTracker(config.detect)

    frame_manager = SharedMemoryFrameManager()

    process_frames(
        name,
        frame_queue,
        frame_shape,
        model_shape,
        config.detect,
        frame_manager,
        motion_detector,
        object_detector,
        object_tracker,
        detected_objects_queue,
        process_info,
        objects_to_track,
        object_filters,
        detection_enabled,
        stop_event,
    )

    logger.info(f"{name}: exiting subprocess")


def box_overlaps(b1, b2):
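    """Return True if the two [xmin, ymin, xmax, ymax] boxes overlap."""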
    if b1[2] < b2[0] or b1[0] > b2[2] or b1[1] > b2[3] or b1[3] < b2[1]:
        return False
    return True


def reduce_boxes(boxes, iou_threshold=0.0):
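    """Merge boxes whose intersection-over-union exceeds iou_threshold into single clusters."""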
    clusters = []

    for box in boxes:
        matched = 0
        for cluster in clusters:
            if intersection_over_union(box, cluster) > iou_threshold:
                matched = 1
                cluster[0] = min(cluster[0], box[0])
                cluster[1] = min(cluster[1], box[1])
                cluster[2] = max(cluster[2], box[2])
                cluster[3] = max(cluster[3], box[3])

        if not matched:
            clusters.append(list(box))

    return [tuple(c) for c in clusters]


def intersects_any(box_a, boxes):
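    """Return True if box_a overlaps any box in boxes."""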
    for box in boxes:
        if box_overlaps(box_a, box):
            return True
    return False


def detect(
    object_detector, frame, model_shape, region, objects_to_track, object_filters
):
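    """Run object detection on a single region and map the results back to frame coordinates."""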
    tensor_input = create_tensor_input(frame, model_shape, region)

    detections = []
    region_detections = object_detector.detect(tensor_input)
    for d in region_detections:
        box = d[2]
        size = region[2] - region[0]
        x_min = int((box[1] * size) + region[0])
        y_min = int((box[0] * size) + region[1])
        x_max = int((box[3] * size) + region[0])
        y_max = int((box[2] * size) + region[1])
        det = (
            d[0],
            d[1],
            (x_min, y_min, x_max, y_max),
            (x_max - x_min) * (y_max - y_min),
            region,
        )
        # apply object filters
        if filtered(det, objects_to_track, object_filters):
            continue
        detections.append(det)
    return detections


def process_frames(
    camera_name: str,
    frame_queue: mp.Queue,
    frame_shape,
    model_shape,
    detect_config: DetectConfig,
    frame_manager: FrameManager,
    motion_detector: MotionDetector,
    object_detector: RemoteObjectDetector,
    object_tracker: ObjectTracker,
    detected_objects_queue: mp.Queue,
    process_info: Dict,
    objects_to_track: List[str],
    object_filters,
    detection_enabled: mp.Value,
    stop_event,
    exit_on_empty: bool = False,
):
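    """Main per-camera detection loop.

    For each frame: detect motion, build detection regions from motion and
    tracked objects, run object detection with region refinement, consolidate
    overlapping detections, update the object tracker, and publish results to
    detected_objects_queue.
    """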
    fps = process_info["process_fps"]
    detection_fps = process_info["detection_fps"]
    current_frame_time = process_info["detection_frame"]

    fps_tracker = EventsPerSecond()
    fps_tracker.start()

    startup_scan_counter = 0

    while not stop_event.is_set():
        if exit_on_empty and frame_queue.empty():
            logger.info("Exiting track_objects...")
            break

        try:
            frame_time = frame_queue.get(True, 10)
        except queue.Empty:
            continue

        current_frame_time.value = frame_time

        frame = frame_manager.get(
            f"{camera_name}{frame_time}", (frame_shape[0] * 3 // 2, frame_shape[1])
        )

        if frame is None:
            logger.info(f"{camera_name}: frame {frame_time} is not in memory store.")
            continue

        # look for motion
        motion_boxes = motion_detector.detect(frame)

        regions = []

        # if detection is disabled
        if not detection_enabled.value:
            object_tracker.match_and_update(frame_time, [])
        else:
            # get stationary object ids
            # check every Nth frame for stationary objects
            # disappeared objects are not stationary
            # also check for overlapping motion boxes
            stationary_object_ids = [
                obj["id"]
                for obj in object_tracker.tracked_objects.values()
                # if there hasn't been motion for 10 frames
                if obj["motionless_count"] >= 10
                # and it isn't due for a periodic check
                and (
                    detect_config.stationary.interval == 0
                    or obj["motionless_count"] % detect_config.stationary.interval != 0
                )
                # and it hasn't disappeared
                and object_tracker.disappeared[obj["id"]] == 0
                # and it doesn't overlap with any current motion boxes
                and not intersects_any(obj["box"], motion_boxes)
            ]

            # get tracked object boxes that aren't stationary
            tracked_object_boxes = [
                obj["box"]
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] not in stationary_object_ids
            ]

            # combine motion boxes with known locations of existing objects
            combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)

            region_min_size = max(model_shape[0], model_shape[1])
            # compute regions
            regions = [
                calculate_region(
                    frame_shape,
                    a[0],
                    a[1],
                    a[2],
                    a[3],
                    region_min_size,
                    multiplier=random.uniform(1.2, 1.5),
                )
                for a in combined_boxes
            ]

            # consolidate regions with heavy overlap
            regions = [
                calculate_region(
                    frame_shape, a[0], a[1], a[2], a[3], region_min_size, multiplier=1.0
                )
                for a in reduce_boxes(regions, 0.4)
            ]

            # if starting up, get the next startup scan region
            if startup_scan_counter < 9:
                ymin = int(frame_shape[0] / 3 * startup_scan_counter / 3)
                ymax = int(frame_shape[0] / 3 + ymin)
                xmin = int(frame_shape[1] / 3 * startup_scan_counter / 3)
                xmax = int(frame_shape[1] / 3 + xmin)
                regions.append(
                    calculate_region(
                        frame_shape,
                        xmin,
                        ymin,
                        xmax,
                        ymax,
                        region_min_size,
                        multiplier=1.2,
                    )
                )
                startup_scan_counter += 1

            # resize regions and detect
            # seed with stationary objects
            detections = [
                (
                    obj["label"],
                    obj["score"],
                    obj["box"],
                    obj["area"],
                    obj["region"],
                )
                for obj in object_tracker.tracked_objects.values()
                if obj["id"] in stationary_object_ids
            ]

            for region in regions:
                detections.extend(
                    detect(
                        object_detector,
                        frame,
                        model_shape,
                        region,
                        objects_to_track,
                        object_filters,
                    )
                )

            #########
            # merge objects, check for clipped objects and look again up to 4 times
            #########
            refining = len(regions) > 0
            refine_count = 0
            while refining and refine_count < 4:
                refining = False

                # group by name
                detected_object_groups = defaultdict(lambda: [])
                for detection in detections:
                    detected_object_groups[detection[0]].append(detection)

                selected_objects = []
                for group in detected_object_groups.values():

                    # apply non-maxima suppression to suppress weak, overlapping bounding boxes
                    boxes = [
                        (o[2][0], o[2][1], o[2][2] - o[2][0], o[2][3] - o[2][1])
                        for o in group
                    ]
                    confidences = [o[1] for o in group]
                    idxs = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

                    for index in idxs:
                        index = index if isinstance(index, np.int32) else index[0]
                        obj = group[index]
                        if clipped(obj, frame_shape):
                            box = obj[2]
                            # calculate a new region that will hopefully get the entire object
                            region = calculate_region(
                                frame_shape,
                                box[0],
                                box[1],
                                box[2],
                                box[3],
                                region_min_size,
                            )

                            regions.append(region)

                            selected_objects.extend(
                                detect(
                                    object_detector,
                                    frame,
                                    model_shape,
                                    region,
                                    objects_to_track,
                                    object_filters,
                                )
                            )

                            refining = True
                        else:
                            selected_objects.append(obj)
                # set the detections list to only include top, complete objects
                # and new detections
                detections = selected_objects

                if refining:
                    refine_count += 1

            ## drop detections that overlap too much
            consolidated_detections = []

            # if detection was run on this frame, consolidate
            if len(regions) > 0:
                # group by name
                detected_object_groups = defaultdict(lambda: [])
                for detection in detections:
                    detected_object_groups[detection[0]].append(detection)

                # loop over detections grouped by label
                for group in detected_object_groups.values():
                    # if the group only has 1 item, skip
                    if len(group) == 1:
                        consolidated_detections.append(group[0])
                        continue

                    # sort smallest to largest by area
                    sorted_by_area = sorted(group, key=lambda g: g[3])

                    for current_detection_idx in range(0, len(sorted_by_area)):
                        current_detection = sorted_by_area[current_detection_idx][2]
                        overlap = 0
                        for to_check_idx in range(
                            min(current_detection_idx + 1, len(sorted_by_area)),
                            len(sorted_by_area),
                        ):
                            to_check = sorted_by_area[to_check_idx][2]
                            # if 90% of smaller detection is inside of another detection, consolidate
                            if (
                                area(intersection(current_detection, to_check))
                                / area(current_detection)
                                > 0.9
                            ):
                                overlap = 1
                                break
                        if overlap == 0:
                            consolidated_detections.append(
                                sorted_by_area[current_detection_idx]
                            )
                # now that we have refined our detections, we need to track objects
                object_tracker.match_and_update(frame_time, consolidated_detections)
            # else, just update the frame times for the stationary objects
            else:
                object_tracker.update_frame_times(frame_time)

        # add to the queue if not full
        if detected_objects_queue.full():
            frame_manager.delete(f"{camera_name}{frame_time}")
            continue
        else:
            fps_tracker.update()
            fps.value = fps_tracker.eps()
            detected_objects_queue.put(
                (
                    camera_name,
                    frame_time,
                    object_tracker.tracked_objects,
                    motion_boxes,
                    regions,
                )
            )
            detection_fps.value = object_detector.fps.eps()
            frame_manager.close(f"{camera_name}{frame_time}")