create typed config classes

Blake Blackshear 2020-11-03 08:15:58 -06:00
parent b7c09a9b38
commit af303cbf2a
9 changed files with 733 additions and 241 deletions

frigate/app.py

@@ -7,100 +7,31 @@ import multiprocessing as mp
 from playhouse.sqlite_ext import SqliteExtDatabase
 from typing import Dict, List
-from frigate.config import FRIGATE_CONFIG_SCHEMA
+from frigate.config import FrigateConfig
 from frigate.edgetpu import EdgeTPUProcess
 from frigate.events import EventProcessor
 from frigate.http import create_app
 from frigate.models import Event
 from frigate.mqtt import create_mqtt_client
 from frigate.object_processing import TrackedObjectProcessor
-from frigate.video import get_frame_shape, track_camera, get_ffmpeg_input, capture_camera
+from frigate.video import track_camera, capture_camera
 from frigate.watchdog import FrigateWatchdog

 class FrigateApp():
     def __init__(self):
         self.stop_event = mp.Event()
-        self.config: dict = None
+        self.config: FrigateConfig = None
         self.detection_queue = mp.Queue()
-        self.detectors: Dict[str: EdgeTPUProcess] = {}
-        self.detection_out_events: Dict[str: mp.Event] = {}
+        self.detectors: Dict[str, EdgeTPUProcess] = {}
+        self.detection_out_events: Dict[str, mp.Event] = {}
         self.detection_shms: List[mp.shared_memory.SharedMemory] = []
         self.camera_metrics = {}

     def init_config(self):
-        # TODO: sub in FRIGATE_ENV vars
-        frigate_env_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
         config_file = os.environ.get('CONFIG_FILE', '/config/config.yml')
-
-        with open(config_file) as f:
-            raw_config = f.read()
-
-        if config_file.endswith(".yml"):
-            config = yaml.safe_load(raw_config)
-        elif config_file.endswith(".json"):
-            config = json.loads(raw_config)
-
-        self.config = FRIGATE_CONFIG_SCHEMA(config)
-
-        if 'password' in self.config['mqtt']:
-            self.config['mqtt']['password'] = self.config['mqtt']['password'].format(**frigate_env_vars)
-
-        cache_dir = self.config['save_clips']['cache_dir']
-        clips_dir = self.config['save_clips']['clips_dir']
-
-        if not os.path.exists(cache_dir) and not os.path.islink(cache_dir):
-            os.makedirs(cache_dir)
-        if not os.path.exists(clips_dir) and not os.path.islink(clips_dir):
-            os.makedirs(clips_dir)
-
-        for camera_name, camera_config in self.config['cameras'].items():
-            # set shape
-            if 'width' in camera_config and 'height' in camera_config:
-                frame_shape = (camera_config['height'], camera_config['width'], 3)
-            else:
-                frame_shape = get_frame_shape(camera_config['ffmpeg']['input'])
-
-            camera_config['frame_shape'] = frame_shape
-
-            # build ffmpeg command
-            ffmpeg = camera_config['ffmpeg']
-            ffmpeg_input = ffmpeg['input'].format(**frigate_env_vars)
-            ffmpeg_global_args = ffmpeg.get('global_args', self.config['ffmpeg']['global_args'])
-            ffmpeg_hwaccel_args = ffmpeg.get('hwaccel_args', self.config['ffmpeg']['hwaccel_args'])
-            ffmpeg_input_args = ffmpeg.get('input_args', self.config['ffmpeg']['input_args'])
-            ffmpeg_output_args = ffmpeg.get('output_args', self.config['ffmpeg']['output_args'])
-            if not camera_config.get('fps') is None:
-                ffmpeg_output_args = ["-r", str(camera_config['fps'])] + ffmpeg_output_args
-            if camera_config['save_clips']['enabled']:
-                ffmpeg_output_args = [
-                    "-f",
-                    "segment",
-                    "-segment_time",
-                    "10",
-                    "-segment_format",
-                    "mp4",
-                    "-reset_timestamps",
-                    "1",
-                    "-strftime",
-                    "1",
-                    "-c",
-                    "copy",
-                    "-an",
-                    "-map",
-                    "0",
-                    f"{os.path.join(self.config['save_clips']['cache_dir'], camera_name)}-%Y%m%d%H%M%S.mp4"
-                ] + ffmpeg_output_args
-            ffmpeg_cmd = (['ffmpeg'] +
-                    ffmpeg_global_args +
-                    ffmpeg_hwaccel_args +
-                    ffmpeg_input_args +
-                    ['-i', ffmpeg_input] +
-                    ffmpeg_output_args +
-                    ['pipe:'])
-
-            camera_config['ffmpeg_cmd'] = ffmpeg_cmd
-
+        self.config = FrigateConfig(config_file=config_file)
+
+        for camera_name in self.config.cameras.keys():
             # create camera_metrics
             self.camera_metrics[camera_name] = {
                 'camera_fps': mp.Value('d', 0.0),
@@ -118,10 +49,10 @@ class FrigateApp():
         self.event_queue = mp.Queue()

         # Queue for cameras to push tracked objects to
-        self.detected_frames_queue = mp.Queue(maxsize=len(self.config['cameras'].keys())*2)
+        self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2)

     def init_database(self):
-        self.db = SqliteExtDatabase(f"/{os.path.join(self.config['save_clips']['clips_dir'], 'frigate.db')}")
+        self.db = SqliteExtDatabase(f"/{os.path.join(self.config.save_clips.clips_dir, 'frigate.db')}")
         models = [Event]
         self.db.bind(models)
         self.db.create_tables(models, safe=True)
@@ -130,38 +61,29 @@ class FrigateApp():
         self.flask_app = create_app(self.config, self.db, self.camera_metrics, self.detectors, self.detected_frames_processor)

     def init_mqtt(self):
-        # TODO: create config class
-        mqtt_config = self.config['mqtt']
-        self.mqtt_client = create_mqtt_client(
-            mqtt_config['host'],
-            mqtt_config['port'],
-            mqtt_config['topic_prefix'],
-            mqtt_config['client_id'],
-            mqtt_config.get('user'),
-            mqtt_config.get('password')
-        )
+        self.mqtt_client = create_mqtt_client(self.config.mqtt)

     def start_detectors(self):
-        for name in self.config['cameras'].keys():
+        for name in self.config.cameras.keys():
             self.detection_out_events[name] = mp.Event()
             shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=300*300*3)
             shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4)
             self.detection_shms.append(shm_in)
             self.detection_shms.append(shm_out)

-        for name, detector in self.config['detectors'].items():
-            if detector['type'] == 'cpu':
+        for name, detector in self.config.detectors.items():
+            if detector.type == 'cpu':
                 self.detectors[name] = EdgeTPUProcess(self.detection_queue, out_events=self.detection_out_events, tf_device='cpu')
-            if detector['type'] == 'edgetpu':
-                self.detectors[name] = EdgeTPUProcess(self.detection_queue, out_events=self.detection_out_events, tf_device=detector['device'])
+            if detector.type == 'edgetpu':
+                self.detectors[name] = EdgeTPUProcess(self.detection_queue, out_events=self.detection_out_events, tf_device=detector.device)

     def start_detected_frames_processor(self):
-        self.detected_frames_processor = TrackedObjectProcessor(self.config['cameras'], self.mqtt_client, self.config['mqtt']['topic_prefix'],
+        self.detected_frames_processor = TrackedObjectProcessor(self.config.cameras, self.mqtt_client, self.config.mqtt.topic_prefix,
             self.detected_frames_queue, self.event_queue, self.stop_event)
         self.detected_frames_processor.start()

     def start_camera_processors(self):
-        for name, config in self.config['cameras'].items():
+        for name, config in self.config.cameras.items():
             camera_process = mp.Process(target=track_camera, args=(name, config,
                 self.detection_queue, self.detection_out_events[name], self.detected_frames_queue,
                 self.camera_metrics[name]))
@@ -171,7 +93,7 @@ class FrigateApp():
             print(f"Camera processor started for {name}: {camera_process.pid}")

     def start_camera_capture_processes(self):
-        for name, config in self.config['cameras'].items():
+        for name, config in self.config.cameras.items():
             capture_process = mp.Process(target=capture_camera, args=(name, config,
                 self.camera_metrics[name]))
             capture_process.daemon = True
@@ -199,7 +121,7 @@ class FrigateApp():
         self.init_web_server()
         self.start_event_processor()
         self.start_watchdog()
-        self.flask_app.run(host='0.0.0.0', port=self.config['web_port'], debug=False)
+        self.flask_app.run(host='0.0.0.0', port=self.config.web_port, debug=False)

         self.stop()

     def stop(self):

frigate/config.py

@@ -1,5 +1,17 @@
+import base64
+import json
+import os
+import yaml
+from typing import Dict
+
+import cv2
+import matplotlib.pyplot as plt
+import numpy as np
 import voluptuous as vol

+from frigate.util import get_frame_shape
+
 DETECTORS_SCHEMA = vol.Schema(
     {
         vol.Required(str): {
@@ -66,12 +78,21 @@ FILTER_SCHEMA = vol.Schema(
     }
 )

-OBJECTS_SCHEMA = vol.Schema(
+def filters_for_all_tracked_objects(object_config):
+    for tracked_object in object_config.get('track', ['person']):
+        if not 'filters' in object_config:
+            object_config['filters'] = {}
+        if not tracked_object in object_config['filters']:
+            object_config['filters'][tracked_object] = {}
+    return object_config
+
+OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects,
     {
         vol.Optional('track', default=['person']): [str],
-        'filters': FILTER_SCHEMA.extend({vol.Optional('min_score', default=0.5): float})
+        # TODO: this should populate filters for all tracked objects
+        vol.Optional('filters', default = {}): FILTER_SCHEMA.extend({ str: {vol.Optional('min_score', default=0.5): float}})
     }
-)
+))

 DEFAULT_CAMERA_MQTT = {
     'crop_to_region': True
@@ -99,8 +120,8 @@ CAMERAS_SCHEMA = vol.Schema(
     {
         str: {
             vol.Required('ffmpeg'): CAMERA_FFMPEG_SCHEMA,
-            'height': int,
-            'width': int,
+            vol.Required('height'): int,
+            vol.Required('width'): int,
             'fps': int,
             'mask': str,
             vol.Optional('best_image_timeout', default=60): int,
@@ -140,3 +161,438 @@ FRIGATE_CONFIG_SCHEMA = vol.Schema(
         vol.Required('cameras', default={}): CAMERAS_SCHEMA
     }
 )
+
+class DetectorConfig():
+    def __init__(self, config):
+        self._type = config['type']
+        self._device = config['device']
+
+    @property
+    def type(self):
+        return self._type
+
+    @property
+    def device(self):
+        return self._device
+
+class MqttConfig():
+    def __init__(self, config):
+        self._host = config['host']
+        self._port = config['port']
+        self._topic_prefix = config['topic_prefix']
+        self._client_id = config['client_id']
+        self._user = config.get('user')
+        self._password = config.get('password')
+
+    @property
+    def host(self):
+        return self._host
+
+    @property
+    def port(self):
+        return self._port
+
+    @property
+    def topic_prefix(self):
+        return self._topic_prefix
+
+    @property
+    def client_id(self):
+        return self._client_id
+
+    @property
+    def user(self):
+        return self._user
+
+    @property
+    def password(self):
+        return self._password
+
+class SaveClipsConfig():
+    def __init__(self, config):
+        self._max_seconds = config['max_seconds']
+        self._clips_dir = config['clips_dir']
+        self._cache_dir = config['cache_dir']
+
+    @property
+    def max_seconds(self):
+        return self._max_seconds
+
+    @property
+    def clips_dir(self):
+        return self._clips_dir
+
+    @property
+    def cache_dir(self):
+        return self._cache_dir
+
+class FfmpegConfig():
+    def __init__(self, global_config, config):
+        self._input = config.get('input')
+        self._global_args = config.get('global_args', global_config['global_args'])
+        self._hwaccel_args = config.get('hwaccel_args', global_config['hwaccel_args'])
+        self._input_args = config.get('input_args', global_config['input_args'])
+        self._output_args = config.get('output_args', global_config['output_args'])
+
+    @property
+    def input(self):
+        return self._input
+
+    @property
+    def global_args(self):
+        return self._global_args
+
+    @property
+    def hwaccel_args(self):
+        return self._hwaccel_args
+
+    @property
+    def input_args(self):
+        return self._input_args
+
+    @property
+    def output_args(self):
+        return self._output_args
+
+class FilterConfig():
+    def __init__(self, config):
+        self._min_area = config['min_area']
+        self._max_area = config['max_area']
+        self._threshold = config['threshold']
+        self._min_score = config.get('min_score')
+
+    @property
+    def min_area(self):
+        return self._min_area
+
+    @property
+    def max_area(self):
+        return self._max_area
+
+    @property
+    def threshold(self):
+        return self._threshold
+
+    @property
+    def min_score(self):
+        return self._min_score
+
+class ObjectConfig():
+    def __init__(self, global_config, config):
+        self._track = config.get('track', global_config['track'])
+        if 'filters' in config:
+            self._filters = { name: FilterConfig(c) for name, c in config['filters'].items() }
+        else:
+            self._filters = { name: FilterConfig(c) for name, c in global_config['filters'].items() }
+
+    @property
+    def track(self):
+        return self._track
+
+    @property
+    def filters(self) -> Dict[str, FilterConfig]:
+        return self._filters
+
+class CameraSnapshotsConfig():
+    def __init__(self, config):
+        self._show_timestamp = config['show_timestamp']
+        self._draw_zones = config['draw_zones']
+        self._draw_bounding_boxes = config['draw_bounding_boxes']
+
+    @property
+    def show_timestamp(self):
+        return self._show_timestamp
+
+    @property
+    def draw_zones(self):
+        return self._draw_zones
+
+    @property
+    def draw_bounding_boxes(self):
+        return self._draw_bounding_boxes
+
+class CameraSaveClipsConfig():
+    def __init__(self, config):
+        self._enabled = config['enabled']
+        self._pre_capture = config['pre_capture']
+        self._objects = config.get('objects')
+
+    @property
+    def enabled(self):
+        return self._enabled
+
+    @property
+    def pre_capture(self):
+        return self._pre_capture
+
+    @property
+    def objects(self):
+        return self._objects
+
+class CameraMqttConfig():
+    def __init__(self, config):
+        self._crop_to_region = config['crop_to_region']
+        self._snapshot_height = config.get('snapshot_height')
+
+    @property
+    def crop_to_region(self):
+        return self._crop_to_region
+
+    @property
+    def snapshot_height(self):
+        return self._snapshot_height
+
+class ZoneConfig():
+    def __init__(self, name, config):
+        self._coordinates = config['coordinates']
+        self._filters = { name: FilterConfig(c) for name, c in config['filters'].items() }
+
+        if isinstance(self._coordinates, list):
+            self._contour = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in self._coordinates])
+        elif isinstance(self._coordinates, str):
+            points = self._coordinates.split(',')
+            self._contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+        else:
+            print(f"Unable to parse zone coordinates for {name}")
+            self._contour = np.array([])
+
+        self._color = (0,0,0)
+
+    @property
+    def coordinates(self):
+        return self._coordinates
+
+    @property
+    def contour(self):
+        return self._contour
+
+    @contour.setter
+    def contour(self, val):
+        self._contour = val
+
+    @property
+    def color(self):
+        return self._color
+
+    @color.setter
+    def color(self, val):
+        self._color = val
+
+    @property
+    def filters(self):
+        return self._filters
+
+class CameraConfig():
+    def __init__(self, name, config, cache_dir, global_ffmpeg, global_objects):
+        self._name = name
+        self._ffmpeg = FfmpegConfig(global_ffmpeg, config['ffmpeg'])
+        self._height = config.get('height')
+        self._width = config.get('width')
+        self._frame_shape = (self._height, self._width)
+        self._frame_shape_yuv = (self._frame_shape[0]*3//2, self._frame_shape[1])
+        self._fps = config.get('fps')
+        self._mask = self._create_mask(config.get('mask'))
+        self._best_image_timeout = config['best_image_timeout']
+        self._mqtt = CameraMqttConfig(config['mqtt'])
+        self._zones = { name: ZoneConfig(name, z) for name, z in config['zones'].items() }
+        self._save_clips = CameraSaveClipsConfig(config['save_clips'])
+        self._snapshots = CameraSnapshotsConfig(config['snapshots'])
+        self._objects = ObjectConfig(global_objects, config.get('objects', {}))
+
+        self._ffmpeg_cmd = self._get_ffmpeg_cmd(cache_dir)
+
+        self._set_zone_colors(self._zones)
+
+    def _create_mask(self, mask):
+        if mask:
+            if mask.startswith('base64,'):
+                img = base64.b64decode(mask[7:])
+                np_img = np.fromstring(img, dtype=np.uint8)
+                mask_img = cv2.imdecode(np_img, cv2.IMREAD_GRAYSCALE)
+            elif mask.startswith('poly,'):
+                points = mask.split(',')[1:]
+                contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
+                mask_img = np.zeros(self.frame_shape, np.uint8)
+                mask_img[:] = 255
+                cv2.fillPoly(mask_img, pts=[contour], color=(0))
+            else:
+                mask_img = cv2.imread(f"/config/{mask}", cv2.IMREAD_GRAYSCALE)
+        else:
+            mask_img = None
+
+        if mask_img is None or mask_img.size == 0:
+            mask_img = np.zeros(self.frame_shape, np.uint8)
+            mask_img[:] = 255
+
+        return mask_img
+
+    def _get_ffmpeg_cmd(self, cache_dir):
+        ffmpeg_output_args = self.ffmpeg.output_args
+        if self.fps:
+            ffmpeg_output_args = ["-r", str(self.fps)] + ffmpeg_output_args
+        if self.save_clips.enabled:
+            ffmpeg_output_args = [
+                "-f",
+                "segment",
+                "-segment_time",
+                "10",
+                "-segment_format",
+                "mp4",
+                "-reset_timestamps",
+                "1",
+                "-strftime",
+                "1",
+                "-c",
+                "copy",
+                "-an",
+                f"{os.path.join(cache_dir, self.name)}-%Y%m%d%H%M%S.mp4"
+            ] + ffmpeg_output_args
+        return (['ffmpeg'] +
+                self.ffmpeg.global_args +
+                self.ffmpeg.hwaccel_args +
+                self.ffmpeg.input_args +
+                ['-i', self.ffmpeg.input] +
+                ffmpeg_output_args +
+                ['pipe:'])
+
+    def _set_zone_colors(self, zones: Dict[str, ZoneConfig]):
+        # set colors for zones
+        all_zone_names = zones.keys()
+        zone_colors = {}
+        colors = plt.cm.get_cmap('tab10', len(all_zone_names))
+        for i, zone in enumerate(all_zone_names):
+            zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
+
+        for name, zone in zones.items():
+            zone.color = zone_colors[name]
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def ffmpeg(self):
+        return self._ffmpeg
+
+    @property
+    def height(self):
+        return self._height
+
+    @property
+    def width(self):
+        return self._width
+
+    @property
+    def fps(self):
+        return self._fps
+
+    @property
+    def mask(self):
+        return self._mask
+
+    @property
+    def best_image_timeout(self):
+        return self._best_image_timeout
+
+    @property
+    def mqtt(self):
+        return self._mqtt
+
+    @property
+    def zones(self) -> Dict[str, ZoneConfig]:
+        return self._zones
+
+    @property
+    def save_clips(self):
+        return self._save_clips
+
+    @property
+    def snapshots(self):
+        return self._snapshots
+
+    @property
+    def objects(self):
+        return self._objects
+
+    @property
+    def frame_shape(self):
+        return self._frame_shape
+
+    @property
+    def frame_shape_yuv(self):
+        return self._frame_shape_yuv
+
+    @property
+    def ffmpeg_cmd(self):
+        return self._ffmpeg_cmd
+
+class FrigateConfig():
+    def __init__(self, config_file=None, config=None):
+        if config is None and config_file is None:
+            raise ValueError('config or config_file must be defined')
+        elif not config_file is None:
+            config = self._load_file(config_file)
+
+        config = FRIGATE_CONFIG_SCHEMA(config)
+
+        config = self._sub_env_vars(config)
+
+        self._web_port = config['web_port']
+        self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() }
+        self._mqtt = MqttConfig(config['mqtt'])
+        self._save_clips = SaveClipsConfig(config['save_clips'])
+        self._cameras = { name: CameraConfig(name, c, self._save_clips.cache_dir, config['ffmpeg'], config['objects']) for name, c in config['cameras'].items() }
+
+        self._ensure_dirs()
+
+    def _sub_env_vars(self, config):
+        frigate_env_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
+
+        if 'password' in config['mqtt']:
+            config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars)
+
+        for camera in config['cameras'].values():
+            camera['ffmpeg']['input'] = camera['ffmpeg']['input'].format(**frigate_env_vars)
+
+        return config
+
+    def _ensure_dirs(self):
+        cache_dir = self.save_clips.cache_dir
+        clips_dir = self.save_clips.clips_dir
+
+        if not os.path.exists(cache_dir) and not os.path.islink(cache_dir):
+            os.makedirs(cache_dir)
+        if not os.path.exists(clips_dir) and not os.path.islink(clips_dir):
+            os.makedirs(clips_dir)
+
+    def _load_file(self, config_file):
+        with open(config_file) as f:
+            raw_config = f.read()
+
+        if config_file.endswith(".yml"):
+            config = yaml.safe_load(raw_config)
+        elif config_file.endswith(".json"):
+            config = json.loads(raw_config)
+
+        return config
+
+    @property
+    def web_port(self):
+        return self._web_port
+
+    @property
+    def detectors(self) -> Dict[str, DetectorConfig]:
+        return self._detectors
+
+    @property
+    def mqtt(self):
+        return self._mqtt
+
+    @property
+    def save_clips(self):
+        return self._save_clips
+
+    @property
+    def cameras(self) -> Dict[str, CameraConfig]:
+        return self._cameras
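
For illustration, a minimal sketch of how the new FrigateConfig is constructed and consumed, mirroring the tests added later in this commit; the camera name and RTSP URL are placeholders, and note the constructor also runs schema validation, substitutes FRIGATE_ env vars, and creates the clips/cache directories:

    from frigate.config import FrigateConfig

    config = FrigateConfig(config={
        'mqtt': {'host': 'mqtt'},
        'cameras': {
            'back': {
                'ffmpeg': {'input': 'rtsp://10.0.0.1:554/video'},
                'height': 1080,
                'width': 1920
            }
        }
    })

    config.mqtt.host                      # 'mqtt' (attribute access replaces dict lookups)
    config.cameras['back'].frame_shape    # (1080, 1920)
    config.cameras['back'].objects.track  # ['person'] by default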

frigate/events.py

@@ -14,8 +14,8 @@ class EventProcessor(threading.Thread):
     def __init__(self, config, camera_processes, event_queue, stop_event):
         threading.Thread.__init__(self)
         self.config = config
-        self.cache_dir = self.config['save_clips']['cache_dir']
-        self.clips_dir = self.config['save_clips']['clips_dir']
+        self.cache_dir = self.config.save_clips.cache_dir
+        self.clips_dir = self.config.save_clips.clips_dir
         self.camera_processes = camera_processes
         self.cached_clips = {}
         self.event_queue = event_queue
@@ -77,7 +77,7 @@ class EventProcessor(threading.Thread):
             earliest_event = datetime.datetime.now().timestamp()

         # if the earliest event exceeds the max seconds, cap it
-        max_seconds = self.config['save_clips']['max_seconds']
+        max_seconds = self.config.save_clips.max_seconds
         if datetime.datetime.now().timestamp()-earliest_event > max_seconds:
             earliest_event = datetime.datetime.now().timestamp()-max_seconds
@@ -163,15 +163,16 @@ class EventProcessor(threading.Thread):
                 self.refresh_cache()

-            save_clips_config = self.config['cameras'][camera].get('save_clips', {})
+            save_clips_config = self.config.cameras[camera].save_clips

             # if save clips is not enabled for this camera, just continue
-            if not save_clips_config.get('enabled', False):
+            if not save_clips_config.enabled:
                 continue

             # if specific objects are listed for this camera, only save clips for them
-            if 'objects' in save_clips_config:
-                if not event_data['label'] in save_clips_config['objects']:
+            # TODO: default to all tracked objects rather than checking for None
+            if save_clips_config.objects:
+                if not event_data['label'] in save_clips_config.objects:
                     continue

             if event_type == 'start':
@@ -190,7 +191,7 @@ class EventProcessor(threading.Thread):
                 )

                 if len(self.cached_clips) > 0 and not event_data['false_positive']:
-                    self.create_clip(camera, event_data, save_clips_config.get('pre_capture', 30))
+                    self.create_clip(camera, event_data, save_clips_config.pre_capture)
                 del self.events_in_process[event_data['id']]

frigate/http.py

@@ -75,7 +75,7 @@ def stats():

 @bp.route('/<camera_name>/<label>/best.jpg')
 def best(camera_name, label):
-    if camera_name in current_app.frigate_config['cameras']:
+    if camera_name in current_app.frigate_config.cameras:
         best_object = current_app.detected_frames_processor.get_best(camera_name, label)
         best_frame = best_object.get('frame')
         if best_frame is None:
@@ -103,7 +103,7 @@ def best(camera_name, label):
 def mjpeg_feed(camera_name):
     fps = int(request.args.get('fps', '3'))
     height = int(request.args.get('h', '360'))
-    if camera_name in current_app.frigate_config['cameras']:
+    if camera_name in current_app.frigate_config.cameras:
         # return a multipart response
         return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height),
                         mimetype='multipart/x-mixed-replace; boundary=frame')
@@ -112,7 +112,7 @@ def mjpeg_feed(camera_name):

 @bp.route('/<camera_name>/latest.jpg')
 def latest_frame(camera_name):
-    if camera_name in current_app.frigate_config['cameras']:
+    if camera_name in current_app.frigate_config.cameras:
         # max out at specified FPS
         frame = current_app.detected_frames_processor.get_current_frame(camera_name)
         if frame is None:

frigate/mqtt.py

@@ -1,7 +1,9 @@
 import paho.mqtt.client as mqtt

-def create_mqtt_client(host: str, port: int, client_id: str, topic_prefix: str, user: str, password: str):
-    client = mqtt.Client(client_id=client_id)
+from frigate.config import MqttConfig
+
+def create_mqtt_client(config: MqttConfig):
+    client = mqtt.Client(client_id=config.client_id)
     def on_connect(client, userdata, flags, rc):
         # TODO: use logging library
         print("On connect called")
@@ -14,11 +16,11 @@ def create_mqtt_client(host: str, port: int, client_id: str, topic_prefix: str,
             print ("MQTT Not authorized")
         else:
             print ("Unable to connect to MQTT: Connection refused. Error code: " + str(rc))
-        client.publish(topic_prefix+'/available', 'online', retain=True)
+        client.publish(config.topic_prefix+'/available', 'online', retain=True)
     client.on_connect = on_connect
-    client.will_set(topic_prefix+'/available', payload='offline', qos=1, retain=True)
-    if not user is None:
-        client.username_pw_set(user, password=password)
-    client.connect(host, port, 60)
+    client.will_set(config.topic_prefix+'/available', payload='offline', qos=1, retain=True)
+    if not config.user is None:
+        client.username_pw_set(config.user, password=config.password)
+    client.connect(config.host, config.port, 60)
     client.loop_start()
     return client
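
As a usage sketch, the caller now passes a single MqttConfig instead of six positional arguments. The broker values below are placeholders, and the MqttConfig is built directly from a plain dict here, which the schema would normally populate:

    from frigate.config import MqttConfig
    from frigate.mqtt import create_mqtt_client

    mqtt_config = MqttConfig({
        'host': 'mqtt',            # placeholder broker hostname
        'port': 1883,
        'topic_prefix': 'frigate',
        'client_id': 'frigate',
    })

    client = create_mqtt_client(mqtt_config)
    client.publish(mqtt_config.topic_prefix + '/test', 'hello')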

frigate/object_processing.py

@@ -13,6 +13,7 @@ import itertools
 import matplotlib.pyplot as plt
 from frigate.util import draw_box_with_label, SharedMemoryFrameManager
 from frigate.edgetpu import load_labels
+from frigate.config import CameraConfig
 from typing import Callable, Dict
 from statistics import mean, median
@@ -33,16 +34,16 @@ def zone_filtered(obj, object_config):

     # if the min area is larger than the
     # detected object, don't add it to detected objects
-    if obj_settings.get('min_area',-1) > obj['area']:
+    if obj_settings.min_area > obj['area']:
         return True

     # if the detected object is larger than the
     # max area, don't add it to detected objects
-    if obj_settings.get('max_area', 24000000) < obj['area']:
+    if obj_settings.max_area < obj['area']:
         return True

     # if the score is lower than the threshold, skip
-    if obj_settings.get('threshold', 0) > obj['computed_score']:
+    if obj_settings.threshold > obj['computed_score']:
         return True

     return False
@@ -58,7 +59,7 @@ class CameraState():
         self.object_status = defaultdict(lambda: 'OFF')
         self.tracked_objects = {}
         self.zone_objects = defaultdict(lambda: [])
-        self._current_frame = np.zeros((self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]), np.uint8)
+        self._current_frame = np.zeros(self.config.frame_shape_yuv, np.uint8)
         self.current_frame_lock = threading.Lock()
         self.current_frame_time = 0.0
         self.previous_frame_id = None
@@ -89,14 +90,14 @@ class CameraState():
             region = obj['region']
             cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)

-        if self.config['snapshots']['show_timestamp']:
+        if self.config.snapshots.show_timestamp:
             time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
             cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)

-        if self.config['snapshots']['draw_zones']:
-            for name, zone in self.config['zones'].items():
+        if self.config.snapshots.draw_zones:
+            for name, zone in self.config.zones.items():
                 thickness = 8 if any([name in obj['zones'] for obj in tracked_objects.values()]) else 2
-                cv2.drawContours(frame_copy, [zone['contour']], -1, zone['color'], thickness)
+                cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)

         return frame_copy
@@ -105,7 +106,7 @@ class CameraState():
         if not obj.get('false_positive', True):
             return False

-        threshold = self.config['objects'].get('filters', {}).get(obj['label'], {}).get('threshold', 0.85)
+        threshold = self.config.objects.filters[obj['label']].threshold
         if obj['computed_score'] < threshold:
             return True
         return False
@@ -124,7 +125,7 @@ class CameraState():
         self.current_frame_time = frame_time
         # get the new frame and delete the old frame
         frame_id = f"{self.name}{frame_time}"
-        current_frame = self.frame_manager.get(frame_id, (self.config['frame_shape'][0]*3//2, self.config['frame_shape'][1]))
+        current_frame = self.frame_manager.get(frame_id, self.config.frame_shape_yuv)

         current_ids = tracked_objects.keys()
         previous_ids = self.tracked_objects.keys()
@@ -184,12 +185,12 @@ class CameraState():
             current_zones = []
             bottom_center = (obj['centroid'][0], obj['box'][3])
             # check each zone
-            for name, zone in self.config['zones'].items():
-                contour = zone['contour']
+            for name, zone in self.config.zones.items():
+                contour = zone.contour
                 # check if the object is in the zone
                 if (cv2.pointPolygonTest(contour, bottom_center, False) >= 0):
                     # if the object passed the filters once, dont apply again
-                    if name in obj.get('zones', []) or not zone_filtered(obj, zone.get('filters', {})):
+                    if name in obj.get('zones', []) or not zone_filtered(obj, zone.filters):
                         current_zones.append(name)
                         obj['entered_zones'].add(name)
@@ -208,7 +209,7 @@ class CameraState():
                 now = datetime.datetime.now().timestamp()
                 # if the object is a higher score than the current best score
                 # or the current object is older than desired, use the new object
-                if obj_copy['score'] > current_best['score'] or (now - current_best['frame_time']) > self.config.get('best_image_timeout', 60):
+                if obj_copy['score'] > current_best['score'] or (now - current_best['frame_time']) > self.config.best_image_timeout:
                     obj_copy['frame'] = np.copy(current_frame)
                     self.best_objects[object_type] = obj_copy
                     for c in self.callbacks['snapshot']:
@@ -249,7 +250,7 @@ class CameraState():
             self.previous_frame_id = frame_id

 class TrackedObjectProcessor(threading.Thread):
-    def __init__(self, camera_config, client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
+    def __init__(self, camera_config: Dict[str, CameraConfig], client, topic_prefix, tracked_objects_queue, event_queue, stop_event):
         threading.Thread.__init__(self)
         self.camera_config = camera_config
         self.client = client
@@ -296,22 +297,22 @@ class TrackedObjectProcessor(threading.Thread):
                 return

             best_frame = cv2.cvtColor(obj['frame'], cv2.COLOR_YUV2BGR_I420)
-            if self.camera_config[camera]['snapshots']['draw_bounding_boxes']:
+            if self.camera_config[camera].snapshots.draw_bounding_boxes:
                 thickness = 2
                 color = COLOR_MAP[obj['label']]
                 box = obj['box']
                 draw_box_with_label(best_frame, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)

-            mqtt_config = self.camera_config[camera].get('mqtt', {'crop_to_region': False})
-            if mqtt_config.get('crop_to_region'):
+            mqtt_config = self.camera_config[camera].mqtt
+            if mqtt_config.crop_to_region:
                 region = obj['region']
                 best_frame = best_frame[region[1]:region[3], region[0]:region[2]]
-            if 'snapshot_height' in mqtt_config:
-                height = int(mqtt_config['snapshot_height'])
+            if mqtt_config.snapshot_height:
+                height = mqtt_config.snapshot_height
                 width = int(height*best_frame.shape[1]/best_frame.shape[0])
                 best_frame = cv2.resize(best_frame, dsize=(width, height), interpolation=cv2.INTER_AREA)

-            if self.camera_config[camera]['snapshots']['show_timestamp']:
+            if self.camera_config[camera].snapshots.show_timestamp:
                 time_to_show = datetime.datetime.fromtimestamp(obj['frame_time']).strftime("%m/%d/%Y %H:%M:%S")
                 size = cv2.getTextSize(time_to_show, cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, thickness=2)
                 text_width = size[0][0]
@@ -351,26 +352,6 @@ class TrackedObjectProcessor(threading.Thread):
         #       }
        #     }
         self.zone_data = defaultdict(lambda: defaultdict(lambda: set()))

-        # set colors for zones
-        all_zone_names = set([zone for config in self.camera_config.values() for zone in config['zones'].keys()])
-        zone_colors = {}
-        colors = plt.cm.get_cmap('tab10', len(all_zone_names))
-        for i, zone in enumerate(all_zone_names):
-            zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3])
-
-        # create zone contours
-        for camera_config in self.camera_config.values():
-            for zone_name, zone_config in camera_config['zones'].items():
-                zone_config['color'] = zone_colors[zone_name]
-                coordinates = zone_config['coordinates']
-                if isinstance(coordinates, list):
-                    zone_config['contour'] = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in coordinates])
-                elif isinstance(coordinates, str):
-                    points = coordinates.split(',')
-                    zone_config['contour'] = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
-                else:
-                    print(f"Unable to parse zone coordinates for {zone_name} - {camera}")
-
     def get_best(self, camera, label):
         best_objects = self.camera_states[camera].best_objects
@@ -398,7 +379,7 @@ class TrackedObjectProcessor(threading.Thread):
             camera_state.update(frame_time, current_tracked_objects)

             # update zone status for each label
-            for zone in camera_state.config['zones'].keys():
+            for zone in camera_state.config.zones.keys():
                 # get labels for current camera and all labels in current zone
                 labels_for_camera = set([obj['label'] for obj in camera_state.tracked_objects.values() if zone in obj['zones'] and not obj['false_positive']])
                 labels_to_check = labels_for_camera | set(self.zone_data[zone].keys())
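
For context, a small sketch of how the typed filters now flow into the checks above. The filter values and object fields below are illustrative only; in practice FRIGATE_CONFIG_SCHEMA fills the defaults and the filters dict is keyed by the object's label:

    from frigate.config import FilterConfig

    filters = {'person': FilterConfig({
        'min_area': 500,          # hypothetical values
        'max_area': 24000000,
        'threshold': 0.85,
    })}

    obj = {'label': 'person', 'area': 300, 'computed_score': 0.9}
    # with attribute access, a too-small detection is filtered out:
    assert filters['person'].min_area > obj['area']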

frigate/test/test_config.py

@@ -1,14 +1,11 @@
 import json
 from unittest import TestCase, main
 import voluptuous as vol
-from frigate.config import FRIGATE_CONFIG_SCHEMA
+from frigate.config import FRIGATE_CONFIG_SCHEMA, FrigateConfig

 class TestConfig(TestCase):
-    def test_empty(self):
-        FRIGATE_CONFIG_SCHEMA({})
-
-    def test_minimal(self):
-        minimal = {
+    def setUp(self):
+        self.minimal = {
             'mqtt': {
                 'host': 'mqtt'
             },
@@ -16,11 +13,169 @@ class TestConfig(TestCase):
             'back': {
                 'ffmpeg': {
                     'input': 'rtsp://10.0.0.1:554/video'
+                },
+                'height': 1080,
+                'width': 1920
+            }
+        }
+    }
+
+    def test_empty(self):
+        FRIGATE_CONFIG_SCHEMA({})
+
+    def test_minimal(self):
+        FRIGATE_CONFIG_SCHEMA(self.minimal)
+
+    def test_config_class(self):
+        FrigateConfig(config=self.minimal)
+
+    def test_inherit_tracked_objects(self):
+        config = {
+            'mqtt': {
+                'host': 'mqtt'
+            },
+            'objects': {
+                'track': ['person', 'dog']
+            },
+            'cameras': {
+                'back': {
+                    'ffmpeg': {
+                        'input': 'rtsp://10.0.0.1:554/video'
+                    },
+                    'height': 1080,
+                    'width': 1920
+                }
+            }
+        }
+        frigate_config = FrigateConfig(config=config)
+        assert('dog' in frigate_config.cameras['back'].objects.track)
+
+    def test_override_tracked_objects(self):
+        config = {
+            'mqtt': {
+                'host': 'mqtt'
+            },
+            'objects': {
+                'track': ['person', 'dog']
+            },
+            'cameras': {
+                'back': {
+                    'ffmpeg': {
+                        'input': 'rtsp://10.0.0.1:554/video'
+                    },
+                    'height': 1080,
+                    'width': 1920,
+                    'objects': {
+                        'track': ['cat']
                     }
                 }
             }
         }
-        FRIGATE_CONFIG_SCHEMA(minimal)
+        frigate_config = FrigateConfig(config=config)
+        assert('cat' in frigate_config.cameras['back'].objects.track)
+
+    def test_default_object_filters(self):
+        config = {
+            'mqtt': {
+                'host': 'mqtt'
+            },
+            'objects': {
+                'track': ['person', 'dog']
+            },
+            'cameras': {
+                'back': {
+                    'ffmpeg': {
+                        'input': 'rtsp://10.0.0.1:554/video'
+                    },
+                    'height': 1080,
+                    'width': 1920
+                }
+            }
+        }
+        frigate_config = FrigateConfig(config=config)
+        assert('dog' in frigate_config.cameras['back'].objects.filters)
+
+    def test_inherit_object_filters(self):
+        config = {
+            'mqtt': {
+                'host': 'mqtt'
+            },
+            'objects': {
+                'track': ['person', 'dog'],
+                'filters': {
+                    'dog': {
+                        'threshold': 0.7
+                    }
+                }
+            },
+            'cameras': {
+                'back': {
+                    'ffmpeg': {
+                        'input': 'rtsp://10.0.0.1:554/video'
+                    },
+                    'height': 1080,
+                    'width': 1920
+                }
+            }
+        }
+        frigate_config = FrigateConfig(config=config)
+        assert('dog' in frigate_config.cameras['back'].objects.filters)
+        assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
+
+    def test_override_object_filters(self):
+        config = {
+            'mqtt': {
+                'host': 'mqtt'
+            },
+            'cameras': {
+                'back': {
+                    'ffmpeg': {
+                        'input': 'rtsp://10.0.0.1:554/video'
+                    },
+                    'height': 1080,
+                    'width': 1920,
+                    'objects': {
+                        'track': ['person', 'dog'],
+                        'filters': {
+                            'dog': {
+                                'threshold': 0.7
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        frigate_config = FrigateConfig(config=config)
+        assert('dog' in frigate_config.cameras['back'].objects.filters)
+        assert(frigate_config.cameras['back'].objects.filters['dog'].threshold == 0.7)
+
+    def test_ffmpeg_params(self):
+        config = {
+            'ffmpeg': {
+                'input_args': ['-re']
+            },
+            'mqtt': {
+                'host': 'mqtt'
+            },
+            'cameras': {
+                'back': {
+                    'ffmpeg': {
+                        'input': 'rtsp://10.0.0.1:554/video'
+                    },
+                    'height': 1080,
+                    'width': 1920,
+                    'objects': {
+                        'track': ['person', 'dog'],
+                        'filters': {
+                            'dog': {
+                                'threshold': 0.7
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        frigate_config = FrigateConfig(config=config)
+        assert('-re' in frigate_config.cameras['back'].ffmpeg_cmd)
+
 if __name__ == '__main__':
     main(verbosity=2)
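
The new tests exercise FrigateConfig directly and can be run standalone; assuming the file is importable as frigate.test.test_config, something like:

    python3 -m unittest frigate.test.test_config -v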

frigate/util.py

@@ -4,7 +4,9 @@ import time
 import signal
 import traceback
 import collections
+import json
 import numpy as np
+import subprocess as sp
 import cv2
 import threading
 import matplotlib.pyplot as plt
@@ -12,6 +14,36 @@ import hashlib
 from multiprocessing import shared_memory
 from typing import AnyStr

+def get_frame_shape(source):
+    ffprobe_cmd = " ".join([
+        'ffprobe',
+        '-v',
+        'panic',
+        '-show_error',
+        '-show_streams',
+        '-of',
+        'json',
+        '"'+source+'"'
+    ])
+    print(ffprobe_cmd)
+    p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
+    (output, err) = p.communicate()
+    p_status = p.wait()
+    info = json.loads(output)
+    print(info)
+
+    video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
+
+    if video_info['height'] != 0 and video_info['width'] != 0:
+        return (video_info['height'], video_info['width'], 3)
+
+    # fallback to using opencv if ffprobe didnt succeed
+    video = cv2.VideoCapture(source)
+    ret, frame = video.read()
+    frame_shape = frame.shape
+    video.release()
+    return frame_shape
+
 def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label, info, thickness=2, color=None, position='ul'):
     if color is None:
         color = (0,0,255)
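
For reference, a hypothetical call (the stream URL is a placeholder): ffprobe's JSON output supplies the dimensions, with an OpenCV frame read as the fallback.

    from frigate.util import get_frame_shape

    get_frame_shape('rtsp://10.0.0.1:554/video')  # e.g. (1080, 1920, 3) for a 1080p stream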

frigate/video.py

@@ -14,45 +14,12 @@
 import json
 import base64
 from typing import Dict, List
 from collections import defaultdict
+from frigate.config import CameraConfig
 from frigate.util import draw_box_with_label, yuv_region_2_rgb, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, FrameManager, SharedMemoryFrameManager
 from frigate.objects import ObjectTracker
 from frigate.edgetpu import RemoteObjectDetector
 from frigate.motion import MotionDetector

-def get_frame_shape(source):
-    ffprobe_cmd = " ".join([
-        'ffprobe',
-        '-v',
-        'panic',
-        '-show_error',
-        '-show_streams',
-        '-of',
-        'json',
-        '"'+source+'"'
-    ])
-    print(ffprobe_cmd)
-    p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True)
-    (output, err) = p.communicate()
-    p_status = p.wait()
-    info = json.loads(output)
-    print(info)
-
-    video_info = [s for s in info['streams'] if s['codec_type'] == 'video'][0]
-
-    if video_info['height'] != 0 and video_info['width'] != 0:
-        return (video_info['height'], video_info['width'], 3)
-
-    # fallback to using opencv if ffprobe didnt succeed
-    video = cv2.VideoCapture(source)
-    ret, frame = video.read()
-    frame_shape = frame.shape
-    video.release()
-    return frame_shape
-
-def get_ffmpeg_input(ffmpeg_input):
-    frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
-    return ffmpeg_input.format(**frigate_vars)
-
 def filtered(obj, objects_to_track, object_filters, mask=None):
     object_name = obj[0]
@@ -64,16 +31,16 @@ def filtered(obj, objects_to_track, object_filters, mask=None):

     # if the min area is larger than the
     # detected object, don't add it to detected objects
-    if obj_settings.get('min_area',-1) > obj[3]:
+    if obj_settings.min_area > obj[3]:
         return True

     # if the detected object is larger than the
     # max area, don't add it to detected objects
-    if obj_settings.get('max_area', 24000000) < obj[3]:
+    if obj_settings.max_area < obj[3]:
         return True

     # if the score is lower than the min_score, skip
-    if obj_settings.get('min_score', 0) > obj[1]:
+    if obj_settings.min_score > obj[1]:
         return True

     # compute the coordinates of the object and make sure
@@ -118,7 +85,7 @@ def start_or_restart_ffmpeg(ffmpeg_cmd, frame_size, ffmpeg_process=None):

 def capture_frames(ffmpeg_process, camera_name, frame_shape, frame_manager: FrameManager,
     frame_queue, fps:mp.Value, skipped_fps: mp.Value, current_frame: mp.Value):

-    frame_size = frame_shape[0] * frame_shape[1] * 3 // 2
+    frame_size = frame_shape[0] * frame_shape[1]
     frame_rate = EventsPerSecond()
     frame_rate.start()
     skipped_eps = EventsPerSecond()
@@ -166,8 +133,8 @@ class CameraWatchdog(threading.Thread):
         self.camera_fps = camera_fps
         self.ffmpeg_pid = ffmpeg_pid
         self.frame_queue = frame_queue
-        self.frame_shape = self.config['frame_shape']
-        self.frame_size = self.frame_shape[0] * self.frame_shape[1] * 3 // 2
+        self.frame_shape = self.config.frame_shape_yuv
+        self.frame_size = self.frame_shape[0] * self.frame_shape[1]

     def run(self):
         self.start_ffmpeg()
@@ -192,7 +159,7 @@ class CameraWatchdog(threading.Thread):
             time.sleep(10)

     def start_ffmpeg(self):
-        self.ffmpeg_process = start_or_restart_ffmpeg(self.config['ffmpeg_cmd'], self.frame_size)
+        self.ffmpeg_process = start_or_restart_ffmpeg(self.config.ffmpeg_cmd, self.frame_size)
         self.ffmpeg_pid.value = self.ffmpeg_process.pid
         self.capture_thread = CameraCapture(self.name, self.ffmpeg_process, self.frame_shape, self.frame_queue,
             self.camera_fps)
@@ -203,7 +170,6 @@ class CameraCapture(threading.Thread):
         threading.Thread.__init__(self)
         self.name = name
         self.frame_shape = frame_shape
-        self.frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
         self.frame_queue = frame_queue
         self.fps = fps
         self.skipped_fps = EventsPerSecond()
@@ -217,44 +183,21 @@ class CameraCapture(threading.Thread):
         capture_frames(self.ffmpeg_process, self.name, self.frame_shape, self.frame_manager, self.frame_queue,
             self.fps, self.skipped_fps, self.current_frame)

-def capture_camera(name, config, process_info):
+def capture_camera(name, config: CameraConfig, process_info):
     frame_queue = process_info['frame_queue']
     camera_watchdog = CameraWatchdog(name, config, frame_queue, process_info['camera_fps'], process_info['ffmpeg_pid'])
     camera_watchdog.start()
     camera_watchdog.join()

-def track_camera(name, config, detection_queue, result_connection, detected_objects_queue, process_info):
+def track_camera(name, config: CameraConfig, detection_queue, result_connection, detected_objects_queue, process_info):
     listen()

     frame_queue = process_info['frame_queue']

-    frame_shape = config['frame_shape']
-
-    # Merge the tracked object config with the global config
-    camera_objects_config = config.get('objects', {})
-    objects_to_track = camera_objects_config.get('track', [])
-    object_filters = camera_objects_config.get('filters', {})
-
-    # load in the mask for object detection
-    if 'mask' in config:
-        if config['mask'].startswith('base64,'):
-            img = base64.b64decode(config['mask'][7:])
-            npimg = np.fromstring(img, dtype=np.uint8)
-            mask = cv2.imdecode(npimg, cv2.IMREAD_GRAYSCALE)
-        elif config['mask'].startswith('poly,'):
-            points = config['mask'].split(',')[1:]
-            contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)])
-            mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
-            mask[:] = 255
-            cv2.fillPoly(mask, pts=[contour], color=(0))
-        else:
-            mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
-    else:
-        mask = None
-
-    if mask is None or mask.size == 0:
-        mask = np.zeros((frame_shape[0], frame_shape[1]), np.uint8)
-        mask[:] = 255
+    frame_shape = config.frame_shape
+    objects_to_track = config.objects.track
+    object_filters = config.objects.filters
+    mask = config.mask

     motion_detector = MotionDetector(frame_shape, mask, resize_factor=6)
     object_detector = RemoteObjectDetector(name, '/labelmap.txt', detection_queue, result_connection)
@@ -301,7 +244,7 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape,
     frame_manager: FrameManager, motion_detector: MotionDetector,
     object_detector: RemoteObjectDetector, object_tracker: ObjectTracker,
     detected_objects_queue: mp.Queue, process_info: Dict,
-    objects_to_track: List[str], object_filters: Dict, mask,
+    objects_to_track: List[str], object_filters, mask,
     exit_on_empty: bool = False):

     fps = process_info['process_fps']
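
The recurring `*3//2` arithmetic that this commit centralizes in CameraConfig.frame_shape_yuv comes from the I420/YUV420 pixel layout; a quick sketch of the relationship, with a hypothetical 1920x1080 camera:

    height, width = 1080, 1920                   # hypothetical camera resolution
    frame_shape = (height, width)

    # YUV420 (I420) stores a full-resolution Y plane plus quarter-resolution
    # U and V planes, so the packed buffer is 1.5x the luma height
    frame_shape_yuv = (height * 3 // 2, width)   # (1620, 1920)

    # hence the simplified frame_size in capture_frames:
    frame_size = frame_shape_yuv[0] * frame_shape_yuv[1]   # == height * width * 3 // 2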