diff --git a/frigate/__main__.py b/frigate/__main__.py index 2524c621a..0d9a3dbe2 100644 --- a/frigate/__main__.py +++ b/frigate/__main__.py @@ -1,4 +1,6 @@ -import faulthandler; faulthandler.enable() +import faulthandler + +faulthandler.enable() import sys import threading @@ -6,10 +8,10 @@ threading.current_thread().name = "frigate" from frigate.app import FrigateApp -cli = sys.modules['flask.cli'] +cli = sys.modules["flask.cli"] cli.show_server_banner = lambda *x: None -if __name__ == '__main__': +if __name__ == "__main__": frigate_app = FrigateApp() frigate_app.start() diff --git a/frigate/app.py b/frigate/app.py index 8e5178c79..1eae0d403 100644 --- a/frigate/app.py +++ b/frigate/app.py @@ -31,7 +31,8 @@ from frigate.zeroconf import broadcast_zeroconf logger = logging.getLogger(__name__) -class FrigateApp(): + +class FrigateApp: def __init__(self): self.stop_event = mp.Event() self.config: FrigateConfig = None @@ -56,60 +57,78 @@ class FrigateApp(): tmpfs_size = self.config.clips.tmpfs_cache_size if tmpfs_size: - logger.info(f"Creating tmpfs of size {tmpfs_size}") - rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}") - if rc != 0: - logger.error(f"Failed to create tmpfs, error code: {rc}") - + logger.info(f"Creating tmpfs of size {tmpfs_size}") + rc = os.system(f"mount -t tmpfs -o size={tmpfs_size} tmpfs {CACHE_DIR}") + if rc != 0: + logger.error(f"Failed to create tmpfs, error code: {rc}") + def init_logger(self): - self.log_process = mp.Process(target=log_process, args=(self.log_queue,), name='log_process') + self.log_process = mp.Process( + target=log_process, args=(self.log_queue,), name="log_process" + ) self.log_process.daemon = True self.log_process.start() root_configurer(self.log_queue) - + def init_config(self): - config_file = os.environ.get('CONFIG_FILE', '/config/config.yml') + config_file = os.environ.get("CONFIG_FILE", "/config/config.yml") self.config = FrigateConfig(config_file=config_file) for camera_name in 
self.config.cameras.keys(): # create camera_metrics self.camera_metrics[camera_name] = { - 'camera_fps': mp.Value('d', 0.0), - 'skipped_fps': mp.Value('d', 0.0), - 'process_fps': mp.Value('d', 0.0), - 'detection_enabled': mp.Value('i', self.config.cameras[camera_name].detect.enabled), - 'detection_fps': mp.Value('d', 0.0), - 'detection_frame': mp.Value('d', 0.0), - 'read_start': mp.Value('d', 0.0), - 'ffmpeg_pid': mp.Value('i', 0), - 'frame_queue': mp.Queue(maxsize=2), + "camera_fps": mp.Value("d", 0.0), + "skipped_fps": mp.Value("d", 0.0), + "process_fps": mp.Value("d", 0.0), + "detection_enabled": mp.Value( + "i", self.config.cameras[camera_name].detect.enabled + ), + "detection_fps": mp.Value("d", 0.0), + "detection_frame": mp.Value("d", 0.0), + "read_start": mp.Value("d", 0.0), + "ffmpeg_pid": mp.Value("i", 0), + "frame_queue": mp.Queue(maxsize=2), } - + def check_config(self): for name, camera in self.config.cameras.items(): - assigned_roles = list(set([r for i in camera.ffmpeg.inputs for r in i.roles])) - if not camera.clips.enabled and 'clips' in assigned_roles: - logger.warning(f"Camera {name} has clips assigned to an input, but clips is not enabled.") - elif camera.clips.enabled and not 'clips' in assigned_roles: - logger.warning(f"Camera {name} has clips enabled, but clips is not assigned to an input.") + assigned_roles = list( + set([r for i in camera.ffmpeg.inputs for r in i.roles]) + ) + if not camera.clips.enabled and "clips" in assigned_roles: + logger.warning( + f"Camera {name} has clips assigned to an input, but clips is not enabled." + ) + elif camera.clips.enabled and not "clips" in assigned_roles: + logger.warning( + f"Camera {name} has clips enabled, but clips is not assigned to an input." 
+ ) - if not camera.record.enabled and 'record' in assigned_roles: - logger.warning(f"Camera {name} has record assigned to an input, but record is not enabled.") - elif camera.record.enabled and not 'record' in assigned_roles: - logger.warning(f"Camera {name} has record enabled, but record is not assigned to an input.") + if not camera.record.enabled and "record" in assigned_roles: + logger.warning( + f"Camera {name} has record assigned to an input, but record is not enabled." + ) + elif camera.record.enabled and not "record" in assigned_roles: + logger.warning( + f"Camera {name} has record enabled, but record is not assigned to an input." + ) + + if not camera.rtmp.enabled and "rtmp" in assigned_roles: + logger.warning( + f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled." + ) + elif camera.rtmp.enabled and not "rtmp" in assigned_roles: + logger.warning( + f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input." + ) - if not camera.rtmp.enabled and 'rtmp' in assigned_roles: - logger.warning(f"Camera {name} has rtmp assigned to an input, but rtmp is not enabled.") - elif camera.rtmp.enabled and not 'rtmp' in assigned_roles: - logger.warning(f"Camera {name} has rtmp enabled, but rtmp is not assigned to an input.") - def set_log_levels(self): logging.getLogger().setLevel(self.config.logger.default) for log, level in self.config.logger.logs.items(): logging.getLogger(log).setLevel(level) - - if not 'geventwebsocket.handler' in self.config.logger.logs: - logging.getLogger('geventwebsocket.handler').setLevel('ERROR') + + if not "geventwebsocket.handler" in self.config.logger.logs: + logging.getLogger("geventwebsocket.handler").setLevel("ERROR") def init_queues(self): # Queues for clip processing @@ -117,13 +136,15 @@ class FrigateApp(): self.event_processed_queue = mp.Queue() # Queue for cameras to push tracked objects to - self.detected_frames_queue = mp.Queue(maxsize=len(self.config.cameras.keys())*2) + self.detected_frames_queue = 
mp.Queue( + maxsize=len(self.config.cameras.keys()) * 2 + ) def init_database(self): migrate_db = SqliteExtDatabase(self.config.database.path) # Run migrations - del(logging.getLogger('peewee_migrate').handlers[:]) + del logging.getLogger("peewee_migrate").handlers[:] router = Router(migrate_db) router.run() @@ -137,7 +158,13 @@ class FrigateApp(): self.stats_tracking = stats_init(self.camera_metrics, self.detectors) def init_web_server(self): - self.flask_app = create_app(self.config, self.db, self.stats_tracking, self.detected_frames_processor, self.mqtt_client) + self.flask_app = create_app( + self.config, + self.db, + self.stats_tracking, + self.detected_frames_processor, + self.mqtt_client, + ) def init_mqtt(self): self.mqtt_client = create_mqtt_client(self.config, self.camera_metrics) @@ -146,56 +173,108 @@ class FrigateApp(): model_shape = (self.config.model.height, self.config.model.width) for name in self.config.cameras.keys(): self.detection_out_events[name] = mp.Event() - shm_in = mp.shared_memory.SharedMemory(name=name, create=True, size=self.config.model.height*self.config.model.width*3) - shm_out = mp.shared_memory.SharedMemory(name=f"out-{name}", create=True, size=20*6*4) + shm_in = mp.shared_memory.SharedMemory( + name=name, + create=True, + size=self.config.model.height * self.config.model.width * 3, + ) + shm_out = mp.shared_memory.SharedMemory( + name=f"out-{name}", create=True, size=20 * 6 * 4 + ) self.detection_shms.append(shm_in) self.detection_shms.append(shm_out) for name, detector in self.config.detectors.items(): - if detector.type == 'cpu': - self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, 'cpu', detector.num_threads) - if detector.type == 'edgetpu': - self.detectors[name] = EdgeTPUProcess(name, self.detection_queue, self.detection_out_events, model_shape, detector.device, detector.num_threads) + if detector.type == "cpu": + self.detectors[name] = EdgeTPUProcess( + name, + 
self.detection_queue, + self.detection_out_events, + model_shape, + "cpu", + detector.num_threads, + ) + if detector.type == "edgetpu": + self.detectors[name] = EdgeTPUProcess( + name, + self.detection_queue, + self.detection_out_events, + model_shape, + detector.device, + detector.num_threads, + ) def start_detected_frames_processor(self): - self.detected_frames_processor = TrackedObjectProcessor(self.config, self.mqtt_client, self.config.mqtt.topic_prefix, - self.detected_frames_queue, self.event_queue, self.event_processed_queue, self.stop_event) + self.detected_frames_processor = TrackedObjectProcessor( + self.config, + self.mqtt_client, + self.config.mqtt.topic_prefix, + self.detected_frames_queue, + self.event_queue, + self.event_processed_queue, + self.stop_event, + ) self.detected_frames_processor.start() def start_camera_processors(self): model_shape = (self.config.model.height, self.config.model.width) for name, config in self.config.cameras.items(): - camera_process = mp.Process(target=track_camera, name=f"camera_processor:{name}", args=(name, config, model_shape, - self.detection_queue, self.detection_out_events[name], self.detected_frames_queue, - self.camera_metrics[name])) + camera_process = mp.Process( + target=track_camera, + name=f"camera_processor:{name}", + args=( + name, + config, + model_shape, + self.detection_queue, + self.detection_out_events[name], + self.detected_frames_queue, + self.camera_metrics[name], + ), + ) camera_process.daemon = True - self.camera_metrics[name]['process'] = camera_process + self.camera_metrics[name]["process"] = camera_process camera_process.start() logger.info(f"Camera processor started for {name}: {camera_process.pid}") def start_camera_capture_processes(self): for name, config in self.config.cameras.items(): - capture_process = mp.Process(target=capture_camera, name=f"camera_capture:{name}", args=(name, config, - self.camera_metrics[name])) + capture_process = mp.Process( + target=capture_camera, + 
name=f"camera_capture:{name}", + args=(name, config, self.camera_metrics[name]), + ) capture_process.daemon = True - self.camera_metrics[name]['capture_process'] = capture_process + self.camera_metrics[name]["capture_process"] = capture_process capture_process.start() logger.info(f"Capture process started for {name}: {capture_process.pid}") - + def start_event_processor(self): - self.event_processor = EventProcessor(self.config, self.camera_metrics, self.event_queue, self.event_processed_queue, self.stop_event) + self.event_processor = EventProcessor( + self.config, + self.camera_metrics, + self.event_queue, + self.event_processed_queue, + self.stop_event, + ) self.event_processor.start() - + def start_event_cleanup(self): self.event_cleanup = EventCleanup(self.config, self.stop_event) self.event_cleanup.start() - + def start_recording_maintainer(self): self.recording_maintainer = RecordingMaintainer(self.config, self.stop_event) self.recording_maintainer.start() def start_stats_emitter(self): - self.stats_emitter = StatsEmitter(self.config, self.stats_tracking, self.mqtt_client, self.config.mqtt.topic_prefix, self.stop_event) + self.stats_emitter = StatsEmitter( + self.config, + self.stats_tracking, + self.mqtt_client, + self.config.mqtt.topic_prefix, + self.stop_event, + ) self.stats_emitter.start() def start_watchdog(self): @@ -238,14 +317,16 @@ class FrigateApp(): def receiveSignal(signalNumber, frame): self.stop() sys.exit() - + signal.signal(signal.SIGTERM, receiveSignal) - server = pywsgi.WSGIServer(('127.0.0.1', 5001), self.flask_app, handler_class=WebSocketHandler) + server = pywsgi.WSGIServer( + ("127.0.0.1", 5001), self.flask_app, handler_class=WebSocketHandler + ) server.serve_forever() self.stop() - + def stop(self): logger.info(f"Stopping...") self.stop_event.set() diff --git a/frigate/config.py b/frigate/config.py index 0878f3c1f..fc98d1d53 100644 --- a/frigate/config.py +++ b/frigate/config.py @@ -15,279 +15,341 @@ from frigate.util import 
create_mask logger = logging.getLogger(__name__) -DEFAULT_TRACKED_OBJECTS = ['person'] +DEFAULT_TRACKED_OBJECTS = ["person"] DETECTORS_SCHEMA = vol.Schema( { vol.Required(str): { - vol.Required('type', default='edgetpu'): vol.In(['cpu', 'edgetpu']), - vol.Optional('device', default='usb'): str, - vol.Optional('num_threads', default=3): int + vol.Required("type", default="edgetpu"): vol.In(["cpu", "edgetpu"]), + vol.Optional("device", default="usb"): str, + vol.Optional("num_threads", default=3): int, } } ) -DEFAULT_DETECTORS = { - 'coral': { - 'type': 'edgetpu', - 'device': 'usb' - } -} +DEFAULT_DETECTORS = {"coral": {"type": "edgetpu", "device": "usb"}} MQTT_SCHEMA = vol.Schema( { - vol.Required('host'): str, - vol.Optional('port', default=1883): int, - vol.Optional('topic_prefix', default='frigate'): str, - vol.Optional('client_id', default='frigate'): str, - vol.Optional('stats_interval', default=60): int, - 'user': str, - 'password': str + vol.Required("host"): str, + vol.Optional("port", default=1883): int, + vol.Optional("topic_prefix", default="frigate"): str, + vol.Optional("client_id", default="frigate"): str, + vol.Optional("stats_interval", default=60): int, + "user": str, + "password": str, } ) RETAIN_SCHEMA = vol.Schema( - { - vol.Required('default',default=10): int, - 'objects': { - str: int - } - } + {vol.Required("default", default=10): int, "objects": {str: int}} ) CLIPS_SCHEMA = vol.Schema( { - vol.Optional('max_seconds', default=300): int, - 'tmpfs_cache_size': str, - vol.Optional('retain', default={}): RETAIN_SCHEMA + vol.Optional("max_seconds", default=300): int, + "tmpfs_cache_size": str, + vol.Optional("retain", default={}): RETAIN_SCHEMA, } ) -FFMPEG_GLOBAL_ARGS_DEFAULT = ['-hide_banner','-loglevel','warning'] -FFMPEG_INPUT_ARGS_DEFAULT = ['-avoid_negative_ts', 'make_zero', - '-fflags', '+genpts+discardcorrupt', - '-rtsp_transport', 'tcp', - '-stimeout', '5000000', - '-use_wallclock_as_timestamps', '1'] -DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = 
['-f', 'rawvideo', - '-pix_fmt', 'yuv420p'] +FFMPEG_GLOBAL_ARGS_DEFAULT = ["-hide_banner", "-loglevel", "warning"] +FFMPEG_INPUT_ARGS_DEFAULT = [ + "-avoid_negative_ts", + "make_zero", + "-fflags", + "+genpts+discardcorrupt", + "-rtsp_transport", + "tcp", + "-stimeout", + "5000000", + "-use_wallclock_as_timestamps", + "1", +] +DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "rawvideo", "-pix_fmt", "yuv420p"] RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-c", "copy", "-f", "flv"] -SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time", - "10", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime", - "1", "-c", "copy", "-an"] -RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = ["-f", "segment", "-segment_time", - "60", "-segment_format", "mp4", "-reset_timestamps", "1", "-strftime", - "1", "-c", "copy", "-an"] +SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT = [ + "-f", + "segment", + "-segment_time", + "10", + "-segment_format", + "mp4", + "-reset_timestamps", + "1", + "-strftime", + "1", + "-c", + "copy", + "-an", +] +RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT = [ + "-f", + "segment", + "-segment_time", + "60", + "-segment_format", + "mp4", + "-reset_timestamps", + "1", + "-strftime", + "1", + "-c", + "copy", + "-an", +] GLOBAL_FFMPEG_SCHEMA = vol.Schema( { - vol.Optional('global_args', default=FFMPEG_GLOBAL_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('hwaccel_args', default=[]): vol.Any(str, [str]), - vol.Optional('input_args', default=FFMPEG_INPUT_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('output_args', default={}): { - vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - } + vol.Optional("global_args", default=FFMPEG_GLOBAL_ARGS_DEFAULT): vol.Any( + str, [str] + ), 
+ vol.Optional("hwaccel_args", default=[]): vol.Any(str, [str]), + vol.Optional("input_args", default=FFMPEG_INPUT_ARGS_DEFAULT): vol.Any( + str, [str] + ), + vol.Optional("output_args", default={}): { + vol.Optional("detect", default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any( + str, [str] + ), + vol.Optional("record", default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any( + str, [str] + ), + vol.Optional( + "clips", default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT + ): vol.Any(str, [str]), + vol.Optional("rtmp", default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any( + str, [str] + ), + }, } ) MOTION_SCHEMA = vol.Schema( { - 'mask': vol.Any(str, [str]), - 'threshold': vol.Range(min=1, max=255), - 'contour_area': int, - 'delta_alpha': float, - 'frame_alpha': float, - 'frame_height': int + "mask": vol.Any(str, [str]), + "threshold": vol.Range(min=1, max=255), + "contour_area": int, + "delta_alpha": float, + "frame_alpha": float, + "frame_height": int, } ) -DETECT_SCHEMA = vol.Schema( - { - 'max_disappeared': int - } -) +DETECT_SCHEMA = vol.Schema({"max_disappeared": int}) FILTER_SCHEMA = vol.Schema( { str: { - 'min_area': int, - 'max_area': int, - 'threshold': float, - } + "min_area": int, + "max_area": int, + "threshold": float, + } } ) + def filters_for_all_tracked_objects(object_config): - for tracked_object in object_config.get('track', DEFAULT_TRACKED_OBJECTS): - if not 'filters' in object_config: - object_config['filters'] = {} - if not tracked_object in object_config['filters']: - object_config['filters'][tracked_object] = {} + for tracked_object in object_config.get("track", DEFAULT_TRACKED_OBJECTS): + if not "filters" in object_config: + object_config["filters"] = {} + if not tracked_object in object_config["filters"]: + object_config["filters"][tracked_object] = {} return object_config -OBJECTS_SCHEMA = vol.Schema(vol.All(filters_for_all_tracked_objects, - { - 'track': [str], - 'mask': vol.Any(str, [str]), - vol.Optional('filters', default = {}): 
FILTER_SCHEMA.extend( - { - str: { - 'min_score': float, - 'mask': vol.Any(str, [str]), + +OBJECTS_SCHEMA = vol.Schema( + vol.All( + filters_for_all_tracked_objects, + { + "track": [str], + "mask": vol.Any(str, [str]), + vol.Optional("filters", default={}): FILTER_SCHEMA.extend( + { + str: { + "min_score": float, + "mask": vol.Any(str, [str]), } - }) - } -)) + } + ), + }, + ) +) + def each_role_used_once(inputs): - roles = [role for i in inputs for role in i['roles']] + roles = [role for i in inputs for role in i["roles"]] roles_set = set(roles) if len(roles) > len(roles_set): raise ValueError return inputs + def detect_is_required(inputs): - roles = [role for i in inputs for role in i['roles']] - if not 'detect' in roles: + roles = [role for i in inputs for role in i["roles"]] + if not "detect" in roles: raise ValueError return inputs + CAMERA_FFMPEG_SCHEMA = vol.Schema( { - vol.Required('inputs'): vol.All([{ - vol.Required('path'): str, - vol.Required('roles'): ['detect', 'clips', 'record', 'rtmp'], - 'global_args': vol.Any(str, [str]), - 'hwaccel_args': vol.Any(str, [str]), - 'input_args': vol.Any(str, [str]), - }], vol.Msg(each_role_used_once, msg="Each input role may only be used once"), - vol.Msg(detect_is_required, msg="The detect role is required")), - 'global_args': vol.Any(str, [str]), - 'hwaccel_args': vol.Any(str, [str]), - 'input_args': vol.Any(str, [str]), - 'output_args': { - vol.Optional('detect', default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('record', default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('clips', default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - vol.Optional('rtmp', default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any(str, [str]), - } + vol.Required("inputs"): vol.All( + [ + { + vol.Required("path"): str, + vol.Required("roles"): ["detect", "clips", "record", "rtmp"], + "global_args": vol.Any(str, [str]), + "hwaccel_args": vol.Any(str, [str]), + 
"input_args": vol.Any(str, [str]), + } + ], + vol.Msg(each_role_used_once, msg="Each input role may only be used once"), + vol.Msg(detect_is_required, msg="The detect role is required"), + ), + "global_args": vol.Any(str, [str]), + "hwaccel_args": vol.Any(str, [str]), + "input_args": vol.Any(str, [str]), + "output_args": { + vol.Optional("detect", default=DETECT_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any( + str, [str] + ), + vol.Optional("record", default=RECORD_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any( + str, [str] + ), + vol.Optional( + "clips", default=SAVE_CLIPS_FFMPEG_OUTPUT_ARGS_DEFAULT + ): vol.Any(str, [str]), + vol.Optional("rtmp", default=RTMP_FFMPEG_OUTPUT_ARGS_DEFAULT): vol.Any( + str, [str] + ), + }, } ) + def ensure_zones_and_cameras_have_different_names(cameras): - zones = [zone for camera in cameras.values() for zone in camera['zones'].keys()] + zones = [zone for camera in cameras.values() for zone in camera["zones"].keys()] for zone in zones: if zone in cameras.keys(): raise ValueError return cameras -CAMERAS_SCHEMA = vol.Schema(vol.All( - { - str: { - vol.Required('ffmpeg'): CAMERA_FFMPEG_SCHEMA, - vol.Required('height'): int, - vol.Required('width'): int, - 'fps': int, - vol.Optional('best_image_timeout', default=60): int, - vol.Optional('zones', default={}): { - str: { - vol.Required('coordinates'): vol.Any(str, [str]), - vol.Optional('filters', default={}): FILTER_SCHEMA - } - }, - vol.Optional('clips', default={}): { - vol.Optional('enabled', default=False): bool, - vol.Optional('pre_capture', default=5): int, - vol.Optional('post_capture', default=5): int, - vol.Optional('required_zones', default=[]): [str], - 'objects': [str], - vol.Optional('retain', default={}): RETAIN_SCHEMA, - }, - vol.Optional('record', default={}): { - 'enabled': bool, - 'retain_days': int, - }, - vol.Optional('rtmp', default={}): { - vol.Required('enabled', default=True): bool, - }, - vol.Optional('snapshots', default={}): { - vol.Optional('enabled', default=False): bool, - 
vol.Optional('timestamp', default=False): bool, - vol.Optional('bounding_box', default=False): bool, - vol.Optional('crop', default=False): bool, - vol.Optional('required_zones', default=[]): [str], - 'height': int, - vol.Optional('retain', default={}): RETAIN_SCHEMA, - }, - vol.Optional('mqtt', default={}): { - vol.Optional('enabled', default=True): bool, - vol.Optional('timestamp', default=True): bool, - vol.Optional('bounding_box', default=True): bool, - vol.Optional('crop', default=True): bool, - vol.Optional('height', default=270): int, - vol.Optional('required_zones', default=[]): [str], - }, - vol.Optional('objects', default={}): OBJECTS_SCHEMA, - vol.Optional('motion', default={}): MOTION_SCHEMA, - vol.Optional('detect', default={}): DETECT_SCHEMA.extend({ - vol.Optional('enabled', default=True): bool - }) - } - }, vol.Msg(ensure_zones_and_cameras_have_different_names, msg='Zones cannot share names with cameras')) + +CAMERAS_SCHEMA = vol.Schema( + vol.All( + { + str: { + vol.Required("ffmpeg"): CAMERA_FFMPEG_SCHEMA, + vol.Required("height"): int, + vol.Required("width"): int, + "fps": int, + vol.Optional("best_image_timeout", default=60): int, + vol.Optional("zones", default={}): { + str: { + vol.Required("coordinates"): vol.Any(str, [str]), + vol.Optional("filters", default={}): FILTER_SCHEMA, + } + }, + vol.Optional("clips", default={}): { + vol.Optional("enabled", default=False): bool, + vol.Optional("pre_capture", default=5): int, + vol.Optional("post_capture", default=5): int, + vol.Optional("required_zones", default=[]): [str], + "objects": [str], + vol.Optional("retain", default={}): RETAIN_SCHEMA, + }, + vol.Optional("record", default={}): { + "enabled": bool, + "retain_days": int, + }, + vol.Optional("rtmp", default={}): { + vol.Required("enabled", default=True): bool, + }, + vol.Optional("snapshots", default={}): { + vol.Optional("enabled", default=False): bool, + vol.Optional("timestamp", default=False): bool, + vol.Optional("bounding_box", 
default=False): bool, + vol.Optional("crop", default=False): bool, + vol.Optional("required_zones", default=[]): [str], + "height": int, + vol.Optional("retain", default={}): RETAIN_SCHEMA, + }, + vol.Optional("mqtt", default={}): { + vol.Optional("enabled", default=True): bool, + vol.Optional("timestamp", default=True): bool, + vol.Optional("bounding_box", default=True): bool, + vol.Optional("crop", default=True): bool, + vol.Optional("height", default=270): int, + vol.Optional("required_zones", default=[]): [str], + }, + vol.Optional("objects", default={}): OBJECTS_SCHEMA, + vol.Optional("motion", default={}): MOTION_SCHEMA, + vol.Optional("detect", default={}): DETECT_SCHEMA.extend( + {vol.Optional("enabled", default=True): bool} + ), + } + }, + vol.Msg( + ensure_zones_and_cameras_have_different_names, + msg="Zones cannot share names with cameras", + ), + ) ) FRIGATE_CONFIG_SCHEMA = vol.Schema( { - vol.Optional('database', default={}): { - vol.Optional('path', default=os.path.join(CLIPS_DIR, 'frigate.db')): str + vol.Optional("database", default={}): { + vol.Optional("path", default=os.path.join(CLIPS_DIR, "frigate.db")): str }, - vol.Optional('model', default={'width': 320, 'height': 320}): { - vol.Required('width'): int, - vol.Required('height'): int + vol.Optional("model", default={"width": 320, "height": 320}): { + vol.Required("width"): int, + vol.Required("height"): int, }, - vol.Optional('detectors', default=DEFAULT_DETECTORS): DETECTORS_SCHEMA, - 'mqtt': MQTT_SCHEMA, - vol.Optional('logger', default={'default': 'info', 'logs': {}}): { - vol.Optional('default', default='info'): vol.In(['info', 'debug', 'warning', 'error', 'critical']), - vol.Optional('logs', default={}): {str: vol.In(['info', 'debug', 'warning', 'error', 'critical']) } + vol.Optional("detectors", default=DEFAULT_DETECTORS): DETECTORS_SCHEMA, + "mqtt": MQTT_SCHEMA, + vol.Optional("logger", default={"default": "info", "logs": {}}): { + vol.Optional("default", default="info"): vol.In( + 
["info", "debug", "warning", "error", "critical"] + ), + vol.Optional("logs", default={}): { + str: vol.In(["info", "debug", "warning", "error", "critical"]) + }, }, - vol.Optional('snapshots', default={}): { - vol.Optional('retain', default={}): RETAIN_SCHEMA + vol.Optional("snapshots", default={}): { + vol.Optional("retain", default={}): RETAIN_SCHEMA }, - vol.Optional('clips', default={}): CLIPS_SCHEMA, - vol.Optional('record', default={}): { - vol.Optional('enabled', default=False): bool, - vol.Optional('retain_days', default=30): int, + vol.Optional("clips", default={}): CLIPS_SCHEMA, + vol.Optional("record", default={}): { + vol.Optional("enabled", default=False): bool, + vol.Optional("retain_days", default=30): int, }, - vol.Optional('ffmpeg', default={}): GLOBAL_FFMPEG_SCHEMA, - vol.Optional('objects', default={}): OBJECTS_SCHEMA, - vol.Optional('motion', default={}): MOTION_SCHEMA, - vol.Optional('detect', default={}): DETECT_SCHEMA, - vol.Required('cameras', default={}): CAMERAS_SCHEMA, - vol.Optional('environment_vars', default={}): { str: str } + vol.Optional("ffmpeg", default={}): GLOBAL_FFMPEG_SCHEMA, + vol.Optional("objects", default={}): OBJECTS_SCHEMA, + vol.Optional("motion", default={}): MOTION_SCHEMA, + vol.Optional("detect", default={}): DETECT_SCHEMA, + vol.Required("cameras", default={}): CAMERAS_SCHEMA, + vol.Optional("environment_vars", default={}): {str: str}, } ) -class DatabaseConfig(): + +class DatabaseConfig: def __init__(self, config): - self._path = config['path'] + self._path = config["path"] @property def path(self): return self._path def to_dict(self): - return { - 'path': self.path - } + return {"path": self.path} -class ModelConfig(): + +class ModelConfig: def __init__(self, config): - self._width = config['width'] - self._height = config['height'] + self._width = config["width"] + self._height = config["height"] @property def width(self): @@ -298,16 +360,14 @@ class ModelConfig(): return self._height def to_dict(self): - return 
{ - 'width': self.width, - 'height': self.height - } + return {"width": self.width, "height": self.height} -class DetectorConfig(): + +class DetectorConfig: def __init__(self, config): - self._type = config['type'] - self._device = config['device'] - self._num_threads = config['num_threads'] + self._type = config["type"] + self._device = config["device"] + self._num_threads = config["num_threads"] @property def type(self): @@ -323,15 +383,16 @@ class DetectorConfig(): def to_dict(self): return { - 'type': self.type, - 'device': self.device, - 'num_threads': self.num_threads + "type": self.type, + "device": self.device, + "num_threads": self.num_threads, } -class LoggerConfig(): + +class LoggerConfig: def __init__(self, config): - self._default = config['default'].upper() - self._logs = {k: v.upper() for k, v in config['logs'].items()} + self._default = config["default"].upper() + self._logs = {k: v.upper() for k, v in config["logs"].items()} @property def default(self): @@ -342,21 +403,19 @@ class LoggerConfig(): return self._logs def to_dict(self): - return { - 'default': self.default, - 'logs': self.logs - } + return {"default": self.default, "logs": self.logs} -class MqttConfig(): + +class MqttConfig: def __init__(self, config): - self._host = config['host'] - self._port = config['port'] - self._topic_prefix = config['topic_prefix'] - self._client_id = config['client_id'] - self._user = config.get('user') - self._password = config.get('password') - self._stats_interval = config.get('stats_interval') - + self._host = config["host"] + self._port = config["port"] + self._topic_prefix = config["topic_prefix"] + self._client_id = config["client_id"] + self._user = config.get("user") + self._password = config.get("password") + self._stats_interval = config.get("stats_interval") + @property def host(self): return self._host @@ -387,21 +446,30 @@ class MqttConfig(): def to_dict(self): return { - 'host': self.host, - 'port': self.port, - 'topic_prefix': 
self.topic_prefix, - 'client_id': self.client_id, - 'user': self.user, - 'stats_interval': self.stats_interval + "host": self.host, + "port": self.port, + "topic_prefix": self.topic_prefix, + "client_id": self.client_id, + "user": self.user, + "stats_interval": self.stats_interval, } -class CameraInput(): + +class CameraInput: def __init__(self, camera_config, global_config, ffmpeg_input): - self._path = ffmpeg_input['path'] - self._roles = ffmpeg_input['roles'] - self._global_args = ffmpeg_input.get('global_args', camera_config.get('global_args', global_config['global_args'])) - self._hwaccel_args = ffmpeg_input.get('hwaccel_args', camera_config.get('hwaccel_args', global_config['hwaccel_args'])) - self._input_args = ffmpeg_input.get('input_args', camera_config.get('input_args', global_config['input_args'])) + self._path = ffmpeg_input["path"] + self._roles = ffmpeg_input["roles"] + self._global_args = ffmpeg_input.get( + "global_args", + camera_config.get("global_args", global_config["global_args"]), + ) + self._hwaccel_args = ffmpeg_input.get( + "hwaccel_args", + camera_config.get("hwaccel_args", global_config["hwaccel_args"]), + ) + self._input_args = ffmpeg_input.get( + "input_args", camera_config.get("input_args", global_config["input_args"]) + ) @property def path(self): @@ -413,20 +481,33 @@ class CameraInput(): @property def global_args(self): - return self._global_args if isinstance(self._global_args, list) else self._global_args.split(' ') + return ( + self._global_args + if isinstance(self._global_args, list) + else self._global_args.split(" ") + ) @property def hwaccel_args(self): - return self._hwaccel_args if isinstance(self._hwaccel_args, list) else self._hwaccel_args.split(' ') + return ( + self._hwaccel_args + if isinstance(self._hwaccel_args, list) + else self._hwaccel_args.split(" ") + ) @property def input_args(self): - return self._input_args if isinstance(self._input_args, list) else self._input_args.split(' ') + return ( + self._input_args + 
if isinstance(self._input_args, list) + else self._input_args.split(" ") + ) -class CameraFfmpegConfig(): + +class CameraFfmpegConfig: def __init__(self, global_config, config): - self._inputs = [CameraInput(config, global_config, i) for i in config['inputs']] - self._output_args = config.get('output_args', global_config['output_args']) + self._inputs = [CameraInput(config, global_config, i) for i in config["inputs"]] + self._output_args = config.get("output_args", global_config["output_args"]) @property def inputs(self): @@ -434,12 +515,16 @@ class CameraFfmpegConfig(): @property def output_args(self): - return {k: v if isinstance(v, list) else v.split(' ') for k, v in self._output_args.items()} + return { + k: v if isinstance(v, list) else v.split(" ") + for k, v in self._output_args.items() + } -class RetainConfig(): + +class RetainConfig: def __init__(self, global_config, config): - self._default = config.get('default', global_config.get('default')) - self._objects = config.get('objects', global_config.get('objects', {})) + self._default = config.get("default", global_config.get("default")) + self._objects = config.get("objects", global_config.get("objects", {})) @property def default(self): @@ -450,17 +535,15 @@ class RetainConfig(): return self._objects def to_dict(self): - return { - 'default': self.default, - 'objects': self.objects - } + return {"default": self.default, "objects": self.objects} -class ClipsConfig(): + +class ClipsConfig: def __init__(self, config): - self._max_seconds = config['max_seconds'] - self._tmpfs_cache_size = config.get('tmpfs_cache_size', '').strip() - self._retain = RetainConfig(config['retain'], config['retain']) - + self._max_seconds = config["max_seconds"] + self._tmpfs_cache_size = config.get("tmpfs_cache_size", "").strip() + self._retain = RetainConfig(config["retain"], config["retain"]) + @property def max_seconds(self): return self._max_seconds @@ -475,28 +558,28 @@ class ClipsConfig(): def to_dict(self): return { - 
'max_seconds': self.max_seconds, - 'tmpfs_cache_size': self.tmpfs_cache_size, - 'retain': self.retain.to_dict() + "max_seconds": self.max_seconds, + "tmpfs_cache_size": self.tmpfs_cache_size, + "retain": self.retain.to_dict(), } -class SnapshotsConfig(): + +class SnapshotsConfig: def __init__(self, config): - self._retain = RetainConfig(config['retain'], config['retain']) + self._retain = RetainConfig(config["retain"], config["retain"]) @property def retain(self): return self._retain def to_dict(self): - return { - 'retain': self.retain.to_dict() - } + return {"retain": self.retain.to_dict()} -class RecordConfig(): + +class RecordConfig: def __init__(self, global_config, config): - self._enabled = config.get('enabled', global_config['enabled']) - self._retain_days = config.get('retain_days', global_config['retain_days']) + self._enabled = config.get("enabled", global_config["enabled"]) + self._retain_days = config.get("retain_days", global_config["retain_days"]) @property def enabled(self): @@ -508,16 +591,17 @@ class RecordConfig(): def to_dict(self): return { - 'enabled': self.enabled, - 'retain_days': self.retain_days, + "enabled": self.enabled, + "retain_days": self.retain_days, } -class FilterConfig(): + +class FilterConfig: def __init__(self, global_config, config, global_mask=None, frame_shape=None): - self._min_area = config.get('min_area', global_config.get('min_area', 0)) - self._max_area = config.get('max_area', global_config.get('max_area', 24000000)) - self._threshold = config.get('threshold', global_config.get('threshold', 0.7)) - self._min_score = config.get('min_score', global_config.get('min_score', 0.5)) + self._min_area = config.get("min_area", global_config.get("min_area", 0)) + self._max_area = config.get("max_area", global_config.get("max_area", 24000000)) + self._threshold = config.get("threshold", global_config.get("threshold", 0.7)) + self._min_score = config.get("min_score", global_config.get("min_score", 0.5)) self._raw_mask = [] if 
global_mask: @@ -526,13 +610,15 @@ class FilterConfig(): elif isinstance(global_mask, str): self._raw_mask += [global_mask] - mask = config.get('mask') + mask = config.get("mask") if mask: if isinstance(mask, list): self._raw_mask += mask elif isinstance(mask, str): self._raw_mask += [mask] - self._mask = create_mask(frame_shape, self._raw_mask) if self._raw_mask else None + self._mask = ( + create_mask(frame_shape, self._raw_mask) if self._raw_mask else None + ) @property def min_area(self): @@ -556,18 +642,29 @@ class FilterConfig(): def to_dict(self): return { - 'min_area': self.min_area, - 'max_area': self.max_area, - 'threshold': self.threshold, - 'min_score': self.min_score, - 'mask': self._raw_mask + "min_area": self.min_area, + "max_area": self.max_area, + "threshold": self.threshold, + "min_score": self.min_score, + "mask": self._raw_mask, } -class ObjectConfig(): + +class ObjectConfig: def __init__(self, global_config, config, frame_shape): - self._track = config.get('track', global_config.get('track', DEFAULT_TRACKED_OBJECTS)) - self._raw_mask = config.get('mask') - self._filters = { name: FilterConfig(global_config['filters'].get(name, {}), config['filters'].get(name, {}), self._raw_mask, frame_shape) for name in self._track } + self._track = config.get( + "track", global_config.get("track", DEFAULT_TRACKED_OBJECTS) + ) + self._raw_mask = config.get("mask") + self._filters = { + name: FilterConfig( + global_config["filters"].get(name, {}), + config["filters"].get(name, {}), + self._raw_mask, + frame_shape, + ) + for name in self._track + } @property def track(self): @@ -579,21 +676,24 @@ class ObjectConfig(): def to_dict(self): return { - 'track': self.track, - 'mask': self._raw_mask, - 'filters': { k: f.to_dict() for k, f in self.filters.items() } + "track": self.track, + "mask": self._raw_mask, + "filters": {k: f.to_dict() for k, f in self.filters.items()}, } -class CameraSnapshotsConfig(): + +class CameraSnapshotsConfig: def __init__(self, 
global_config, config): - self._enabled = config['enabled'] - self._timestamp = config['timestamp'] - self._bounding_box = config['bounding_box'] - self._crop = config['crop'] - self._height = config.get('height') - self._retain = RetainConfig(global_config['snapshots']['retain'], config['retain']) - self._required_zones = config['required_zones'] - + self._enabled = config["enabled"] + self._timestamp = config["timestamp"] + self._bounding_box = config["bounding_box"] + self._crop = config["crop"] + self._height = config.get("height") + self._retain = RetainConfig( + global_config["snapshots"]["retain"], config["retain"] + ) + self._required_zones = config["required_zones"] + @property def enabled(self): return self._enabled @@ -613,7 +713,7 @@ class CameraSnapshotsConfig(): @property def height(self): return self._height - + @property def retain(self): return self._retain @@ -621,26 +721,27 @@ class CameraSnapshotsConfig(): @property def required_zones(self): return self._required_zones - + def to_dict(self): return { - 'enabled': self.enabled, - 'timestamp': self.timestamp, - 'bounding_box': self.bounding_box, - 'crop': self.crop, - 'height': self.height, - 'retain': self.retain.to_dict(), - 'required_zones': self.required_zones + "enabled": self.enabled, + "timestamp": self.timestamp, + "bounding_box": self.bounding_box, + "crop": self.crop, + "height": self.height, + "retain": self.retain.to_dict(), + "required_zones": self.required_zones, } -class CameraMqttConfig(): + +class CameraMqttConfig: def __init__(self, config): - self._enabled = config['enabled'] - self._timestamp = config['timestamp'] - self._bounding_box = config['bounding_box'] - self._crop = config['crop'] - self._height = config.get('height') - self._required_zones = config['required_zones'] + self._enabled = config["enabled"] + self._timestamp = config["timestamp"] + self._bounding_box = config["bounding_box"] + self._crop = config["crop"] + self._height = config.get("height") + 
self._required_zones = config["required_zones"] @property def enabled(self): @@ -668,23 +769,24 @@ class CameraMqttConfig(): def to_dict(self): return { - 'enabled': self.enabled, - 'timestamp': self.timestamp, - 'bounding_box': self.bounding_box, - 'crop': self.crop, - 'height': self.height, - 'required_zones': self.required_zones + "enabled": self.enabled, + "timestamp": self.timestamp, + "bounding_box": self.bounding_box, + "crop": self.crop, + "height": self.height, + "required_zones": self.required_zones, } -class CameraClipsConfig(): + +class CameraClipsConfig: def __init__(self, global_config, config): - self._enabled = config['enabled'] - self._pre_capture = config['pre_capture'] - self._post_capture = config['post_capture'] - self._objects = config.get('objects') - self._retain = RetainConfig(global_config['clips']['retain'], config['retain']) - self._required_zones = config['required_zones'] - + self._enabled = config["enabled"] + self._pre_capture = config["pre_capture"] + self._post_capture = config["post_capture"] + self._objects = config.get("objects") + self._retain = RetainConfig(global_config["clips"]["retain"], config["retain"]) + self._required_zones = config["required_zones"] + @property def enabled(self): return self._enabled @@ -711,17 +813,18 @@ class CameraClipsConfig(): def to_dict(self): return { - 'enabled': self.enabled, - 'pre_capture': self.pre_capture, - 'post_capture': self.post_capture, - 'objects': self.objects, - 'retain': self.retain.to_dict(), - 'required_zones': self.required_zones + "enabled": self.enabled, + "pre_capture": self.pre_capture, + "post_capture": self.post_capture, + "objects": self.objects, + "retain": self.retain.to_dict(), + "required_zones": self.required_zones, } -class CameraRtmpConfig(): + +class CameraRtmpConfig: def __init__(self, global_config, config): - self._enabled = config['enabled'] + self._enabled = config["enabled"] @property def enabled(self): @@ -729,23 +832,32 @@ class CameraRtmpConfig(): def 
to_dict(self): return { - 'enabled': self.enabled, + "enabled": self.enabled, } -class MotionConfig(): + +class MotionConfig: def __init__(self, global_config, config, frame_shape): - self._raw_mask = config.get('mask') + self._raw_mask = config.get("mask") if self._raw_mask: self._mask = create_mask(frame_shape, self._raw_mask) else: default_mask = np.zeros(frame_shape, np.uint8) default_mask[:] = 255 self._mask = default_mask - self._threshold = config.get('threshold', global_config.get('threshold', 25)) - self._contour_area = config.get('contour_area', global_config.get('contour_area', 100)) - self._delta_alpha = config.get('delta_alpha', global_config.get('delta_alpha', 0.2)) - self._frame_alpha = config.get('frame_alpha', global_config.get('frame_alpha', 0.2)) - self._frame_height = config.get('frame_height', global_config.get('frame_height', frame_shape[0]//6)) + self._threshold = config.get("threshold", global_config.get("threshold", 25)) + self._contour_area = config.get( + "contour_area", global_config.get("contour_area", 100) + ) + self._delta_alpha = config.get( + "delta_alpha", global_config.get("delta_alpha", 0.2) + ) + self._frame_alpha = config.get( + "frame_alpha", global_config.get("frame_alpha", 0.2) + ) + self._frame_height = config.get( + "frame_height", global_config.get("frame_height", frame_shape[0] // 6) + ) @property def mask(self): @@ -773,20 +885,21 @@ class MotionConfig(): def to_dict(self): return { - 'mask': self._raw_mask, - 'threshold': self.threshold, - 'contour_area': self.contour_area, - 'delta_alpha': self.delta_alpha, - 'frame_alpha': self.frame_alpha, - 'frame_height': self.frame_height, + "mask": self._raw_mask, + "threshold": self.threshold, + "contour_area": self.contour_area, + "delta_alpha": self.delta_alpha, + "frame_alpha": self.frame_alpha, + "frame_height": self.frame_height, } - -class DetectConfig(): +class DetectConfig: def __init__(self, global_config, config, camera_fps): - self._enabled = config['enabled'] - 
self._max_disappeared = config.get('max_disappeared', global_config.get('max_disappeared', camera_fps*5)) + self._enabled = config["enabled"] + self._max_disappeared = config.get( + "max_disappeared", global_config.get("max_disappeared", camera_fps * 5) + ) @property def enabled(self): @@ -798,25 +911,35 @@ class DetectConfig(): def to_dict(self): return { - 'enabled': self.enabled, - 'max_disappeared': self._max_disappeared, + "enabled": self.enabled, + "max_disappeared": self._max_disappeared, } -class ZoneConfig(): + +class ZoneConfig: def __init__(self, name, config): - self._coordinates = config['coordinates'] - self._filters = { name: FilterConfig(c, c) for name, c in config['filters'].items() } + self._coordinates = config["coordinates"] + self._filters = { + name: FilterConfig(c, c) for name, c in config["filters"].items() + } if isinstance(self._coordinates, list): - self._contour = np.array([[int(p.split(',')[0]), int(p.split(',')[1])] for p in self._coordinates]) + self._contour = np.array( + [ + [int(p.split(",")[0]), int(p.split(",")[1])] + for p in self._coordinates + ] + ) elif isinstance(self._coordinates, str): - points = self._coordinates.split(',') - self._contour = np.array([[int(points[i]), int(points[i+1])] for i in range(0, len(points), 2)]) + points = self._coordinates.split(",") + self._contour = np.array( + [[int(points[i]), int(points[i + 1])] for i in range(0, len(points), 2)] + ) else: print(f"Unable to parse zone coordinates for {name}") self._contour = np.array([]) - self._color = (0,0,0) + self._color = (0, 0, 0) @property def coordinates(self): @@ -844,29 +967,36 @@ class ZoneConfig(): def to_dict(self): return { - 'filters': {k: f.to_dict() for k, f in self.filters.items()}, - 'coordinates': self._coordinates + "filters": {k: f.to_dict() for k, f in self.filters.items()}, + "coordinates": self._coordinates, } -class CameraConfig(): + +class CameraConfig: def __init__(self, name, config, global_config): self._name = name - 
self._ffmpeg = CameraFfmpegConfig(global_config['ffmpeg'], config['ffmpeg']) - self._height = config.get('height') - self._width = config.get('width') + self._ffmpeg = CameraFfmpegConfig(global_config["ffmpeg"], config["ffmpeg"]) + self._height = config.get("height") + self._width = config.get("width") self._frame_shape = (self._height, self._width) - self._frame_shape_yuv = (self._frame_shape[0]*3//2, self._frame_shape[1]) - self._fps = config.get('fps') - self._best_image_timeout = config['best_image_timeout'] - self._zones = { name: ZoneConfig(name, z) for name, z in config['zones'].items() } - self._clips = CameraClipsConfig(global_config, config['clips']) - self._record = RecordConfig(global_config['record'], config['record']) - self._rtmp = CameraRtmpConfig(global_config, config['rtmp']) - self._snapshots = CameraSnapshotsConfig(global_config, config['snapshots']) - self._mqtt = CameraMqttConfig(config['mqtt']) - self._objects = ObjectConfig(global_config['objects'], config.get('objects', {}), self._frame_shape) - self._motion = MotionConfig(global_config['motion'], config['motion'], self._frame_shape) - self._detect = DetectConfig(global_config['detect'], config['detect'], config.get('fps', 5)) + self._frame_shape_yuv = (self._frame_shape[0] * 3 // 2, self._frame_shape[1]) + self._fps = config.get("fps") + self._best_image_timeout = config["best_image_timeout"] + self._zones = {name: ZoneConfig(name, z) for name, z in config["zones"].items()} + self._clips = CameraClipsConfig(global_config, config["clips"]) + self._record = RecordConfig(global_config["record"], config["record"]) + self._rtmp = CameraRtmpConfig(global_config, config["rtmp"]) + self._snapshots = CameraSnapshotsConfig(global_config, config["snapshots"]) + self._mqtt = CameraMqttConfig(config["mqtt"]) + self._objects = ObjectConfig( + global_config["objects"], config.get("objects", {}), self._frame_shape + ) + self._motion = MotionConfig( + global_config["motion"], config["motion"], 
self._frame_shape + ) + self._detect = DetectConfig( + global_config["detect"], config["detect"], config.get("fps", 5) + ) self._ffmpeg_cmds = [] for ffmpeg_input in self._ffmpeg.inputs: @@ -874,51 +1004,57 @@ class CameraConfig(): if ffmpeg_cmd is None: continue - self._ffmpeg_cmds.append({ - 'roles': ffmpeg_input.roles, - 'cmd': ffmpeg_cmd - }) - + self._ffmpeg_cmds.append({"roles": ffmpeg_input.roles, "cmd": ffmpeg_cmd}) self._set_zone_colors(self._zones) def _get_ffmpeg_cmd(self, ffmpeg_input): ffmpeg_output_args = [] - if 'detect' in ffmpeg_input.roles: - ffmpeg_output_args = self.ffmpeg.output_args['detect'] + ffmpeg_output_args + ['pipe:'] + if "detect" in ffmpeg_input.roles: + ffmpeg_output_args = ( + self.ffmpeg.output_args["detect"] + ffmpeg_output_args + ["pipe:"] + ) if self.fps: ffmpeg_output_args = ["-r", str(self.fps)] + ffmpeg_output_args - if 'rtmp' in ffmpeg_input.roles and self.rtmp.enabled: - ffmpeg_output_args = self.ffmpeg.output_args['rtmp'] + [ - f"rtmp://127.0.0.1/live/{self.name}" - ] + ffmpeg_output_args - if 'clips' in ffmpeg_input.roles: - ffmpeg_output_args = self.ffmpeg.output_args['clips'] + [ - f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4" - ] + ffmpeg_output_args - if 'record' in ffmpeg_input.roles and self.record.enabled: - ffmpeg_output_args = self.ffmpeg.output_args['record'] + [ - f"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4" - ] + ffmpeg_output_args + if "rtmp" in ffmpeg_input.roles and self.rtmp.enabled: + ffmpeg_output_args = ( + self.ffmpeg.output_args["rtmp"] + + [f"rtmp://127.0.0.1/live/{self.name}"] + + ffmpeg_output_args + ) + if "clips" in ffmpeg_input.roles: + ffmpeg_output_args = ( + self.ffmpeg.output_args["clips"] + + [f"{os.path.join(CACHE_DIR, self.name)}-%Y%m%d%H%M%S.mp4"] + + ffmpeg_output_args + ) + if "record" in ffmpeg_input.roles and self.record.enabled: + ffmpeg_output_args = ( + self.ffmpeg.output_args["record"] + + [f"{os.path.join(RECORD_DIR, self.name)}-%Y%m%d%H%M%S.mp4"] + + 
ffmpeg_output_args + ) # if there arent any outputs enabled for this input if len(ffmpeg_output_args) == 0: return None - cmd = (['ffmpeg'] + - ffmpeg_input.global_args + - ffmpeg_input.hwaccel_args + - ffmpeg_input.input_args + - ['-i', ffmpeg_input.path] + - ffmpeg_output_args) + cmd = ( + ["ffmpeg"] + + ffmpeg_input.global_args + + ffmpeg_input.hwaccel_args + + ffmpeg_input.input_args + + ["-i", ffmpeg_input.path] + + ffmpeg_output_args + ) - return [part for part in cmd if part != ''] + return [part for part in cmd if part != ""] def _set_zone_colors(self, zones: Dict[str, ZoneConfig]): # set colors for zones all_zone_names = zones.keys() zone_colors = {} - colors = plt.cm.get_cmap('tab10', len(all_zone_names)) + colors = plt.cm.get_cmap("tab10", len(all_zone_names)) for i, zone in enumerate(all_zone_names): zone_colors[zone] = tuple(int(round(255 * c)) for c in colors(i)[:3]) @@ -950,7 +1086,7 @@ class CameraConfig(): return self._best_image_timeout @property - def zones(self)-> Dict[str, ZoneConfig]: + def zones(self) -> Dict[str, ZoneConfig]: return self._zones @property @@ -999,29 +1135,32 @@ class CameraConfig(): def to_dict(self): return { - 'name': self.name, - 'height': self.height, - 'width': self.width, - 'fps': self.fps, - 'best_image_timeout': self.best_image_timeout, - 'zones': {k: z.to_dict() for k, z in self.zones.items()}, - 'clips': self.clips.to_dict(), - 'record': self.record.to_dict(), - 'rtmp': self.rtmp.to_dict(), - 'snapshots': self.snapshots.to_dict(), - 'mqtt': self.mqtt.to_dict(), - 'objects': self.objects.to_dict(), - 'motion': self.motion.to_dict(), - 'detect': self.detect.to_dict(), - 'frame_shape': self.frame_shape, - 'ffmpeg_cmds': [{'roles': c['roles'], 'cmd': ' '.join(c['cmd'])} for c in self.ffmpeg_cmds], + "name": self.name, + "height": self.height, + "width": self.width, + "fps": self.fps, + "best_image_timeout": self.best_image_timeout, + "zones": {k: z.to_dict() for k, z in self.zones.items()}, + "clips": 
self.clips.to_dict(), + "record": self.record.to_dict(), + "rtmp": self.rtmp.to_dict(), + "snapshots": self.snapshots.to_dict(), + "mqtt": self.mqtt.to_dict(), + "objects": self.objects.to_dict(), + "motion": self.motion.to_dict(), + "detect": self.detect.to_dict(), + "frame_shape": self.frame_shape, + "ffmpeg_cmds": [ + {"roles": c["roles"], "cmd": " ".join(c["cmd"])} + for c in self.ffmpeg_cmds + ], } -class FrigateConfig(): +class FrigateConfig: def __init__(self, config_file=None, config=None): if config is None and config_file is None: - raise ValueError('config or config_file must be defined') + raise ValueError("config or config_file must be defined") elif not config_file is None: config = self._load_file(config_file) @@ -1029,25 +1168,33 @@ class FrigateConfig(): config = self._sub_env_vars(config) - self._database = DatabaseConfig(config['database']) - self._model = ModelConfig(config['model']) - self._detectors = { name: DetectorConfig(d) for name, d in config['detectors'].items() } - self._mqtt = MqttConfig(config['mqtt']) - self._clips = ClipsConfig(config['clips']) - self._snapshots = SnapshotsConfig(config['snapshots']) - self._cameras = { name: CameraConfig(name, c, config) for name, c in config['cameras'].items() } - self._logger = LoggerConfig(config['logger']) - self._environment_vars = config['environment_vars'] + self._database = DatabaseConfig(config["database"]) + self._model = ModelConfig(config["model"]) + self._detectors = { + name: DetectorConfig(d) for name, d in config["detectors"].items() + } + self._mqtt = MqttConfig(config["mqtt"]) + self._clips = ClipsConfig(config["clips"]) + self._snapshots = SnapshotsConfig(config["snapshots"]) + self._cameras = { + name: CameraConfig(name, c, config) for name, c in config["cameras"].items() + } + self._logger = LoggerConfig(config["logger"]) + self._environment_vars = config["environment_vars"] def _sub_env_vars(self, config): - frigate_env_vars = {k: v for k, v in os.environ.items() if 
k.startswith('FRIGATE_')} + frigate_env_vars = { + k: v for k, v in os.environ.items() if k.startswith("FRIGATE_") + } - if 'password' in config['mqtt']: - config['mqtt']['password'] = config['mqtt']['password'].format(**frigate_env_vars) + if "password" in config["mqtt"]: + config["mqtt"]["password"] = config["mqtt"]["password"].format( + **frigate_env_vars + ) - for camera in config['cameras'].values(): - for i in camera['ffmpeg']['inputs']: - i['path'] = i['path'].format(**frigate_env_vars) + for camera in config["cameras"].values(): + for i in camera["ffmpeg"]["inputs"]: + i["path"] = i["path"].format(**frigate_env_vars) return config @@ -1064,15 +1211,15 @@ class FrigateConfig(): def to_dict(self): return { - 'database': self.database.to_dict(), - 'model': self.model.to_dict(), - 'detectors': {k: d.to_dict() for k, d in self.detectors.items()}, - 'mqtt': self.mqtt.to_dict(), - 'clips': self.clips.to_dict(), - 'snapshots': self.snapshots.to_dict(), - 'cameras': {k: c.to_dict() for k, c in self.cameras.items()}, - 'logger': self.logger.to_dict(), - 'environment_vars': self._environment_vars + "database": self.database.to_dict(), + "model": self.model.to_dict(), + "detectors": {k: d.to_dict() for k, d in self.detectors.items()}, + "mqtt": self.mqtt.to_dict(), + "clips": self.clips.to_dict(), + "snapshots": self.snapshots.to_dict(), + "cameras": {k: c.to_dict() for k, c in self.cameras.items()}, + "logger": self.logger.to_dict(), + "environment_vars": self._environment_vars, } @property diff --git a/frigate/const.py b/frigate/const.py index 2ea9f9f68..64a42b11a 100644 --- a/frigate/const.py +++ b/frigate/const.py @@ -1,3 +1,3 @@ -CLIPS_DIR = '/media/frigate/clips' -RECORD_DIR = '/media/frigate/recordings' -CACHE_DIR = '/tmp/cache' \ No newline at end of file +CLIPS_DIR = "/media/frigate/clips" +RECORD_DIR = "/media/frigate/recordings" +CACHE_DIR = "/tmp/cache" diff --git a/frigate/edgetpu.py b/frigate/edgetpu.py index d65ce523b..73916bd19 100644 --- 
a/frigate/edgetpu.py +++ b/frigate/edgetpu.py @@ -1,48 +1,49 @@ import datetime -import hashlib import logging import multiprocessing as mp import os import queue -import threading import signal +import threading from abc import ABC, abstractmethod -from multiprocessing.connection import Connection -from setproctitle import setproctitle from typing import Dict import numpy as np import tflite_runtime.interpreter as tflite +from setproctitle import setproctitle from tflite_runtime.interpreter import load_delegate from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen logger = logging.getLogger(__name__) -def load_labels(path, encoding='utf-8'): - """Loads labels from file (with or without index numbers). - Args: - path: path to label file. - encoding: label file encoding. - Returns: - Dictionary mapping indices to labels. - """ - with open(path, 'r', encoding=encoding) as f: - lines = f.readlines() - if not lines: - return {} - if lines[0].split(' ', maxsplit=1)[0].isdigit(): - pairs = [line.split(' ', maxsplit=1) for line in lines] - return {int(index): label.strip() for index, label in pairs} - else: - return {index: line.strip() for index, line in enumerate(lines)} +def load_labels(path, encoding="utf-8"): + """Loads labels from file (with or without index numbers). + Args: + path: path to label file. + encoding: label file encoding. + Returns: + Dictionary mapping indices to labels. 
+ """ + with open(path, "r", encoding=encoding) as f: + lines = f.readlines() + if not lines: + return {} + + if lines[0].split(" ", maxsplit=1)[0].isdigit(): + pairs = [line.split(" ", maxsplit=1) for line in lines] + return {int(index): label.strip() for index, label in pairs} + else: + return {index: line.strip() for index, line in enumerate(lines)} + class ObjectDetector(ABC): @abstractmethod - def detect(self, tensor_input, threshold = .4): + def detect(self, tensor_input, threshold=0.4): pass + class LocalObjectDetector(ObjectDetector): def __init__(self, tf_device=None, num_threads=3, labels=None): self.fps = EventsPerSecond() @@ -57,27 +58,29 @@ class LocalObjectDetector(ObjectDetector): edge_tpu_delegate = None - if tf_device != 'cpu': + if tf_device != "cpu": try: logger.info(f"Attempting to load TPU as {device_config['device']}") - edge_tpu_delegate = load_delegate('libedgetpu.so.1.0', device_config) + edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config) logger.info("TPU found") self.interpreter = tflite.Interpreter( - model_path='/edgetpu_model.tflite', - experimental_delegates=[edge_tpu_delegate]) + model_path="/edgetpu_model.tflite", + experimental_delegates=[edge_tpu_delegate], + ) except ValueError: logger.info("No EdgeTPU detected.") raise else: self.interpreter = tflite.Interpreter( - model_path='/cpu_model.tflite', num_threads=num_threads) - + model_path="/cpu_model.tflite", num_threads=num_threads + ) + self.interpreter.allocate_tensors() self.tensor_input_details = self.interpreter.get_input_details() self.tensor_output_details = self.interpreter.get_output_details() - - def detect(self, tensor_input, threshold=.4): + + def detect(self, tensor_input, threshold=0.4): detections = [] raw_detections = self.detect_raw(tensor_input) @@ -85,28 +88,49 @@ class LocalObjectDetector(ObjectDetector): for d in raw_detections: if d[1] < threshold: break - detections.append(( - self.labels[int(d[0])], - float(d[1]), - (d[2], d[3], d[4], 
d[5]) - )) + detections.append( + (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5])) + ) self.fps.update() return detections def detect_raw(self, tensor_input): - self.interpreter.set_tensor(self.tensor_input_details[0]['index'], tensor_input) + self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input) self.interpreter.invoke() - boxes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[0]['index'])) - label_codes = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[1]['index'])) - scores = np.squeeze(self.interpreter.get_tensor(self.tensor_output_details[2]['index'])) + boxes = np.squeeze( + self.interpreter.get_tensor(self.tensor_output_details[0]["index"]) + ) + label_codes = np.squeeze( + self.interpreter.get_tensor(self.tensor_output_details[1]["index"]) + ) + scores = np.squeeze( + self.interpreter.get_tensor(self.tensor_output_details[2]["index"]) + ) - detections = np.zeros((20,6), np.float32) + detections = np.zeros((20, 6), np.float32) for i, score in enumerate(scores): - detections[i] = [label_codes[i], score, boxes[i][0], boxes[i][1], boxes[i][2], boxes[i][3]] - + detections[i] = [ + label_codes[i], + score, + boxes[i][0], + boxes[i][1], + boxes[i][2], + boxes[i][3], + ] + return detections -def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp.Event], avg_speed, start, model_shape, tf_device, num_threads): + +def run_detector( + name: str, + detection_queue: mp.Queue, + out_events: Dict[str, mp.Event], + avg_speed, + start, + model_shape, + tf_device, + num_threads, +): threading.current_thread().name = f"detector:{name}" logger = logging.getLogger(f"detector.{name}") logger.info(f"Starting detection process: {os.getpid()}") @@ -114,9 +138,10 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp. 
listen() stop_event = mp.Event() + def receiveSignal(signalNumber, frame): stop_event.set() - + signal.signal(signal.SIGTERM, receiveSignal) signal.signal(signal.SIGINT, receiveSignal) @@ -126,12 +151,9 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp. outputs = {} for name in out_events.keys(): out_shm = mp.shared_memory.SharedMemory(name=f"out-{name}", create=False) - out_np = np.ndarray((20,6), dtype=np.float32, buffer=out_shm.buf) - outputs[name] = { - 'shm': out_shm, - 'np': out_np - } - + out_np = np.ndarray((20, 6), dtype=np.float32, buffer=out_shm.buf) + outputs[name] = {"shm": out_shm, "np": out_np} + while True: if stop_event.is_set(): break @@ -140,7 +162,9 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp. connection_id = detection_queue.get(timeout=5) except queue.Empty: continue - input_frame = frame_manager.get(connection_id, (1,model_shape[0],model_shape[1],3)) + input_frame = frame_manager.get( + connection_id, (1, model_shape[0], model_shape[1], 3) + ) if input_frame is None: continue @@ -148,26 +172,35 @@ def run_detector(name: str, detection_queue: mp.Queue, out_events: Dict[str, mp. 
# detect and send the output start.value = datetime.datetime.now().timestamp() detections = object_detector.detect_raw(input_frame) - duration = datetime.datetime.now().timestamp()-start.value - outputs[connection_id]['np'][:] = detections[:] + duration = datetime.datetime.now().timestamp() - start.value + outputs[connection_id]["np"][:] = detections[:] out_events[connection_id].set() start.value = 0.0 - avg_speed.value = (avg_speed.value*9 + duration)/10 - -class EdgeTPUProcess(): - def __init__(self, name, detection_queue, out_events, model_shape, tf_device=None, num_threads=3): + avg_speed.value = (avg_speed.value * 9 + duration) / 10 + + +class EdgeTPUProcess: + def __init__( + self, + name, + detection_queue, + out_events, + model_shape, + tf_device=None, + num_threads=3, + ): self.name = name self.out_events = out_events self.detection_queue = detection_queue - self.avg_inference_speed = mp.Value('d', 0.01) - self.detection_start = mp.Value('d', 0.0) + self.avg_inference_speed = mp.Value("d", 0.01) + self.detection_start = mp.Value("d", 0.0) self.detect_process = None self.model_shape = model_shape self.tf_device = tf_device self.num_threads = num_threads self.start_or_restart() - + def stop(self): self.detect_process.terminate() logging.info("Waiting for detection process to exit gracefully...") @@ -181,11 +214,25 @@ class EdgeTPUProcess(): self.detection_start.value = 0.0 if (not self.detect_process is None) and self.detect_process.is_alive(): self.stop() - self.detect_process = mp.Process(target=run_detector, name=f"detector:{self.name}", args=(self.name, self.detection_queue, self.out_events, self.avg_inference_speed, self.detection_start, self.model_shape, self.tf_device, self.num_threads)) + self.detect_process = mp.Process( + target=run_detector, + name=f"detector:{self.name}", + args=( + self.name, + self.detection_queue, + self.out_events, + self.avg_inference_speed, + self.detection_start, + self.model_shape, + self.tf_device, + self.num_threads, + 
), + ) self.detect_process.daemon = True self.detect_process.start() -class RemoteObjectDetector(): + +class RemoteObjectDetector: def __init__(self, name, labels, detection_queue, event, model_shape): self.labels = load_labels(labels) self.name = name @@ -193,11 +240,15 @@ class RemoteObjectDetector(): self.detection_queue = detection_queue self.event = event self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False) - self.np_shm = np.ndarray((1,model_shape[0],model_shape[1],3), dtype=np.uint8, buffer=self.shm.buf) - self.out_shm = mp.shared_memory.SharedMemory(name=f"out-{self.name}", create=False) - self.out_np_shm = np.ndarray((20,6), dtype=np.float32, buffer=self.out_shm.buf) - - def detect(self, tensor_input, threshold=.4): + self.np_shm = np.ndarray( + (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf + ) + self.out_shm = mp.shared_memory.SharedMemory( + name=f"out-{self.name}", create=False + ) + self.out_np_shm = np.ndarray((20, 6), dtype=np.float32, buffer=self.out_shm.buf) + + def detect(self, tensor_input, threshold=0.4): detections = [] # copy input to shared memory @@ -213,14 +264,12 @@ class RemoteObjectDetector(): for d in self.out_np_shm: if d[1] < threshold: break - detections.append(( - self.labels[int(d[0])], - float(d[1]), - (d[2], d[3], d[4], d[5]) - )) + detections.append( + (self.labels[int(d[0])], float(d[1]), (d[2], d[3], d[4], d[5])) + ) self.fps.update() return detections - + def cleanup(self): self.shm.unlink() self.out_shm.unlink() diff --git a/frigate/events.py b/frigate/events.py index 2430d9db3..93ea7f87a 100644 --- a/frigate/events.py +++ b/frigate/events.py @@ -20,10 +20,13 @@ from peewee import fn logger = logging.getLogger(__name__) + class EventProcessor(threading.Thread): - def __init__(self, config, camera_processes, event_queue, event_processed_queue, stop_event): + def __init__( + self, config, camera_processes, event_queue, event_processed_queue, stop_event + ): 
threading.Thread.__init__(self) - self.name = 'event_processor' + self.name = "event_processor" self.config = config self.camera_processes = camera_processes self.cached_clips = {} @@ -33,31 +36,35 @@ class EventProcessor(threading.Thread): self.stop_event = stop_event def should_create_clip(self, camera, event_data): - if event_data['false_positive']: + if event_data["false_positive"]: return False - + # if there are required zones and there is no overlap required_zones = self.config.cameras[camera].clips.required_zones - if len(required_zones) > 0 and not set(event_data['entered_zones']) & set(required_zones): - logger.debug(f"Not creating clip for {event_data['id']} because it did not enter required zones") + if len(required_zones) > 0 and not set(event_data["entered_zones"]) & set( + required_zones + ): + logger.debug( + f"Not creating clip for {event_data['id']} because it did not enter required zones" + ) return False return True - + def refresh_cache(self): cached_files = os.listdir(CACHE_DIR) files_in_use = [] for process in psutil.process_iter(): try: - if process.name() != 'ffmpeg': + if process.name() != "ffmpeg": continue flist = process.open_files() if flist: for nt in flist: if nt.path.startswith(CACHE_DIR): - files_in_use.append(nt.path.split('/')[-1]) + files_in_use.append(nt.path.split("/")[-1]) except: continue @@ -65,119 +72,154 @@ class EventProcessor(threading.Thread): if f in files_in_use or f in self.cached_clips: continue - camera = '-'.join(f.split('-')[:-1]) - start_time = datetime.datetime.strptime(f.split('-')[-1].split('.')[0], '%Y%m%d%H%M%S') - - ffprobe_cmd = " ".join([ - 'ffprobe', - '-v', - 'error', - '-show_entries', - 'format=duration', - '-of', - 'default=noprint_wrappers=1:nokey=1', - f"{os.path.join(CACHE_DIR,f)}" - ]) + camera = "-".join(f.split("-")[:-1]) + start_time = datetime.datetime.strptime( + f.split("-")[-1].split(".")[0], "%Y%m%d%H%M%S" + ) + + ffprobe_cmd = " ".join( + [ + "ffprobe", + "-v", + "error", + 
"-show_entries", + "format=duration", + "-of", + "default=noprint_wrappers=1:nokey=1", + f"{os.path.join(CACHE_DIR,f)}", + ] + ) p = sp.Popen(ffprobe_cmd, stdout=sp.PIPE, shell=True) (output, err) = p.communicate() p_status = p.wait() if p_status == 0: - duration = float(output.decode('utf-8').strip()) + duration = float(output.decode("utf-8").strip()) else: logger.info(f"bad file: {f}") - os.remove(os.path.join(CACHE_DIR,f)) + os.remove(os.path.join(CACHE_DIR, f)) continue self.cached_clips[f] = { - 'path': f, - 'camera': camera, - 'start_time': start_time.timestamp(), - 'duration': duration + "path": f, + "camera": camera, + "start_time": start_time.timestamp(), + "duration": duration, } if len(self.events_in_process) > 0: - earliest_event = min(self.events_in_process.values(), key=lambda x:x['start_time'])['start_time'] + earliest_event = min( + self.events_in_process.values(), key=lambda x: x["start_time"] + )["start_time"] else: earliest_event = datetime.datetime.now().timestamp() # if the earliest event exceeds the max seconds, cap it max_seconds = self.config.clips.max_seconds - if datetime.datetime.now().timestamp()-earliest_event > max_seconds: - earliest_event = datetime.datetime.now().timestamp()-max_seconds - + if datetime.datetime.now().timestamp() - earliest_event > max_seconds: + earliest_event = datetime.datetime.now().timestamp() - max_seconds + for f, data in list(self.cached_clips.items()): - if earliest_event-90 > data['start_time']+data['duration']: + if earliest_event - 90 > data["start_time"] + data["duration"]: del self.cached_clips[f] logger.debug(f"Cleaning up cached file {f}") - os.remove(os.path.join(CACHE_DIR,f)) - + os.remove(os.path.join(CACHE_DIR, f)) + # if we are still using more than 90% of the cache, proactively cleanup cache_usage = shutil.disk_usage("/tmp/cache") - if cache_usage.used/cache_usage.total > .9 and cache_usage.free < 200000000 and len(self.cached_clips) > 0: + if ( + cache_usage.used / cache_usage.total > 0.9 + and 
cache_usage.free < 200000000 + and len(self.cached_clips) > 0 + ): logger.warning("More than 90% of the cache is used.") - logger.warning("Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config.") + logger.warning( + "Consider increasing space available at /tmp/cache or reducing max_seconds in your clips config." + ) logger.warning("Proactively cleaning up the cache...") - while cache_usage.used/cache_usage.total > .9: - oldest_clip = min(self.cached_clips.values(), key=lambda x:x['start_time']) - del self.cached_clips[oldest_clip['path']] - os.remove(os.path.join(CACHE_DIR,oldest_clip['path'])) + while cache_usage.used / cache_usage.total > 0.9: + oldest_clip = min( + self.cached_clips.values(), key=lambda x: x["start_time"] + ) + del self.cached_clips[oldest_clip["path"]] + os.remove(os.path.join(CACHE_DIR, oldest_clip["path"])) cache_usage = shutil.disk_usage("/tmp/cache") def create_clip(self, camera, event_data, pre_capture, post_capture): # get all clips from the camera with the event sorted - sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time']) + sorted_clips = sorted( + [c for c in self.cached_clips.values() if c["camera"] == camera], + key=lambda i: i["start_time"], + ) # if there are no clips in the cache or we are still waiting on a needed file check every 5 seconds wait_count = 0 - while len(sorted_clips) == 0 or sorted_clips[-1]['start_time'] + sorted_clips[-1]['duration'] < event_data['end_time']+post_capture: + while ( + len(sorted_clips) == 0 + or sorted_clips[-1]["start_time"] + sorted_clips[-1]["duration"] + < event_data["end_time"] + post_capture + ): if wait_count > 4: - logger.warning(f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event.") + logger.warning( + f"Unable to create clip for {camera} and event {event_data['id']}. There were no cache files for this event." 
+ ) return False logger.debug(f"No cache clips for {camera}. Waiting...") time.sleep(5) self.refresh_cache() # get all clips from the camera with the event sorted - sorted_clips = sorted([c for c in self.cached_clips.values() if c['camera'] == camera], key = lambda i: i['start_time']) + sorted_clips = sorted( + [c for c in self.cached_clips.values() if c["camera"] == camera], + key=lambda i: i["start_time"], + ) wait_count += 1 - - playlist_start = event_data['start_time']-pre_capture - playlist_end = event_data['end_time']+post_capture + + playlist_start = event_data["start_time"] - pre_capture + playlist_end = event_data["end_time"] + post_capture playlist_lines = [] for clip in sorted_clips: # clip ends before playlist start time, skip - if clip['start_time']+clip['duration'] < playlist_start: + if clip["start_time"] + clip["duration"] < playlist_start: continue # clip starts after playlist ends, finish - if clip['start_time'] > playlist_end: + if clip["start_time"] > playlist_end: break playlist_lines.append(f"file '{os.path.join(CACHE_DIR,clip['path'])}'") # if this is the starting clip, add an inpoint - if clip['start_time'] < playlist_start: - playlist_lines.append(f"inpoint {int(playlist_start-clip['start_time'])}") + if clip["start_time"] < playlist_start: + playlist_lines.append( + f"inpoint {int(playlist_start-clip['start_time'])}" + ) # if this is the ending clip, add an outpoint - if clip['start_time']+clip['duration'] > playlist_end: - playlist_lines.append(f"outpoint {int(playlist_end-clip['start_time'])}") + if clip["start_time"] + clip["duration"] > playlist_end: + playlist_lines.append( + f"outpoint {int(playlist_end-clip['start_time'])}" + ) clip_name = f"{camera}-{event_data['id']}" ffmpeg_cmd = [ - 'ffmpeg', - '-y', - '-protocol_whitelist', - 'pipe,file', - '-f', - 'concat', - '-safe', - '0', - '-i', - '-', - '-c', - 'copy', - '-movflags', - '+faststart', - f"{os.path.join(CLIPS_DIR, clip_name)}.mp4" + "ffmpeg", + "-y", + "-protocol_whitelist", 
+ "pipe,file", + "-f", + "concat", + "-safe", + "0", + "-i", + "-", + "-c", + "copy", + "-movflags", + "+faststart", + f"{os.path.join(CLIPS_DIR, clip_name)}.mp4", ] - p = sp.run(ffmpeg_cmd, input="\n".join(playlist_lines), encoding='ascii', capture_output=True) + p = sp.run( + ffmpeg_cmd, + input="\n".join(playlist_lines), + encoding="ascii", + capture_output=True, + ) if p.returncode != 0: logger.error(p.stderr) return False @@ -199,68 +241,80 @@ class EventProcessor(threading.Thread): logger.debug(f"Event received: {event_type} {camera} {event_data['id']}") self.refresh_cache() - if event_type == 'start': - self.events_in_process[event_data['id']] = event_data + if event_type == "start": + self.events_in_process[event_data["id"]] = event_data - if event_type == 'end': + if event_type == "end": clips_config = self.config.cameras[camera].clips clip_created = False if self.should_create_clip(camera, event_data): - if clips_config.enabled and (clips_config.objects is None or event_data['label'] in clips_config.objects): - clip_created = self.create_clip(camera, event_data, clips_config.pre_capture, clips_config.post_capture) - - if clip_created or event_data['has_snapshot']: + if clips_config.enabled and ( + clips_config.objects is None + or event_data["label"] in clips_config.objects + ): + clip_created = self.create_clip( + camera, + event_data, + clips_config.pre_capture, + clips_config.post_capture, + ) + + if clip_created or event_data["has_snapshot"]: Event.create( - id=event_data['id'], - label=event_data['label'], + id=event_data["id"], + label=event_data["label"], camera=camera, - start_time=event_data['start_time'], - end_time=event_data['end_time'], - top_score=event_data['top_score'], - false_positive=event_data['false_positive'], - zones=list(event_data['entered_zones']), - thumbnail=event_data['thumbnail'], + start_time=event_data["start_time"], + end_time=event_data["end_time"], + top_score=event_data["top_score"], + 
false_positive=event_data["false_positive"], + zones=list(event_data["entered_zones"]), + thumbnail=event_data["thumbnail"], has_clip=clip_created, - has_snapshot=event_data['has_snapshot'], + has_snapshot=event_data["has_snapshot"], ) - del self.events_in_process[event_data['id']] - self.event_processed_queue.put((event_data['id'], camera)) + del self.events_in_process[event_data["id"]] + self.event_processed_queue.put((event_data["id"], camera)) + class EventCleanup(threading.Thread): def __init__(self, config: FrigateConfig, stop_event): threading.Thread.__init__(self) - self.name = 'event_cleanup' + self.name = "event_cleanup" self.config = config self.stop_event = stop_event self.camera_keys = list(self.config.cameras.keys()) def expire(self, media): ## Expire events from unlisted cameras based on the global config - if media == 'clips': + if media == "clips": retain_config = self.config.clips.retain - file_extension = 'mp4' - update_params = {'has_clip': False} + file_extension = "mp4" + update_params = {"has_clip": False} else: retain_config = self.config.snapshots.retain - file_extension = 'jpg' - update_params = {'has_snapshot': False} - - distinct_labels = (Event.select(Event.label) - .where(Event.camera.not_in(self.camera_keys)) - .distinct()) - + file_extension = "jpg" + update_params = {"has_snapshot": False} + + distinct_labels = ( + Event.select(Event.label) + .where(Event.camera.not_in(self.camera_keys)) + .distinct() + ) + # loop over object types in db for l in distinct_labels: # get expiration time for this label expire_days = retain_config.objects.get(l.label, retain_config.default) - expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp() + expire_after = ( + datetime.datetime.now() - datetime.timedelta(days=expire_days) + ).timestamp() # grab all events after specific time - expired_events = ( - Event.select() - .where(Event.camera.not_in(self.camera_keys), - Event.start_time < expire_after, - Event.label == 
l.label) + expired_events = Event.select().where( + Event.camera.not_in(self.camera_keys), + Event.start_time < expire_after, + Event.label == l.label, ) # delete the media from disk for event in expired_events: @@ -268,56 +322,57 @@ class EventCleanup(threading.Thread): media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}") media.unlink(missing_ok=True) # update the clips attribute for the db entry - update_query = ( - Event.update(update_params) - .where(Event.camera.not_in(self.camera_keys), - Event.start_time < expire_after, - Event.label == l.label) + update_query = Event.update(update_params).where( + Event.camera.not_in(self.camera_keys), + Event.start_time < expire_after, + Event.label == l.label, ) update_query.execute() ## Expire events from cameras based on the camera config for name, camera in self.config.cameras.items(): - if media == 'clips': + if media == "clips": retain_config = camera.clips.retain else: retain_config = camera.snapshots.retain # get distinct objects in database for this camera - distinct_labels = (Event.select(Event.label) - .where(Event.camera == name) - .distinct()) + distinct_labels = ( + Event.select(Event.label).where(Event.camera == name).distinct() + ) # loop over object types in db for l in distinct_labels: # get expiration time for this label expire_days = retain_config.objects.get(l.label, retain_config.default) - expire_after = (datetime.datetime.now() - datetime.timedelta(days=expire_days)).timestamp() + expire_after = ( + datetime.datetime.now() - datetime.timedelta(days=expire_days) + ).timestamp() # grab all events after specific time - expired_events = ( - Event.select() - .where(Event.camera == name, - Event.start_time < expire_after, - Event.label == l.label) + expired_events = Event.select().where( + Event.camera == name, + Event.start_time < expire_after, + Event.label == l.label, ) # delete the grabbed clips from disk for event in expired_events: media_name = f"{event.camera}-{event.id}" - media 
= Path(f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}") + media = Path( + f"{os.path.join(CLIPS_DIR, media_name)}.{file_extension}" + ) media.unlink(missing_ok=True) # update the clips attribute for the db entry - update_query = ( - Event.update(update_params) - .where( Event.camera == name, - Event.start_time < expire_after, - Event.label == l.label) + update_query = Event.update(update_params).where( + Event.camera == name, + Event.start_time < expire_after, + Event.label == l.label, ) update_query.execute() def purge_duplicates(self): duplicate_query = """with grouped_events as ( select id, - label, - camera, + label, + camera, has_snapshot, has_clip, row_number() over ( @@ -327,7 +382,7 @@ class EventCleanup(threading.Thread): from event ) - select distinct id, camera, has_snapshot, has_clip from grouped_events + select distinct id, camera, has_snapshot, has_clip from grouped_events where copy_number > 1;""" duplicate_events = Event.raw(duplicate_query) @@ -341,13 +396,15 @@ class EventCleanup(threading.Thread): media = Path(f"{os.path.join(CLIPS_DIR, media_name)}.mp4") media.unlink(missing_ok=True) - (Event.delete() - .where( Event.id << [event.id for event in duplicate_events] ) - .execute()) - + ( + Event.delete() + .where(Event.id << [event.id for event in duplicate_events]) + .execute() + ) + def run(self): counter = 0 - while(True): + while True: if self.stop_event.is_set(): logger.info(f"Exiting event cleanup...") break @@ -359,14 +416,12 @@ class EventCleanup(threading.Thread): continue counter = 0 - self.expire('clips') - self.expire('snapshots') + self.expire("clips") + self.expire("snapshots") self.purge_duplicates() # drop events from db where has_clip and has_snapshot are false - delete_query = ( - Event.delete() - .where( Event.has_clip == False, - Event.has_snapshot == False) + delete_query = Event.delete().where( + Event.has_clip == False, Event.has_snapshot == False ) delete_query.execute() diff --git a/frigate/http.py 
b/frigate/http.py index 4d818d37c..ced36fe76 100644 --- a/frigate/http.py +++ b/frigate/http.py @@ -9,8 +9,15 @@ from functools import reduce import cv2 import gevent import numpy as np -from flask import (Blueprint, Flask, Response, current_app, jsonify, - make_response, request) +from flask import ( + Blueprint, + Flask, + Response, + current_app, + jsonify, + make_response, + request, +) from flask_sockets import Sockets from peewee import SqliteDatabase, operator, fn, DoesNotExist from playhouse.shortcuts import model_to_dict @@ -23,10 +30,11 @@ from frigate.version import VERSION logger = logging.getLogger(__name__) -bp = Blueprint('frigate', __name__) -ws = Blueprint('ws', __name__) +bp = Blueprint("frigate", __name__) +ws = Blueprint("ws", __name__) -class MqttBackend(): + +class MqttBackend: """Interface for registering and updating WebSocket clients.""" def __init__(self, mqtt_client, topic_prefix): @@ -42,36 +50,48 @@ class MqttBackend(): try: json_message = json.loads(message) json_message = { - 'topic': f"{self.topic_prefix}/{json_message['topic']}", - 'payload': json_message['payload'], - 'retain': json_message.get('retain', False) + "topic": f"{self.topic_prefix}/{json_message['topic']}", + "payload": json_message["payload"], + "retain": json_message.get("retain", False), } except: logger.warning("Unable to parse websocket message as valid json.") return - logger.debug(f"Publishing mqtt message from websockets at {json_message['topic']}.") - self.mqtt_client.publish(json_message['topic'], json_message['payload'], retain=json_message['retain']) + logger.debug( + f"Publishing mqtt message from websockets at {json_message['topic']}." 
+ ) + self.mqtt_client.publish( + json_message["topic"], + json_message["payload"], + retain=json_message["retain"], + ) def run(self): def send(client, userdata, message): """Sends mqtt messages to clients.""" try: logger.debug(f"Received mqtt message on {message.topic}.") - ws_message = json.dumps({ - 'topic': message.topic.replace(f"{self.topic_prefix}/",""), - 'payload': message.payload.decode() - }) + ws_message = json.dumps( + { + "topic": message.topic.replace(f"{self.topic_prefix}/", ""), + "payload": message.payload.decode(), + } + ) except: # if the payload can't be decoded don't relay to clients - logger.debug(f"MQTT payload for {message.topic} wasn't text. Skipping...") + logger.debug( + f"MQTT payload for {message.topic} wasn't text. Skipping..." + ) return for client in self.clients: try: client.send(ws_message) except: - logger.debug("Removing websocket client due to a closed connection.") + logger.debug( + "Removing websocket client due to a closed connection." + ) self.clients.remove(client) self.mqtt_client.message_callback_add(f"{self.topic_prefix}/#", send) @@ -80,7 +100,14 @@ class MqttBackend(): """Maintains mqtt subscription in the background.""" gevent.spawn(self.run) -def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detected_frames_processor, mqtt_client): + +def create_app( + frigate_config, + database: SqliteDatabase, + stats_tracking, + detected_frames_processor, + mqtt_client, +): app = Flask(__name__) sockets = Sockets(app) @@ -105,14 +132,16 @@ def create_app(frigate_config, database: SqliteDatabase, stats_tracking, detecte return app -@bp.route('/') + +@bp.route("/") def is_healthy(): return "Frigate is running. Alive and healthy!" 
-@bp.route('/events/summary') + +@bp.route("/events/summary") def events_summary(): - has_clip = request.args.get('has_clip', type=int) - has_snapshot = request.args.get('has_snapshot', type=int) + has_clip = request.args.get("has_clip", type=int) + has_snapshot = request.args.get("has_snapshot", type=int) clauses = [] @@ -126,35 +155,40 @@ def events_summary(): clauses.append((1 == 1)) groups = ( - Event - .select( - Event.camera, - Event.label, - fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')).alias('day'), - Event.zones, - fn.COUNT(Event.id).alias('count') - ) - .where(reduce(operator.and_, clauses)) - .group_by( - Event.camera, - Event.label, - fn.strftime('%Y-%m-%d', fn.datetime(Event.start_time, 'unixepoch', 'localtime')), - Event.zones - ) + Event.select( + Event.camera, + Event.label, + fn.strftime( + "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime") + ).alias("day"), + Event.zones, + fn.COUNT(Event.id).alias("count"), ) + .where(reduce(operator.and_, clauses)) + .group_by( + Event.camera, + Event.label, + fn.strftime( + "%Y-%m-%d", fn.datetime(Event.start_time, "unixepoch", "localtime") + ), + Event.zones, + ) + ) return jsonify([e for e in groups.dicts()]) -@bp.route('/events/') + +@bp.route("/events/") def event(id): try: return model_to_dict(Event.get(Event.id == id)) except DoesNotExist: return "Event not found", 404 -@bp.route('/events//thumbnail.jpg') + +@bp.route("/events//thumbnail.jpg") def event_thumbnail(id): - format = request.args.get('format', 'ios') + format = request.args.get("format", "ios") thumbnail_bytes = None try: event = Event.get(Event.id == id) @@ -162,7 +196,9 @@ def event_thumbnail(id): except DoesNotExist: # see if the object is currently being tracked try: - for camera_state in current_app.detected_frames_processor.camera_states.values(): + for ( + camera_state + ) in current_app.detected_frames_processor.camera_states.values(): if id in camera_state.tracked_objects: 
tracked_obj = camera_state.tracked_objects.get(id) if not tracked_obj is None: @@ -174,18 +210,27 @@ def event_thumbnail(id): return "Event not found", 404 # android notifications prefer a 2:1 ratio - if format == 'android': + if format == "android": jpg_as_np = np.frombuffer(thumbnail_bytes, dtype=np.uint8) img = cv2.imdecode(jpg_as_np, flags=1) - thumbnail = cv2.copyMakeBorder(img, 0, 0, int(img.shape[1]*0.5), int(img.shape[1]*0.5), cv2.BORDER_CONSTANT, (0,0,0)) - ret, jpg = cv2.imencode('.jpg', thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) + thumbnail = cv2.copyMakeBorder( + img, + 0, + 0, + int(img.shape[1] * 0.5), + int(img.shape[1] * 0.5), + cv2.BORDER_CONSTANT, + (0, 0, 0), + ) + ret, jpg = cv2.imencode(".jpg", thumbnail, [int(cv2.IMWRITE_JPEG_QUALITY), 70]) thumbnail_bytes = jpg.tobytes() response = make_response(thumbnail_bytes) - response.headers['Content-Type'] = 'image/jpg' + response.headers["Content-Type"] = "image/jpg" return response -@bp.route('/events//snapshot.jpg') + +@bp.route("/events//snapshot.jpg") def event_snapshot(id): jpg_bytes = None try: @@ -193,20 +238,24 @@ def event_snapshot(id): if not event.has_snapshot: return "Snapshot not available", 404 # read snapshot from disk - with open(os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), 'rb') as image_file: + with open( + os.path.join(CLIPS_DIR, f"{event.camera}-{id}.jpg"), "rb" + ) as image_file: jpg_bytes = image_file.read() except DoesNotExist: # see if the object is currently being tracked try: - for camera_state in current_app.detected_frames_processor.camera_states.values(): + for ( + camera_state + ) in current_app.detected_frames_processor.camera_states.values(): if id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(id) if not tracked_obj is None: jpg_bytes = tracked_obj.get_jpg_bytes( - timestamp=request.args.get('timestamp', type=int), - bounding_box=request.args.get('bbox', type=int), - crop=request.args.get('crop', type=int), - 
height=request.args.get('h', type=int) + timestamp=request.args.get("timestamp", type=int), + bounding_box=request.args.get("bbox", type=int), + crop=request.args.get("crop", type=int), + height=request.args.get("h", type=int), ) except: return "Event not found", 404 @@ -214,20 +263,21 @@ def event_snapshot(id): return "Event not found", 404 response = make_response(jpg_bytes) - response.headers['Content-Type'] = 'image/jpg' + response.headers["Content-Type"] = "image/jpg" return response -@bp.route('/events') + +@bp.route("/events") def events(): - limit = request.args.get('limit', 100) - camera = request.args.get('camera') - label = request.args.get('label') - zone = request.args.get('zone') - after = request.args.get('after', type=float) - before = request.args.get('before', type=float) - has_clip = request.args.get('has_clip', type=int) - has_snapshot = request.args.get('has_snapshot', type=int) - include_thumbnails = request.args.get('include_thumbnails', default=1, type=int) + limit = request.args.get("limit", 100) + camera = request.args.get("camera") + label = request.args.get("label") + zone = request.args.get("zone") + after = request.args.get("after", type=float) + before = request.args.get("before", type=float) + has_clip = request.args.get("has_clip", type=int) + has_snapshot = request.args.get("has_snapshot", type=int) + include_thumbnails = request.args.get("include_thumbnails", default=1, type=int) clauses = [] excluded_fields = [] @@ -239,7 +289,7 @@ def events(): clauses.append((Event.label == label)) if zone: - clauses.append((Event.zones.cast('text') % f"*\"{zone}\"*")) + clauses.append((Event.zones.cast("text") % f'*"{zone}"*')) if after: clauses.append((Event.start_time >= after)) @@ -259,116 +309,142 @@ def events(): if len(clauses) == 0: clauses.append((1 == 1)) - events = (Event.select() - .where(reduce(operator.and_, clauses)) - .order_by(Event.start_time.desc()) - .limit(limit)) + events = ( + Event.select() + .where(reduce(operator.and_, 
clauses)) + .order_by(Event.start_time.desc()) + .limit(limit) + ) return jsonify([model_to_dict(e, exclude=excluded_fields) for e in events]) -@bp.route('/config') + +@bp.route("/config") def config(): return jsonify(current_app.frigate_config.to_dict()) -@bp.route('/version') + +@bp.route("/version") def version(): return VERSION -@bp.route('/stats') + +@bp.route("/stats") def stats(): stats = stats_snapshot(current_app.stats_tracking) return jsonify(stats) -@bp.route('//