diff --git a/config/config.example.yml b/config/config.example.yml
index 5f24538bf..2dfe62f15 100644
--- a/config/config.example.yml
+++ b/config/config.example.yml
@@ -126,7 +126,9 @@ cameras:
     #   width: 720
 
     ################
-    ## Optional mask. Must be the same aspect ratio as your video feed.
+    ## Optional mask. Must be the same aspect ratio as your video feed. Value is either the
+    ## name of a file in the config directory or a base64 encoded bmp image prefixed with
+    ## 'base64,' eg. 'base64,asfasdfasdf....'.
     ##
     ## The mask works by looking at the bottom center of the bounding box for the detected
     ## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
diff --git a/frigate/video.py b/frigate/video.py
index a9e0e0e47..e4ec73510 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -12,6 +12,7 @@ import numpy as np
 import copy
 import itertools
 import json
+import base64
 from collections import defaultdict
 from frigate.util import draw_box_with_label, area, calculate_region, clipped, intersection_over_union, intersection, EventsPerSecond, listen, PlasmaManager
 from frigate.objects import ObjectTracker
@@ -189,7 +190,12 @@ def track_camera(name, config, global_objects_config, frame_queue, frame_shape,
 
     # load in the mask for object detection
     if 'mask' in config:
-        mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
+        if config['mask'].startswith('base64,'):
+            img = base64.b64decode(config['mask'][7:])
+            npimg = np.fromstring(img, dtype=np.uint8)
+            mask = cv2.imdecode(npimg, cv2.IMREAD_GRAYSCALE)
+        else:
+            mask = cv2.imread("/config/{}".format(config['mask']), cv2.IMREAD_GRAYSCALE)
     else:
         mask = None
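To illustrate the two accepted mask formats, here is a minimal standalone sketch (not part of the diff) that encodes an existing mask image into the inline `base64,`-prefixed value and then decodes it the same way the new branch in `track_camera` does. The file name `back_yard_mask.bmp` is only an example; the sketch also uses `np.frombuffer` rather than `np.fromstring`, since the latter is deprecated for binary input in recent NumPy releases.

```python
import base64

import cv2
import numpy as np

# Encode an existing mask file (a black/white bmp with the same aspect ratio
# as the camera feed) into the inline 'base64,...' config value.
# 'back_yard_mask.bmp' is a hypothetical example file name.
with open("back_yard_mask.bmp", "rb") as f:
    mask_value = "base64," + base64.b64encode(f.read()).decode("ascii")

# Round-trip check mirroring the decode path added to track_camera():
# strip the 'base64,' prefix, decode, and let OpenCV rebuild the grayscale mask.
raw = base64.b64decode(mask_value[len("base64,"):])
mask = cv2.imdecode(np.frombuffer(raw, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
print(mask.shape)  # should match the height/width of the camera frame
```

The resulting `mask_value` string is what would go into the camera's `mask:` entry in the config, as an alternative to referencing a file in the config directory by name.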