# Mirror of https://github.com/blakeblackshear/frigate.git
# Synced 2024-11-21 19:07:46 +01:00
---
web_port: 5000

mqtt:
  host: mqtt.server.com
  topic_prefix: frigate
  # client_id: frigate # Optional -- set to override default client id of 'frigate' if running multiple instances
  # user: username # Optional -- Uncomment for use
  # password: password # Optional -- Uncomment for use

cameras:
  back:
    # Source passed to ffmpeg after the -i parameter. Supports anything compatible with OpenCV and FFmpeg.
    # Environment variables that begin with 'FRIGATE_' may be referenced in {}
    # NOTE: quoted so the ?/&/{ characters and multiple colons can never be
    # misread by a YAML parser; the value itself is unchanged.
    ffmpeg_input: 'rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2'

    ################
    ## Optional mask. Must be the same dimensions as your video feed.
    ## The mask works by looking at the bottom center of the bounding box for the detected
    ## person in the image. If that pixel in the mask is a black pixel, it ignores it as a
    ## false positive. In my mask, the grass and driveway visible from my backdoor camera
    ## are white. The garage doors, sky, and trees (anywhere it would be impossible for a
    ## person to stand) are black.
    ################
    # mask: back-mask.bmp

    ################
    # Allows you to limit the framerate within frigate for cameras that do not support
    # custom framerates. A value of 1 tells frigate to look at every frame, 2 every 2nd frame,
    # 3 every 3rd frame, etc.
    ################
    take_frame: 1

    ################
    # Optional hardware acceleration parameters for ffmpeg. If your hardware supports it, it can
    # greatly reduce the CPU power used to decode the video stream. You will need to determine which
    # parameters work for your specific hardware. These may work for those with Intel hardware that
    # supports QuickSync.
    ################
    # ffmpeg_hwaccel_args:
    #   - -hwaccel
    #   - vaapi
    #   - -hwaccel_device
    #   - /dev/dri/renderD128
    #   - -hwaccel_output_format
    #   - yuv420p

    ################
    # FFmpeg log level. Default is "panic". https://ffmpeg.org/ffmpeg.html#Generic-options
    ################
    # ffmpeg_log_level: panic

    ################
    # Optional custom input args. Some cameras may need custom ffmpeg params to work reliably. Specifying
    # these will replace the default input params.
    ################
    # ffmpeg_input_args: []

    ################
    # Optional custom output args. Some cameras may need custom ffmpeg params to work reliably. Specifying
    # these will replace the default output params.
    ################
    # ffmpeg_output_args: []

    ################
    # size: size of the region in pixels
    # x_offset/y_offset: position of the upper left corner of your region (top left of image is 0,0)
    # min_person_area: minimum width*height of the bounding box for the detected person
    # max_person_area: maximum width*height of the bounding box for the detected person
    # threshold: The minimum decimal percentage (50% hit = 0.5) for the confidence from tensorflow
    # Tips: All regions are resized to 300x300 before detection because the model is trained on that size.
    # Resizing regions takes CPU power. Ideally, all regions should be as close to 300x300 as possible.
    # Defining a region that goes outside the bounds of the image will result in errors.
    ################
    regions:
      - size: 350
        x_offset: 0
        y_offset: 300
        min_person_area: 5000
        max_person_area: 100000
        threshold: 0.5
      - size: 400
        x_offset: 350
        y_offset: 250
        min_person_area: 2000
        max_person_area: 100000
        threshold: 0.5
      - size: 400
        x_offset: 750
        y_offset: 250
        min_person_area: 2000
        max_person_area: 100000
        threshold: 0.5