Mirror of https://github.com/blakeblackshear/frigate.git
Commit e13563770d ("allow full customization of input"), parent a659019d1a.

README.md (10 changed lines)
@@ -1,9 +1,9 @@
 <a href='https://ko-fi.com/P5P7XGO9' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi4.png?v=2' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
 
-# Frigate - Realtime Object Detection for RTSP Cameras
+# Frigate - Realtime Object Detection for IP Cameras
 **Note:** This version requires the use of a [Google Coral USB Accelerator](https://coral.withgoogle.com/products/accelerator/)
 
-Uses OpenCV and Tensorflow to perform realtime object detection locally for RTSP cameras. Designed for integration with HomeAssistant or others via MQTT.
+Uses OpenCV and Tensorflow to perform realtime object detection locally for IP cameras. Designed for integration with HomeAssistant or others via MQTT.
 
 - Leverages multiprocessing and threads heavily with an emphasis on realtime over processing every frame
 - Allows you to define specific regions (squares) in the image to look for objects
@@ -33,7 +33,7 @@ docker run --rm \
 -v /dev/bus/usb:/dev/bus/usb \
 -v <path_to_config_dir>:/config:ro \
 -p 5000:5000 \
--e RTSP_PASSWORD='password' \
+-e FRIGATE_RTSP_PASSWORD='password' \
 frigate:latest
 ```
 
@@ -50,7 +50,7 @@ Example docker-compose:
     ports:
       - "5000:5000"
     environment:
-      RTSP_PASSWORD: "password"
+      FRIGATE_RTSP_PASSWORD: "password"
 ```
 
 A `config.yml` file must exist in the `config` directory. See example [here](config/config.yml).
@@ -94,7 +94,7 @@ automation:
 ```
 
 ## Tips
-- Lower the framerate of the RTSP feed on the camera to reduce the CPU usage for capturing the feed
+- Lower the framerate of the video feed on the camera to reduce the CPU usage for capturing the feed
 
 ## Future improvements
 - [x] Remove motion detection for now
@@ -8,7 +8,7 @@ mqtt:
 
 cameras:
   back:
-    # Source passed to ffmpeg after the -i parameter.
+    # Source passed to ffmpeg after the -i parameter. Supports anything compatible with OpenCV and FFmpeg.
     # Environment variables that begin with 'FRIGATE_' may be referenced in {}
     ffmpeg_input: rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
 
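A minimal Python sketch of the substitution rule the comment above describes: any environment variable prefixed with `FRIGATE_` can be referenced inside `{}` placeholders in `ffmpeg_input`. The password value is illustrative, and the camera address is simply the example from the config itself:

```
import os

# Assume the container was started with -e FRIGATE_RTSP_PASSWORD='password'
os.environ.setdefault('FRIGATE_RTSP_PASSWORD', 'password')

# Only variables prefixed with FRIGATE_ are exposed to the template.
frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}

ffmpeg_input = 'rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2'
print(ffmpeg_input.format(**frigate_vars))
# rtsp://viewer:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2
```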
@@ -46,21 +46,18 @@ class FrameTracker(threading.Thread):
                 if (now - k) > 2:
                     del self.recent_frames[k]
 
-def get_frame_shape(rtsp_url):
+def get_frame_shape(source):
     # capture a single frame and check the frame shape so the correct array
     # size can be allocated in memory
-    video = cv2.VideoCapture(rtsp_url)
+    video = cv2.VideoCapture(source)
     ret, frame = video.read()
     frame_shape = frame.shape
     video.release()
     return frame_shape
 
-def get_rtsp_url(rtsp_config):
-    if (rtsp_config['password'].startswith('$')):
-        rtsp_config['password'] = os.getenv(rtsp_config['password'][1:])
-    return 'rtsp://{}:{}@{}:{}{}'.format(rtsp_config['user'],
-        rtsp_config['password'], rtsp_config['host'], rtsp_config['port'],
-        rtsp_config['path'])
+def get_ffmpeg_input(ffmpeg_input):
+    frigate_vars = {k: v for k, v in os.environ.items() if k.startswith('FRIGATE_')}
+    return ffmpeg_input.format(**frigate_vars)
 
 class CameraWatchdog(threading.Thread):
     def __init__(self, camera):
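A hedged usage sketch of the two helpers in this hunk, wired together with the example input from the config change above. The environment value and the resulting shape are illustrative, not guaranteed:

```
import os

# Illustrative value; in practice this comes from the container environment.
os.environ['FRIGATE_RTSP_PASSWORD'] = 'password'

source = get_ffmpeg_input(
    'rtsp://viewer:{FRIGATE_RTSP_PASSWORD}@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2')

# Probe a single frame so the shared-memory array can be sized up front.
# For a 1080p camera this would typically come back as (1080, 1920, 3).
frame_shape = get_frame_shape(source)
```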
@@ -119,7 +116,7 @@ class Camera:
         self.config = config
         self.detected_objects = []
         self.recent_frames = {}
-        self.rtsp_url = get_rtsp_url(self.config['rtsp'])
+        self.ffmpeg_input = get_ffmpeg_input(self.config['ffmpeg_input'])
         self.take_frame = self.config.get('take_frame', 1)
         self.ffmpeg_log_level = self.config.get('ffmpeg_log_level', 'panic')
         self.ffmpeg_hwaccel_args = self.config.get('ffmpeg_hwaccel_args', [])
@@ -139,7 +136,7 @@ class Camera:
             '-pix_fmt', 'rgb24'
         ])
         self.regions = self.config['regions']
-        self.frame_shape = get_frame_shape(self.rtsp_url)
+        self.frame_shape = get_frame_shape(self.ffmpeg_input)
         self.frame_size = self.frame_shape[0] * self.frame_shape[1] * self.frame_shape[2]
         self.mqtt_client = mqtt_client
         self.mqtt_topic_prefix = '{}/{}'.format(mqtt_prefix, self.name)
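The `frame_size` kept as context in this hunk is just the byte count of one raw rgb24 frame. A quick worked example, assuming a 1080p camera purely for illustration:

```
frame_shape = (1080, 1920, 3)  # rows, columns, RGB channels (example values)
frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
print(frame_size)              # 6220800 bytes (~6.2 MB) per raw rgb24 frame
```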
@@ -225,7 +222,7 @@ class Camera:
         self.ffmpeg_process = None
         self.capture_thread = None
 
-        # create the process to capture frames from the RTSP stream and store in a shared array
+        # create the process to capture frames from the input stream and store in a shared array
         print("Creating a new ffmpeg process...")
         self.start_ffmpeg()
 
@@ -243,7 +240,7 @@ class Camera:
             ffmpeg_global_args +
             self.ffmpeg_hwaccel_args +
             self.ffmpeg_input_args +
-            ['-i', self.rtsp_url] +
+            ['-i', self.ffmpeg_input] +
             self.ffmpeg_output_args +
             ['pipe:'])
 
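For readers skimming the diff, here is a rough Python sketch of how that argument list might expand for one camera. Only `-i <ffmpeg_input>`, `pipe:`, the `panic` log level, and `-pix_fmt rgb24` appear elsewhere in this commit; the remaining flags are assumed placeholders, not Frigate's actual defaults:

```
# Hypothetical expansion of the command assembled above. Everything except
# '-i <ffmpeg_input>' and the trailing 'pipe:' is an assumed placeholder.
ffmpeg_cmd = (['ffmpeg', '-hide_banner', '-loglevel', 'panic'] +  # global args (assumed)
              [] +                                                # ffmpeg_hwaccel_args (empty by default)
              ['-i', 'rtsp://viewer:password@10.0.10.10:554/cam/realmonitor?channel=1&subtype=2'] +
              ['-f', 'rawvideo', '-pix_fmt', 'rgb24'] +           # output args ('-pix_fmt rgb24' appears above)
              ['pipe:'])
```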