Mirror of https://github.com/blakeblackshear/frigate.git, synced 2024-11-21 19:07:46 +01:00

Commit baa587028b (parent: 2b51dc3e5b)

    use a regular subprocess for ffmpeg, refactor bounding box drawing

Dockerfile (30 lines changed)
@@ -1,5 +1,7 @@
 FROM ubuntu:18.04
 
+ARG DEVICE
+
 # Install packages for apt repo
 RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
     apt-transport-https \
@@ -8,11 +10,14 @@ RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
     wget \
     gnupg-agent \
     dirmngr \
-    software-properties-common
+    software-properties-common \
+    && rm -rf /var/lib/apt/lists/*
 
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys D986B59D
+COPY scripts/install_odroid_repo.sh .
 
-RUN echo "deb http://deb.odroid.in/5422-s bionic main" > /etc/apt/sources.list.d/odroid.list
+RUN if [ "$DEVICE" = "odroid" ]; then \
+    sh /install_odroid_repo.sh; \
+    fi
 
 RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
     python3 \
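The new DEVICE build argument gates the Odroid apt repo setup, so one Dockerfile can target multiple boards; building the Odroid variant would use standard build-arg syntax, e.g. docker build --build-arg DEVICE=odroid . (the build invocation itself is not part of this commit).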
@@ -52,10 +57,12 @@ RUN pip install -U pip \
    numpy \
    Flask \
    paho-mqtt \
-   PyYAML \
-   ffmpeg-python
+   PyYAML
 
 # Download & build OpenCV
+# TODO: use multistage build to reduce image size:
+# https://medium.com/@denismakogon/pain-and-gain-running-opencv-application-with-golang-and-docker-on-alpine-3-7-435aa11c7aec
+# https://www.merixstudio.com/blog/docker-multi-stage-builds-python-development/
 RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/opencv/opencv/archive/4.0.1.zip
 RUN cd /usr/local/src/ \
     && unzip 4.0.1.zip \
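Dropping the ffmpeg-python dependency matches the change in frigate/video.py below, which now launches ffmpeg directly through subprocess instead of the ffmpeg-python bindings.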
@@ -70,14 +77,15 @@ RUN cd /usr/local/src/ \
     && rm -rf /usr/local/src/opencv-4.0.1
 
 # Download and install EdgeTPU libraries for Coral
-RUN wget https://dl.google.com/coral/edgetpu_api/edgetpu_api_latest.tar.gz -O edgetpu_api.tar.gz --trust-server-names
+RUN wget https://dl.google.com/coral/edgetpu_api/edgetpu_api_latest.tar.gz -O edgetpu_api.tar.gz --trust-server-names \
+    && tar xzf edgetpu_api.tar.gz
 
-RUN tar xzf edgetpu_api.tar.gz \
-    && cd edgetpu_api \
-    && cp -p libedgetpu/libedgetpu_arm32.so /usr/lib/arm-linux-gnueabihf/libedgetpu.so.1.0 \
-    && ldconfig \
-    && python3 -m pip install --no-deps "$(ls edgetpu-*-py3-none-any.whl 2>/dev/null)"
+COPY scripts/install_edgetpu_api.sh edgetpu_api/install.sh
+
+RUN cd edgetpu_api \
+    && /bin/bash install.sh
 
+# Copy a python 3.6 version
 RUN cd /usr/local/lib/python3.6/dist-packages/edgetpu/swig/ \
     && ln -s _edgetpu_cpp_wrapper.cpython-35m-arm-linux-gnueabihf.so _edgetpu_cpp_wrapper.cpython-36m-arm-linux-gnueabihf.so
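The inline EdgeTPU setup (copying libedgetpu and pip-installing the wheel) moves into scripts/install_edgetpu_api.sh, which is copied over the archive's own install.sh; the ln -s step stays behind to expose the Python 3.5 SWIG module under the Python 3.6 name.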
frigate/object_detection.py

@@ -89,8 +89,6 @@ class FramePrepper(threading.Thread):
            cropped_frame = self.shared_frame[self.region_y_offset:self.region_y_offset+self.region_size, self.region_x_offset:self.region_x_offset+self.region_size].copy()
            frame_time = self.frame_time.value
 
-        # convert to RGB
-        #cropped_frame_rgb = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2RGB)
         # Resize to 300x300 if needed
         if cropped_frame.shape != (300, 300, 3):
             cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
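The commented-out BGR-to-RGB conversion was dead code: with ffmpeg now emitting rgb24 frames (see frigate/video.py below), cropped frames are already RGB when they reach the detector.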
frigate/objects.py

@@ -2,6 +2,7 @@ import time
 import datetime
 import threading
 import cv2
+from . util import draw_box_with_label
 
 class ObjectCleaner(threading.Thread):
     def __init__(self, objects_parsed, detected_objects):
@@ -79,12 +80,9 @@ class BestPersonFrame(threading.Thread):
 
         if not self.best_person is None and self.best_person['frame_time'] in recent_frames:
             best_frame = recent_frames[self.best_person['frame_time']]
-            best_frame = cv2.cvtColor(best_frame, cv2.COLOR_BGR2RGB)
-            # draw the bounding box on the frame
-            color = (255,0,0)
-            cv2.rectangle(best_frame, (self.best_person['xmin'], self.best_person['ymin']),
-                (self.best_person['xmax'], self.best_person['ymax']),
-                color, 2)
-
-            # convert back to BGR
+            label = "{}: {}%".format(self.best_person['name'],int(self.best_person['score']*100))
+            draw_box_with_label(best_frame, self.best_person['xmin'], self.best_person['ymin'],
+                self.best_person['xmax'], self.best_person['ymax'], label)
+
             self.best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
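The hand-rolled cvtColor/rectangle sequence is replaced by the shared draw_box_with_label helper introduced in frigate/util.py below, so the best-person snapshot and the live view now label boxes identically.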
frigate/util.py

@@ -1,5 +1,26 @@
 import numpy as np
+import cv2
 
 # convert shared memory array into numpy array
 def tonumpyarray(mp_arr):
     return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
+
+def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
+    color = (255,0,0)
+    cv2.rectangle(frame, (x_min, y_min),
+        (x_max, y_max),
+        color, 2)
+    font_scale = 0.5
+    font = cv2.FONT_HERSHEY_SIMPLEX
+    # get the width and height of the text box
+    size = cv2.getTextSize(label, font, fontScale=font_scale, thickness=2)
+    text_width = size[0][0]
+    text_height = size[0][1]
+    line_height = text_height + size[1]
+    # set the text start position
+    text_offset_x = x_min
+    text_offset_y = 0 if y_min < line_height else y_min - line_height
+    # make the coords of the box with a small padding of two pixels
+    textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
+    cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
+    cv2.putText(frame, label, (text_offset_x, text_offset_y + line_height - 2), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
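To illustrate the new helper, here is a minimal usage sketch (not part of the commit; the image, coordinates, and score are made up, and the import path assumes the frigate package layout):

    import numpy as np
    import cv2
    from frigate.util import draw_box_with_label

    # blank 300x300 RGB test image
    frame = np.zeros((300, 300, 3), dtype=np.uint8)

    # hypothetical detection: a person at 75% confidence
    label = "{}: {}%".format('person', int(0.75 * 100))
    draw_box_with_label(frame, 50, 60, 200, 280, label)

    # the helper modifies frame in place; write it out for inspection
    cv2.imwrite('labeled.png', frame)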
frigate/video.py

@@ -5,9 +5,10 @@ import cv2
 import threading
 import ctypes
 import multiprocessing as mp
+import subprocess as sp
 import numpy as np
 import ffmpeg
-from . util import tonumpyarray
+from . util import tonumpyarray, draw_box_with_label
 from . object_detection import FramePrepper
 from . objects import ObjectCleaner, BestPersonFrame
 from . mqtt import MqttObjectPublisher
@@ -16,34 +17,29 @@ from . mqtt import MqttObjectPublisher
 def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_shape, rtsp_url):
     # convert shared memory array into numpy and shape into image array
     arr = tonumpyarray(shared_arr).reshape(frame_shape)
+    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
 
-    ffmpeg_process = (
-        ffmpeg
-        .input(rtsp_url,
-            rtsp_transport="tcp",
-            stimeout=5000000,
-            use_wallclock_as_timestamps=1,
-            fflags="+genpts",
-            avoid_negative_ts="make_zero")
-        .output('pipe:', format='rawvideo', pix_fmt='rgb24')
-    )
-
-    print(ffmpeg_process.compile())
-
-    ffmpeg_process = ffmpeg_process.run_async(pipe_stdout=True)
+    ffmpeg_cmd = ['ffmpeg',
+        '-avoid_negative_ts', 'make_zero',
+        '-fflags', '+genpts',
+        '-rtsp_transport', 'tcp',
+        '-stimeout', '5000000',
+        '-use_wallclock_as_timestamps', '1',
+        '-i', rtsp_url,
+        '-f', 'rawvideo',
+        '-pix_fmt', 'rgb24',
+        'pipe:']
+
+    pipe = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=frame_size)
 
     while True:
-        in_bytes = ffmpeg_process.stdout.read(frame_shape[0] * frame_shape[1] * frame_shape[2])
-        if not in_bytes:
-            print("No bytes received. Waiting 1 second before trying again.")
-            time.sleep(1)
-            continue
+        raw_image = pipe.stdout.read(frame_size)
         frame = (
             np
-            .frombuffer(in_bytes, np.uint8)
+            .frombuffer(raw_image, np.uint8)
             .reshape(frame_shape)
         )
-        # Lock access and update frame
         with frame_lock:
             shared_frame_time.value = datetime.datetime.now().timestamp()
             arr[:] = frame
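For context, a self-contained sketch of the same technique — reading fixed-size rgb24 frames from an ffmpeg child process — including the end-of-stream handling this loop now omits (the URL and frame shape are placeholders, not values from the commit):

    import subprocess as sp
    import numpy as np

    frame_shape = (1080, 1920, 3)  # placeholder (height, width, channels)
    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]

    ffmpeg_cmd = ['ffmpeg',
        '-rtsp_transport', 'tcp',
        '-i', 'rtsp://example.com/stream',  # placeholder URL
        '-f', 'rawvideo',
        '-pix_fmt', 'rgb24',
        'pipe:']

    pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE, bufsize=frame_size)

    while True:
        raw_image = pipe.stdout.read(frame_size)
        # a buffered read returns fewer bytes only at EOF, i.e. ffmpeg exited
        if len(raw_image) < frame_size:
            break  # a supervisor would restart ffmpeg here
        frame = np.frombuffer(raw_image, np.uint8).reshape(frame_shape)
        # ... hand the frame off for processing ...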
@@ -51,7 +47,7 @@ def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_shape, rtsp_url):
         with frame_ready:
             frame_ready.notify_all()
 
-    ffmpeg_process.wait()
+    pipe.stdout.flush()
 
 # Stores 2 seconds worth of frames when motion is detected so they can be used for other threads
 class FrameTracker(threading.Thread):
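Flushing the read-side pipe does not reap the child process; if a caller ever needs to tear the stream down, something like the following would do it (a sketch using standard subprocess.Popen methods and the same pipe object, not code from this commit):

    pipe.terminate()  # ask ffmpeg to exit (SIGTERM)
    try:
        pipe.wait(timeout=10)  # reap the child to avoid a zombie
    except sp.TimeoutExpired:
        pipe.kill()  # force-kill if ffmpeg ignores the signal
        pipe.wait()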
@@ -272,14 +268,10 @@ class Camera:
         with self.frame_lock:
             frame = self.shared_frame_np.copy()
 
-        # convert to RGB for drawing
-        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         # draw the bounding boxes on the screen
         for obj in detected_objects:
-            color = (255,0,0)
-            cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
-                (obj['xmax'], obj['ymax']),
-                color, 2)
+            label = "{}: {}%".format(obj['name'],int(obj['score']*100))
+            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], label)
 
         for region in self.regions:
             color = (255,255,255)
@@ -287,7 +279,7 @@ class Camera:
                 (region['x_offset']+region['size'], region['y_offset']+region['size']),
                 color, 2)
 
-        # convert back to BGR
+        # convert to BGR
         frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
 
         return frame