From baa587028b66378015aa85d68cf7cf5276394708 Mon Sep 17 00:00:00 2001
From: blakeblackshear
Date: Sun, 2 Jun 2019 07:29:50 -0500
Subject: [PATCH] use a regular subprocess for ffmpeg, refactor bounding box
 drawing

---
 Dockerfile                  | 30 +++++++++++++--------
 frigate/object_detection.py |  2 --
 frigate/objects.py          | 12 ++++-----
 frigate/util.py             | 23 +++++++++++++++-
 frigate/video.py            | 52 ++++++++++++++++---------------------
 5 files changed, 68 insertions(+), 51 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 9e67ac971..18611113b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,5 +1,7 @@
 FROM ubuntu:18.04
 
+ARG DEVICE
+
 # Install packages for apt repo
 RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
     apt-transport-https \
@@ -8,11 +10,14 @@ RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
     wget \
     gnupg-agent \
     dirmngr \
-    software-properties-common
+    software-properties-common \
+    && rm -rf /var/lib/apt/lists/*
 
-RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys D986B59D
+COPY scripts/install_odroid_repo.sh .
 
-RUN echo "deb http://deb.odroid.in/5422-s bionic main" > /etc/apt/sources.list.d/odroid.list
+RUN if [ "$DEVICE" = "odroid" ]; then \
+      sh /install_odroid_repo.sh; \
+    fi
 
 RUN apt-get -qq update && apt-get -qq install --no-install-recommends -y \
     python3 \
@@ -52,10 +57,12 @@ RUN pip install -U pip \
     numpy \
     Flask \
     paho-mqtt \
-    PyYAML \
-    ffmpeg-python
+    PyYAML
 
 # Download & build OpenCV
+# TODO: use multistage build to reduce image size:
+# https://medium.com/@denismakogon/pain-and-gain-running-opencv-application-with-golang-and-docker-on-alpine-3-7-435aa11c7aec
+# https://www.merixstudio.com/blog/docker-multi-stage-builds-python-development/
 RUN wget -q -P /usr/local/src/ --no-check-certificate https://github.com/opencv/opencv/archive/4.0.1.zip
 RUN cd /usr/local/src/ \
 && unzip 4.0.1.zip \
@@ -70,14 +77,15 @@ RUN cd /usr/local/src/ \
 && rm -rf /usr/local/src/opencv-4.0.1
 
 # Download and install EdgeTPU libraries for Coral
-RUN wget https://dl.google.com/coral/edgetpu_api/edgetpu_api_latest.tar.gz -O edgetpu_api.tar.gz --trust-server-names
+RUN wget https://dl.google.com/coral/edgetpu_api/edgetpu_api_latest.tar.gz -O edgetpu_api.tar.gz --trust-server-names \
+  && tar xzf edgetpu_api.tar.gz
 
-RUN tar xzf edgetpu_api.tar.gz \
-  && cd edgetpu_api \
-  && cp -p libedgetpu/libedgetpu_arm32.so /usr/lib/arm-linux-gnueabihf/libedgetpu.so.1.0 \
-  && ldconfig \
-  && python3 -m pip install --no-deps "$(ls edgetpu-*-py3-none-any.whl 2>/dev/null)"
+COPY scripts/install_edgetpu_api.sh edgetpu_api/install.sh
 
+RUN cd edgetpu_api \
+  && /bin/bash install.sh
+
+# Symlink the EdgeTPU cpp wrapper built for python 3.5 so python 3.6 can load it
 RUN cd /usr/local/lib/python3.6/dist-packages/edgetpu/swig/ \
 && ln -s _edgetpu_cpp_wrapper.cpython-35m-arm-linux-gnueabihf.so _edgetpu_cpp_wrapper.cpython-36m-arm-linux-gnueabihf.so

diff --git a/frigate/object_detection.py b/frigate/object_detection.py
index 463c156ee..008334541 100644
--- a/frigate/object_detection.py
+++ b/frigate/object_detection.py
@@ -89,8 +89,6 @@ class FramePrepper(threading.Thread):
                 cropped_frame = self.shared_frame[self.region_y_offset:self.region_y_offset+self.region_size, self.region_x_offset:self.region_x_offset+self.region_size].copy()
                 frame_time = self.frame_time.value
 
-            # convert to RGB
-            #cropped_frame_rgb = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2RGB)
             # Resize to 300x300 if needed
             if cropped_frame.shape != (300, 300, 3):
                 cropped_frame = cv2.resize(cropped_frame, dsize=(300, 300), interpolation=cv2.INTER_LINEAR)
diff --git a/frigate/objects.py b/frigate/objects.py
index fe9b0718a..39ff8f901 100644
--- a/frigate/objects.py
+++ b/frigate/objects.py
@@ -2,6 +2,7 @@ import time
 import datetime
 import threading
 import cv2
+from . util import draw_box_with_label
 
 class ObjectCleaner(threading.Thread):
     def __init__(self, objects_parsed, detected_objects):
@@ -79,12 +80,9 @@ class BestPersonFrame(threading.Thread):
 
             if not self.best_person is None and self.best_person['frame_time'] in recent_frames:
                 best_frame = recent_frames[self.best_person['frame_time']]
-                best_frame = cv2.cvtColor(best_frame, cv2.COLOR_BGR2RGB)
-                # draw the bounding box on the frame
-                color = (255,0,0)
-                cv2.rectangle(best_frame, (self.best_person['xmin'], self.best_person['ymin']),
-                    (self.best_person['xmax'], self.best_person['ymax']),
-                    color, 2)
-                # convert back to BGR
+                label = "{}: {}%".format(self.best_person['name'],int(self.best_person['score']*100))
+                draw_box_with_label(best_frame, self.best_person['xmin'], self.best_person['ymin'],
+                    self.best_person['xmax'], self.best_person['ymax'], label)
+
                 self.best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)

diff --git a/frigate/util.py b/frigate/util.py
index fa174f195..c02b4f4a7 100644
--- a/frigate/util.py
+++ b/frigate/util.py
@@ -1,5 +1,26 @@
 import numpy as np
+import cv2
 
 # convert shared memory array into numpy array
 def tonumpyarray(mp_arr):
-    return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
\ No newline at end of file
+    return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
+
+def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
+    color = (255,0,0)
+    cv2.rectangle(frame, (x_min, y_min),
+        (x_max, y_max),
+        color, 2)
+    font_scale = 0.5
+    font = cv2.FONT_HERSHEY_SIMPLEX
+    # get the width and height of the text box
+    size = cv2.getTextSize(label, font, fontScale=font_scale, thickness=2)
+    text_width = size[0][0]
+    text_height = size[0][1]
+    line_height = text_height + size[1]
+    # set the text start position
+    text_offset_x = x_min
+    text_offset_y = 0 if y_min < line_height else y_min - line_height
+    # make the coords of the box with a small padding of two pixels
+    textbox_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y + line_height))
+    cv2.rectangle(frame, textbox_coords[0], textbox_coords[1], color, cv2.FILLED)
+    cv2.putText(frame, label, (text_offset_x, text_offset_y + line_height - 2), font, fontScale=font_scale, color=(0, 0, 0), thickness=2)
\ No newline at end of file

diff --git a/frigate/video.py b/frigate/video.py
index 57f8ad059..ba3787d3f 100644
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -5,9 +5,10 @@ import cv2
 import threading
 import ctypes
 import multiprocessing as mp
+import subprocess as sp
 import numpy as np
 import ffmpeg
-from . util import tonumpyarray
+from . util import tonumpyarray, draw_box_with_label
 from . object_detection import FramePrepper
 from . objects import ObjectCleaner, BestPersonFrame
 from . mqtt import MqttObjectPublisher
@@ -16,34 +17,29 @@ from . mqtt import MqttObjectPublisher
 def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_shape, rtsp_url):
     # convert shared memory array into numpy and shape into image array
     arr = tonumpyarray(shared_arr).reshape(frame_shape)
+    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
 
-    ffmpeg_process = (
-        ffmpeg
-        .input(rtsp_url,
-            rtsp_transport="tcp",
-            stimeout=5000000,
-            use_wallclock_as_timestamps=1,
-            fflags="+genpts",
-            avoid_negative_ts="make_zero")
-        .output('pipe:', format='rawvideo', pix_fmt='rgb24')
-    )
-
-    print(ffmpeg_process.compile())
-
-    ffmpeg_process = ffmpeg_process.run_async(pipe_stdout=True)
+    ffmpeg_cmd = ['ffmpeg',
+        '-avoid_negative_ts', 'make_zero',
+        '-fflags', '+genpts',
+        '-rtsp_transport', 'tcp',
+        '-stimeout', '5000000',
+        '-use_wallclock_as_timestamps', '1',
+        '-i', rtsp_url,
+        '-f', 'rawvideo',
+        '-pix_fmt', 'rgb24',
+        'pipe:']
+
+    pipe = sp.Popen(ffmpeg_cmd, stdout = sp.PIPE, bufsize=frame_size)
 
     while True:
-        in_bytes = ffmpeg_process.stdout.read(frame_shape[0] * frame_shape[1] * frame_shape[2])
-        if not in_bytes:
-            print("No bytes received. Waiting 1 second before trying again.")
-            time.sleep(1)
-            continue
+        raw_image = pipe.stdout.read(frame_size)
         frame = (
             np
-            .frombuffer(in_bytes, np.uint8)
+            .frombuffer(raw_image, np.uint8)
             .reshape(frame_shape)
         )
-        # Lock access and update frame
+
         with frame_lock:
             shared_frame_time.value = datetime.datetime.now().timestamp()
             arr[:] = frame
@@ -51,7 +47,7 @@ def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_s
         with frame_ready:
             frame_ready.notify_all()
 
-    ffmpeg_process.wait()
+    pipe.stdout.flush()
 
 # Stores 2 seconds worth of frames when motion is detected so they can be used for other threads
 class FrameTracker(threading.Thread):
@@ -272,14 +268,10 @@ class Camera:
         with self.frame_lock:
            frame = self.shared_frame_np.copy()
 
-        # convert to RGB for drawing
-        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
         # draw the bounding boxes on the screen
         for obj in detected_objects:
-            color = (255,0,0)
-            cv2.rectangle(frame, (obj['xmin'], obj['ymin']),
-                (obj['xmax'], obj['ymax']),
-                color, 2)
+            label = "{}: {}%".format(obj['name'],int(obj['score']*100))
+            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], label)
 
         for region in self.regions:
             color = (255,255,255)
@@ -287,7 +279,7 @@ class Camera:
                 (region['x_offset']+region['size'], region['y_offset']+region['size']),
                 color, 2)
 
-        # convert back to BGR
+        # convert to BGR
         frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
 
         return frame
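
The core of this change: ffmpeg now runs as a plain subprocess writing raw rgb24 frames to stdout, and fetch_frames reads exactly one frame per read(frame_size) call. A minimal standalone sketch of the same pattern (the frame shape and stream URL below are placeholders, not values from this patch):

    import subprocess as sp
    import numpy as np

    frame_shape = (1080, 1920, 3)  # rows, columns, rgb channels (placeholder)
    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]
    ffmpeg_cmd = ['ffmpeg',
                  '-rtsp_transport', 'tcp',
                  '-i', 'rtsp://example.local/stream',  # placeholder URL
                  '-f', 'rawvideo',
                  '-pix_fmt', 'rgb24',
                  'pipe:']
    pipe = sp.Popen(ffmpeg_cmd, stdout=sp.PIPE, bufsize=frame_size)
    while True:
        # a buffered read blocks until frame_size bytes arrive or ffmpeg exits
        raw_image = pipe.stdout.read(frame_size)
        if len(raw_image) < frame_size:
            break  # EOF: the stream closed or ffmpeg died
        frame = np.frombuffer(raw_image, np.uint8).reshape(frame_shape)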
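
On the drawing side, the duplicated cv2.rectangle calls in objects.py and video.py are replaced by the shared draw_box_with_label helper in frigate/util.py, which renders the box plus a filled label band in one call. A quick way to exercise it in isolation (the blank test image and label are illustrative):

    import numpy as np
    from frigate.util import draw_box_with_label

    frame = np.zeros((300, 300, 3), dtype=np.uint8)  # blank rgb test image
    draw_box_with_label(frame, 50, 60, 200, 220, "person: 87%")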
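
For reference on the label-band math in draw_box_with_label: cv2.getTextSize returns ((text_width, text_height), baseline), so line_height = text_height + size[1] is the text height plus the baseline offset:

    import cv2

    size = cv2.getTextSize("person: 87%", cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.5, thickness=2)
    (text_width, text_height), baseline = size
    line_height = text_height + baseline  # same quantity computed in draw_box_with_label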