tweak process handoff

This commit is contained in:
Blake Blackshear 2020-01-31 11:50:42 -05:00
parent 24cb3508e8
commit 8a572f96d5
2 changed files with 11 additions and 10 deletions

3
Dockerfile Normal file → Executable file
View File

@@ -23,12 +23,13 @@ RUN apt -qq update && apt -qq install --no-install-recommends -y \
# python-prctl \ # python-prctl \
numpy \ numpy \
imutils \ imutils \
scipy \
&& python3.7 -m pip install -U \
SharedArray \ SharedArray \
# Flask \ # Flask \
# paho-mqtt \ # paho-mqtt \
# PyYAML \ # PyYAML \
# matplotlib \ # matplotlib \
scipy \
&& echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \ && echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" > /etc/apt/sources.list.d/coral-edgetpu.list \
&& wget -q -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \ && wget -q -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& apt -qq update \ && apt -qq update \

18
start_no_thread.py Normal file → Executable file
View File

@@ -308,15 +308,13 @@ class RemoteObjectDetector():
detections = sa.attach("detections") detections = sa.attach("detections")
while True: while True:
# signal that the process is ready to detect
detect_ready.set()
# wait until a frame is ready # wait until a frame is ready
frame_ready.wait() frame_ready.wait()
# signal that the process is busy # signal that the process is busy
detect_ready.clear()
frame_ready.clear() frame_ready.clear()
detections[:] = object_detector.detect_raw(input_frame) detections[:] = object_detector.detect_raw(input_frame)
# signal that the process is ready to detect
detect_ready.set()
self.detect_process = mp.Process(target=run_detector, args=(model, labels, self.detect_ready, self.frame_ready)) self.detect_process = mp.Process(target=run_detector, args=(model, labels, self.detect_ready, self.frame_ready))
self.detect_process.daemon = True self.detect_process.daemon = True
@@ -326,7 +324,8 @@ class RemoteObjectDetector():
detections = [] detections = []
with self.detect_lock: with self.detect_lock:
self.input_frame[:] = tensor_input self.input_frame[:] = tensor_input
# signal that a frame is ready # unset detections and signal that a frame is ready
self.detect_ready.clear()
self.frame_ready.set() self.frame_ready.set()
# wait until the detection process is finished, # wait until the detection process is finished,
self.detect_ready.wait() self.detect_ready.wait()
@@ -492,9 +491,10 @@ def main():
frame_size = frame_shape[0]*frame_shape[1]*frame_shape[2] frame_size = frame_shape[0]*frame_shape[1]*frame_shape[2]
frame = np.zeros(frame_shape, np.uint8) frame = np.zeros(frame_shape, np.uint8)
motion_detector = MotionDetector(frame_shape, resize_factor=6) motion_detector = MotionDetector(frame_shape, resize_factor=6)
object_detector = ObjectDetector('/lab/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite', '/lab/labelmap.txt') # object_detector = ObjectDetector('/lab/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite', '/lab/labelmap.txt')
# object_detector = RemoteObjectDetector('/lab/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite', '/lab/labelmap.txt') # object_detector = RemoteObjectDetector('/lab/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite', '/lab/labelmap.txt')
# object_detector = ObjectDetector('/lab/detect.tflite', '/lab/labelmap.txt') # object_detector = ObjectDetector('/lab/detect.tflite', '/lab/labelmap.txt')
object_detector = RemoteObjectDetector('/lab/detect.tflite', '/lab/labelmap.txt')
object_tracker = ObjectTracker(10) object_tracker = ObjectTracker(10)
# f = open('/debug/input/back.rgb24', 'rb') # f = open('/debug/input/back.rgb24', 'rb')
@@ -504,9 +504,9 @@ def main():
# -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format yuv420p -i output.mp4 -f rawvideo -pix_fmt rgb24 pipe: # -hwaccel vaapi -hwaccel_device /dev/dri/renderD128 -hwaccel_output_format yuv420p -i output.mp4 -f rawvideo -pix_fmt rgb24 pipe:
ffmpeg_cmd = (['ffmpeg'] + ffmpeg_cmd = (['ffmpeg'] +
['-hide_banner','-loglevel','panic'] + ['-hide_banner','-loglevel','panic'] +
['-hwaccel','vaapi','-hwaccel_device','/dev/dri/renderD129','-hwaccel_output_format','yuv420p'] + # ['-hwaccel','vaapi','-hwaccel_device','/dev/dri/renderD129','-hwaccel_output_format','yuv420p'] +
# ['-i', '/debug/input/output.mp4'] + # ['-i', '/debug/input/output.mp4'] +
['-i', '/debug/back-ali-jake.mp4'] + ['-i', '/lab/debug/back-night.mp4'] +
['-f','rawvideo','-pix_fmt','rgb24'] + ['-f','rawvideo','-pix_fmt','rgb24'] +
['pipe:']) ['pipe:'])
@@ -678,7 +678,7 @@ def main():
frame_times.append(datetime.datetime.now().timestamp()-start_frame) frame_times.append(datetime.datetime.now().timestamp()-start_frame)
# if (frames >= 700 and frames <= 1635) or (frames >= 2500): # if (frames >= 700 and frames <= 1635) or (frames >= 2500):
# if (frames >= 700 and frames <= 1000): # if (frames >= 300 and frames <= 600):
if (frames >= 0): if (frames >= 0):
# row1 = cv2.hconcat([gray, cv2.convertScaleAbs(avg_frame)]) # row1 = cv2.hconcat([gray, cv2.convertScaleAbs(avg_frame)])
# row2 = cv2.hconcat([frameDelta, thresh]) # row2 = cv2.hconcat([frameDelta, thresh])