From 5d894f006a163b243ccfe03c9277b67c32cde4be Mon Sep 17 00:00:00 2001
From: blakeblackshear
Date: Sat, 9 Feb 2019 08:51:11 -0600
Subject: [PATCH] basic motion detection working

---
 detect_objects.py | 197 +++++++++++++++++++++++++++++++++++-----------
 1 file changed, 149 insertions(+), 48 deletions(-)

diff --git a/detect_objects.py b/detect_objects.py
index aef0f3d55..668443293 100644
--- a/detect_objects.py
+++ b/detect_objects.py
@@ -1,5 +1,6 @@
 import os
 import cv2
+import imutils
 import time
 import datetime
 import ctypes
@@ -24,8 +25,8 @@
 PATH_TO_LABELS = '/label_map.pbtext'
 # TODO: make dynamic?
 NUM_CLASSES = 90
-#REGIONS = "600,0,380:600,600,380:600,1200,380"
-REGIONS = os.getenv('REGIONS')
+REGIONS = "300,0,0:300,300,0:300,600,0"
+#REGIONS = os.getenv('REGIONS')

 DETECTED_OBJECTS = []

@@ -152,62 +153,77 @@ def main():
         detection_process.daemon = True
         detection_processes.append(detection_process)

+    motion_processes = []
+    for index, region in enumerate(regions):
+        motion_process = mp.Process(target=detect_motion, args=(shared_arr,
+            shared_memory_objects[index]['frame_time'],
+            shared_memory_objects[index]['motion_detected'],
+            frame_shape,
+            region['size'], region['x_offset'], region['y_offset']))
+        motion_process.daemon = True
+        motion_processes.append(motion_process)
+
     object_parser = ObjectParser([obj['output_array'] for obj in shared_memory_objects])
-    object_parser.start()
+    # object_parser.start()

     capture_process.start()
     print("capture_process pid ", capture_process.pid)
-    for detection_process in detection_processes:
-        detection_process.start()
-        print("detection_process pid ", detection_process.pid)
+    # for detection_process in detection_processes:
+    #     detection_process.start()
+    #     print("detection_process pid ", detection_process.pid)
+    for motion_process in motion_processes:
+        motion_process.start()
+        print("motion_process pid ", motion_process.pid)

-    app = Flask(__name__)
+    # app = Flask(__name__)

-    @app.route('/')
-    def index():
-        # return a multipart response
-        return Response(imagestream(),
-            mimetype='multipart/x-mixed-replace; boundary=frame')
-    def imagestream():
-        global DETECTED_OBJECTS
-        while True:
-            # max out at 5 FPS
-            time.sleep(0.2)
-            # make a copy of the current detected objects
-            detected_objects = DETECTED_OBJECTS.copy()
-            # make a copy of the current frame
-            frame = frame_arr.copy()
-            # convert to RGB for drawing
-            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-            # draw the bounding boxes on the screen
-            for obj in DETECTED_OBJECTS:
-                vis_util.draw_bounding_box_on_image_array(frame,
-                    obj['ymin'],
-                    obj['xmin'],
-                    obj['ymax'],
-                    obj['xmax'],
-                    color='red',
-                    thickness=2,
-                    display_str_list=["{}: {}%".format(obj['name'],int(obj['score']*100))],
-                    use_normalized_coordinates=False)
+    # @app.route('/')
+    # def index():
+    #     # return a multipart response
+    #     return Response(imagestream(),
+    #         mimetype='multipart/x-mixed-replace; boundary=frame')
+    # def imagestream():
+    #     global DETECTED_OBJECTS
+    #     while True:
+    #         # max out at 5 FPS
+    #         time.sleep(0.2)
+    #         # make a copy of the current detected objects
+    #         detected_objects = DETECTED_OBJECTS.copy()
+    #         # make a copy of the current frame
+    #         frame = frame_arr.copy()
+    #         # convert to RGB for drawing
+    #         frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+    #         # draw the bounding boxes on the screen
+    #         for obj in DETECTED_OBJECTS:
+    #             vis_util.draw_bounding_box_on_image_array(frame,
+    #                 obj['ymin'],
+    #                 obj['xmin'],
+    #                 obj['ymax'],
+    #                 obj['xmax'],
+    #                 color='red',
+    #                 thickness=2,
+    #                 display_str_list=["{}: {}%".format(obj['name'],int(obj['score']*100))],
+    #                 use_normalized_coordinates=False)

-            for region in regions:
-                cv2.rectangle(frame, (region['x_offset'], region['y_offset']),
-                    (region['x_offset']+region['size'], region['y_offset']+region['size']),
-                    (255,255,255), 2)
-            # convert back to BGR
-            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
-            # encode the image into a jpg
-            ret, jpg = cv2.imencode('.jpg', frame)
-            yield (b'--frame\r\n'
-                b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')
+    #         for region in regions:
+    #             cv2.rectangle(frame, (region['x_offset'], region['y_offset']),
+    #                 (region['x_offset']+region['size'], region['y_offset']+region['size']),
+    #                 (255,255,255), 2)
+    #         # convert back to BGR
+    #         frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
+    #         # encode the image into a jpg
+    #         ret, jpg = cv2.imencode('.jpg', frame)
+    #         yield (b'--frame\r\n'
+    #             b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n\r\n')

-    app.run(host='0.0.0.0', debug=False)
+    # app.run(host='0.0.0.0', debug=False)

     capture_process.join()
-    for detection_process in detection_processes:
-        detection_process.join()
-    object_parser.join()
+    # for detection_process in detection_processes:
+    #     detection_process.join()
+    for motion_process in motion_processes:
+        motion_process.join()
+    # object_parser.join()

 # convert shared memory array into numpy array
 def tonumpyarray(mp_arr):
@@ -307,6 +323,91 @@ def process_frames(shared_arr, shared_output_arr, shared_frame_time, shared_moti
             # copy the detected objects to the output array, filling the array when needed
             shared_output_arr[:] = objects + [0.0] * (60-len(objects))

+# do the actual motion detection
+def detect_motion(shared_arr, shared_frame_time, shared_motion, frame_shape, region_size, region_x_offset, region_y_offset):
+    # shape shared input array into frame for processing
+    arr = tonumpyarray(shared_arr).reshape(frame_shape)
+
+    no_frames_available = -1
+    avg_frame = None
+    last_motion = -1
+    while True:
+        now = datetime.datetime.now().timestamp()
+        # if it has been 30 seconds since the last motion, clear the flag
+        if last_motion > 0 and (now - last_motion) > 30:
+            last_motion = -1
+            shared_motion.value = 0
+            print("motion cleared")
+        # if there isn't a frame ready for processing
+        if shared_frame_time.value == 0.0:
+            # save the first time there were no frames available
+            if no_frames_available == -1:
+                no_frames_available = now
+            # if there haven't been any frames available in 30 seconds,
+            # sleep to avoid using so much cpu if the camera feed is down
+            if no_frames_available > 0 and (now - no_frames_available) > 30:
+                time.sleep(1)
+                print("sleeping because no frames have been available in a while")
+            else:
+                # rest a little bit to avoid maxing out the CPU
+                time.sleep(0.01)
+            continue
+
+        # we got a valid frame, so reset the timer
+        no_frames_available = -1
+
+        # if the frame is more than 0.5 second old, discard it
+        if (now - shared_frame_time.value) > 0.5:
+            # signal that we need a new frame
+            shared_frame_time.value = 0.0
+            # rest a little bit to avoid maxing out the CPU
+            time.sleep(0.01)
+            continue
+
+        # make a copy of the cropped frame
+        cropped_frame = arr[region_y_offset:region_y_offset+region_size, region_x_offset:region_x_offset+region_size].copy()
+        frame_time = shared_frame_time.value
+        # signal that the frame has been used so a new one will be ready
+        shared_frame_time.value = 0.0
+
+        # convert to grayscale
+        gray = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)
+        # convert to uint8
+        gray = (gray/256).astype('uint8')
+        # apply gaussian blur
+        gray = cv2.GaussianBlur(gray, (21, 21), 0)
+
+        if avg_frame is None:
+            avg_frame = gray.copy().astype("float")
+            continue
+
+        # look at the delta from the avg_frame
+        cv2.accumulateWeighted(gray, avg_frame, 0.5)
+        frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
+        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+
+        # dilate the thresholded image to fill in holes, then find contours
+        # on thresholded image
+        thresh = cv2.dilate(thresh, None, iterations=2)
+        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
+            cv2.CHAIN_APPROX_SIMPLE)
+        cnts = imutils.grab_contours(cnts)
+
+        # loop over the contours
+        for c in cnts:
+            # if the contour is too small, ignore it
+            if cv2.contourArea(c) < 50:
+                continue
+
+            print("motion_detected")
+            last_motion = now
+            shared_motion.value = 1
+
+            # compute the bounding box for the contour, draw it on the frame,
+            # and update the text
+            (x, y, w, h) = cv2.boundingRect(c)
+            cv2.rectangle(cropped_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+        cv2.imwrite("motion%d.png" % frame_time, cropped_frame)
+
 if __name__ == '__main__':
     mp.freeze_support()
     main()
\ No newline at end of file
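Note on the approach: detect_motion() above is the standard OpenCV running-average frame-differencing pipeline (grayscale, Gaussian blur, cv2.accumulateWeighted, absdiff, threshold, dilate, contour-area filter). A minimal standalone sketch of the same technique, assuming a plain cv2.VideoCapture(0) source and the illustrative 50-pixel contour-area cutoff, without the shared-memory plumbing from this patch:

    import cv2
    import imutils

    cap = cv2.VideoCapture(0)   # assumed local webcam, a stand-in for the capture process
    avg_frame = None

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # grayscale + blur to suppress sensor noise before differencing
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)

        # seed the running average with the first frame
        if avg_frame is None:
            avg_frame = gray.copy().astype("float")
            continue

        # fold the current frame into the background model, then diff against it
        cv2.accumulateWeighted(gray, avg_frame, 0.5)
        frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
        thresh = cv2.threshold(frame_delta, 25, 255, cv2.THRESH_BINARY)[1]

        # fill in holes, then keep only contours large enough to count as motion
        thresh = cv2.dilate(thresh, None, iterations=2)
        cnts = imutils.grab_contours(
            cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE))
        if any(cv2.contourArea(c) >= 50 for c in cnts):
            print("motion detected")

    cap.release()

The 0.5 weight passed to accumulateWeighted controls how quickly the background model absorbs change: lower values keep slow-moving objects flagged longer, higher values recover faster from lighting shifts.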
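For context on the shared-memory handoff the new motion workers rely on: the capture process writes pixels into one flat mp.Array, and each region's frame_time mp.Value doubles as a ready flag (0.0 means the frame has been consumed and a new one can be published). A rough sketch of that convention under assumptions not shown in this diff (a uint16 pixel buffer, consistent with the (gray/256) scaling above, and a hypothetical 1080p frame shape; the helper names here are illustrative, not the ones in detect_objects.py):

    import ctypes
    import datetime
    import multiprocessing as mp
    import numpy as np

    frame_shape = (1080, 1920, 3)                        # assumed resolution for illustration
    frame_size = frame_shape[0] * frame_shape[1] * frame_shape[2]

    shared_arr = mp.Array(ctypes.c_uint16, frame_size)   # pixel buffer shared by all processes
    shared_frame_time = mp.Value('d', 0.0)               # 0.0 means "no unread frame"

    def tonumpyarray(mp_arr):
        # view the shared buffer as a numpy array without copying
        return np.frombuffer(mp_arr.get_obj(), dtype=np.uint16)

    def publish_frame(bgr_frame):
        # capture side: copy the decoded frame into shared memory, then stamp it
        tonumpyarray(shared_arr).reshape(frame_shape)[:] = bgr_frame
        shared_frame_time.value = datetime.datetime.now().timestamp()

    def consume_region(size, x_offset, y_offset):
        # worker side: take a private copy of the region, then zero the stamp
        # to signal the frame has been used so a new one will be ready
        frame = tonumpyarray(shared_arr).reshape(frame_shape)
        cropped = frame[y_offset:y_offset+size, x_offset:x_offset+size].copy()
        shared_frame_time.value = 0.0
        return cropped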