adjust debugging params and alpha for background averaging

This commit is contained in:
blakeblackshear 2019-02-19 06:47:00 -06:00
parent 290150603e
commit b6547de82c

View File

@@ -247,7 +247,8 @@ def main():
                 region['motion_detected'],
                 objects_changed,
                 frame_shape,
-                region['size'], region['x_offset'], region['y_offset']))
+                region['size'], region['x_offset'], region['y_offset'],
+                False))
         detection_process.daemon = True
         detection_processes.append(detection_process)
@@ -381,7 +382,8 @@ def fetch_frames(shared_arr, shared_frame_time, frame_lock, frame_ready, frame_s
 # do the actual object detection
 def process_frames(shared_arr, shared_output_arr, shared_frame_time, frame_lock, frame_ready,
-    motion_detected, objects_changed, frame_shape, region_size, region_x_offset, region_y_offset):
+    motion_detected, objects_changed, frame_shape, region_size, region_x_offset, region_y_offset,
+    debug):
     debug = True
     # shape shared input array into frame for processing
     arr = tonumpyarray(shared_arr).reshape(frame_shape)
@@ -416,7 +418,7 @@ def process_frames(shared_arr, shared_output_arr, shared_frame_time, frame_lock,
             # convert to RGB
             cropped_frame_rgb = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2RGB)
             # do the object detection
-            objects = detect_objects(cropped_frame_rgb, sess, detection_graph, region_size, region_x_offset, region_y_offset, True)
+            objects = detect_objects(cropped_frame_rgb, sess, detection_graph, region_size, region_x_offset, region_y_offset, debug)
             # copy the detected objects to the output array, filling the array when needed
             shared_output_arr[:] = objects + [0.0] * (60-len(objects))
             with objects_changed:
@@ -461,7 +463,7 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
             continue
         # look at the delta from the avg_frame
-        cv2.accumulateWeighted(gray, avg_frame, 0.5)
+        cv2.accumulateWeighted(gray, avg_frame, 0.01)
         frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
         thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
@@ -504,7 +506,7 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
         else:
             motion_frames = 0
-        if debug and motion_frames > 0:
+        if debug and motion_frames >= 3:
             cv2.imwrite("/lab/debug/motion-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), cropped_frame)
if __name__ == '__main__': if __name__ == '__main__':