make motion detection less sensitive to rain

reduces the significance of fast moving objects and prioritizes objects that overlap in location across multiple frames
blakeblackshear 2019-02-20 06:20:52 -06:00
parent f54fa2e56c
commit 496b96b4f7
2 changed files with 34 additions and 15 deletions
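
In outline, the change replaces a single-frame threshold on the current frame delta with a running average of recent deltas, so a difference has to persist across frames before it counts as motion. A minimal standalone sketch of that idea (illustrative only; the helper name, the 25/0.2 constants, and the calling convention are assumptions, not the project's actual interface):

import cv2

def persistent_motion(gray, avg_frame, avg_delta, alpha=0.2, thresh_val=25):
    """Sketch of the averaged-delta idea: gray is the current grayscale frame,
    avg_frame is a float running average of the scene, avg_delta is a float
    running average of recent frame deltas (updated in place)."""
    # delta between the current frame and the long-running background average
    frame_delta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))

    # fold the current delta into a short running average of deltas; a raindrop
    # that only shows up for one frame contributes just `alpha` of its brightness
    cv2.accumulateWeighted(frame_delta, avg_delta, alpha)

    # keep only pixels that differ in the current frame...
    current_thresh = cv2.threshold(frame_delta, thresh_val, 255, cv2.THRESH_BINARY)[1]
    avg_delta_image = cv2.convertScaleAbs(avg_delta)
    avg_delta_image[current_thresh == 0] = 0

    # ...and that have also been differing over the last several frames
    return cv2.threshold(avg_delta_image, thresh_val, 255, cv2.THRESH_BINARY)[1]

A caller would seed avg_frame and avg_delta with float copies of early frames and feed each new grayscale frame through this function; any nonzero pixels in the returned image indicate persistent motion.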


@@ -44,7 +44,7 @@ Access the mjpeg stream at http://localhost:5000
 - [x] Add last will and availability for MQTT
 - [ ] Add ability to turn detection on and off via MQTT
 - [ ] Add a max size for motion and objects (height/width > 1.5, total area > 1500 and < 100,000)
-- [ ] Make motion less sensitive to rain
+- [x] Make motion less sensitive to rain
 - [x] Use Events or Conditions to signal between threads rather than polling a value
 - [ ] Implement a debug option to save images with detected objects
 - [ ] Only report if x% of the recent frames have a person to avoid single frame false positives (maybe take an average of the person scores in the past x frames?)
@@ -53,7 +53,7 @@ Access the mjpeg stream at http://localhost:5000
 - [ ] Merge bounding boxes that span multiple regions
 - [ ] Switch to a config file
 - [ ] Allow motion regions to be different than object detection regions
-- [ ] Add motion detection masking
+- [x] Add motion detection masking
 - [x] Change color of bounding box if motion detected
 - [x] Look for a subset of object types
 - [ ] Try and reduce CPU usage by simplifying the tensorflow model to just include the objects we care about


@@ -434,17 +434,11 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
     arr = tonumpyarray(shared_arr).reshape(frame_shape)
     avg_frame = None
-    last_motion = -1
+    avg_delta = None
     frame_time = 0.0
     motion_frames = 0
     while True:
         now = datetime.datetime.now().timestamp()

-        # if it has been long enough since the last motion, clear the flag
-        if last_motion > 0 and (now - last_motion) > 2:
-            last_motion = -1
-            motion_detected.clear()
-            with motion_changed:
-                motion_changed.notify_all()

         with frame_ready:
             # if there isnt a frame ready for processing or it is old, wait for a signal
@@ -459,7 +453,7 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
         # convert to grayscale
         gray = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)

-        # apply image mask
+        # apply image mask to remove areas from motion detection
         gray[mask] = [255]

         # apply gaussian blur
@@ -470,15 +464,33 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
             continue

         # look at the delta from the avg_frame
-        cv2.accumulateWeighted(gray, avg_frame, 0.01)
         frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg_frame))
-        thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+
+        if avg_delta is None:
+            avg_delta = frameDelta.copy().astype("float")
+
+        # compute the average delta over the past few frames
+        # the alpha value can be modified to configure how sensitive the motion detection is
+        # higher values mean the current frame impacts the delta a lot, and a single raindrop may
+        # put it over the edge, too low and a fast moving person wont be detected as motion
+        # this also assumes that a person is in the same location across more than a single frame
+        cv2.accumulateWeighted(frameDelta, avg_delta, 0.2)
+
+        # compute the threshold image for the current frame
+        current_thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
+
+        # black out everything in the avg_delta where there isnt motion in the current frame
+        avg_delta_image = cv2.convertScaleAbs(avg_delta)
+        avg_delta_image[np.where(current_thresh==[0])] = [0]
+
+        # then look for deltas above the threshold, but only in areas where there is a delta
+        # in the current frame. this prevents deltas from previous frames from being included
+        thresh = cv2.threshold(avg_delta_image, 25, 255, cv2.THRESH_BINARY)[1]

         # dilate the thresholded image to fill in holes, then find contours
         # on thresholded image
         thresh = cv2.dilate(thresh, None, iterations=2)
-        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
-            cv2.CHAIN_APPROX_SIMPLE)
+        cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
         cnts = imutils.grab_contours(cnts)

         # if there are no contours, there is no motion
@@ -506,15 +518,22 @@ def detect_motion(shared_arr, shared_frame_time, frame_lock, frame_ready, motion
             motion_frames += 1
             # if there have been enough consecutive motion frames, report motion
             if motion_frames >= 3:
+                # only average in the current frame if the difference persists for at least 3 frames
+                cv2.accumulateWeighted(gray, avg_frame, 0.01)
                 motion_detected.set()
                 with motion_changed:
                     motion_changed.notify_all()
-                last_motion = now
         else:
+            # when no motion, just keep averaging the frames together
+            cv2.accumulateWeighted(gray, avg_frame, 0.01)
             motion_frames = 0
+            motion_detected.clear()
+            with motion_changed:
+                motion_changed.notify_all()

         if debug and motion_frames >= 3:
             cv2.imwrite("/lab/debug/motion-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), cropped_frame)
+            cv2.imwrite("/lab/debug/avg_delta-{}-{}-{}.jpg".format(region_x_offset, region_y_offset, datetime.datetime.now().timestamp()), avg_delta_image)

 if __name__ == '__main__':
     mp.freeze_support()
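
For intuition on the alpha value discussed in the comments above: cv2.accumulateWeighted(src, dst, alpha) computes dst = (1 - alpha) * dst + alpha * src, so a bright delta has to persist for a few frames before the averaged delta crosses the 25 threshold. A rough single-pixel illustration with made-up values (the real code averages whole delta images, not scalars):

alpha = 0.2

# a raindrop: a delta of 100 for one frame, then gone
avg = 0.0
for delta in [100, 0, 0]:
    avg = (1 - alpha) * avg + alpha * delta   # peaks at 20.0, then decays
# never reaches the 25 threshold, so no motion is reported

# a person walking through the region: a delta of 100 that persists
avg = 0.0
for delta in [100, 100, 100]:
    avg = (1 - alpha) * avg + alpha * delta   # 20.0 -> 36.0 -> 48.8
# crosses 25 by the second frame, so motion is reported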