From 8ff9a982b61e8af327cdb981e3ae802089f9ab48 Mon Sep 17 00:00:00 2001
From: blakeblackshear
Date: Mon, 18 Mar 2019 07:48:04 -0500
Subject: [PATCH] start the detection process

---
 detect_objects.py           | 16 +++++++++++++++-
 frigate/object_detection.py | 23 +++++++++++------------
 2 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/detect_objects.py b/detect_objects.py
index 79ae829d6..f93a22669 100644
--- a/detect_objects.py
+++ b/detect_objects.py
@@ -108,6 +108,8 @@ def main():
     detection_prep_processes = []
     motion_processes = []
     for region in regions:
+        # possibly try putting these on threads and putting prepped
+        # frames in a queue
         detection_prep_process = mp.Process(target=prep_for_detection, args=(shared_arr,
             shared_frame_time,
             frame_lock, frame_ready,
@@ -131,6 +133,14 @@ def main():
         motion_process.daemon = True
         motion_processes.append(motion_process)
 
+    # create a process for object detection
+    detection_process = mp.Process(target=detect_objects, args=(
+        prepped_frame_array, prepped_frame_time,
+        prepped_frame_lock, prepped_frame_ready,
+        prepped_frame_box, object_queue, DEBUG
+    ))
+    detection_process.daemon = True
+
     # start a thread to store recent motion frames for processing
     frame_tracker = FrameTracker(frame_arr, shared_frame_time, frame_ready, frame_lock,
         recent_motion_frames, motion_changed, [region['motion_detected'] for region in regions])
@@ -176,11 +186,14 @@ def main():
     capture_process.start()
     print("capture_process pid ", capture_process.pid)
 
-    # start the object detection processes
+    # start the object detection prep processes
    for detection_prep_process in detection_prep_processes:
         detection_prep_process.start()
         print("detection_prep_process pid ", detection_prep_process.pid)
 
+    detection_process.start()
+    print("detection_process pid ", detection_process.pid)
+
     # start the motion detection processes
     # for motion_process in motion_processes:
     #     motion_process.start()
@@ -253,6 +266,7 @@ def main():
         detection_prep_process.join()
     for motion_process in motion_processes:
         motion_process.join()
+    detection_process.join()
     frame_tracker.join()
     best_person_frame.join()
     object_parser.join()
diff --git a/frigate/object_detection.py b/frigate/object_detection.py
index 3037f803e..b56d527a7 100644
--- a/frigate/object_detection.py
+++ b/frigate/object_detection.py
@@ -21,32 +21,34 @@ def ReadLabelFile(file_path):
 
 def detect_objects(prepped_frame_array, prepped_frame_time, prepped_frame_lock,
     prepped_frame_ready, prepped_frame_box, object_queue, debug):
+    prepped_frame_np = tonumpyarray(prepped_frame_array)
     # Load the edgetpu engine and labels
     engine = DetectionEngine(PATH_TO_CKPT)
     labels = ReadLabelFile(PATH_TO_LABELS)
 
-    prepped_frame_time = 0.0
+    frame_time = 0.0
+    region_box = [0,0,0,0]
     while True:
         with prepped_frame_ready:
             prepped_frame_ready.wait()
 
         # make a copy of the cropped frame
         with prepped_frame_lock:
-            prepped_frame_copy = prepped_frame_array.copy()
-            prepped_frame_time = prepped_frame_time.value
-            region_box = prepped_frame_box.value
+            prepped_frame_copy = prepped_frame_np.copy()
+            frame_time = prepped_frame_time.value
+            region_box[:] = prepped_frame_box
 
         # Actual detection.
-        ans = engine.DetectWithInputTensor(prepped_frame_copy, threshold=0.5, top_k=3)
-
+        objects = engine.DetectWithInputTensor(prepped_frame_copy, threshold=0.5, top_k=3)
+        # print(engine.get_inference_time())
         # put detected objects in the queue
-        if ans:
+        if objects:
             # assumes square
             region_size = region_box[3]-region_box[0]
-            for obj in ans:
+            for obj in objects:
                 box = obj.bounding_box.flatten().tolist()
                 object_queue.append({
-                    'frame_time': prepped_frame_time,
+                    'frame_time': frame_time,
                     'name': str(labels[obj.label_id]),
                     'score': float(obj.score),
                     'xmin': int((box[0] * region_size) + region_box[0]),
@@ -74,7 +76,6 @@ def prep_for_detection(shared_whole_frame_array, shared_frame_time, frame_lock,
         with frame_ready:
             # if there isnt a frame ready for processing or it is old, wait for a new frame
             if shared_frame_time.value == frame_time or (now - shared_frame_time.value) > 0.5:
-                print("waiting...")
                 frame_ready.wait()
 
         # make a copy of the cropped frame
@@ -82,8 +83,6 @@ def prep_for_detection(shared_whole_frame_array, shared_frame_time, frame_lock,
             cropped_frame = shared_whole_frame[region_y_offset:region_y_offset+region_size, region_x_offset:region_x_offset+region_size].copy()
             frame_time = shared_frame_time.value
 
-        print("grabbed frame " + str(frame_time))
-
         # convert to RGB
         cropped_frame_rgb = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2RGB)
         # Resize to 300x300 if needed
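
Note on the pattern (not part of the patch): detect_objects is the consumer side of a
shared-memory handoff. A prep process writes the 300x300 RGB crop into
prepped_frame_array under prepped_frame_lock and notifies prepped_frame_ready; the
detector wakes, copies the frame, its timestamp, and the region box while holding the
lock, then runs inference on the copy so the lock is released during the comparatively
slow edgetpu call. Below is a minimal self-contained sketch of that handoff, with
hypothetical names (fake_prep, fake_detect, FRAME_SHAPE) standing in for frigate's,
and to_numpy doing roughly what frigate's tonumpyarray helper does:

import multiprocessing as mp
import time
import numpy as np

# illustrative size; frigate preps 300x300 RGB crops for the detection model
FRAME_SHAPE = (300, 300, 3)

def to_numpy(shared_arr):
    # view the shared memory as a flat uint8 numpy array
    return np.frombuffer(shared_arr, dtype=np.uint8)

def fake_prep(shared_arr, shared_time, lock, ready):
    # producer: write a new "prepped frame" under the lock, then signal
    frame_np = to_numpy(shared_arr)
    while True:
        frame = np.random.randint(0, 255, FRAME_SHAPE, dtype=np.uint8)
        with lock:
            frame_np[:] = frame.ravel()
            shared_time.value = time.time()
        with ready:
            ready.notify_all()
        time.sleep(0.1)

def fake_detect(shared_arr, shared_time, lock, ready):
    # consumer: the same wait/copy pattern detect_objects uses above; the
    # lock is held only long enough to copy the frame and its timestamp
    frame_np = to_numpy(shared_arr)
    while True:
        with ready:
            ready.wait()
        with lock:
            frame_copy = frame_np.copy()
            frame_time = shared_time.value
        # inference would run on frame_copy here, outside the lock
        print('got frame from', frame_time, frame_copy.sum())

if __name__ == '__main__':
    shared_arr = mp.Array('B', int(np.prod(FRAME_SHAPE)), lock=False)
    shared_time = mp.Value('d', 0.0)
    lock = mp.Lock()
    ready = mp.Condition()
    for f in (fake_detect, fake_prep):
        mp.Process(target=f, args=(shared_arr, shared_time, lock, ready), daemon=True).start()
    time.sleep(1)

Because the detector only ever copies the most recent frame, anything written while
inference is still running is overwritten rather than queued, which keeps detection
latency bounded at the cost of dropped frames.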