From c1227789d91e182ab6b737137e2b9a2f6c19e034 Mon Sep 17 00:00:00 2001
From: Blake Blackshear
Date: Sat, 19 Dec 2020 08:22:31 -0600
Subject: [PATCH] allow runtime drawing settings for mjpeg and latest

---
 README.md                    | 25 +++++++++++++++++-
 frigate/http.py              | 24 ++++++++++++++---
 frigate/motion.py            |  2 +-
 frigate/object_processing.py | 51 ++++++++++++++++++++++--------------
 frigate/video.py             |  4 ++-
 5 files changed, 80 insertions(+), 26 deletions(-)

diff --git a/README.md b/README.md
index 5bfbdbfac..d564ee44c 100644
--- a/README.md
+++ b/README.md
@@ -719,7 +719,19 @@ A web server is available on port 5000 with the following endpoints.
 ### `/<camera_name>`
 An mjpeg stream for debugging. Keep in mind the mjpeg endpoint is for debugging only and will put additional load on the system when in use.
 
-You can access a higher resolution mjpeg stream by appending `h=height-in-pixels` to the endpoint. For example `http://localhost:5000/back?h=1080`. You can also increase the FPS by appending `fps=frame-rate` to the URL such as `http://localhost:5000/back?fps=10` or both with `?fps=10&h=1000`
+Accepts the following query string parameters:
+|param|Type|Description|
+|----|-----|--|
+|`fps`|int|Frame rate|
+|`h`|int|Height in pixels|
+|`bbox`|int|Show bounding boxes for detected objects (0 or 1)|
+|`timestamp`|int|Print the timestamp in the upper left (0 or 1)|
+|`zones`|int|Draw the zones on the image (0 or 1)|
+|`mask`|int|Overlay the mask on the image (0 or 1)|
+|`motion`|int|Draw blue boxes for areas with detected motion (0 or 1)|
+|`regions`|int|Draw green boxes for areas where object detection was run (0 or 1)|
+
+You can access a higher resolution mjpeg stream by appending `h=height-in-pixels` to the endpoint. For example `http://localhost:5000/back?h=1080`. You can also increase the FPS by appending `fps=frame-rate` to the URL such as `http://localhost:5000/back?fps=10`, or both with `?fps=10&h=1000`.
 
 ### `/<camera_name>/<label>/best.jpg[?h=300&crop=1]`
 The best snapshot for any object type. It is a full resolution image by default.
@@ -731,6 +743,17 @@ Example parameters:
 
 ### `/<camera_name>/latest.jpg[?h=300]`
 The most recent frame that frigate has finished processing. It is a full resolution image by default.
+Accepts the following query string parameters:
+|param|Type|Description|
+|----|-----|--|
+|`h`|int|Height in pixels|
+|`bbox`|int|Show bounding boxes for detected objects (0 or 1)|
+|`timestamp`|int|Print the timestamp in the upper left (0 or 1)|
+|`zones`|int|Draw the zones on the image (0 or 1)|
+|`mask`|int|Overlay the mask on the image (0 or 1)|
+|`motion`|int|Draw blue boxes for areas with detected motion (0 or 1)|
+|`regions`|int|Draw green boxes for areas where object detection was run (0 or 1)|
+
 
 Example parameters:
 - `h=300`: resizes the image to 300 pixels tall
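As a quick illustration of the documented parameters (not part of the patch itself), here is a minimal sketch of requesting an annotated snapshot from the `latest.jpg` endpoint; the host `localhost:5000` and camera name `back` are placeholders:

```python
# Minimal usage sketch (assumes Frigate is reachable at localhost:5000
# and a camera named "back" exists; both are placeholders).
import requests

params = {
    "h": 360,      # resize the frame to 360 pixels tall
    "bbox": 1,     # draw bounding boxes for detected objects
    "motion": 1,   # blue boxes for areas with detected motion
    "regions": 1,  # green boxes where object detection was run
}
resp = requests.get("http://localhost:5000/back/latest.jpg", params=params)
resp.raise_for_status()
with open("latest.jpg", "wb") as f:
    f.write(resp.content)  # a JPEG with the requested overlays drawn in
```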
diff --git a/frigate/http.py b/frigate/http.py
index cfa8db026..f3102cde0 100644
--- a/frigate/http.py
+++ b/frigate/http.py
@@ -208,18 +208,34 @@ def best(camera_name, label):
 def mjpeg_feed(camera_name):
     fps = int(request.args.get('fps', '3'))
     height = int(request.args.get('h', '360'))
+    draw_options = {
+        'bounding_boxes': request.args.get('bbox', type=int),
+        'timestamp': request.args.get('timestamp', type=int),
+        'zones': request.args.get('zones', type=int),
+        'mask': request.args.get('mask', type=int),
+        'motion_boxes': request.args.get('motion', type=int),
+        'regions': request.args.get('regions', type=int),
+    }
     if camera_name in current_app.frigate_config.cameras:
         # return a multipart response
-        return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height),
+        return Response(imagestream(current_app.detected_frames_processor, camera_name, fps, height, draw_options),
                         mimetype='multipart/x-mixed-replace; boundary=frame')
     else:
         return "Camera named {} not found".format(camera_name), 404
 
 @bp.route('/<camera_name>/latest.jpg')
 def latest_frame(camera_name):
+    draw_options = {
+        'bounding_boxes': request.args.get('bbox', type=int),
+        'timestamp': request.args.get('timestamp', type=int),
+        'zones': request.args.get('zones', type=int),
+        'mask': request.args.get('mask', type=int),
+        'motion_boxes': request.args.get('motion', type=int),
+        'regions': request.args.get('regions', type=int),
+    }
     if camera_name in current_app.frigate_config.cameras:
         # max out at specified FPS
-        frame = current_app.detected_frames_processor.get_current_frame(camera_name)
+        frame = current_app.detected_frames_processor.get_current_frame(camera_name, draw_options)
 
         if frame is None:
             frame = np.zeros((720,1280,3), np.uint8)
@@ -235,11 +251,11 @@ def latest_frame(camera_name):
     else:
         return "Camera named {} not found".format(camera_name), 404
 
-def imagestream(detected_frames_processor, camera_name, fps, height):
+def imagestream(detected_frames_processor, camera_name, fps, height, draw_options):
     while True:
         # max out at specified FPS
         time.sleep(1/fps)
-        frame = detected_frames_processor.get_current_frame(camera_name, draw=True)
+        frame = detected_frames_processor.get_current_frame(camera_name, draw_options)
 
         if frame is None:
             frame = np.zeros((height,int(height*16/9),3), np.uint8)
diff --git a/frigate/motion.py b/frigate/motion.py
index 723bc2b84..f167bb516 100644
--- a/frigate/motion.py
+++ b/frigate/motion.py
@@ -70,7 +70,7 @@ class MotionDetector():
             contour_area = cv2.contourArea(c)
             if contour_area > self.config.contour_area:
                 x, y, w, h = cv2.boundingRect(c)
-                motion_boxes.append((x*self.resize_factor, y*self.resize_factor, (x+w)*self.resize_factor, (y+h)*self.resize_factor))
+                motion_boxes.append((int(x*self.resize_factor), int(y*self.resize_factor), int((x+w)*self.resize_factor), int((y+h)*self.resize_factor)))
 
         if len(motion_boxes) > 0:
             self.motion_frame_count += 1
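A behavior worth calling out in the `http.py` change above: Flask's `request.args.get(name, type=int)` returns `None` when a parameter is absent or fails to parse as an int, so every overlay defaults to off unless explicitly requested. A standalone sketch of that default-off pattern (the `parse_draw_options` helper is illustrative, not code from the patch):

```python
# Standalone sketch of the default-off behavior used in mjpeg_feed and
# latest_frame above. Absent or unparsable params become None (falsy),
# which get_current_frame treats as "overlay disabled".
def parse_draw_options(args):
    def as_int(name):
        try:
            return int(args[name])
        except (KeyError, ValueError):
            return None  # mirrors Flask's request.args.get(name, type=int)

    return {
        'bounding_boxes': as_int('bbox'),
        'timestamp': as_int('timestamp'),
        'zones': as_int('zones'),
        'mask': as_int('mask'),
        'motion_boxes': as_int('motion'),
        'regions': as_int('regions'),
    }

options = parse_draw_options({'bbox': '1'})
assert options['bounding_boxes'] == 1
assert options['zones'] is None  # falsy, so zones are not drawn
```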
diff --git a/frigate/object_processing.py b/frigate/object_processing.py
index 31536e601..0ef30d7cd 100644
--- a/frigate/object_processing.py
+++ b/frigate/object_processing.py
@@ -250,15 +250,17 @@ class CameraState():
         self.previous_frame_id = None
         self.callbacks = defaultdict(lambda: [])
 
-    def get_current_frame(self, draw=False):
+    def get_current_frame(self, draw_options={}):
         with self.current_frame_lock:
             frame_copy = np.copy(self._current_frame)
             frame_time = self.current_frame_time
             tracked_objects = {k: v.to_dict() for k,v in self.tracked_objects.items()}
+            motion_boxes = self.motion_boxes.copy()
+            regions = self.regions.copy()
 
         frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_YUV2BGR_I420)
         # draw on the frame
-        if draw:
+        if draw_options.get('bounding_boxes'):
             # draw the bounding boxes on the frame
             for obj in tracked_objects.values():
                 thickness = 2
@@ -271,19 +273,28 @@ class CameraState():
                 # draw the bounding boxes on the frame
                 box = obj['box']
                 draw_box_with_label(frame_copy, box[0], box[1], box[2], box[3], obj['label'], f"{int(obj['score']*100)}% {int(obj['area'])}", thickness=thickness, color=color)
-                # draw the regions on the frame
-                region = obj['region']
-                cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 1)
-
-            if self.camera_config.snapshots.show_timestamp:
-                time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
-                cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
-
-            if self.camera_config.snapshots.draw_zones:
-                for name, zone in self.camera_config.zones.items():
-                    thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
-                    cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
+
+        if draw_options.get('regions'):
+            for region in regions:
+                cv2.rectangle(frame_copy, (region[0], region[1]), (region[2], region[3]), (0,255,0), 2)
+
+        if draw_options.get('timestamp'):
+            time_to_show = datetime.datetime.fromtimestamp(frame_time).strftime("%m/%d/%Y %H:%M:%S")
+            cv2.putText(frame_copy, time_to_show, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, fontScale=.8, color=(255, 255, 255), thickness=2)
+
+        if draw_options.get('zones'):
+            for name, zone in self.camera_config.zones.items():
+                thickness = 8 if any([name in obj['current_zones'] for obj in tracked_objects.values()]) else 2
+                cv2.drawContours(frame_copy, [zone.contour], -1, zone.color, thickness)
+
+        if draw_options.get('mask'):
+            mask_overlay = np.where(self.camera_config.mask==[0])
+            frame_copy[mask_overlay] = [0,0,0]
+
+        if draw_options.get('motion_boxes'):
+            for m_box in motion_boxes:
+                cv2.rectangle(frame_copy, (m_box[0], m_box[1]), (m_box[2], m_box[3]), (0,0,255), 2)
 
         return frame_copy
 
     def finished(self, obj_id):
@@ -292,8 +303,10 @@ class CameraState():
     def on(self, event_type: str, callback: Callable[[Dict], None]):
         self.callbacks[event_type].append(callback)
 
-    def update(self, frame_time, current_detections):
+    def update(self, frame_time, current_detections, motion_boxes, regions):
         self.current_frame_time = frame_time
+        self.motion_boxes = motion_boxes
+        self.regions = regions
         # get the new frame
         frame_id = f"{self.name}{frame_time}"
         current_frame = self.frame_manager.get(frame_id, self.camera_config.frame_shape_yuv)
@@ -453,8 +466,8 @@ class TrackedObjectProcessor(threading.Thread):
         else:
             return {}
 
-    def get_current_frame(self, camera, draw=False):
-        return self.camera_states[camera].get_current_frame(draw)
+    def get_current_frame(self, camera, draw_options={}):
+        return self.camera_states[camera].get_current_frame(draw_options)
 
     def run(self):
         while True:
@@ -463,13 +476,13 @@
                 break
 
             try:
-                camera, frame_time, current_tracked_objects = self.tracked_objects_queue.get(True, 10)
+                camera, frame_time, current_tracked_objects, motion_boxes, regions = self.tracked_objects_queue.get(True, 10)
             except queue.Empty:
                 continue
 
             camera_state = self.camera_states[camera]
 
-            camera_state.update(frame_time, current_tracked_objects)
+            camera_state.update(frame_time, current_tracked_objects, motion_boxes, regions)
 
             # update zone counts for each label
             # for each zone in the current camera
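Note how the reworked `get_current_frame` copies the frame, motion boxes, and regions while holding `current_frame_lock`, then performs the color conversion and drawing on those copies after the lock is released. A standalone sketch of that copy-under-lock pattern (the `FrameHolder` class is an illustrative stand-in for `CameraState`, not code from the patch):

```python
# Standalone sketch: take cheap copies while holding the lock, then do
# the expensive drawing on the copies outside the lock so the writer
# thread is never blocked on OpenCV calls.
import threading

import numpy as np

class FrameHolder:
    def __init__(self):
        self.lock = threading.Lock()
        self.frame = np.zeros((720, 1280, 3), np.uint8)
        self.motion_boxes = []
        self.regions = []

    def snapshot(self):
        with self.lock:
            frame_copy = np.copy(self.frame)
            motion_boxes = self.motion_boxes.copy()
            regions = self.regions.copy()
        # heavy work (cvtColor, rectangle drawing) would happen out here
        return frame_copy, motion_boxes, regions
```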
diff --git a/frigate/video.py b/frigate/video.py
index c0d344251..150ef4276 100755
--- a/frigate/video.py
+++ b/frigate/video.py
@@ -388,6 +388,8 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
                     region = calculate_region(frame_shape,
                         box[0], box[1],
                         box[2], box[3])
+
+                    regions.append(region)
 
                     selected_objects.extend(detect(object_detector, frame, model_shape, region, objects_to_track, object_filters, mask))
 
@@ -411,6 +413,6 @@ def process_frames(camera_name: str, frame_queue: mp.Queue, frame_shape, model_s
         else:
             fps_tracker.update()
             fps.value = fps_tracker.eps()
-            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects))
+            detected_objects_queue.put((camera_name, frame_time, object_tracker.tracked_objects, motion_boxes, regions))
             detection_fps.value = object_detector.fps.eps()
         frame_manager.close(f"{camera_name}{frame_time}")
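One coordination point when reviewing: the payload on `detected_objects_queue` grows from a 3-tuple to a 5-tuple, so the producer in `frigate/video.py` and the consumer in `TrackedObjectProcessor.run` must change together. A minimal sketch of the widened contract (the sample values are illustrative):

```python
# Minimal sketch of the widened queue contract: after this patch both
# ends must agree on the 5-tuple layout, or unpacking raises ValueError.
import queue

detected_objects_queue = queue.Queue()

# producer side, as in frigate/video.py
detected_objects_queue.put(
    ("back", 1608387751.0, {}, [(10, 10, 50, 50)], [(0, 0, 300, 300)]))

# consumer side, as in TrackedObjectProcessor.run
camera, frame_time, tracked_objects, motion_boxes, regions = \
    detected_objects_queue.get(True, 10)
```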