Mirror of https://github.com/blakeblackshear/frigate.git, synced 2025-08-13 13:47:36 +02:00
Enable different colours for different objects, helping to distinguish things in a busy scene.
This commit is contained in:
parent ba71927d53 → commit 2cabb784cd
@@ -101,6 +101,8 @@ RUN ln -s /coco_labels.txt /label_map.pbtext
 RUN (apt-get autoremove -y; \
     apt-get autoclean -y)
 
+RUN pip install -U matplotlib
+
 WORKDIR /opt/frigate/
 ADD frigate frigate/
 COPY detect_objects.py .
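matplotlib is added to the image purely for its colormap support. A quick interactive check of the API the later hunks rely on might look like this (a minimal sketch; nothing here beyond the colormap name comes from the commit):

    import matplotlib.pyplot as plt

    # Sample the 'tab20' qualitative colormap at 100 evenly spaced points,
    # as the Camera class does further down in this commit.
    cmap = plt.cm.get_cmap('tab20', 100)

    # Calling the colormap with an integer index returns RGBA floats in [0, 1].
    print(cmap(0))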
@@ -47,6 +47,7 @@ class PreppedQueueProcessor(threading.Thread):
             box = obj.bounding_box.flatten().tolist()
             parsed_objects.append({
                 'frame_time': frame['frame_time'],
+                'label_id': obj.label_id,
                 'name': str(self.labels[obj.label_id]),
                 'score': float(obj.score),
                 'xmin': int((box[0] * frame['region_size']) + frame['region_x_offset']),
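This hunk (presumably in frigate/object_detection.py, where PreppedQueueProcessor lives) carries the raw label_id through alongside the human-readable name, so downstream code can key a colormap on it. An illustrative entry in parsed_objects after this change, with made-up values:

    # Illustrative only: the field values below are hypothetical, not taken from the diff.
    parsed_object = {
        'frame_time': 1571324145.12,   # hypothetical capture timestamp
        'label_id': 0,                 # raw id from the detector, used later to pick a colour
        'name': 'person',              # str(self.labels[obj.label_id])
        'score': 0.87,                 # float(obj.score)
        'xmin': 320,                   # region-relative box scaled back to frame coordinates
        # ymin/xmax/ymax are computed the same way from the region size and offsets
    }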
@@ -81,8 +81,10 @@ class BestPersonFrame(threading.Thread):
         if not self.best_person is None and self.best_person['frame_time'] in recent_frames:
             best_frame = recent_frames[self.best_person['frame_time']]
 
-            label = "{}: {}% {}".format(self.best_person['name'],int(self.best_person['score']*100),int(self.best_person['area']))
             draw_box_with_label(best_frame, self.best_person['xmin'], self.best_person['ymin'],
-                self.best_person['xmax'], self.best_person['ymax'], label)
+                self.best_person['xmax'], self.best_person['ymax'],
+                self.best_person['name'], self.best_person['score'], self.best_person['area'],
+                (255, 0, 0)
+            )
 
             self.best_frame = cv2.cvtColor(best_frame, cv2.COLOR_RGB2BGR)
@@ -5,11 +5,11 @@ import cv2
 def tonumpyarray(mp_arr):
     return np.frombuffer(mp_arr.get_obj(), dtype=np.uint8)
 
-def draw_box_with_label(frame, x_min, y_min, x_max, y_max, label):
-    color = (255,0,0)
+def draw_box_with_label(frame, x_min, y_min, x_max, y_max, name, score, area, color):
+    label = "{}: {}% {}".format(name, int(score * 100), int(area))
     cv2.rectangle(frame, (x_min, y_min),
         (x_max, y_max),
-        color, 2)
+        color, 1)
     font_scale = 0.5
     font = cv2.FONT_HERSHEY_SIMPLEX
     # get the width and height of the text box
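The helper (presumably frigate/util.py) now builds the label text itself and takes the box colour from the caller instead of hard-coding blue. The hunk is truncated before the text is actually drawn; a self-contained sketch of how the full helper might look, where everything after the font setup is an assumption rather than code from this commit:

    import cv2

    def draw_box_with_label(frame, x_min, y_min, x_max, y_max, name, score, area, color):
        # Label text is assembled from the raw fields, e.g. "person: 87% 12000".
        label = "{}: {}% {}".format(name, int(score * 100), int(area))
        # Bounding box in the caller-supplied colour, 1 px thick as in the diff.
        cv2.rectangle(frame, (x_min, y_min), (x_max, y_max), color, 1)
        font_scale = 0.5
        font = cv2.FONT_HERSHEY_SIMPLEX
        # Assumed layout: measure the text, draw a filled strip above the box,
        # then render the label on top of it.
        (text_width, text_height), baseline = cv2.getTextSize(label, font, font_scale, 1)
        cv2.rectangle(frame, (x_min, y_min - text_height - baseline),
                      (x_min + text_width, y_min), color, cv2.FILLED)
        cv2.putText(frame, label, (x_min, y_min - baseline), font, font_scale, (0, 0, 0), 1)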
@@ -11,6 +11,7 @@ from . util import tonumpyarray, draw_box_with_label
 from . object_detection import FramePrepper
 from . objects import ObjectCleaner, BestPersonFrame
 from . mqtt import MqttObjectPublisher
+import matplotlib.pyplot as plt
 
 # Stores 2 seconds worth of frames when motion is detected so they can be used for other threads
 class FrameTracker(threading.Thread):
@@ -207,6 +208,7 @@ class Camera:
         self.mask = np.zeros((self.frame_shape[0], self.frame_shape[1], 1), np.uint8)
         self.mask[:] = 255
 
+        self.color_map = plt.cm.get_cmap('tab20', 100)
 
     def start_or_restart_capture(self):
         if not self.ffmpeg_process is None:
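The Camera class (presumably in frigate/video.py) keeps a single resampled 'tab20' colormap so every label id maps to a stable colour. A minimal sketch of what the colormap call returns and how it becomes a cv2-friendly tuple; the label id is hypothetical and the conversion mirrors the drawing hunk below:

    import matplotlib.pyplot as plt

    color_map = plt.cm.get_cmap('tab20', 100)

    label_id = 17                          # hypothetical detector label id
    rgba = color_map(label_id)             # four floats in [0, 1], stable for a given id
    color = tuple(int(round(255 * c)) for c in rgba[:3])
    print(color)                           # three ints in 0-255, the same triple every time for this id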
@@ -316,8 +318,9 @@ class Camera:
 
         # draw the bounding boxes on the screen
         for obj in detected_objects:
-            label = "{}: {}% {}".format(obj['name'],int(obj['score']*100),int(obj['area']))
-            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'], label)
+            color = tuple(int(round(255 * c)) for c in self.color_map(obj['label_id'])[:3])
+            draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'],
+                obj['name'], obj['score'], obj['area'], color)
 
         for region in self.regions:
             color = (255,255,255)
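Putting the pieces together: each detected object's label_id picks the colour, and the raw name/score/area are handed to the helper, which formats the text itself. A usage sketch outside the Camera class (the frame and the object fields are placeholders, not values from the commit):

    import numpy as np
    import matplotlib.pyplot as plt
    from frigate.util import draw_box_with_label  # same helper the video module imports

    # Placeholder inputs standing in for a real frame and a real detection.
    frame = np.zeros((1080, 1920, 3), dtype=np.uint8)
    obj = {'label_id': 0, 'name': 'person', 'score': 0.87, 'area': 12000,
           'xmin': 100, 'ymin': 200, 'xmax': 300, 'ymax': 600}

    # One colour per label id, sampled from the resampled tab20 colormap.
    color_map = plt.cm.get_cmap('tab20', 100)
    color = tuple(int(round(255 * c)) for c in color_map(obj['label_id'])[:3])

    draw_box_with_label(frame, obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax'],
                        obj['name'], obj['score'], obj['area'], color)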