Mirror of https://github.com/blakeblackshear/frigate.git, synced 2024-11-21 19:07:46 +01:00
Refactor to simplify support for additional detector types (#3656)
* Refactor EdgeTPU and CPU model handling to detector submodules.
* Fix selecting the correct detection device type from the config.
* Remove detector type check when creating ObjectDetectProcess.
* Fixes after rebasing to 0.11.
* Add init file to detector folder.
* Rename to detect_api.
* Add unit test for LocalObjectDetector class.
* Add configuration for model inputs: support transforming detection regions to RGB or BGR, and support specifying the input tensor shape. The tensor shape has a standard format ["BHWC"] when handed to the detector, but can be transformed in the detector to match the model shape using the model input_tensor config.
* Add documentation for new model config parameters.
* Add input tensor transpose to LocalObjectDetector.
* Change the model input tensor config to use an enumeration.
* Updates for model config documentation.

Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
This commit is contained in:
  parent 1bc9efd529
  commit 4383b883c0
benchmark.py
@@ -3,10 +3,16 @@ from statistics import mean
 import multiprocessing as mp
 import numpy as np
 import datetime
-from frigate.edgetpu import LocalObjectDetector, EdgeTPUProcess, RemoteObjectDetector, load_labels
+from frigate.config import DetectorTypeEnum
+from frigate.object_detection import (
+    LocalObjectDetector,
+    ObjectDetectProcess,
+    RemoteObjectDetector,
+    load_labels,
+)
 
-my_frame = np.expand_dims(np.full((300,300,3), 1, np.uint8), axis=0)
-labels = load_labels('/labelmap.txt')
+my_frame = np.expand_dims(np.full((300, 300, 3), 1, np.uint8), axis=0)
+labels = load_labels("/labelmap.txt")
 
 ######
 # Minimal same process runner
@@ -39,20 +45,23 @@ labels = load_labels('/labelmap.txt')
 
 
 def start(id, num_detections, detection_queue, event):
-  object_detector = RemoteObjectDetector(str(id), '/labelmap.txt', detection_queue, event)
-  start = datetime.datetime.now().timestamp()
+    object_detector = RemoteObjectDetector(
+        str(id), "/labelmap.txt", detection_queue, event
+    )
+    start = datetime.datetime.now().timestamp()
 
-  frame_times = []
-  for x in range(0, num_detections):
-    start_frame = datetime.datetime.now().timestamp()
-    detections = object_detector.detect(my_frame)
-    frame_times.append(datetime.datetime.now().timestamp()-start_frame)
+    frame_times = []
+    for x in range(0, num_detections):
+        start_frame = datetime.datetime.now().timestamp()
+        detections = object_detector.detect(my_frame)
+        frame_times.append(datetime.datetime.now().timestamp() - start_frame)
 
-  duration = datetime.datetime.now().timestamp()-start
-  object_detector.cleanup()
-  print(f"{id} - Processed for {duration:.2f} seconds.")
-  print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
-  print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
+    duration = datetime.datetime.now().timestamp() - start
+    object_detector.cleanup()
+    print(f"{id} - Processed for {duration:.2f} seconds.")
+    print(f"{id} - FPS: {object_detector.fps.eps():.2f}")
+    print(f"{id} - Average frame processing time: {mean(frame_times)*1000:.2f}ms")
 
 ######
 # Separate process runner
@@ -71,23 +80,29 @@ camera_processes = []
 
 events = {}
 for x in range(0, 10):
-  events[str(x)] = mp.Event()
+    events[str(x)] = mp.Event()
 detection_queue = mp.Queue()
-edgetpu_process_1 = EdgeTPUProcess(detection_queue, events, 'usb:0')
-edgetpu_process_2 = EdgeTPUProcess(detection_queue, events, 'usb:1')
+edgetpu_process_1 = ObjectDetectProcess(
+    detection_queue, events, DetectorTypeEnum.edgetpu, "usb:0"
+)
+edgetpu_process_2 = ObjectDetectProcess(
+    detection_queue, events, DetectorTypeEnum.edgetpu, "usb:1"
+)
 
 for x in range(0, 10):
-  camera_process = mp.Process(target=start, args=(x, 300, detection_queue, events[str(x)]))
-  camera_process.daemon = True
-  camera_processes.append(camera_process)
+    camera_process = mp.Process(
+        target=start, args=(x, 300, detection_queue, events[str(x)])
+    )
+    camera_process.daemon = True
+    camera_processes.append(camera_process)
 
 start_time = datetime.datetime.now().timestamp()
 
 for p in camera_processes:
-  p.start()
+    p.start()
 
 for p in camera_processes:
-  p.join()
+    p.join()
 
-duration = datetime.datetime.now().timestamp()-start_time
-print(f"Total - Processed for {duration:.2f} seconds.")
+duration = datetime.datetime.now().timestamp() - start_time
+print(f"Total - Processed for {duration:.2f} seconds.")
docs/docs/configuration/advanced.md
@@ -23,7 +23,7 @@ Examples of available modules are:
 
 - `frigate.app`
 - `frigate.mqtt`
-- `frigate.edgetpu`
+- `frigate.object_detection`
 - `frigate.zeroconf`
 - `detector.<detector_name>`
 - `watchdog.<camera_name>`
@@ -50,6 +50,30 @@ database:
 
 If using a custom model, the width and height will need to be specified.
 
+Custom models may also require different input tensor formats. The colorspace conversion supports sending RGB, BGR, or YUV frames to the object detector. The input tensor shape parameter is an enumeration that must match what is specified by the model.
+
+| Tensor Dimension | Description    |
+| :--------------: | -------------- |
+| N                | Batch Size     |
+| H                | Model Height   |
+| W                | Model Width    |
+| C                | Color Channels |
+
+| Available Input Tensor Shapes |
+| :---------------------------: |
+| "nhwc"                        |
+| "nchw"                        |
+
+```yaml
+# Optional: model config
+model:
+  path: /path/to/model
+  width: 320
+  height: 320
+  input_tensor: "nhwc"
+  input_pixel_format: "bgr"
+```
+
 The labelmap can be customized to your needs. A common reason to do this is to combine multiple object types that are easily confused when you don't need to be as granular such as car/truck. By default, truck is renamed to car because they are often confused. You cannot add new object types, but you can change the names of existing objects in the model.
 
 ```yaml
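For context on what the `input_tensor` option does under the hood: converting from `"nhwc"` to `"nchw"` is a single axis permutation of the detection region before it is handed to the model. A minimal numpy sketch of the idea (illustrative only, not code from this commit):

```python
import numpy as np

# One 320x320 RGB region in "nhwc" order: (batch, height, width, channels).
nhwc = np.zeros((1, 320, 320, 3), dtype=np.uint8)

# Permute axes to "nchw" (batch, channels, height, width) for channel-first models.
nchw = np.transpose(nhwc, (0, 3, 1, 2))
assert nchw.shape == (1, 3, 320, 320)
```

This is the same permutation, `(0, 3, 1, 2)`, returned by `tensor_transform` in `frigate/object_detection.py` later in this diff.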
@@ -71,6 +95,7 @@ Note that if you rename objects in the labelmap, you will also need to update yo
 Included with Frigate is a build of ffmpeg that works for the vast majority of users. However, there exist some hardware setups which have incompatibilities with the included build. In this case, a docker volume mapping can be used to overwrite the included ffmpeg build with an ffmpeg build that works for your specific hardware setup.
 
 To do this:
 
 1. Download your ffmpeg build and uncompress to a folder on the host (let's use `/home/appdata/frigate/custom-ffmpeg` for this example).
 2. Update your docker-compose or docker CLI to include `'/home/appdata/frigate/custom-ffmpeg':'/usr/lib/btbn-ffmpeg':'ro'` in the volume mappings.
 3. Restart frigate and the custom version will be used if the mapping was done correctly.
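For step 2, the same mapping in docker-compose form might look like this sketch (service name assumed; paths taken from the example above):

```yaml
services:
  frigate:
    volumes:
      - /home/appdata/frigate/custom-ffmpeg:/usr/lib/btbn-ffmpeg:ro
```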
docs/docs/configuration/index.md
@@ -97,6 +97,12 @@ model:
   width: 320
   # Required: Object detection model input height (default: shown below)
   height: 320
+  # Optional: Object detection model input colorspace
+  # Valid values are rgb, bgr, or yuv. (default: shown below)
+  input_pixel_format: rgb
+  # Optional: Object detection model input tensor format
+  # Valid values are nhwc or nchw (default: shown below)
+  input_tensor: "nhwc"
   # Optional: Label name modifications. These are merged into the standard labelmap.
   labelmap:
     2: vehicle
frigate/app.py
@@ -15,7 +15,7 @@ from playhouse.sqliteq import SqliteQueueDatabase
 
 from frigate.config import DetectorTypeEnum, FrigateConfig
 from frigate.const import CACHE_DIR, CLIPS_DIR, RECORD_DIR
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.object_detection import ObjectDetectProcess
 from frigate.events import EventCleanup, EventProcessor
 from frigate.http import create_app
 from frigate.log import log_process, root_configurer
@@ -40,7 +40,7 @@ class FrigateApp:
     def __init__(self) -> None:
         self.stop_event: Event = mp.Event()
         self.detection_queue: Queue = mp.Queue()
-        self.detectors: dict[str, EdgeTPUProcess] = {}
+        self.detectors: dict[str, ObjectDetectProcess] = {}
         self.detection_out_events: dict[str, Event] = {}
         self.detection_shms: list[mp.shared_memory.SharedMemory] = []
         self.log_queue: Queue = mp.Queue()
@@ -178,8 +178,6 @@ class FrigateApp:
         self.mqtt_relay.start()
 
     def start_detectors(self) -> None:
-        model_path = self.config.model.path
-        model_shape = (self.config.model.height, self.config.model.width)
         for name in self.config.cameras.keys():
             self.detection_out_events[name] = mp.Event()
 
@@ -203,26 +201,15 @@ class FrigateApp:
             self.detection_shms.append(shm_out)
 
         for name, detector in self.config.detectors.items():
-            if detector.type == DetectorTypeEnum.cpu:
-                self.detectors[name] = EdgeTPUProcess(
-                    name,
-                    self.detection_queue,
-                    self.detection_out_events,
-                    model_path,
-                    model_shape,
-                    "cpu",
-                    detector.num_threads,
-                )
-            if detector.type == DetectorTypeEnum.edgetpu:
-                self.detectors[name] = EdgeTPUProcess(
-                    name,
-                    self.detection_queue,
-                    self.detection_out_events,
-                    model_path,
-                    model_shape,
-                    detector.device,
-                    detector.num_threads,
-                )
+            self.detectors[name] = ObjectDetectProcess(
+                name,
+                self.detection_queue,
+                self.detection_out_events,
+                self.config.model,
+                detector.type,
+                detector.device,
+                detector.num_threads,
+            )
 
     def start_detected_frames_processor(self) -> None:
         self.detected_frames_processor = TrackedObjectProcessor(
@@ -253,7 +240,6 @@ class FrigateApp:
         logger.info(f"Output process started: {output_processor.pid}")
 
     def start_camera_processors(self) -> None:
-        model_shape = (self.config.model.height, self.config.model.width)
         for name, config in self.config.cameras.items():
             if not self.config.cameras[name].enabled:
                 logger.info(f"Camera processor not started for disabled camera {name}")
@@ -265,7 +251,7 @@ class FrigateApp:
                 args=(
                     name,
                     config,
-                    model_shape,
+                    self.config.model,
                     self.config.model.merged_labelmap,
                     self.detection_queue,
                     self.detection_out_events[name],
frigate/config.py
@@ -718,6 +718,17 @@ class DatabaseConfig(FrigateBaseModel):
     )
 
 
+class PixelFormatEnum(str, Enum):
+    rgb = "rgb"
+    bgr = "bgr"
+    yuv = "yuv"
+
+
+class InputTensorEnum(str, Enum):
+    nchw = "nchw"
+    nhwc = "nhwc"
+
+
 class ModelConfig(FrigateBaseModel):
     path: Optional[str] = Field(title="Custom Object detection model path.")
     labelmap_path: Optional[str] = Field(title="Label map for custom object detector.")
@@ -726,6 +737,12 @@ class ModelConfig(FrigateBaseModel):
     labelmap: Dict[int, str] = Field(
         default_factory=dict, title="Labelmap customization."
     )
+    input_tensor: InputTensorEnum = Field(
+        default=InputTensorEnum.nhwc, title="Model Input Tensor Shape"
+    )
+    input_pixel_format: PixelFormatEnum = Field(
+        default=PixelFormatEnum.rgb, title="Model Input Pixel Color Format"
+    )
     _merged_labelmap: Optional[Dict[int, str]] = PrivateAttr()
     _colormap: Dict[int, Tuple[int, int, int]] = PrivateAttr()
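A short sketch of how the new fields are set on the config model (values illustrative; `ModelConfig` is the pydantic model defined above, with `nhwc`/`rgb` as defaults):

```python
from frigate.config import InputTensorEnum, ModelConfig, PixelFormatEnum

# Override the defaults for a channel-first model that expects BGR input.
model_cfg = ModelConfig(
    input_tensor=InputTensorEnum.nchw,
    input_pixel_format=PixelFormatEnum.bgr,
)
```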
frigate/detectors/__init__.py (new, empty)

frigate/detectors/cpu_tfl.py (new, 46 lines)
@@ -0,0 +1,46 @@
import logging
import numpy as np

from frigate.detectors.detection_api import DetectionApi
import tflite_runtime.interpreter as tflite

logger = logging.getLogger(__name__)


class CpuTfl(DetectionApi):
    def __init__(self, det_device=None, model_config=None, num_threads=3):
        self.interpreter = tflite.Interpreter(
            model_path=model_config.path or "/cpu_model.tflite", num_threads=num_threads
        )

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()

        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
        count = int(
            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
        )

        detections = np.zeros((20, 6), np.float32)

        for i in range(count):
            if scores[i] < 0.4 or i == 20:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections
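A quick usage sketch for the new detector module (the input size is an assumption for a typical 300x300 SSD-style model; `/cpu_model.tflite` is the default path baked into the Frigate image):

```python
import numpy as np

from frigate.config import ModelConfig
from frigate.detectors.cpu_tfl import CpuTfl

detector = CpuTfl(model_config=ModelConfig(), num_threads=2)
tensor = np.zeros((1, 300, 300, 3), dtype=np.uint8)  # one dummy nhwc frame
detections = detector.detect_raw(tensor)  # up to 20 rows of [class_id, score, box coords]
```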
frigate/detectors/detection_api.py (new, 17 lines)
@@ -0,0 +1,17 @@
import logging

from abc import ABC, abstractmethod
from typing import Dict


logger = logging.getLogger(__name__)


class DetectionApi(ABC):
    @abstractmethod
    def __init__(self, det_device=None, model_config=None):
        pass

    @abstractmethod
    def detect_raw(self, tensor_input):
        pass
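This abstract class is the seam the refactor introduces: adding a detector type means implementing `__init__` and `detect_raw` in a new submodule. A hypothetical sketch (class name and body invented for illustration, not part of this commit):

```python
import numpy as np

from frigate.detectors.detection_api import DetectionApi


class MyDetector(DetectionApi):
    # Hypothetical detector plugin for a new accelerator.
    def __init__(self, det_device=None, model_config=None):
        # Load your runtime/model here, e.g. from model_config.path.
        self.model_config = model_config

    def detect_raw(self, tensor_input):
        # Return up to 20 rows of [class_id, score, box coords] as float32,
        # matching what the tflite detectors in this commit produce.
        return np.zeros((20, 6), np.float32)
```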
frigate/detectors/edgetpu_tfl.py (new, 63 lines)
@@ -0,0 +1,63 @@
import logging
import numpy as np

from frigate.detectors.detection_api import DetectionApi
import tflite_runtime.interpreter as tflite
from tflite_runtime.interpreter import load_delegate

logger = logging.getLogger(__name__)


class EdgeTpuTfl(DetectionApi):
    def __init__(self, det_device=None, model_config=None):
        device_config = {"device": "usb"}
        if not det_device is None:
            device_config = {"device": det_device}

        edge_tpu_delegate = None

        try:
            logger.info(f"Attempting to load TPU as {device_config['device']}")
            edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
            logger.info("TPU found")
            self.interpreter = tflite.Interpreter(
                model_path=model_config.path or "/edgetpu_model.tflite",
                experimental_delegates=[edge_tpu_delegate],
            )
        except ValueError:
            logger.error(
                "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
            )
            raise

        self.interpreter.allocate_tensors()

        self.tensor_input_details = self.interpreter.get_input_details()
        self.tensor_output_details = self.interpreter.get_output_details()

    def detect_raw(self, tensor_input):
        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
        self.interpreter.invoke()

        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
        count = int(
            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
        )

        detections = np.zeros((20, 6), np.float32)

        for i in range(count):
            if scores[i] < 0.4 or i == 20:
                break
            detections[i] = [
                class_ids[i],
                float(scores[i]),
                boxes[i][0],
                boxes[i][1],
                boxes[i][2],
                boxes[i][3],
            ]

        return detections
frigate/object_detection.py (renamed from frigate/edgetpu.py)
@@ -8,9 +8,11 @@ import threading
 from abc import ABC, abstractmethod
 
 import numpy as np
-import tflite_runtime.interpreter as tflite
 from setproctitle import setproctitle
-from tflite_runtime.interpreter import load_delegate
 
+from frigate.config import DetectorTypeEnum, InputTensorEnum
+from frigate.detectors.edgetpu_tfl import EdgeTpuTfl
+from frigate.detectors.cpu_tfl import CpuTfl
+
 from frigate.util import EventsPerSecond, SharedMemoryFrameManager, listen, load_labels
 
@@ -23,46 +25,43 @@ class ObjectDetector(ABC):
         pass
 
 
+def tensor_transform(desired_shape):
+    # Currently this function only supports BHWC permutations
+    if desired_shape == InputTensorEnum.nhwc:
+        return None
+    elif desired_shape == InputTensorEnum.nchw:
+        return (0, 3, 1, 2)
+
+
 class LocalObjectDetector(ObjectDetector):
-    def __init__(self, tf_device=None, model_path=None, num_threads=3, labels=None):
+    def __init__(
+        self,
+        det_type=DetectorTypeEnum.cpu,
+        det_device=None,
+        model_config=None,
+        num_threads=3,
+        labels=None,
+    ):
         self.fps = EventsPerSecond()
         if labels is None:
             self.labels = {}
         else:
             self.labels = load_labels(labels)
 
-        device_config = {"device": "usb"}
-        if not tf_device is None:
-            device_config = {"device": tf_device}
+        if model_config:
+            self.input_transform = tensor_transform(model_config.input_tensor)
+        else:
+            self.input_transform = None
 
-        edge_tpu_delegate = None
-
-        if tf_device != "cpu":
-            try:
-                logger.info(f"Attempting to load TPU as {device_config['device']}")
-                edge_tpu_delegate = load_delegate("libedgetpu.so.1.0", device_config)
-                logger.info("TPU found")
-                self.interpreter = tflite.Interpreter(
-                    model_path=model_path or "/edgetpu_model.tflite",
-                    experimental_delegates=[edge_tpu_delegate],
-                )
-            except ValueError:
-                logger.error(
-                    "No EdgeTPU was detected. If you do not have a Coral device yet, you must configure CPU detectors."
-                )
-                raise
+        if det_type == DetectorTypeEnum.edgetpu:
+            self.detect_api = EdgeTpuTfl(
+                det_device=det_device, model_config=model_config
+            )
         else:
             logger.warning(
                 "CPU detectors are not recommended and should only be used for testing or for trial purposes."
             )
-            self.interpreter = tflite.Interpreter(
-                model_path=model_path or "/cpu_model.tflite", num_threads=num_threads
-            )
-
-        self.interpreter.allocate_tensors()
-
-        self.tensor_input_details = self.interpreter.get_input_details()
-        self.tensor_output_details = self.interpreter.get_output_details()
+            self.detect_api = CpuTfl(model_config=model_config, num_threads=num_threads)
 
     def detect(self, tensor_input, threshold=0.4):
         detections = []
@@ -79,31 +78,9 @@ class LocalObjectDetector(ObjectDetector):
         return detections
 
     def detect_raw(self, tensor_input):
-        self.interpreter.set_tensor(self.tensor_input_details[0]["index"], tensor_input)
-        self.interpreter.invoke()
-
-        boxes = self.interpreter.tensor(self.tensor_output_details[0]["index"])()[0]
-        class_ids = self.interpreter.tensor(self.tensor_output_details[1]["index"])()[0]
-        scores = self.interpreter.tensor(self.tensor_output_details[2]["index"])()[0]
-        count = int(
-            self.interpreter.tensor(self.tensor_output_details[3]["index"])()[0]
-        )
-
-        detections = np.zeros((20, 6), np.float32)
-
-        for i in range(count):
-            if scores[i] < 0.4 or i == 20:
-                break
-            detections[i] = [
-                class_ids[i],
-                float(scores[i]),
-                boxes[i][0],
-                boxes[i][1],
-                boxes[i][2],
-                boxes[i][3],
-            ]
-
-        return detections
+        if self.input_transform:
+            tensor_input = np.transpose(tensor_input, self.input_transform)
+        return self.detect_api.detect_raw(tensor_input=tensor_input)
 
 
 def run_detector(
@@ -112,9 +89,9 @@
     out_events: dict[str, mp.Event],
     avg_speed,
     start,
-    model_path,
-    model_shape,
-    tf_device,
+    model_config,
+    det_type,
+    det_device,
     num_threads,
 ):
     threading.current_thread().name = f"detector:{name}"
@@ -133,7 +110,10 @@
 
     frame_manager = SharedMemoryFrameManager()
     object_detector = LocalObjectDetector(
-        tf_device=tf_device, model_path=model_path, num_threads=num_threads
+        det_type=det_type,
+        det_device=det_device,
+        model_config=model_config,
+        num_threads=num_threads,
     )
 
     outputs = {}
@@ -148,7 +128,7 @@
         except queue.Empty:
             continue
         input_frame = frame_manager.get(
-            connection_id, (1, model_shape[0], model_shape[1], 3)
+            connection_id, (1, model_config.height, model_config.width, 3)
        )
 
        if input_frame is None:
@@ -165,15 +145,15 @@
         avg_speed.value = (avg_speed.value * 9 + duration) / 10
 
 
-class EdgeTPUProcess:
+class ObjectDetectProcess:
     def __init__(
         self,
         name,
         detection_queue,
         out_events,
-        model_path,
-        model_shape,
-        tf_device=None,
+        model_config,
+        det_type=None,
+        det_device=None,
         num_threads=3,
     ):
         self.name = name
@@ -182,9 +162,9 @@
         self.avg_inference_speed = mp.Value("d", 0.01)
         self.detection_start = mp.Value("d", 0.0)
         self.detect_process = None
-        self.model_path = model_path
-        self.model_shape = model_shape
-        self.tf_device = tf_device
+        self.model_config = model_config
+        self.det_type = det_type
+        self.det_device = det_device
         self.num_threads = num_threads
         self.start_or_restart()
 
@@ -210,9 +190,9 @@
                 self.out_events,
                 self.avg_inference_speed,
                 self.detection_start,
-                self.model_path,
-                self.model_shape,
-                self.tf_device,
+                self.model_config,
+                self.det_type,
+                self.det_device,
                 self.num_threads,
             ),
         )
@@ -221,7 +201,7 @@
 
 
 class RemoteObjectDetector:
-    def __init__(self, name, labels, detection_queue, event, model_shape):
+    def __init__(self, name, labels, detection_queue, event, model_config):
         self.labels = labels
         self.name = name
         self.fps = EventsPerSecond()
@@ -229,7 +209,9 @@
         self.event = event
         self.shm = mp.shared_memory.SharedMemory(name=self.name, create=False)
         self.np_shm = np.ndarray(
-            (1, model_shape[0], model_shape[1], 3), dtype=np.uint8, buffer=self.shm.buf
+            (1, model_config.height, model_config.width, 3),
+            dtype=np.uint8,
+            buffer=self.shm.buf,
         )
         self.out_shm = mp.shared_memory.SharedMemory(
             name=f"out-{self.name}", create=False
frigate/stats.py
@@ -14,7 +14,7 @@ from frigate.config import FrigateConfig
 from frigate.const import RECORD_DIR, CLIPS_DIR, CACHE_DIR
 from frigate.types import StatsTrackingTypes, CameraMetricsTypes
 from frigate.version import VERSION
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.object_detection import ObjectDetectProcess
 
 logger = logging.getLogger(__name__)
 
@@ -37,7 +37,8 @@ def get_latest_version() -> str:
 
 
 def stats_init(
-    camera_metrics: dict[str, CameraMetricsTypes], detectors: dict[str, EdgeTPUProcess]
+    camera_metrics: dict[str, CameraMetricsTypes],
+    detectors: dict[str, ObjectDetectProcess],
 ) -> StatsTrackingTypes:
     stats_tracking: StatsTrackingTypes = {
         "camera_metrics": camera_metrics,
frigate/test/test_object_detector.py (new, 130 lines)
@@ -0,0 +1,130 @@
import unittest
from unittest.mock import patch

import numpy as np
from frigate.config import DetectorTypeEnum, InputTensorEnum, ModelConfig
import frigate.object_detection


class TestLocalObjectDetector(unittest.TestCase):
    @patch("frigate.object_detection.EdgeTpuTfl")
    @patch("frigate.object_detection.CpuTfl")
    def test_localdetectorprocess_given_type_cpu_should_call_cputfl_init(
        self, mock_cputfl, mock_edgetputfl
    ):
        test_cfg = ModelConfig()
        test_cfg.path = "/test/modelpath"
        test_obj = frigate.object_detection.LocalObjectDetector(
            det_type=DetectorTypeEnum.cpu, model_config=test_cfg, num_threads=6
        )

        assert test_obj is not None
        mock_edgetputfl.assert_not_called()
        mock_cputfl.assert_called_once_with(model_config=test_cfg, num_threads=6)

    @patch("frigate.object_detection.EdgeTpuTfl")
    @patch("frigate.object_detection.CpuTfl")
    def test_localdetectorprocess_given_type_edgtpu_should_call_edgtpu_init(
        self, mock_cputfl, mock_edgetputfl
    ):
        test_cfg = ModelConfig()
        test_cfg.path = "/test/modelpath"

        test_obj = frigate.object_detection.LocalObjectDetector(
            det_type=DetectorTypeEnum.edgetpu,
            det_device="usb",
            model_config=test_cfg,
        )

        assert test_obj is not None
        mock_cputfl.assert_not_called()
        mock_edgetputfl.assert_called_once_with(det_device="usb", model_config=test_cfg)

    @patch("frigate.object_detection.CpuTfl")
    def test_detect_raw_given_tensor_input_should_return_api_detect_raw_result(
        self, mock_cputfl
    ):
        TEST_DATA = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
        TEST_DETECT_RESULT = np.ndarray([1, 2, 4, 8, 16, 32])
        test_obj_detect = frigate.object_detection.LocalObjectDetector(
            det_device=DetectorTypeEnum.cpu
        )

        mock_det_api = mock_cputfl.return_value
        mock_det_api.detect_raw.return_value = TEST_DETECT_RESULT

        test_result = test_obj_detect.detect_raw(TEST_DATA)

        mock_det_api.detect_raw.assert_called_once_with(tensor_input=TEST_DATA)
        assert test_result is mock_det_api.detect_raw.return_value

    @patch("frigate.object_detection.CpuTfl")
    def test_detect_raw_given_tensor_input_should_call_api_detect_raw_with_transposed_tensor(
        self, mock_cputfl
    ):
        TEST_DATA = np.zeros((1, 32, 32, 3), np.uint8)
        TEST_DETECT_RESULT = np.ndarray([1, 2, 4, 8, 16, 32])

        test_cfg = ModelConfig()
        test_cfg.input_tensor = InputTensorEnum.nchw

        test_obj_detect = frigate.object_detection.LocalObjectDetector(
            det_device=DetectorTypeEnum.cpu, model_config=test_cfg
        )

        mock_det_api = mock_cputfl.return_value
        mock_det_api.detect_raw.return_value = TEST_DETECT_RESULT

        test_result = test_obj_detect.detect_raw(TEST_DATA)

        mock_det_api.detect_raw.assert_called_once()
        assert (
            mock_det_api.detect_raw.call_args.kwargs["tensor_input"].shape
            == np.zeros((1, 3, 32, 32)).shape
        )

        assert test_result is mock_det_api.detect_raw.return_value

    @patch("frigate.object_detection.CpuTfl")
    @patch("frigate.object_detection.load_labels")
    def test_detect_given_tensor_input_should_return_lfiltered_detections(
        self, mock_load_labels, mock_cputfl
    ):
        TEST_DATA = np.zeros((1, 32, 32, 3), np.uint8)
        TEST_DETECT_RAW = [
            [2, 0.9, 5, 4, 3, 2],
            [1, 0.5, 8, 7, 6, 5],
            [0, 0.4, 2, 4, 8, 16],
        ]
        TEST_DETECT_RESULT = [
            ("label-3", 0.9, (5, 4, 3, 2)),
            ("label-2", 0.5, (8, 7, 6, 5)),
        ]
        TEST_LABEL_FILE = "/test_labels.txt"
        mock_load_labels.return_value = [
            "label-1",
            "label-2",
            "label-3",
            "label-4",
            "label-5",
        ]

        test_obj_detect = frigate.object_detection.LocalObjectDetector(
            det_device=DetectorTypeEnum.cpu,
            model_config=ModelConfig(),
            labels=TEST_LABEL_FILE,
        )

        mock_load_labels.assert_called_once_with(TEST_LABEL_FILE)

        mock_det_api = mock_cputfl.return_value
        mock_det_api.detect_raw.return_value = TEST_DETECT_RAW

        test_result = test_obj_detect.detect(tensor_input=TEST_DATA, threshold=0.5)

        mock_det_api.detect_raw.assert_called_once()
        assert (
            mock_det_api.detect_raw.call_args.kwargs["tensor_input"].shape
            == np.zeros((1, 32, 32, 3)).shape
        )
        assert test_result == TEST_DETECT_RESULT
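These unit tests exercise the detector selection and tensor transpose paths with the detector APIs mocked out, so they run without a model file or Coral device; presumably they can be invoked with something like `python -m unittest frigate.test.test_object_detector` inside the dev container.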
frigate/types.py
@@ -3,7 +3,7 @@ from multiprocessing.queues import Queue
 from multiprocessing.sharedctypes import Synchronized
 from multiprocessing.context import Process
 
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.object_detection import ObjectDetectProcess
 
 
 class CameraMetricsTypes(TypedDict):
@@ -26,6 +26,6 @@ class CameraMetricsTypes(TypedDict):
 
 class StatsTrackingTypes(TypedDict):
     camera_metrics: dict[str, CameraMetricsTypes]
-    detectors: dict[str, EdgeTPUProcess]
+    detectors: dict[str, ObjectDetectProcess]
     started: int
     latest_frigate_version: str
frigate/util.py
@@ -476,6 +476,16 @@ def yuv_region_2_rgb(frame, region):
         raise
 
 
+def yuv_region_2_bgr(frame, region):
+    try:
+        yuv_cropped_frame = yuv_crop_and_resize(frame, region)
+        return cv2.cvtColor(yuv_cropped_frame, cv2.COLOR_YUV2BGR_I420)
+    except:
+        print(f"frame.shape: {frame.shape}")
+        print(f"region: {region}")
+        raise
+
+
 def intersection(box_a, box_b):
     return (
         max(box_a[0], box_b[0]),
frigate/video.py
@@ -11,11 +11,11 @@ import time
 from collections import defaultdict
 
 import numpy as np
-from cv2 import cv2, reduce
+import cv2
 from setproctitle import setproctitle
 
-from frigate.config import CameraConfig, DetectConfig
-from frigate.edgetpu import RemoteObjectDetector
+from frigate.config import CameraConfig, DetectConfig, PixelFormatEnum
+from frigate.object_detection import RemoteObjectDetector
 from frigate.log import LogPipe
 from frigate.motion import MotionDetector
 from frigate.objects import ObjectTracker
@@ -29,7 +29,9 @@ from frigate.util import (
     intersection,
     intersection_over_union,
     listen,
+    yuv_crop_and_resize,
     yuv_region_2_rgb,
+    yuv_region_2_bgr,
 )
 
 logger = logging.getLogger(__name__)
@@ -89,13 +91,20 @@ def filtered(obj, objects_to_track, object_filters):
     return False
 
 
-def create_tensor_input(frame, model_shape, region):
-    cropped_frame = yuv_region_2_rgb(frame, region)
+def create_tensor_input(frame, model_config, region):
+    if model_config.input_pixel_format == PixelFormatEnum.rgb:
+        cropped_frame = yuv_region_2_rgb(frame, region)
+    elif model_config.input_pixel_format == PixelFormatEnum.bgr:
+        cropped_frame = yuv_region_2_bgr(frame, region)
+    else:
+        cropped_frame = yuv_crop_and_resize(frame, region)
 
-    # Resize to 300x300 if needed
-    if cropped_frame.shape != (model_shape[0], model_shape[1], 3):
+    # Resize if needed
+    if cropped_frame.shape != (model_config.height, model_config.width, 3):
         cropped_frame = cv2.resize(
-            cropped_frame, dsize=model_shape, interpolation=cv2.INTER_LINEAR
+            cropped_frame,
+            dsize=(model_config.height, model_config.width),
+            interpolation=cv2.INTER_LINEAR,
        )
 
     # Expand dimensions since the model expects images to have shape: [1, height, width, 3]
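The new `input_pixel_format` branch above selects RGB, BGR, or raw YUV input for the model. For reference, the two color conversions come down to OpenCV calls like this sketch (dummy I420 frame; dimensions assumed, and the RGB variant is presumably what `yuv_region_2_rgb` uses internally):

```python
import cv2
import numpy as np

# An I420 (YUV 4:2:0) frame is stored as a (height * 3 // 2, width) uint8 plane.
yuv_frame = np.zeros((480 * 3 // 2, 640), dtype=np.uint8)

rgb = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2RGB_I420)
bgr = cv2.cvtColor(yuv_frame, cv2.COLOR_YUV2BGR_I420)  # same call used by the new yuv_region_2_bgr
```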
@@ -340,7 +349,7 @@ def capture_camera(name, config: CameraConfig, process_info):
 def track_camera(
     name,
     config: CameraConfig,
-    model_shape,
+    model_config,
     labelmap,
     detection_queue,
     result_connection,
@@ -378,7 +387,7 @@ def track_camera(
         motion_contour_area,
     )
     object_detector = RemoteObjectDetector(
-        name, labelmap, detection_queue, result_connection, model_shape
+        name, labelmap, detection_queue, result_connection, model_config
     )
 
     object_tracker = ObjectTracker(config.detect)
@@ -389,7 +398,7 @@ def track_camera(
         name,
         frame_queue,
         frame_shape,
-        model_shape,
+        model_config,
         config.detect,
         frame_manager,
         motion_detector,
@@ -443,12 +452,12 @@ def detect(
     detect_config: DetectConfig,
     object_detector,
     frame,
-    model_shape,
+    model_config,
     region,
     objects_to_track,
     object_filters,
 ):
-    tensor_input = create_tensor_input(frame, model_shape, region)
+    tensor_input = create_tensor_input(frame, model_config, region)
 
     detections = []
     region_detections = object_detector.detect(tensor_input)
@@ -487,7 +496,7 @@ def process_frames(
     camera_name: str,
     frame_queue: mp.Queue,
     frame_shape,
-    model_shape,
+    model_config,
     detect_config: DetectConfig,
     frame_manager: FrameManager,
     motion_detector: MotionDetector,
@@ -571,7 +580,7 @@ def process_frames(
         # combine motion boxes with known locations of existing objects
         combined_boxes = reduce_boxes(motion_boxes + tracked_object_boxes)
 
-        region_min_size = max(model_shape[0], model_shape[1])
+        region_min_size = max(model_config.height, model_config.width)
         # compute regions
         regions = [
             calculate_region(
@@ -634,7 +643,7 @@ def process_frames(
                 detect_config,
                 object_detector,
                 frame,
-                model_shape,
+                model_config,
                 region,
                 objects_to_track,
                 object_filters,
@@ -694,7 +703,7 @@ def process_frames(
                 detect_config,
                 object_detector,
                 frame,
-                model_shape,
+                model_config,
                 region,
                 objects_to_track,
                 object_filters,
frigate/watchdog.py
@@ -5,7 +5,7 @@ import time
 import os
 import signal
 
-from frigate.edgetpu import EdgeTPUProcess
+from frigate.object_detection import ObjectDetectProcess
 from frigate.util import restart_frigate
 from multiprocessing.synchronize import Event
 
@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
 
 
 class FrigateWatchdog(threading.Thread):
-    def __init__(self, detectors: dict[str, EdgeTPUProcess], stop_event: Event):
+    def __init__(self, detectors: dict[str, ObjectDetectProcess], stop_event: Event):
         threading.Thread.__init__(self)
         self.name = "frigate_watchdog"
         self.detectors = detectors
|
||||
import numpy as np
|
||||
|
||||
from frigate.config import FrigateConfig
|
||||
from frigate.edgetpu import LocalObjectDetector
|
||||
from frigate.object_detection import LocalObjectDetector
|
||||
from frigate.motion import MotionDetector
|
||||
from frigate.object_processing import CameraState
|
||||
from frigate.objects import ObjectTracker
|
||||
@ -117,13 +117,12 @@ class ProcessClip:
|
||||
detection_enabled = mp.Value("d", 1)
|
||||
motion_enabled = mp.Value("d", True)
|
||||
stop_event = mp.Event()
|
||||
model_shape = (self.config.model.height, self.config.model.width)
|
||||
|
||||
process_frames(
|
||||
self.camera_name,
|
||||
self.frame_queue,
|
||||
self.frame_shape,
|
||||
model_shape,
|
||||
self.config.model,
|
||||
self.camera_config.detect,
|
||||
self.frame_manager,
|
||||
motion_detector,
|
||||
|