mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-07-21 13:46:56 +02:00)

LPR fixes (#17588)

* docs
* docs
* docs
* docs
* fix box merging logic
* always run paddleocr models on cpu
* docs clarity
* fix docs
* docs

parent f2840468b4
commit cb27bdb2f7
@@ -19,7 +19,7 @@ When a plate is recognized, the recognized name is:
 
 Users running a Frigate+ model (or any custom model that natively detects license plates) should ensure that `license_plate` is added to the [list of objects to track](https://docs.frigate.video/plus/#available-label-types) either globally or for a specific camera. This will improve the accuracy and performance of the LPR model.
 
-Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that runs on your CPU. In this case, you should _not_ define `license_plate` in your list of objects to track.
+Users without a model that detects license plates can still run LPR. Frigate uses a lightweight YOLOv9 license plate detection model that runs on your CPU or GPU. In this case, you should _not_ define `license_plate` in your list of objects to track.
 
 :::note
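For context on the paragraph above: with a Frigate+ or custom model that natively detects plates, `license_plate` is added through the standard `objects.track` list. The snippet below is an illustrative sketch and not part of this diff; the label list shown is an assumption about a typical setup.

```yaml
# Illustrative sketch (not part of this commit): track license_plate when
# your detection model natively supports the label.
objects:
  track:
    - car
    - license_plate
```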
@@ -29,7 +29,7 @@ In the default mode, Frigate's LPR needs to first detect a `car` before it can r
 
 ## Minimum System Requirements
 
-License plate recognition works by running AI models locally on your system. The models are relatively lightweight and will be auto-selected to run on your CPU or GPU. At least 4GB of RAM is required.
+License plate recognition works by running AI models locally on your system. The models are relatively lightweight and will be auto-selected to run on your CPU. At least 4GB of RAM is required.
 
 ## Configuration
@@ -40,11 +40,11 @@ lpr:
   enabled: True
 ```
 
-Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You can disable it for specific cameras at the camera level:
+Like other enrichments in Frigate, LPR **must be enabled globally** to use the feature. You should disable it for specific cameras at the camera level if you don't want to run LPR on cars on those cameras:
 
 ```yaml
 cameras:
-  driveway:
+  garage:
     ...
     lpr:
       enabled: False
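Putting the two fragments of this hunk together, a configuration with LPR enabled globally and switched off for a single camera would look roughly like the sketch below (the camera name and `...` placeholder follow the docs' own example; the surrounding config is assumed).

```yaml
# Sketch combining the global and per-camera settings shown in this hunk
lpr:
  enabled: True

cameras:
  garage:
    ...
    lpr:
      enabled: False
```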
@@ -174,7 +174,7 @@ cameras:
     type: "lpr" # required to use dedicated LPR camera mode
     detect:
       enabled: True
-      fps: 5 # increase to 10 if vehicles move quickly across your frame
+      fps: 5 # increase to 10 if vehicles move quickly across your frame. Higher than 10 is unnecessary and is not recommended.
       min_initialized: 2
       width: 1920
       height: 1080
@@ -313,6 +313,10 @@ In normal LPR mode, Frigate requires a `car` to be detected first before recogni
 
 Yes, but performance depends on camera quality, lighting, and infrared capabilities. Make sure your camera can capture clear images of plates at night.
 
+### Can I limit LPR to specific zones?
+
+LPR, like other Frigate enrichments, runs at the camera level rather than the zone level. While you can't restrict LPR to specific zones directly, you can control when recognition runs by setting a `min_area` value to filter out smaller detections.
+
 ### How can I match known plates with minor variations?
 
 Use `match_distance` to allow small character mismatches. Alternatively, define multiple variations in `known_plates`.
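The `min_area`, `match_distance`, and `known_plates` options referenced in these FAQ answers all live under the `lpr` section of the config. The sketch below is illustrative only; the specific values and plate strings are assumptions, not recommendations from this commit.

```yaml
# Illustrative sketch of the LPR options mentioned in the FAQ above
lpr:
  enabled: True
  min_area: 1000       # skip recognition on plates smaller than this pixel area
  match_distance: 1    # allow one character of difference when matching known plates
  known_plates:
    Delivery Van:
      - "RJK-5678"
      - "RJK5678"      # variation without the dash
```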
@@ -336,3 +340,9 @@ Use `match_distance` to allow small character mismatches. Alternatively, define
 ### Will LPR slow down my system?
 
 LPR's performance impact depends on your hardware. Ensure you have at least 4GB RAM and a capable CPU or GPU for optimal results. If you are running the Dedicated LPR Camera mode, resource usage will be higher compared to users who run a model that natively detects license plates. Tune your motion detection settings for your dedicated LPR camera so that the license plate detection model runs only when necessary.
+
+### I am seeing a YOLOv9 plate detection metric in Enrichment Metrics, but I have a Frigate+ or custom model that detects `license_plate`. Why is the YOLOv9 model running?
+
+The YOLOv9 license plate detector model will run (and the metric will appear) if you've enabled LPR but haven't defined `license_plate` as an object to track, either at the global or camera level.
+
+If you are detecting `car` on cameras where you don't want to run LPR, make sure you disable LPR at the camera level. And if you do want to run LPR on those cameras, make sure you define `license_plate` as an object to track.
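The two fixes described in this new FAQ entry map to two small config changes. The sketch below is illustrative; the camera names are placeholders and not part of this diff.

```yaml
# Sketch: either track license_plate where you do want LPR, or disable LPR
# where you only track car.
cameras:
  camera_with_plate_model:
    objects:
      track:
        - car
        - license_plate
  camera_without_lpr:
    lpr:
      enabled: False
```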
@@ -309,7 +309,11 @@ class LicensePlateProcessingMixin:
         return image.transpose((2, 0, 1))[np.newaxis, ...]
 
     def _merge_nearby_boxes(
-        self, boxes: List[np.ndarray], plate_width: float, gap_fraction: float = 0.1
+        self,
+        boxes: List[np.ndarray],
+        plate_width: float,
+        gap_fraction: float = 0.1,
+        min_overlap_fraction: float = -0.2,
     ) -> List[np.ndarray]:
         """
         Merge bounding boxes that are likely part of the same license plate based on proximity,
@@ -329,6 +333,7 @@ class LicensePlateProcessingMixin:
             return []
 
         max_gap = plate_width * gap_fraction
+        min_overlap = plate_width * min_overlap_fraction
 
         # Sort boxes by top left x
         sorted_boxes = sorted(boxes, key=lambda x: x[0][0])
@@ -353,9 +358,10 @@
             next_bottom = np.max(next_box[:, 1])
 
             # Consider boxes part of the same plate if they are close horizontally or overlap
-            if horizontal_gap <= max_gap and max(current_top, next_top) <= min(
-                current_bottom, next_bottom
-            ):
+            # within the allowed limit and their vertical positions overlap significantly
+            if min_overlap <= horizontal_gap <= max_gap and max(
+                current_top, next_top
+            ) <= min(current_bottom, next_bottom):
                 merged_points = np.vstack((current_box, next_box))
                 new_box = np.array(
                     [
@@ -379,7 +385,7 @@
                 )
                 current_box = new_box
             else:
-                # If the boxes are not close enough, add the current box to the result
+                # If the boxes are not close enough or overlap too much, add the current box to the result
                 merged_boxes.append(current_box)
                 current_box = next_box
@@ -12,13 +12,13 @@ class LicensePlateModelRunner(DataProcessorModelRunner):
     def __init__(self, requestor, device: str = "CPU", model_size: str = "large"):
         super().__init__(requestor, device, model_size)
         self.detection_model = PaddleOCRDetection(
-            model_size=model_size, requestor=requestor, device=device
+            model_size=model_size, requestor=requestor, device="CPU"
         )
         self.classification_model = PaddleOCRClassification(
-            model_size=model_size, requestor=requestor, device=device
+            model_size=model_size, requestor=requestor, device="CPU"
        )
        self.recognition_model = PaddleOCRRecognition(
-            model_size=model_size, requestor=requestor, device=device
+            model_size=model_size, requestor=requestor, device="CPU"
        )
        self.yolov9_detection_model = LicensePlateDetector(
            model_size=model_size, requestor=requestor, device=device