Fixes (#18379)
* Don't return weighted name if it has the same number of results
* Remove link to incorrect format yolov9 models
* Fix command list from appearing when other inputs are focused

  The description box in the tracked object details pane was causing the command input list to show when focused.

* Clarify face docs
* Add note about python yolov9 export
* Check if hailort thread is still alive when timeout error is run into
* Reduce inference timeout

---------

Co-authored-by: Josh Hawkins <32435876+hawkeye217@users.noreply.github.com>
commit cbdac9ece5
parent 63f9689b0e
@@ -74,8 +74,10 @@ Fine-tune face recognition with these optional parameters at the global level of
 ## Usage

 Follow these steps to begin:

 1. **Enable face recognition** in your configuration file and restart Frigate.
-2. **Upload your face** using the **Add Face** button's wizard in the Face Library section of the Frigate UI.
+2. **Upload one face** using the **Add Face** button's wizard in the Face Library section of the Frigate UI. Read below for the best practices on expanding your training set.
 3. When Frigate detects and attempts to recognize a face, it will appear in the **Train** tab of the Face Library, along with its associated recognition confidence.
 4. From the **Train** tab, you can **assign the face** to a new or existing person to improve recognition accuracy for the future.
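Step 1 of the list above amounts to a single config change. As a rough illustration, a short Python sketch that flips the flag with PyYAML; the `config.yml` path and the top-level `face_recognition.enabled` key are assumptions about a typical setup, not something this diff defines:

```python
# Sketch: enable face recognition in a Frigate config file, then restart Frigate.
from pathlib import Path

import yaml  # PyYAML

config_path = Path("config.yml")  # assumption: the Frigate config lives here
config = yaml.safe_load(config_path.read_text()) or {}

# Assumption: face recognition is toggled by a top-level `face_recognition` block.
config.setdefault("face_recognition", {})["enabled"] = True

config_path.write_text(yaml.safe_dump(config, sort_keys=False))
print("face_recognition enabled; restart Frigate to apply the change.")
```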
@@ -1053,13 +1053,14 @@ python3 yolo_to_onnx.py -m yolov7-320

 #### YOLOv9

-YOLOv9 models can be exported using the below code or they [can be downloaded from hugging face](https://huggingface.co/Xenova/yolov9-onnx/tree/main)
+YOLOv9 models can be exported using the below code

 ```sh
 git clone https://github.com/WongKinYiu/yolov9
 cd yolov9

 # setup the virtual environment so installation doesn't affect main system
+# NOTE: Virtual environment must be using Python 3.11 or older.
 python3 -m venv ./
 bin/pip install -r requirements.txt
 bin/pip install onnx onnxruntime onnx-simplifier>=0.4.1
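The `onnx` and `onnxruntime` packages installed above can also be used to sanity-check the exported model before pointing Frigate at it. A minimal sketch, assuming the export produced a file named `yolov9-t.onnx` (the filename is illustrative):

```python
# Sketch: sanity-check an exported YOLOv9 ONNX file.
# The filename below is an assumption; substitute whatever your export produced.
import onnx
import onnxruntime as ort

model_path = "yolov9-t.onnx"

# Structural validation of the ONNX graph.
model = onnx.load(model_path)
onnx.checker.check_model(model)

# Confirm onnxruntime can build a session and report the input shape it expects.
session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"])
for inp in session.get_inputs():
    print(inp.name, inp.shape)
```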
@@ -471,17 +471,22 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
         if not results_list:
             return None, 0.0

-        weighted_scores = {}
-        total_weights = {}
+        counts: dict[str, int] = {}
+        weighted_scores: dict[str, int] = {}
+        total_weights: dict[str, int] = {}

         for name, score, face_area in results_list:
             if name == "unknown":
                 continue

             if name not in weighted_scores:
+                counts[name] = 0
                 weighted_scores[name] = 0.0
                 total_weights[name] = 0.0

+            # increase count
+            counts[name] += 1
+
             # Capped weight based on face area
             weight = min(face_area, max_weight)
@@ -494,6 +499,12 @@ class FaceRealTimeProcessor(RealTimeProcessorApi):
             return None, 0.0

         best_name = max(weighted_scores, key=weighted_scores.get)
+
+        # If the best name has the same number of results as another name, we are not confident it is a correct result
+        for name, count in counts.items():
+            if name != best_name and counts[best_name] == count:
+                return None, 0.0
+
         weighted_average = weighted_scores[best_name] / total_weights[best_name]

         return best_name, weighted_average
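Taken together, the two hunks above implement a weighted vote with a tie guard: scores are weighted by capped face area, and a name only wins if it also has strictly more detections than every other candidate. A self-contained sketch of that rule follows; the accumulation step between the two hunks is not shown in the diff, so the `score * weight` bookkeeping here is an assumption, and the function name and example values are illustrative:

```python
# Sketch of the voting rule from the hunks above.
def weighted_vote(
    results_list: list[tuple[str, float, int]], max_weight: int = 4000
) -> tuple[str | None, float]:
    if not results_list:
        return None, 0.0

    counts: dict[str, int] = {}
    weighted_scores: dict[str, float] = {}
    total_weights: dict[str, float] = {}

    for name, score, face_area in results_list:
        if name == "unknown":
            continue

        if name not in weighted_scores:
            counts[name] = 0
            weighted_scores[name] = 0.0
            total_weights[name] = 0.0

        counts[name] += 1
        # Capped weight based on face area; the accumulation below is assumed.
        weight = min(face_area, max_weight)
        weighted_scores[name] += score * weight
        total_weights[name] += weight

    if not weighted_scores:
        return None, 0.0

    best_name = max(weighted_scores, key=weighted_scores.get)

    # Tie guard added by this commit: equal counts mean no confident winner.
    for name, count in counts.items():
        if name != best_name and counts[best_name] == count:
            return None, 0.0

    return best_name, weighted_scores[best_name] / total_weights[best_name]


# Example: "josh" wins on both count and weighted score; a count tie would return None.
print(weighted_vote([("josh", 0.92, 3000), ("josh", 0.88, 2500), ("kate", 0.60, 1000)]))
```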
@@ -345,11 +345,17 @@ class HailoDetector(DetectionApi):
         request_id = self.input_store.put(tensor_input)

         try:
-            _, infer_results = self.response_store.get(request_id, timeout=10.0)
+            _, infer_results = self.response_store.get(request_id, timeout=1.0)
         except TimeoutError:
             logger.error(
                 f"Timeout waiting for inference results for request {request_id}"
             )
+
+            if not self.inference_thread.is_alive():
+                raise RuntimeError(
+                    "HailoRT inference thread has stopped, restart required."
+                )
+
             return np.zeros((20, 6), dtype=np.float32)

         if isinstance(infer_results, list) and len(infer_results) == 1:
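The hunk above shortens the wait for HailoRT results to one second and, on timeout, distinguishes a slow inference from a dead worker thread. A condensed sketch of that pattern, using `queue.Queue` and `threading.Thread` as stand-ins for Frigate's request/response stores and the real inference worker:

```python
# Sketch of the timeout handling pattern from the hunk above, with stand-ins
# for the detector's request/response stores and HailoRT worker thread.
import queue
import threading

import numpy as np


class SketchDetector:
    def __init__(self) -> None:
        self.responses: queue.Queue = queue.Queue()
        self.inference_thread = threading.Thread(target=self._worker, daemon=True)
        self.inference_thread.start()

    def _worker(self) -> None:
        # The real worker runs HailoRT inference; this stand-in just idles.
        threading.Event().wait()

    def detect_raw(self) -> np.ndarray:
        try:
            # Short timeout: a healthy device should respond well within a second.
            infer_results = self.responses.get(timeout=1.0)
        except queue.Empty:
            # Timed out: if the worker died, surface a hard error so the detector
            # gets restarted instead of silently stalling.
            if not self.inference_thread.is_alive():
                raise RuntimeError("inference thread has stopped, restart required.")
            # Worker is alive but slow: return an empty detection batch.
            return np.zeros((20, 6), dtype=np.float32)
        return infer_results


print(SketchDetector().detect_raw().shape)  # (20, 6) -- no result within 1s
```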
@@ -763,12 +763,18 @@ export default function InputWithTags({

           {inputFocused ? (
             <LuChevronUp
-              onClick={() => setInputFocused(false)}
+              onClick={() => {
+                setInputFocused(false);
+                inputRef.current?.blur();
+              }}
               className="size-4 cursor-pointer text-secondary-foreground"
             />
           ) : (
             <LuChevronDown
-              onClick={() => setInputFocused(true)}
+              onClick={() => {
+                setInputFocused(true);
+                inputRef.current?.focus();
+              }}
               className="size-4 cursor-pointer text-secondary-foreground"
             />
           )}
@@ -778,7 +784,9 @@ export default function InputWithTags({
         <CommandList
           className={cn(
             "scrollbar-container border-t duration-200 animate-in fade-in",
-            inputFocused ? "visible" : "hidden",
+            inputFocused && inputRef.current?.matches(":focus")
+              ? "visible"
+              : "hidden",
           )}
         >
           {!currentFilterType && inputValue && (