mirror of
https://github.com/blakeblackshear/frigate.git
synced 2025-04-01 01:17:00 +02:00
Add Deepstack/CodeProject-AI.Server detector plugin (#6143)
* Add Deepstack detector plugin with configurable API URL, timeout, and API key * Update DeepStack plugin to recognize 'truck' as 'car' for label indexing * Add debug logging to DeepStack plugin for better monitoring and troubleshooting * Refactor DeepStack label loading from file to use merged labelmap * Black format * add documentation draft * fix link to codeproject website * Apply suggestions from code review Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com> --------- Co-authored-by: Nicolas Mowen <nickmowen213@gmail.com>
This commit is contained in:
parent
0751358e5b
commit
ede1dedbbd
@ -256,3 +256,25 @@ model:
  width: 416
  height: 416
```

## Deepstack / CodeProject.AI Server Detector

The Deepstack / CodeProject.AI Server detector for Frigate allows you to integrate Deepstack and CodeProject.AI object detection capabilities into Frigate. CodeProject.AI and DeepStack are open-source AI platforms that can be run on various devices such as the Raspberry Pi, Nvidia Jetson, and other compatible hardware. It is important to note that the integration is performed over the network, so the inference times may not be as fast as those of native Frigate detectors, but it still provides an efficient and reliable solution for object detection and tracking.

### Setup

To get started with CodeProject.AI, visit their [official website](https://www.codeproject.com/Articles/5322557/CodeProject-AI-Server-AI-the-easy-way) and follow the instructions to download and install the AI server on your preferred device. Detailed setup instructions for CodeProject.AI are outside the scope of the Frigate documentation.

To integrate CodeProject.AI into Frigate, you'll need to make the following changes to your Frigate configuration file:

```yaml
detectors:
  deepstack:
    api_url: http://<your_codeproject_ai_server_ip>:<port>/v1/vision/detection
    type: deepstack
    api_timeout: 0.1 # seconds
```

Replace `<your_codeproject_ai_server_ip>` and `<port>` with the IP address and port of your CodeProject.AI server.

To verify that the integration is working correctly, start Frigate and observe the logs for any error messages related to CodeProject.AI. Additionally, you can check the Frigate web interface to see if the objects detected by CodeProject.AI are being displayed and tracked properly.
78
frigate/detectors/plugins/deepstack.py
Normal file
78
frigate/detectors/plugins/deepstack.py
Normal file
@ -0,0 +1,78 @@
|
||||
import logging
|
||||
import numpy as np
|
||||
import requests
|
||||
import io
|
||||
|
||||
from frigate.detectors.detection_api import DetectionApi
|
||||
from frigate.detectors.detector_config import BaseDetectorConfig
|
||||
from typing import Literal
|
||||
from pydantic import Extra, Field
|
||||
from PIL import Image
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DETECTOR_KEY = "deepstack"
|
||||
|
||||
|
||||
class DeepstackDetectorConfig(BaseDetectorConfig):
|
||||
type: Literal[DETECTOR_KEY]
|
||||
api_url: str = Field(
|
||||
default="http://localhost:80/v1/vision/detection", title="DeepStack API URL"
|
||||
)
|
||||
api_timeout: float = Field(default=0.1, title="DeepStack API timeout (in seconds)")
|
||||
api_key: str = Field(default="", title="DeepStack API key (if required)")
|
||||
|
||||
|
||||
class DeepStack(DetectionApi):
    """Detector plugin that posts JPEG frames to a Deepstack /
    CodeProject.AI Server over HTTP and converts the JSON predictions
    into Frigate's fixed-size detection matrix.
    """

    type_key = DETECTOR_KEY

    # Frigate consumes at most this many detections per frame; the output
    # matrix is fixed at this size.
    MAX_DETECTIONS = 20
    # Predictions below this confidence are discarded.
    MIN_CONFIDENCE = 0.4

    def __init__(self, detector_config: DeepstackDetectorConfig):
        self.api_url = detector_config.api_url
        self.api_timeout = detector_config.api_timeout
        self.api_key = detector_config.api_key
        # Merged labelmap: {index: label_name} used to translate server
        # label strings back to Frigate label indices.
        self.labels = detector_config.model.merged_labelmap

        # Model frame dimensions, used to normalize box coordinates to 0..1.
        self.h = detector_config.model.height
        self.w = detector_config.model.width

    def get_label_index(self, label_value):
        """Return the labelmap index for ``label_value`` (case-insensitive),
        or -1 when the label is unknown.

        "truck" is remapped to "car" because the merged labelmap does not
        track trucks separately.
        """
        if label_value.lower() == "truck":
            label_value = "car"
        for index, value in self.labels.items():
            if value == label_value.lower():
                return index
        return -1

    def detect_raw(self, tensor_input):
        """Run one detection round trip against the remote server.

        ``tensor_input`` is an image tensor that squeezes to an HxWx3
        uint8 array. Returns a (MAX_DETECTIONS, 6) float32 array of
        [label, confidence, y_min, x_min, y_max, x_max] rows with box
        coordinates normalized by the model dimensions; unused rows are
        all zeros. Network or decode failures are logged and yield an
        empty (all-zero) result instead of raising, so a transient
        server problem does not kill the detection process.
        """
        image_data = np.squeeze(tensor_input).astype(np.uint8)
        image = Image.fromarray(image_data)
        with io.BytesIO() as output:
            image.save(output, format="JPEG")
            image_bytes = output.getvalue()

        detections = np.zeros((self.MAX_DETECTIONS, 6), np.float32)

        try:
            response = requests.post(
                self.api_url,
                # BUGFIX: the API key was previously built into a dict but
                # never sent; include it as form data when configured.
                data={"api_key": self.api_key} if self.api_key else None,
                files={"image": image_bytes},
                timeout=self.api_timeout,
            )
            response_json = response.json()
        except (requests.exceptions.RequestException, ValueError) as e:
            # Timeouts, connection errors, or a non-JSON body.
            logger.error("Error calling Deepstack API: %s", e)
            return detections

        # Use a separate output index so skipped predictions do not leave
        # zero-filled gaps, and cap at MAX_DETECTIONS to avoid indexing
        # past the end of the output array (previously an IndexError when
        # the server returned more than 20 predictions).
        idx = 0
        for detection in response_json.get("predictions", []):
            if idx == self.MAX_DETECTIONS:
                break
            logger.debug("Response: %s", detection)
            if detection["confidence"] < self.MIN_CONFIDENCE:
                # Skip (not break): the response is not guaranteed to be
                # sorted by confidence.
                logger.debug(
                    "Skipping due to confidence < %s", self.MIN_CONFIDENCE
                )
                continue
            label = self.get_label_index(detection["label"])
            if label < 0:
                # Skip unknown labels instead of discarding the rest of
                # the predictions.
                logger.debug("Skipping unknown label %s", detection["label"])
                continue
            detections[idx] = [
                label,
                float(detection["confidence"]),
                detection["y_min"] / self.h,
                detection["x_min"] / self.w,
                detection["y_max"] / self.h,
                detection["x_max"] / self.w,
            ]
            idx += 1

        return detections
|
Loading…
Reference in New Issue
Block a user