Version that detects model and can begin using @local

Chirayu Rai 2025-05-07 10:14:47 -07:00
parent 52a9bdf2b9
commit 4412671e76
7 changed files with 14661 additions and 23 deletions

View File

@@ -26,16 +26,24 @@ services:
YOLO_MODELS: ""
devices:
- /dev/bus/usb:/dev/bus/usb
# - /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
- /dev/dri:/dev/dri # for intel hwaccel, needs to be updated for your hardware
volumes:
- .:/workspace/frigate:cached
- ./web/dist:/opt/frigate/web:cached
- /etc/localtime:/etc/localtime:ro
- ./config:/config
- ./debug:/media/frigate
- /dev/bus/usb:/dev/bus/usb
- ./model:/zoo
- ./test_short.mp4:/testing/test_short.mp4
mqtt:
container_name: mqtt
image: eclipse-mosquitto:1.6
ports:
- "1883:1883"
- "1883:1883"
  degirum_detector:
    container_name: degirum
    image: degirum/aiserver:latest
    privileged: true
    ports:
      - "8778:8778"

View File

@@ -72,4 +72,5 @@ prometheus-client == 0.21.*
tflite_runtime @ https://github.com/frigate-nvr/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_x86_64.whl; platform_machine == 'x86_64'
tflite_runtime @ https://github.com/feranick/TFlite-builds/releases/download/v2.17.1/tflite_runtime-2.17.1-cp311-cp311-linux_aarch64.whl; platform_machine == 'aarch64'
# DeGirum detector
degirum_headless == 0.15.*
degirum == 0.16.*
# degirum_headless == 0.15.*
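The dependency swap replaces the headless PySDK with the full degirum package, pinned one minor version ahead. A quick, illustrative check (standard library only) that the installed package matches the pin:

from importlib.metadata import version

print(version("degirum"))  # expect a 0.16.x release per the pin above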

View File

@@ -87,43 +87,135 @@ class DGDetector(DetectionApi):
        self._zoo = dg.connect(
            detector_config.location, detector_config.zoo, detector_config.token
        )
        logger.info(f"Models in zoo: {self._zoo.list_models()}")
        self.dg_model = self._zoo.load_model(
            detector_config.model.path, non_blocking_batch_predict=True
            detector_config.model.path,
        )
        self.dg_model.measure_time = True
        self.dg_model.input_image_format = "RAW"
        self.dg_model._postprocessor = None
        # Openvino tends to have multidevice, and they default to CPU rather than GPU or NPU
        types = self.dg_model.supported_device_types
        for type in types:
            # If openvino is supported, prioritize using gpu, then npu, then cpu
            if "OPENVINO" in type:
                self.dg_model.device_type = [
                    "OPENVINO/GPU",
                    "OPENVINO/NPU",
                    # "OPENVINO/GPU",
                    # "OPENVINO/NPU",
                    "OPENVINO/CPU",
                ]
            elif "HAILORT" in type:
                self.dg_model.device_type = [
                    "HAILORT/HAILO8l",
                    "HAILORT/HAILO8",
                ]
                break
        self.model_height = detector_config.model.height
        self.model_width = detector_config.model.width
        self.predict_batch = self.dg_model.predict_batch(self._queue)
        input_shape = self.dg_model.input_shape[0]
        self.model_height = input_shape[1]
        self.model_width = input_shape[2]
        frame = np.zeros(
            (detector_config.model.width, detector_config.model.height, 3),
            dtype=np.uint8,
        )
        self.dg_model(frame)
        self.prediction = self.prediction_generator()
        self.none_counter = 0
        self.not_none_counter = 0
        self.overall_frame_counter = 0
        self.times = 0

    def prediction_generator(self):
        # logger.debug("Prediction generator was called")
        with self.dg_model as model:
            while 1:
                # logger.debug(f"q size before calling get: {self._queue.qsize()}")
                data = self._queue.get()
                # logger.debug(f"q size after calling get: {self._queue.qsize()}")
                # logger.debug(
                #     f"Data we're passing into model predict: {data}, shape of data: {data.shape}"
                # )
                start = time.time_ns()
                result = model.predict(data)
                self.times += (time.time_ns() - start) * 1e-6
                # logger.info(
                #     f"Entire time taken to get result back: {self.times / self.overall_frame_counter}"
                # )
                yield result

    def detect_raw(self, tensor_input):
        # add tensor_input to input queue
        # start = time.time_ns()
        self.overall_frame_counter += 1
        truncated_input = tensor_input.reshape(tensor_input.shape[1:])
        self._queue.put((truncated_input, ""))
        # logger.debug(f"Detect raw was called for tensor input: {tensor_input}")
        # add tensor_input to input queue
        self._queue.put(truncated_input)
        # logger.debug(f"Queue size after adding truncated input: {self._queue.qsize()}")
        # define empty detection result
        detections = np.zeros((20, 6), np.float32)
        res = next(self.predict_batch)
        if res is not None:
        # res = next(self.prediction)
        result = next(self.prediction)
        # return detections
        # result = self.prediction_generator()
        # logger.info(f"Result: {result}")
        # logger.info(f"Shape of res: {res.results[0]['data']}")
        # logger.debug(f"Queue size after calling for res: {self._queue.qsize()}")
        # logger.debug(f"Output of res in initial next call: {res}")
        # logger.info(
        #     f"Overall frame number: {self.overall_frame_counter}, none count: {self.none_counter}, not none count: {self.not_none_counter}, none percentage: {self.none_counter / self.overall_frame_counter}"
        # )
        # logger.info(f"Time stats right after res: {self.dg_model.time_stats()}")
        # start = time.time_ns()
        # res_string = str(res)
        # logger.info(f"Res is: {res_string}")
        # logger.debug(f"Res's list of attributes: {dir(res)}")
        # logger.debug(
        #     f"Res results, {res.results}, length of results: {len(res.results)}"
        # )
        # logger.info(f"Output of res: {res}")
        # res_string = str(res)
        # logger.info(f"Data from array: {res.results}")
        # logger.info(f"First data: {res.results[0]['data']}")
        # logger.info(f"Length of data: {len(res.results[0]['data'][0])}")
        # if res is not None and res.results[0].get("category_id") is not None:
        if result is not None:
            # populate detection result with corresponding inference result information
            # self.not_none_counter += 1
            i = 0
            for result in res.results:
                detections[i] = [
                    result["category_id"],  # Label ID
                    float(result["score"]),  # Confidence
                    result["bbox"][1] / self.model_height,  # y_min
                    result["bbox"][0] / self.model_width,  # x_min
                    result["bbox"][3] / self.model_height,  # y_max
                    result["bbox"][2] / self.model_width,  # x_max
                ]
            # for result in res.results:
            #     if i > 20:
            #         break
            #     detections[i] = [
            #         result["category_id"],
            #         float(result["score"]),
            #         result["bbox"][1] / self.model_height,
            #         result["bbox"][0] / self.model_width,
            #         result["bbox"][3] / self.model_height,
            #         result["bbox"][2] / self.model_width,
            #     ]
            #     i += 1
            for item in result.results:
                # logger.info(f"CURRENT ITEM: {item}")
                if i >= 20:
                    break
                category_id = int(item[5])
                score = item[4]
                y_min = item[1]
                x_min = item[0]
                x_max = item[2]
                y_max = item[3]
                detections[i] = [category_id, score, y_min, x_min, y_max, x_max]
                i += 1
            if detections[0][1] != 0:  # if we have a score, then print detection
                logger.info(f"Output of detections: {detections}")
        ## Save the detection results to a file so we can compare
        # logger.info(f"Overall time took: {(time.time_ns() - start) * 1e-6}ms")
        return detections
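With the model's own postprocessor disabled (self.dg_model._postprocessor = None above), detect_raw() now indexes raw result rows instead of dict-shaped detections. Below is a condensed sketch of the mapping that indexing implies; the row layout [x_min, y_min, x_max, y_max, score, category_id] is read off the code above, and the assumption that coordinates arrive already normalized follows from the divisions by model width/height being dropped.

import numpy as np

def rows_to_detections(rows, max_detections=20):
    # Frigate expects a (20, 6) array of [class, score, y_min, x_min, y_max, x_max]
    detections = np.zeros((max_detections, 6), np.float32)
    for i, row in enumerate(rows):
        if i >= max_detections:
            break
        x_min, y_min, x_max, y_max, score, category_id = row
        detections[i] = [int(category_id), score, y_min, x_min, y_max, x_max]
    return detections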

View File

@@ -0,0 +1,95 @@
{
  "0": "__background__",
  "1": "person",
  "2": "bicycle",
  "3": "car",
  "4": "motorcycle",
  "5": "airplane",
  "6": "bus",
  "7": "train",
  "8": "truck",
  "9": "boat",
  "10": "traffic light",
  "11": "fire hydrant",
  "12": "street sign",
  "13": "stop sign",
  "14": "parking meter",
  "15": "bench",
  "16": "bird",
  "17": "cat",
  "18": "dog",
  "19": "horse",
  "20": "sheep",
  "21": "cow",
  "22": "elephant",
  "23": "bear",
  "24": "zebra",
  "25": "giraffe",
  "26": "hat",
  "27": "backpack",
  "28": "umbrella",
  "29": "shoe",
  "30": "eye glasses",
  "31": "handbag",
  "32": "tie",
  "33": "suitcase",
  "34": "frisbee",
  "35": "skis",
  "36": "snowboard",
  "37": "sports ball",
  "38": "kite",
  "39": "baseball bat",
  "40": "baseball glove",
  "41": "skateboard",
  "42": "surfboard",
  "43": "tennis racket",
  "44": "bottle",
  "45": "plate",
  "46": "wine glass",
  "47": "cup",
  "48": "fork",
  "49": "knife",
  "50": "spoon",
  "51": "bowl",
  "52": "banana",
  "53": "apple",
  "54": "sandwich",
  "55": "orange",
  "56": "broccoli",
  "57": "carrot",
  "58": "hot dog",
  "59": "pizza",
  "60": "donut",
  "61": "cake",
  "62": "chair",
  "63": "couch",
  "64": "potted plant",
  "65": "bed",
  "66": "mirror",
  "67": "dining table",
  "68": "window",
  "69": "desk",
  "70": "toilet",
  "71": "door",
  "72": "tv",
  "73": "laptop",
  "74": "mouse",
  "75": "remote",
  "76": "keyboard",
  "77": "cell phone",
  "78": "microwave",
  "79": "oven",
  "80": "toaster",
  "81": "sink",
  "82": "refrigerator",
  "83": "blender",
  "84": "book",
  "85": "clock",
  "86": "vase",
  "87": "scissors",
  "88": "teddy bear",
  "89": "hair drier",
  "90": "toothbrush",
  "91": "hair brush"
}
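The new labels file follows the original 91-class COCO ordering, including the unused placeholder classes, with "__background__" at index 0. An illustrative loader that turns it into the id-to-name mapping a detector would use; the /zoo path assumes the compose mount above.

import json

with open("/zoo/labels.json") as f:
    labels = {int(k): name for k, name in json.load(f).items()}

print(labels[1])  # "person"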

View File

@@ -0,0 +1,53 @@
{
  "ConfigVersion": 6,
  "Checksum": "0ebce8b115214756bd37cfb5b4c3b547d557c6c58e828a8b9f725214afe49600",
  "DEVICE": [
    {
      "RuntimeAgent": "OPENVINO",
      "DeviceType": "CPU",
      "SupportedDeviceTypes": "OPENVINO/CPU"
    }
  ],
  "MODEL_PARAMETERS": [
    {
      "ModelPath": "ssdlite_mobilenet_v2.xml"
    }
  ],
  "PRE_PROCESS": [
    {
      "InputImgFmt": "JPEG",
      "InputImgNormEn": false,
      "InputN": 1,
      "InputType": "Image",
      "InputResizeMethod": "bilinear",
      "InputPadMethod": "letterbox",
      "ImageBackend": "auto",
      "InputH": 300,
      "InputW": 300,
      "InputC": 3,
      "InputQuantEn": true,
      "InputQuantOffset": 0,
      "InputQuantScale": 1,
      "InputTensorLayout": "NCHW",
      "InputImgSliceType": "None"
    }
  ],
  "POST_PROCESS": [
    {
      "PostProcessorInputs": [3, 1, 2],
      "OutputPostprocessType": "Detection",
      "LabelsPath": "labels.json",
      "OutputConfThreshold": 0.3,
      "MaxDetections": 20,
      "OutputNMSThreshold": 0.6,
      "MaxDetectionsPerClass": 100,
      "MaxClassesPerDetection": 1,
      "UseRegularNMS": false,
      "OutputNumClasses": 90,
      "XScale": 10,
      "YScale": 10,
      "HScale": 5,
      "WScale": 5
    }
  ]
}
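This model config declares a 300x300 NCHW SSDLite MobileNet v2 on OPENVINO/CPU, with its built-in Detection postprocessor capped at 20 boxes, the same cap detect_raw() uses. A sketch of the model auto-detection this commit adds, mirroring the input_shape indexing in the detector; the model name and zoo path are taken from the files above, but the connect arguments are assumptions.

import degirum as dg

zoo = dg.connect("@local", zoo_url="/zoo")
model = zoo.load_model("ssdlite_mobilenet_v2")

# Mirrors the detector: derive the input size from the model itself
# rather than from Frigate's model config.
input_shape = model.input_shape[0]
model_height, model_width = input_shape[1], input_shape[2]  # 300, 300 here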

File diff suppressed because it is too large