Work through most of the cspell warnings in python (#13794)

gtsiam 2024-09-17 18:41:46 +03:00 committed by GitHub
parent 350abda21a
commit edababa88e
20 changed files with 208 additions and 66 deletions

View File

@@ -1,10 +1,20 @@
 aarch
+absdiff
+airockchip
+Alloc
 Amcrest
 amdgpu
+analyzeduration
 Annke
 apexcharts
+arange
+argmax
+argmin
+argpartition
+ascontiguousarray
 authelia
 authentik
+autodetected
 automations
 autotrack
 autotracked
@@ -12,128 +22,229 @@ autotracker
 autotracking
 balena
 Beelink
+BGRA
+BHWC
 blackshear
 blakeblackshear
 bottombar
 buildx
 castable
+cdist
 Celeron
 cgroups
 chipset
+chromadb
 Chromecast
 cmdline
 codeowner
+CODEOWNERS
 codeproject
 colormap
 colorspace
+comms
+ctypeslib
 CUDA
 Cuvid
 Dahua
 datasheet
 debconf
+deci
 deepstack
 defragment
 devcontainer
+DEVICEMAP
+discardcorrupt
 dpkg
+dsize
+dtype
+ECONNRESET
 edgetpu
+faststart
 fflags
 ffprobe
+fillna
 flac
 foscam
 fourcc
 framebuffer
+fregate
+frégate
+fromarray
+frombuffer
 frontdoor
 fstype
 fullchain
+fullscreen
+genai
+generativeai
+genpts
+getpid
 gpuload
 HACS
+Hailo
 hass
+hconcat
 healthcheck
 hideable
 Hikvision
 homeassistant
 homekit
 homography
+hsize
+hstack
+httpx
 hwaccel
+hwdownload
+hwmap
+hwupload
+iloc
 imagestream
 imdecode
 imencode
 imread
 imutils
 imwrite
+interp
 iostat
 iotop
+itemsize
 Jellyfin
 jetson
+jetsons
+joserfc
 jsmpeg
 jsonify
 Kalman
 keepalive
+keepdims
 labelmap
 letsencrypt
+levelname
 LIBAVFORMAT
+libedgetpu
+libnvinfer
 libva
+libwebp
+libx
+libyolo
+linalg
+localzone
 logpipe
 Loryta
+lstsq
 lsusb
+markupsafe
+maxsplit
+MEMHOSTALLOC
 memlimit
+meshgrid
+metadatas
+migraphx
+minilm
 mjpeg
+mkfifo
 mobiledet
 mobilenet
+modelpath
 mosquitto
 mountpoint
+movflags
 mpegts
 mqtt
 mse
+msenc
 namedtuples
+nbytes
+nchw
+ndarray
+ndimage
+nethogs
+newaxis
+nhwc
+NOBLOCK
+nobuffer
+nokey
+NONBLOCK
 noninteractive
+noprint
 Norfair
+nptype
 NTSC
 numpy
+nvenc
+nvhost
 nvml
+nvmpi
+ollama
 onnx
 onnxruntime
 onvif
 ONVIF
+openai
 opencv
 openvino
 OWASP
 paho
 passwordless
+popleft
+posthog
+postprocess
 poweroff
+preexec
 probesize
 protobuf
 psutil
+pubkey
+putenv
+pycache
 pydantic
+pyobj
+pysqlite
+pytz
+pywebpush
 qnap
 quantisation
 Radeon
 radeonsi
 radeontop
 rawvideo
+rcond
+RDONLY
 rebranded
 referer
 Reolink
 restream
 restreamed
 restreaming
+rkmpp
 rknn
+rkrga
 rockchip
 rocm
+rocminfo
 rootfs
 rtmp
 RTSP
+ruamel
 scroller
 setproctitle
+setpts
 shms
+SIGUSR
 skylake
 sleeptime
+SNDMORE
 socs
+sqliteq
 ssdlite
+statm
+stimeout
 stylelint
 subclassing
 substream
+superfast
 surveillance
+svscan
 Swipeable
 sysconf
 tailscale
@@ -143,25 +254,50 @@ tflite
 thresholded
 timelapse
 tmpfs
+tobytes
 toggleable
 traefik
+tzlocal
 Ubiquiti
 udev
 udevadm
 ultrafast
 unichip
+unidecode
 Unifi
+unixepoch
 unraid
 unreviewed
+userdata
 usermod
 vaapi
 vainfo
 variations
+vconcat
+vitb
+vstream
+vsync
 wallclock
 webp
+webpush
 webrtc
 websockets
 webui
+werkzeug
 workdir
+WRONLY
+wsgirefserver
+wsgiutils
+wsize
+xaddr
+xmaxs
+xmins
+XPUB
+XSUB
+ymaxs
+ymins
 yolo
+yolonas
+yolox
 zeep
+zerolatency

View File

@@ -7,7 +7,8 @@
     "*.db",
     "node_modules",
     "__pycache__",
-    "dist"
+    "dist",
+    "/audio-labelmap.txt"
   ],
   "language": "en",
   "dictionaryDefinitions": [

View File

@@ -149,9 +149,9 @@ def export_delete(id: str):
         try:
             if process.name() != "ffmpeg":
                 continue
-            flist = process.open_files()
-            if flist:
-                for nt in flist:
+            file_list = process.open_files()
+            if file_list:
+                for nt in file_list:
                     if nt.path.startswith(EXPORT_DIR):
                         files_in_use.append(nt.path.split("/")[-1])
         except psutil.Error:
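
Note on the pattern above: Frigate walks every process with psutil and collects the files ffmpeg still holds open, so in-use exports are not deleted out from under it. A minimal standalone sketch of that pattern (the function name and directory argument are illustrative, not Frigate's actual API):

    import psutil

    def files_open_by(process_name: str, under_dir: str) -> list[str]:
        """Return basenames of files a named process holds open under a directory."""
        in_use = []
        for process in psutil.process_iter():
            try:
                if process.name() != process_name:
                    continue
                # open_files() yields popenfile namedtuples with a .path field
                for open_file in process.open_files():
                    if open_file.path.startswith(under_dir):
                        in_use.append(open_file.path.split("/")[-1])
            except psutil.Error:
                # the process exited or denied access; skip it
                continue
        return in_use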

View File

@@ -106,10 +106,10 @@ class WebPushClient(Communicator):  # type: ignore[misc]
     def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
         """Wrapper for publishing when client is in valid state."""
         # check for updated notification config
-        _, updated_notif_config = self.config_subscriber.check_for_update()
-        if updated_notif_config:
-            self.config.notifications = updated_notif_config
+        _, updated_notification_config = self.config_subscriber.check_for_update()
+        if updated_notification_config:
+            self.config.notifications = updated_notification_config
 
         if not self.config.notifications.enabled:
             return

View File

@@ -1194,7 +1194,7 @@ class CameraConfig(FrigateBaseModel):
             + ffmpeg_output_args
         )
 
-        # if there arent any outputs enabled for this input
+        # if there aren't any outputs enabled for this input
         if len(ffmpeg_output_args) == 0:
             return None

View File

@@ -12,7 +12,7 @@ FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
 PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
 
-# Attribute & Object Consts
+# Attribute & Object constants
 
 ATTRIBUTE_LABEL_MAP = {
     "person": ["face", "amazon"],
@@ -31,7 +31,7 @@ LABEL_NMS_MAP = {
 }
 LABEL_NMS_DEFAULT = 0.4
 
-# Audio Consts
+# Audio constants
 
 AUDIO_DURATION = 0.975
 AUDIO_FORMAT = "s16le"
@@ -39,7 +39,7 @@ AUDIO_MAX_BIT_RANGE = 32768.0
 AUDIO_SAMPLE_RATE = 16000
 AUDIO_MIN_CONFIDENCE = 0.5
 
-# DB Consts
+# DB constants
 
 MAX_WAL_SIZE = 10  # MB
@@ -49,7 +49,7 @@ FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
 FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
 FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
 
-# Regex Consts
+# Regex constants
 
 REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
 REGEX_RTSP_CAMERA_USER_PASS = r":\/\/[a-zA-Z0-9_-]+:[\S]+@"
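Note: REGEX_RTSP_CAMERA_USER_PASS matches the user:pass@ credential block of an RTSP URL. A quick sketch of how such a pattern can be applied to redact credentials from logs (hypothetical usage, not the actual call site):

    import re

    REGEX_RTSP_CAMERA_USER_PASS = r":\/\/[a-zA-Z0-9_-]+:[\S]+@"

    def redact_rtsp_credentials(url: str) -> str:
        """Replace the user:pass@ portion of an RTSP URL with a placeholder."""
        return re.sub(REGEX_RTSP_CAMERA_USER_PASS, "://*:*@", url)

    print(redact_rtsp_credentials("rtsp://admin:hunter2@192.168.1.10:554/live"))
    # rtsp://*:*@192.168.1.10:554/live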

View File

@@ -83,11 +83,11 @@ class HailoDetector(DetectionApi):
         self.network_group_params = self.network_group.create_params()
 
         # Create input and output virtual stream parameters
-        self.input_vstreams_params = InputVStreamParams.make(
+        self.input_vstream_params = InputVStreamParams.make(
             self.network_group,
             format_type=self.hef.get_input_vstream_infos()[0].format.type,
         )
-        self.output_vstreams_params = OutputVStreamParams.make(
+        self.output_vstream_params = OutputVStreamParams.make(
             self.network_group, format_type=getattr(FormatType, output_type)
         )
@@ -162,8 +162,8 @@ class HailoDetector(DetectionApi):
         try:
             with InferVStreams(
                 self.network_group,
-                self.input_vstreams_params,
-                self.output_vstreams_params,
+                self.input_vstream_params,
+                self.output_vstream_params,
             ) as infer_pipeline:
                 input_dict = {}
                 if isinstance(input_data, dict):

View File

@@ -129,10 +129,10 @@ class OvDetector(DetectionApi):
         strides = [8, 16, 32]
 
-        hsizes = [self.h // stride for stride in strides]
-        wsizes = [self.w // stride for stride in strides]
+        hsize_list = [self.h // stride for stride in strides]
+        wsize_list = [self.w // stride for stride in strides]
 
-        for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+        for hsize, wsize, stride in zip(hsize_list, wsize_list, strides):
             xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
             grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
             grids.append(grid)
@@ -216,10 +216,12 @@ class OvDetector(DetectionApi):
             conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
 
             # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
-            dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
-            dets = dets[conf_mask]
+            detections = np.concatenate(
+                (image_pred[:, :5], class_conf, class_pred), axis=1
+            )
+            detections = detections[conf_mask]
 
-            ordered = dets[dets[:, 5].argsort()[::-1]][:20]
+            ordered = detections[detections[:, 5].argsort()[::-1]][:20]
             for i, object_detected in enumerate(ordered):
                 detections[i] = self.process_yolo(
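
Note: the hsize/wsize lists above implement YOLOX-style output decoding: one (x, y) grid per detection stride, used to map raw network outputs back to input-image coordinates. A self-contained sketch of the grid construction (the 640x640 input size is an assumption for illustration):

    import numpy as np

    h = w = 640  # model input size (illustrative)
    strides = [8, 16, 32]

    grids, expanded_strides = [], []
    for stride in strides:
        hsize, wsize = h // stride, w // stride
        # one (x, y) cell coordinate per output location at this stride
        xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
        grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
        grids.append(grid)
        expanded_strides.append(np.full((1, grid.shape[1], 1), stride))

    grids = np.concatenate(grids, 1)
    expanded_strides = np.concatenate(expanded_strides, 1)
    # decoded centers: (raw_xy + grids) * expanded_strides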

View File

@@ -17,7 +17,7 @@ supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
 
 supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}
 
-model_chache_dir = "/config/model_cache/rknn_cache/"
+model_cache_dir = "/config/model_cache/rknn_cache/"
 
 
 class RknnDetectorConfig(BaseDetectorConfig):
@@ -110,7 +110,7 @@ class Rknn(DetectionApi):
         if model_matched:
             model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
-            model_props["path"] = model_chache_dir + model_props["filename"]
+            model_props["path"] = model_cache_dir + model_props["filename"]
 
             if not os.path.isfile(model_props["path"]):
                 self.download_model(model_props["filename"])
@@ -125,12 +125,12 @@ class Rknn(DetectionApi):
         return model_props
 
     def download_model(self, filename):
-        if not os.path.isdir(model_chache_dir):
-            os.mkdir(model_chache_dir)
+        if not os.path.isdir(model_cache_dir):
+            os.mkdir(model_cache_dir)
 
         urllib.request.urlretrieve(
             f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
-            model_chache_dir + filename,
+            model_cache_dir + filename,
         )
 
     def check_config(self, config):

View File

@@ -285,14 +285,14 @@ class TensorRtDetector(DetectionApi):
             boxes, scores, classes
         """
         # filter low-conf detections and concatenate results of all yolo layers
-        detections = []
+        detection_list = []
         for o in trt_outputs:
-            dets = o.reshape((-1, 7))
-            dets = dets[dets[:, 4] * dets[:, 6] >= conf_th]
-            detections.append(dets)
-        detections = np.concatenate(detections, axis=0)
-        return detections
+            detections = o.reshape((-1, 7))
+            detections = detections[detections[:, 4] * detections[:, 6] >= conf_th]
+            detection_list.append(detections)
+        detection_list = np.concatenate(detection_list, axis=0)
+        return detection_list
def detect_raw(self, tensor_input): def detect_raw(self, tensor_input):
# Input tensor has the shape of the [height, width, 3] # Input tensor has the shape of the [height, width, 3]
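
Note: the rename above separates the per-layer array (detections) from the accumulator (detection_list). The filter itself keeps rows whose combined confidence clears the threshold; with dummy data it reduces to this (column layout follows the code above, with the two confidence terms in columns 4 and 6):

    import numpy as np

    conf_th = 0.4
    trt_outputs = [np.random.rand(14).astype(np.float32) for _ in range(3)]  # dummy

    detection_list = []
    for o in trt_outputs:
        detections = o.reshape((-1, 7))
        # keep rows where box confidence * class confidence >= threshold
        detections = detections[detections[:, 4] * detections[:, 6] >= conf_th]
        detection_list.append(detections)
    detection_list = np.concatenate(detection_list, axis=0)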

View File

@@ -26,7 +26,7 @@ def preprocess(tensor_input, model_input_shape, model_input_element_type):
         logger.warn(
             f"preprocess: tensor_input.shape {tensor_input.shape} and model_input_shape {model_input_shape} do not match!"
         )
-    # cv2.dnn.blobFromImage is faster than numpying it
+    # cv2.dnn.blobFromImage is faster than running it through numpy
     return cv2.dnn.blobFromImage(
         tensor_input[0],
         1.0 / 255,
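
Note: the corrected comment refers to cv2.dnn.blobFromImage performing scaling, resizing, and HWC-to-NCHW conversion in one native call. A sketch of what that one call replaces (shapes are illustrative):

    import cv2
    import numpy as np

    frame = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)  # dummy image

    # one call: scale by 1/255, resize, and emit a float32 NCHW blob
    blob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (320, 320))

    # rough numpy equivalent of the same steps
    resized = cv2.resize(frame, (320, 320)).astype(np.float32) / 255.0
    manual = np.transpose(resized, (2, 0, 1))[np.newaxis, ...]  # HWC -> NCHW

    assert blob.shape == manual.shape == (1, 3, 320, 320)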

View File

@@ -15,7 +15,7 @@ from frigate.models import Event
 # Squelch posthog logging
 logging.getLogger("chromadb.telemetry.product.posthog").setLevel(logging.CRITICAL)
 
-# Hotsawp the sqlite3 module for Chroma compatibility
+# Hot-swap the sqlite3 module for Chroma compatibility
 try:
     from chromadb import Collection
     from chromadb import HttpClient as ChromaClient
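
Note: the comment fixed here describes a common workaround for chromadb requiring a newer SQLite than some system Pythons ship: install pysqlite3-binary and alias it as the stdlib module before chromadb is imported. The generic form of that swap looks like this (a sketch; Frigate's actual swap lives in its own module):

    import sys

    try:
        # pysqlite3 (from the pysqlite3-binary package) bundles a modern SQLite
        import pysqlite3  # noqa: F401
        sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
    except ImportError:
        pass  # fall back to the stdlib sqlite3

    import chromadb  # now resolves sqlite3 to the swapped-in module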

View File

@@ -55,13 +55,13 @@ class FrigateMotionDetector(MotionDetector):
 
         # Improve contrast
         if self.improve_contrast.value:
-            minval = np.percentile(resized_frame, 4)
-            maxval = np.percentile(resized_frame, 96)
+            min_value = np.percentile(resized_frame, 4)
+            max_value = np.percentile(resized_frame, 96)
             # don't adjust if the image is a single color
-            if minval < maxval:
-                resized_frame = np.clip(resized_frame, minval, maxval)
+            if min_value < max_value:
+                resized_frame = np.clip(resized_frame, min_value, max_value)
                 resized_frame = (
-                    ((resized_frame - minval) / (maxval - minval)) * 255
+                    ((resized_frame - min_value) / (max_value - min_value)) * 255
                 ).astype(np.uint8)
 
         # mask frame
@@ -100,13 +100,13 @@ class FrigateMotionDetector(MotionDetector):
         # dilate the thresholded image to fill in holes, then find contours
         # on thresholded image
         thresh_dilated = cv2.dilate(thresh, None, iterations=2)
-        cnts = cv2.findContours(
+        contours = cv2.findContours(
             thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
         )
-        cnts = imutils.grab_contours(cnts)
+        contours = imutils.grab_contours(contours)
 
         # loop over the contours
-        for c in cnts:
+        for c in contours:
             # if the contour is big enough, count it as motion
             contour_area = cv2.contourArea(c)
             if contour_area > self.contour_area.value:
@@ -124,7 +124,7 @@ class FrigateMotionDetector(MotionDetector):
             thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
             # print("--------")
             # print(self.frame_counter)
-            for c in cnts:
+            for c in contours:
                 contour_area = cv2.contourArea(c)
                 if contour_area > self.contour_area.value:
                     x, y, w, h = cv2.boundingRect(c)
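
Note: the minval/maxval rename above sits in a percentile-based contrast stretch: clip the frame to its 4th-96th percentile range, then rescale to the full 0-255 range. As a standalone function:

    import numpy as np

    def stretch_contrast(gray: np.ndarray) -> np.ndarray:
        """Percentile-based contrast stretch for a grayscale frame."""
        min_value = np.percentile(gray, 4)
        max_value = np.percentile(gray, 96)
        if min_value >= max_value:  # single-color frame; nothing to stretch
            return gray
        clipped = np.clip(gray, min_value, max_value)
        scaled = ((clipped - min_value) / (max_value - min_value)) * 255
        return scaled.astype(np.uint8)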

View File

@@ -79,12 +79,15 @@ class ImprovedMotionDetector(MotionDetector):
 
         # Improve contrast
         if self.config.improve_contrast:
             # TODO tracking moving average of min/max to avoid sudden contrast changes
-            minval = np.percentile(resized_frame, 4).astype(np.uint8)
-            maxval = np.percentile(resized_frame, 96).astype(np.uint8)
+            min_value = np.percentile(resized_frame, 4).astype(np.uint8)
+            max_value = np.percentile(resized_frame, 96).astype(np.uint8)
             # skip contrast calcs if the image is a single color
-            if minval < maxval:
+            if min_value < max_value:
                 # keep track of the last 50 contrast values
-                self.contrast_values[self.contrast_values_index] = [minval, maxval]
+                self.contrast_values[self.contrast_values_index] = [
+                    min_value,
+                    max_value,
+                ]
                 self.contrast_values_index += 1
                 if self.contrast_values_index == len(self.contrast_values):
                     self.contrast_values_index = 0
@@ -122,14 +125,14 @@ class ImprovedMotionDetector(MotionDetector):
         # dilate the thresholded image to fill in holes, then find contours
         # on thresholded image
         thresh_dilated = cv2.dilate(thresh, None, iterations=1)
-        cnts = cv2.findContours(
+        contours = cv2.findContours(
             thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
         )
-        cnts = imutils.grab_contours(cnts)
+        contours = imutils.grab_contours(contours)
 
         # loop over the contours
         total_contour_area = 0
-        for c in cnts:
+        for c in contours:
             # if the contour is big enough, count it as motion
             contour_area = cv2.contourArea(c)
             total_contour_area += contour_area
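
Note: imutils.grab_contours exists because cv2.findContours returns a (contours, hierarchy) pair on OpenCV 2 and 4 but an (image, contours, hierarchy) triple on OpenCV 3; it picks out the contour list either way. The motion-area accumulation around the renamed variable reduces to:

    import cv2
    import imutils

    def total_motion_area(thresh_dilated) -> float:
        """Sum the areas of all external contours in a binary image."""
        contours = cv2.findContours(
            thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )
        contours = imutils.grab_contours(contours)  # normalize across versions
        return sum(cv2.contourArea(c) for c in contours)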

View File

@@ -268,9 +268,9 @@ class PtzAutoTracker:
             self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
             return
 
-        movestatus_supported = self.onvif.get_service_capabilities(camera)
+        move_status_supported = self.onvif.get_service_capabilities(camera)
 
-        if movestatus_supported is None or movestatus_supported.lower() != "true":
+        if move_status_supported is None or move_status_supported.lower() != "true":
             logger.warning(
                 f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported"
             )
@@ -807,8 +807,8 @@ class PtzAutoTracker:
         invalid_delta = np.any(delta > delta_thresh)
 
         # Check variance
-        stdevs = np.std(velocities, axis=0)
-        high_variances = np.any(stdevs > var_thresh)
+        stdev_list = np.std(velocities, axis=0)
+        high_variances = np.any(stdev_list > var_thresh)
 
         # Check direction difference
         velocities = np.round(velocities)

View File

@@ -90,9 +90,9 @@ class RecordingMaintainer(threading.Thread):
         try:
             if process.name() != "ffmpeg":
                 continue
-            flist = process.open_files()
-            if flist:
-                for nt in flist:
+            file_list = process.open_files()
+            if file_list:
+                for nt in file_list:
                     if nt.path.startswith(CACHE_DIR):
                         files_in_use.append(nt.path.split("/")[-1])
         except psutil.Error:
except psutil.Error: except psutil.Error:

View File

@@ -250,7 +250,7 @@ def stats_snapshot(
         ffmpeg_pid = (
             camera_stats["ffmpeg_pid"].value if camera_stats["ffmpeg_pid"] else None
         )
-        cpid = (
+        capture_pid = (
            camera_stats["capture_process"].pid
            if camera_stats["capture_process"]
            else None
@@ -262,7 +262,7 @@ def stats_snapshot(
             "detection_fps": round(camera_stats["detection_fps"].value, 2),
             "detection_enabled": config.cameras[name].detect.enabled,
             "pid": pid,
-            "capture_pid": cpid,
+            "capture_pid": capture_pid,
             "ffmpeg_pid": ffmpeg_pid,
             "audio_rms": round(camera_stats["audio_rms"].value, 4),
             "audio_dBFS": round(camera_stats["audio_dBFS"].value, 4),

View File

@@ -1,4 +1,4 @@
-"""Consts for testing."""
+"""Constants for testing."""
 
 TEST_DB = "test.db"
 TEST_DB_CLEANUPS = ["test.db", "test.db-shm", "test.db-wal"]

View File

@@ -511,12 +511,12 @@ def reduce_detections(
         # due to min score requirement of NMSBoxes
         confidences = [0.6 if clipped(o, frame_shape) else o[1] for o in group]
 
-        idxs = cv2.dnn.NMSBoxes(
+        indices = cv2.dnn.NMSBoxes(
             boxes, confidences, 0.5, LABEL_NMS_MAP.get(label, LABEL_NMS_DEFAULT)
         )
 
         # add objects
-        for index in idxs:
+        for index in indices:
             index = index if isinstance(index, np.int32) else index[0]
             obj = group[index]
             selected_objects.append(obj)
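
Note: cv2.dnn.NMSBoxes returns the indices of boxes that survive non-maximum suppression, and its return shape has varied across OpenCV releases (one-element arrays vs. flat int32 values), which is what the isinstance unwrapping above handles. A small worked example:

    import cv2
    import numpy as np

    boxes = [[10, 10, 50, 50], [12, 12, 50, 50], [200, 200, 40, 40]]  # x, y, w, h
    confidences = [0.9, 0.8, 0.7]

    # the first two boxes overlap heavily, so NMS keeps only the higher-scoring one
    indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    for index in indices:
        # older OpenCV yields [[0], [2]]; newer yields [0, 2]
        index = index if isinstance(index, np.int32) else index[0]
        print(boxes[index])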

View File

@@ -122,7 +122,7 @@ def get_cpu_stats() -> dict[str, dict]:
                 stats = f.readline().split()
             utime = int(stats[13])
             stime = int(stats[14])
-            starttime = int(stats[21])
+            start_time = int(stats[21])
 
             with open("/proc/uptime") as f:
                 system_uptime_sec = int(float(f.read().split()[0]))
@@ -131,9 +131,9 @@ def get_cpu_stats() -> dict[str, dict]:
             process_utime_sec = utime // clk_tck
             process_stime_sec = stime // clk_tck
-            process_starttime_sec = starttime // clk_tck
+            process_start_time_sec = start_time // clk_tck
 
-            process_elapsed_sec = system_uptime_sec - process_starttime_sec
+            process_elapsed_sec = system_uptime_sec - process_start_time_sec
             process_usage_sec = process_utime_sec + process_stime_sec
             cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
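
Note: the start_time rename is part of the standard /proc CPU accounting: utime and stime (fields 14 and 15 of /proc/<pid>/stat, i.e. indices 13 and 14 after splitting) are jiffies, converted to seconds via the clock-tick rate and averaged over the process lifetime. A condensed sketch of the same computation (assumes the process name contains no spaces, since the stat line is split on whitespace):

    import os

    def average_cpu_percent(pid: int) -> int:
        """Rough lifetime-average CPU% for a PID from /proc accounting."""
        with open(f"/proc/{pid}/stat") as f:
            stats = f.readline().split()
        utime = int(stats[13])       # user-mode jiffies
        stime = int(stats[14])       # kernel-mode jiffies
        start_time = int(stats[21])  # jiffies after boot when process started

        with open("/proc/uptime") as f:
            system_uptime_sec = int(float(f.read().split()[0]))

        clk_tck = os.sysconf("SC_CLK_TCK")  # jiffies per second
        elapsed_sec = system_uptime_sec - start_time // clk_tck
        usage_sec = (utime + stime) // clk_tck
        return usage_sec * 100 // elapsed_sec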