Work through most of the cspell warnings in python (#13794)

gtsiam 2024-09-17 18:41:46 +03:00 committed by GitHub
parent 350abda21a
commit edababa88e
20 changed files with 208 additions and 66 deletions


@@ -1,10 +1,20 @@
aarch
absdiff
airockchip
Alloc
Amcrest
amdgpu
analyzeduration
Annke
apexcharts
arange
argmax
argmin
argpartition
ascontiguousarray
authelia
authentik
autodetected
automations
autotrack
autotracked
@@ -12,128 +22,229 @@ autotracker
autotracking
balena
Beelink
BGRA
BHWC
blackshear
blakeblackshear
bottombar
buildx
castable
cdist
Celeron
cgroups
chipset
chromadb
Chromecast
cmdline
codeowner
CODEOWNERS
codeproject
colormap
colorspace
comms
ctypeslib
CUDA
Cuvid
Dahua
datasheet
debconf
deci
deepstack
defragment
devcontainer
DEVICEMAP
discardcorrupt
dpkg
dsize
dtype
ECONNRESET
edgetpu
faststart
fflags
ffprobe
fillna
flac
foscam
fourcc
framebuffer
fregate
frégate
fromarray
frombuffer
frontdoor
fstype
fullchain
fullscreen
genai
generativeai
genpts
getpid
gpuload
HACS
Hailo
hass
hconcat
healthcheck
hideable
Hikvision
homeassistant
homekit
homography
hsize
hstack
httpx
hwaccel
hwdownload
hwmap
hwupload
iloc
imagestream
imdecode
imencode
imread
imutils
imwrite
interp
iostat
iotop
itemsize
Jellyfin
jetson
jetsons
joserfc
jsmpeg
jsonify
Kalman
keepalive
keepdims
labelmap
letsencrypt
levelname
LIBAVFORMAT
libedgetpu
libnvinfer
libva
libwebp
libx
libyolo
linalg
localzone
logpipe
Loryta
lstsq
lsusb
markupsafe
maxsplit
MEMHOSTALLOC
memlimit
meshgrid
metadatas
migraphx
minilm
mjpeg
mkfifo
mobiledet
mobilenet
modelpath
mosquitto
mountpoint
movflags
mpegts
mqtt
mse
msenc
namedtuples
nbytes
nchw
ndarray
ndimage
nethogs
newaxis
nhwc
NOBLOCK
nobuffer
nokey
NONBLOCK
noninteractive
noprint
Norfair
nptype
NTSC
numpy
nvenc
nvhost
nvml
nvmpi
ollama
onnx
onnxruntime
onvif
ONVIF
openai
opencv
openvino
OWASP
paho
passwordless
popleft
posthog
postprocess
poweroff
preexec
probesize
protobuf
psutil
pubkey
putenv
pycache
pydantic
pyobj
pysqlite
pytz
pywebpush
qnap
quantisation
Radeon
radeonsi
radeontop
rawvideo
rcond
RDONLY
rebranded
referer
Reolink
restream
restreamed
restreaming
rkmpp
rknn
rkrga
rockchip
rocm
rocminfo
rootfs
rtmp
RTSP
ruamel
scroller
setproctitle
setpts
shms
SIGUSR
skylake
sleeptime
SNDMORE
socs
sqliteq
ssdlite
statm
stimeout
stylelint
subclassing
substream
superfast
surveillance
svscan
Swipeable
sysconf
tailscale
@@ -143,25 +254,50 @@ tflite
thresholded
timelapse
tmpfs
tobytes
toggleable
traefik
tzlocal
Ubiquiti
udev
udevadm
ultrafast
unichip
unidecode
Unifi
unixepoch
unraid
unreviewed
userdata
usermod
vaapi
vainfo
variations
vconcat
vitb
vstream
vsync
wallclock
webp
webpush
webrtc
websockets
webui
werkzeug
workdir
WRONLY
wsgirefserver
wsgiutils
wsize
xaddr
xmaxs
xmins
XPUB
XSUB
ymaxs
ymins
yolo
yolonas
yolox
zeep
zerolatency


@@ -7,7 +7,8 @@
     "*.db",
     "node_modules",
     "__pycache__",
-    "dist"
+    "dist",
+    "/audio-labelmap.txt"
   ],
   "language": "en",
   "dictionaryDefinitions": [


@@ -149,9 +149,9 @@ def export_delete(id: str):
        try:
            if process.name() != "ffmpeg":
                continue
-            flist = process.open_files()
-            if flist:
-                for nt in flist:
+            file_list = process.open_files()
+            if file_list:
+                for nt in file_list:
                    if nt.path.startswith(EXPORT_DIR):
                        files_in_use.append(nt.path.split("/")[-1])
        except psutil.Error:
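
The hunk above renames `flist` inside a standard psutil scan that checks which export files ffmpeg still holds open. The same pattern runs standalone; a minimal sketch (the EXPORT_DIR value here is a placeholder for illustration, not Frigate's actual constant):

import psutil

EXPORT_DIR = "/media/frigate/exports"  # placeholder path for illustration

files_in_use = []
for process in psutil.process_iter():
    try:
        if process.name() != "ffmpeg":
            continue
        # open_files() returns popenfile namedtuples with a .path attribute
        file_list = process.open_files()
        for nt in file_list:
            if nt.path.startswith(EXPORT_DIR):
                files_in_use.append(nt.path.split("/")[-1])
    except psutil.Error:
        # the process exited or denied access mid-iteration; skip it
        continue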


@@ -106,10 +106,10 @@ class WebPushClient(Communicator): # type: ignore[misc]
     def publish(self, topic: str, payload: Any, retain: bool = False) -> None:
         """Wrapper for publishing when client is in valid state."""
         # check for updated notification config
-        _, updated_notif_config = self.config_subscriber.check_for_update()
+        _, updated_notification_config = self.config_subscriber.check_for_update()
 
-        if updated_notif_config:
-            self.config.notifications = updated_notif_config
+        if updated_notification_config:
+            self.config.notifications = updated_notification_config
 
         if not self.config.notifications.enabled:
             return


@@ -1194,7 +1194,7 @@ class CameraConfig(FrigateBaseModel):
             + ffmpeg_output_args
         )
 
-        # if there arent any outputs enabled for this input
+        # if there aren't any outputs enabled for this input
         if len(ffmpeg_output_args) == 0:
             return None


@@ -12,7 +12,7 @@ FRIGATE_LOCALHOST = "http://127.0.0.1:5000"
 PLUS_ENV_VAR = "PLUS_API_KEY"
 PLUS_API_HOST = "https://api.frigate.video"
 
-# Attribute & Object Consts
+# Attribute & Object constants
 ATTRIBUTE_LABEL_MAP = {
     "person": ["face", "amazon"],
@@ -31,7 +31,7 @@ LABEL_NMS_MAP = {
 }
 LABEL_NMS_DEFAULT = 0.4
 
-# Audio Consts
+# Audio constants
 AUDIO_DURATION = 0.975
 AUDIO_FORMAT = "s16le"
@@ -39,7 +39,7 @@ AUDIO_MAX_BIT_RANGE = 32768.0
 AUDIO_SAMPLE_RATE = 16000
 AUDIO_MIN_CONFIDENCE = 0.5
 
-# DB Consts
+# DB constants
 MAX_WAL_SIZE = 10 # MB
@@ -49,7 +49,7 @@ FFMPEG_HWACCEL_NVIDIA = "preset-nvidia"
 FFMPEG_HWACCEL_VAAPI = "preset-vaapi"
 FFMPEG_HWACCEL_VULKAN = "preset-vulkan"
 
-# Regex Consts
+# Regex constants
 REGEX_CAMERA_NAME = r"^[a-zA-Z0-9_-]+$"
 REGEX_RTSP_CAMERA_USER_PASS = r":\/\/[a-zA-Z0-9_-]+:[\S]+@"


@@ -83,11 +83,11 @@ class HailoDetector(DetectionApi):
         self.network_group_params = self.network_group.create_params()
 
         # Create input and output virtual stream parameters
-        self.input_vstreams_params = InputVStreamParams.make(
+        self.input_vstream_params = InputVStreamParams.make(
             self.network_group,
             format_type=self.hef.get_input_vstream_infos()[0].format.type,
         )
-        self.output_vstreams_params = OutputVStreamParams.make(
+        self.output_vstream_params = OutputVStreamParams.make(
             self.network_group, format_type=getattr(FormatType, output_type)
         )
@@ -162,8 +162,8 @@ class HailoDetector(DetectionApi):
         try:
             with InferVStreams(
                 self.network_group,
-                self.input_vstreams_params,
-                self.output_vstreams_params,
+                self.input_vstream_params,
+                self.output_vstream_params,
             ) as infer_pipeline:
                 input_dict = {}
                 if isinstance(input_data, dict):


@@ -129,10 +129,10 @@ class OvDetector(DetectionApi):
         strides = [8, 16, 32]
-        hsizes = [self.h // stride for stride in strides]
-        wsizes = [self.w // stride for stride in strides]
+        hsize_list = [self.h // stride for stride in strides]
+        wsize_list = [self.w // stride for stride in strides]
 
-        for hsize, wsize, stride in zip(hsizes, wsizes, strides):
+        for hsize, wsize, stride in zip(hsize_list, wsize_list, strides):
            xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
            grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
            grids.append(grid)
@@ -216,10 +216,12 @@
             conf_mask = (image_pred[:, 4] * class_conf.squeeze() >= 0.3).squeeze()
 
             # Detections ordered as (x1, y1, x2, y2, obj_conf, class_conf, class_pred)
-            dets = np.concatenate((image_pred[:, :5], class_conf, class_pred), axis=1)
-            dets = dets[conf_mask]
+            detections = np.concatenate(
+                (image_pred[:, :5], class_conf, class_pred), axis=1
+            )
+            detections = detections[conf_mask]
 
-            ordered = dets[dets[:, 5].argsort()[::-1]][:20]
+            ordered = detections[detections[:, 5].argsort()[::-1]][:20]
             for i, object_detected in enumerate(ordered):
                 detections[i] = self.process_yolo(
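
The first hunk builds the usual YOLOX-style coordinate grids, one per output stride. A self-contained sketch of the same idea (the 416x416 input size is assumed for illustration):

import numpy as np

h, w = 416, 416          # assumed model input size
strides = [8, 16, 32]    # one output scale per stride

grids = []
hsize_list = [h // stride for stride in strides]
wsize_list = [w // stride for stride in strides]

for hsize, wsize, stride in zip(hsize_list, wsize_list, strides):
    # all (x, y) cell coordinates for this output scale
    xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize))
    grid = np.stack((xv, yv), 2).reshape(1, -1, 2)
    grids.append(grid)

# one row of cell offsets per prediction across all scales
all_grids = np.concatenate(grids, axis=1)
print(all_grids.shape)  # (1, 3549, 2) for 416x416 with strides 8/16/32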


@@ -17,7 +17,7 @@ supported_socs = ["rk3562", "rk3566", "rk3568", "rk3576", "rk3588"]
 supported_models = {ModelTypeEnum.yolonas: "^deci-fp16-yolonas_[sml]$"}
 
-model_chache_dir = "/config/model_cache/rknn_cache/"
+model_cache_dir = "/config/model_cache/rknn_cache/"
 
 
 class RknnDetectorConfig(BaseDetectorConfig):
@@ -110,7 +110,7 @@ class Rknn(DetectionApi):
         if model_matched:
             model_props["filename"] = model_path + f"-{soc}-v2.0.0-1.rknn"
-            model_props["path"] = model_chache_dir + model_props["filename"]
+            model_props["path"] = model_cache_dir + model_props["filename"]
 
             if not os.path.isfile(model_props["path"]):
                 self.download_model(model_props["filename"])
@@ -125,12 +125,12 @@
         return model_props
 
     def download_model(self, filename):
-        if not os.path.isdir(model_chache_dir):
-            os.mkdir(model_chache_dir)
+        if not os.path.isdir(model_cache_dir):
+            os.mkdir(model_cache_dir)
 
         urllib.request.urlretrieve(
             f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
-            model_chache_dir + filename,
+            model_cache_dir + filename,
         )
 
     def check_config(self, config):
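
Beyond the typo fix, these hunks show a plain download-if-missing cache. Folded into one hypothetical helper for illustration (`ensure_model` is not a function in the codebase, and `os.makedirs` with `exist_ok` stands in for the `isdir`/`mkdir` pair):

import os
import urllib.request

model_cache_dir = "/config/model_cache/rknn_cache/"

def ensure_model(filename: str) -> str:
    """Download a model into the cache directory unless it is already there."""
    path = model_cache_dir + filename
    if not os.path.isfile(path):
        # create the cache directory on first use
        os.makedirs(model_cache_dir, exist_ok=True)
        urllib.request.urlretrieve(
            f"https://github.com/MarcA711/rknn-models/releases/download/v2.0.0/{filename}",
            path,
        )
    return path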


@@ -285,14 +285,14 @@ class TensorRtDetector(DetectionApi):
         boxes, scores, classes
         """
         # filter low-conf detections and concatenate results of all yolo layers
-        detections = []
+        detection_list = []
         for o in trt_outputs:
-            dets = o.reshape((-1, 7))
-            dets = dets[dets[:, 4] * dets[:, 6] >= conf_th]
-            detections.append(dets)
-        detections = np.concatenate(detections, axis=0)
+            detections = o.reshape((-1, 7))
+            detections = detections[detections[:, 4] * detections[:, 6] >= conf_th]
+            detection_list.append(detections)
+        detection_list = np.concatenate(detection_list, axis=0)
 
-        return detections
+        return detection_list
 
     def detect_raw(self, tensor_input):
         # Input tensor has the shape of the [height, width, 3]
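
The rename makes the two roles explicit: per-layer `detections` versus the accumulated `detection_list`. The logic can be exercised with fabricated outputs (the 7-column row layout — x, y, w, h, box confidence, class id, class probability — is the one implied by columns 4 and 6 here, stated as an assumption):

import numpy as np

def filter_detections(trt_outputs, conf_th=0.4):
    """Keep rows whose box confidence times class probability clears conf_th."""
    detection_list = []
    for o in trt_outputs:
        detections = o.reshape((-1, 7))
        detections = detections[detections[:, 4] * detections[:, 6] >= conf_th]
        detection_list.append(detections)
    return np.concatenate(detection_list, axis=0)

# toy tensors standing in for the three YOLO output layers
outputs = [np.random.rand(100 * 7) for _ in range(3)]
print(filter_detections(outputs).shape)  # (N, 7) with N <= 300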


@@ -26,7 +26,7 @@ def preprocess(tensor_input, model_input_shape, model_input_element_type):
         logger.warn(
             f"preprocess: tensor_input.shape {tensor_input.shape} and model_input_shape {model_input_shape} do not match!"
         )
-    # cv2.dnn.blobFromImage is faster than numpying it
+    # cv2.dnn.blobFromImage is faster than running it through numpy
     return cv2.dnn.blobFromImage(
         tensor_input[0],
         1.0 / 255,
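
The reworded comment refers to cv2.dnn.blobFromImage doing the scale-and-transpose to an NCHW blob in one optimized call. A sketch of the numpy equivalent it replaces (frame fabricated; no mean subtraction or channel swap assumed):

import cv2
import numpy as np

frame = np.random.randint(0, 255, (320, 320, 3), dtype=np.uint8)

# NCHW float blob, scaled to [0, 1]
blob = cv2.dnn.blobFromImage(frame, 1.0 / 255, (320, 320), (0, 0, 0), swapRB=False)

# the same result via plain numpy: HWC -> CHW, add batch dim, scale
manual = frame.transpose(2, 0, 1)[np.newaxis].astype(np.float32) / 255

print(blob.shape, manual.shape)   # (1, 3, 320, 320) for both
print(np.allclose(blob, manual))  # expected: True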


@@ -15,7 +15,7 @@ from frigate.models import Event
 # Squelch posthog logging
 logging.getLogger("chromadb.telemetry.product.posthog").setLevel(logging.CRITICAL)
 
-# Hotsawp the sqlite3 module for Chroma compatibility
+# Hot-swap the sqlite3 module for Chroma compatibility
 try:
     from chromadb import Collection
     from chromadb import HttpClient as ChromaClient
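
For context on the comment: Chroma needs a newer SQLite than many base images ship, and the usual hot-swap — sketched here from the commonly documented workaround, not copied from this file — aliases pysqlite3 over the stdlib module before chromadb is imported:

import sys

try:
    # pysqlite3-binary bundles a modern SQLite; alias it over the stdlib module
    __import__("pysqlite3")
    sys.modules["sqlite3"] = sys.modules.pop("pysqlite3")
except ImportError:
    # fall back to the stdlib sqlite3 if the drop-in module is unavailable
    pass

import chromadb  # subsequent imports resolve sqlite3 to the swapped module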


@@ -55,13 +55,13 @@ class FrigateMotionDetector(MotionDetector):
         # Improve contrast
         if self.improve_contrast.value:
-            minval = np.percentile(resized_frame, 4)
-            maxval = np.percentile(resized_frame, 96)
+            min_value = np.percentile(resized_frame, 4)
+            max_value = np.percentile(resized_frame, 96)
             # don't adjust if the image is a single color
-            if minval < maxval:
-                resized_frame = np.clip(resized_frame, minval, maxval)
+            if min_value < max_value:
+                resized_frame = np.clip(resized_frame, min_value, max_value)
                 resized_frame = (
-                    ((resized_frame - minval) / (maxval - minval)) * 255
+                    ((resized_frame - min_value) / (max_value - min_value)) * 255
                 ).astype(np.uint8)
 
         # mask frame
@@ -100,13 +100,13 @@ class FrigateMotionDetector(MotionDetector):
         # dilate the thresholded image to fill in holes, then find contours
         # on thresholded image
         thresh_dilated = cv2.dilate(thresh, None, iterations=2)
-        cnts = cv2.findContours(
+        contours = cv2.findContours(
             thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
         )
-        cnts = imutils.grab_contours(cnts)
+        contours = imutils.grab_contours(contours)
 
         # loop over the contours
-        for c in cnts:
+        for c in contours:
             # if the contour is big enough, count it as motion
             contour_area = cv2.contourArea(c)
             if contour_area > self.contour_area.value:
@@ -124,7 +124,7 @@ class FrigateMotionDetector(MotionDetector):
             thresh_dilated = cv2.cvtColor(thresh_dilated, cv2.COLOR_GRAY2BGR)
             # print("--------")
             # print(self.frame_counter)
-            for c in cnts:
+            for c in contours:
                 contour_area = cv2.contourArea(c)
                 if contour_area > self.contour_area.value:
                     x, y, w, h = cv2.boundingRect(c)
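
The renamed contrast block is a percentile stretch: clip to the 4th–96th percentile range, then rescale to the full 0–255 range. In isolation, with a fabricated frame:

import numpy as np

resized_frame = np.random.randint(0, 255, (100, 150), dtype=np.uint8)

min_value = np.percentile(resized_frame, 4)
max_value = np.percentile(resized_frame, 96)

# don't adjust if the image is a single color
if min_value < max_value:
    resized_frame = np.clip(resized_frame, min_value, max_value)
    resized_frame = (
        ((resized_frame - min_value) / (max_value - min_value)) * 255
    ).astype(np.uint8)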


@@ -79,12 +79,15 @@ class ImprovedMotionDetector(MotionDetector):
         # Improve contrast
         if self.config.improve_contrast:
             # TODO tracking moving average of min/max to avoid sudden contrast changes
-            minval = np.percentile(resized_frame, 4).astype(np.uint8)
-            maxval = np.percentile(resized_frame, 96).astype(np.uint8)
+            min_value = np.percentile(resized_frame, 4).astype(np.uint8)
+            max_value = np.percentile(resized_frame, 96).astype(np.uint8)
             # skip contrast calcs if the image is a single color
-            if minval < maxval:
+            if min_value < max_value:
                 # keep track of the last 50 contrast values
-                self.contrast_values[self.contrast_values_index] = [minval, maxval]
+                self.contrast_values[self.contrast_values_index] = [
+                    min_value,
+                    max_value,
+                ]
                 self.contrast_values_index += 1
                 if self.contrast_values_index == len(self.contrast_values):
                     self.contrast_values_index = 0
@@ -122,14 +125,14 @@ class ImprovedMotionDetector(MotionDetector):
         # dilate the thresholded image to fill in holes, then find contours
         # on thresholded image
         thresh_dilated = cv2.dilate(thresh, None, iterations=1)
-        cnts = cv2.findContours(
+        contours = cv2.findContours(
             thresh_dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
         )
-        cnts = imutils.grab_contours(cnts)
+        contours = imutils.grab_contours(contours)
 
         # loop over the contours
         total_contour_area = 0
-        for c in cnts:
+        for c in contours:
             # if the contour is big enough, count it as motion
             contour_area = cv2.contourArea(c)
             total_contour_area += contour_area


@@ -268,9 +268,9 @@ class PtzAutoTracker:
             self.ptz_metrics[camera]["ptz_autotracker_enabled"].value = False
             return
 
-        movestatus_supported = self.onvif.get_service_capabilities(camera)
+        move_status_supported = self.onvif.get_service_capabilities(camera)
 
-        if movestatus_supported is None or movestatus_supported.lower() != "true":
+        if move_status_supported is None or move_status_supported.lower() != "true":
             logger.warning(
                 f"Disabling autotracking for {camera}: ONVIF MoveStatus not supported"
             )
@@ -807,8 +807,8 @@ class PtzAutoTracker:
         invalid_delta = np.any(delta > delta_thresh)
 
         # Check variance
-        stdevs = np.std(velocities, axis=0)
-        high_variances = np.any(stdevs > var_thresh)
+        stdev_list = np.std(velocities, axis=0)
+        high_variances = np.any(stdev_list > var_thresh)
 
         # Check direction difference
         velocities = np.round(velocities)


@@ -90,9 +90,9 @@ class RecordingMaintainer(threading.Thread):
            try:
                if process.name() != "ffmpeg":
                    continue
-                flist = process.open_files()
-                if flist:
-                    for nt in flist:
+                file_list = process.open_files()
+                if file_list:
+                    for nt in file_list:
                        if nt.path.startswith(CACHE_DIR):
                            files_in_use.append(nt.path.split("/")[-1])
            except psutil.Error:


@@ -250,7 +250,7 @@ def stats_snapshot(
         ffmpeg_pid = (
             camera_stats["ffmpeg_pid"].value if camera_stats["ffmpeg_pid"] else None
         )
-        cpid = (
+        capture_pid = (
             camera_stats["capture_process"].pid
             if camera_stats["capture_process"]
             else None
@@ -262,7 +262,7 @@ def stats_snapshot(
             "detection_fps": round(camera_stats["detection_fps"].value, 2),
             "detection_enabled": config.cameras[name].detect.enabled,
             "pid": pid,
-            "capture_pid": cpid,
+            "capture_pid": capture_pid,
             "ffmpeg_pid": ffmpeg_pid,
             "audio_rms": round(camera_stats["audio_rms"].value, 4),
             "audio_dBFS": round(camera_stats["audio_dBFS"].value, 4),


@@ -1,4 +1,4 @@
-"""Consts for testing."""
+"""Constants for testing."""
 
 TEST_DB = "test.db"
 TEST_DB_CLEANUPS = ["test.db", "test.db-shm", "test.db-wal"]


@@ -511,12 +511,12 @@ def reduce_detections(
         # due to min score requirement of NMSBoxes
         confidences = [0.6 if clipped(o, frame_shape) else o[1] for o in group]
 
-        idxs = cv2.dnn.NMSBoxes(
+        indices = cv2.dnn.NMSBoxes(
             boxes, confidences, 0.5, LABEL_NMS_MAP.get(label, LABEL_NMS_DEFAULT)
         )
 
         # add objects
-        for index in idxs:
+        for index in indices:
             index = index if isinstance(index, np.int32) else index[0]
             obj = group[index]
             selected_objects.append(obj)
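
The `index[0]` guard in this hunk exists because older OpenCV builds return Nx1 index arrays from NMSBoxes while newer ones return a flat array. A small standalone sketch (boxes are x, y, w, h; thresholds chosen arbitrarily for illustration):

import cv2
import numpy as np

# two heavily overlapping boxes and one separate box
boxes = [(10, 10, 50, 50), (12, 12, 50, 50), (200, 200, 40, 40)]
confidences = [0.9, 0.8, 0.7]

# score_threshold=0.5 drops weak boxes; nms_threshold=0.4 merges overlaps
indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

for index in indices:
    # older OpenCV returns Nx1 arrays, newer returns a flat int array
    index = index if isinstance(index, np.int32) else index[0]
    print(boxes[index], confidences[index])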


@@ -122,7 +122,7 @@ def get_cpu_stats() -> dict[str, dict]:
                stats = f.readline().split()
            utime = int(stats[13])
            stime = int(stats[14])
-            starttime = int(stats[21])
+            start_time = int(stats[21])
 
            with open("/proc/uptime") as f:
                system_uptime_sec = int(float(f.read().split()[0]))
@@ -131,9 +131,9 @@ def get_cpu_stats() -> dict[str, dict]:
            process_utime_sec = utime // clk_tck
            process_stime_sec = stime // clk_tck
-            process_starttime_sec = starttime // clk_tck
+            process_start_time_sec = start_time // clk_tck
 
-            process_elapsed_sec = system_uptime_sec - process_starttime_sec
+            process_elapsed_sec = system_uptime_sec - process_start_time_sec
            process_usage_sec = process_utime_sec + process_stime_sec
            cpu_average_usage = process_usage_sec * 100 // process_elapsed_sec
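
The fields here come straight from proc(5): after `split()`, index 13 is utime, 14 is stime, and 21 is the process start time, all in clock ticks. A standalone sketch for one pid (note it naively assumes the comm field contains no spaces, and it guards the zero-elapsed case the original leaves implicit):

import os

def cpu_percent_since_start(pid: int) -> int:
    """Average CPU usage of a process over its lifetime, from /proc."""
    with open(f"/proc/{pid}/stat") as f:
        stats = f.readline().split()
    utime = int(stats[13])       # user-mode clock ticks
    stime = int(stats[14])       # kernel-mode clock ticks
    start_time = int(stats[21])  # ticks after boot when the process started

    with open("/proc/uptime") as f:
        system_uptime_sec = int(float(f.read().split()[0]))

    clk_tck = os.sysconf("SC_CLK_TCK")  # ticks per second, usually 100

    process_usage_sec = (utime + stime) // clk_tck
    # avoid dividing by zero for a process that just started
    process_elapsed_sec = max(system_uptime_sec - start_time // clk_tck, 1)
    return process_usage_sec * 100 // process_elapsed_sec

print(cpu_percent_since_start(os.getpid()))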