From 3cff3a086b49c853431ce727e3291e6b8eb1f26b Mon Sep 17 00:00:00 2001 From: John Vandenberg Date: Sun, 18 Feb 2024 06:01:50 +0800 Subject: [PATCH] fix typos (#9895) --- docs/docs/configuration/camera_specific.md | 2 +- docs/docs/configuration/motion_detection.md | 8 ++++---- docs/docs/configuration/object_detectors.md | 9 ++++----- docs/docs/frigate/installation.md | 2 +- docs/docs/guides/ha_network_storage.md | 2 +- docs/docs/guides/reverse_proxy.md | 4 ++-- docs/docs/integrations/api.md | 2 +- docs/docs/troubleshooting/edgetpu.md | 2 +- docs/docs/troubleshooting/faqs.md | 2 +- frigate/detectors/plugins/rknn.py | 4 ++-- frigate/detectors/plugins/rocm.py | 4 ++-- frigate/test/test_camera_pw.py | 2 +- frigate/util/image.py | 4 ++-- 13 files changed, 23 insertions(+), 24 deletions(-) diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md index 9e2213d5f..8440c6fe5 100644 --- a/docs/docs/configuration/camera_specific.md +++ b/docs/docs/configuration/camera_specific.md @@ -101,7 +101,7 @@ If available, recommended settings are: According to [this discussion](https://github.com/blakeblackshear/frigate/issues/3235#issuecomment-1135876973), the http video streams seem to be the most reliable for Reolink. -Cameras connected via a Reolink NVR can be connected with the http stream, use `channel[0..15]` in the stream url for the additional channels. +Cameras connected via a Reolink NVR can be connected with the http stream, use `channel[0..15]` in the stream url for the additional channels. The setup of main stream can be also done via RTSP, but isn't always reliable on all hardware versions. The example configuration is working with the oldest HW version RLN16-410 device with multiple types of cameras. 
:::caution diff --git a/docs/docs/configuration/motion_detection.md b/docs/docs/configuration/motion_detection.md index f3d1d7692..4981dbbb5 100644 --- a/docs/docs/configuration/motion_detection.md +++ b/docs/docs/configuration/motion_detection.md @@ -17,7 +17,7 @@ Before tuning motion it is important to understand the goal. In an optimal confi ## Create Motion Masks -First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md). +First, mask areas with regular motion not caused by the objects you want to detect. The best way to find candidates for motion masks is by watching the debug stream with motion boxes enabled. Good use cases for motion masks are timestamps or tree limbs and large bushes that regularly move due to wind. When possible, avoid creating motion masks that would block motion detection for objects you want to track **even if they are in locations where you don't want events**. Motion masks should not be used to avoid detecting objects in specific areas. More details can be found [in the masks docs.](/configuration/masks.md). ## Prepare For Testing @@ -37,7 +37,7 @@ Remember that motion detection is just used to determine when object detection s ### Threshold -The threshold value dictates how much of a change in a pixels luminance is required to be considered motion. +The threshold value dictates how much of a change in a pixels luminance is required to be considered motion. 
```yaml # default threshold value @@ -69,7 +69,7 @@ motion: Once the threshold calculation is run, the pixels that have changed are grouped together. The contour area value is used to decide which groups of changed pixels qualify as motion. Smaller values are more sensitive meaning people that are far away, small animals, etc. are more likely to be detected as motion, but it also means that small changes in shadows, leaves, etc. are detected as motion. Higher values are less sensitive meaning these things won't be detected as motion but with the risk that desired motion won't be detected until closer to the camera. -Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving. +Watching the motion boxes in the debug view, adjust the contour area until there are no motion boxes smaller than the smallest you'd expect frigate to detect something moving. ### Improve Contrast @@ -77,7 +77,7 @@ At this point if motion is working as desired there is no reason to continue wit ## Tuning Motion Detection During The Night -Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone. +Once daytime motion detection is tuned, there is a chance that the settings will work well for motion detection during the night as well. If this is the case then the preferred settings can be written to the config file and left alone. However, if the preferred day settings do not work well at night it is recommended to use HomeAssistant or some other solution to automate changing the settings. That way completely separate sets of motion settings can be used for optimal day and night motion detection. 
diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 2478aad42..754cf4eaf 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -13,7 +13,7 @@ The CPU detector type runs a TensorFlow Lite model utilizing the CPU without har :::tip -If you do not have GPU or Edge TPU hardware, using the [OpenVINO Detector](#openvino-detector) is often more efficient than using the CPU detector. +If you do not have GPU or Edge TPU hardware, using the [OpenVINO Detector](#openvino-detector) is often more efficient than using the CPU detector. ::: @@ -204,7 +204,7 @@ model: ### Intel NCS2 VPU and Myriad X Setup -Intel produces a neural net inference accelleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for accelleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device. +Intel produces a neural net inference acceleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for acceleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device. ```bash sudo usermod -a -G users "$(whoami)" @@ -403,7 +403,7 @@ model: # required Explanation for rknn specific options: -- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this in only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit coresponds to core0, the second last to core1, etc. 
You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples: +- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this is only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit corresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples: - `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value. - `core_mask: 0b001` use only core0. - `core_mask: 0b011` use core0 and core1. @@ -608,5 +608,4 @@ Other settings available for the rocm detector ### Expected performance -On an AMD Ryzen 3 5400U with integrated GPU (gfx90c) the yolov8n runs in around 9ms per image (about 110 detections per second) and 18ms (55 detections per second) for yolov8s (at 320x320 detector resolution). - +On an AMD Ryzen 3 5400U with integrated GPU (gfx90c) the yolov8n runs in around 9ms per image (about 110 detections per second) and 18ms (55 detections per second) for yolov8s (at 320x320 detector resolution). \ No newline at end of file diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md index 211fe8c34..f07f8f687 100644 --- a/docs/docs/frigate/installation.md +++ b/docs/docs/frigate/installation.md @@ -49,7 +49,7 @@ services: :::caution -Users of the Snapcraft build of Docker cannot use storage locations outside your $HOME folder. 
+Users of the Snapcraft build of Docker cannot use storage locations outside your $HOME folder. ::: diff --git a/docs/docs/guides/ha_network_storage.md b/docs/docs/guides/ha_network_storage.md index b248cae4a..18f39d4f1 100644 --- a/docs/docs/guides/ha_network_storage.md +++ b/docs/docs/guides/ha_network_storage.md @@ -3,7 +3,7 @@ id: ha_network_storage title: Home Assistant network storage --- -As of Home Asisstant Core 2023.6, Network Mounted Storage is supported for addons. +As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons. ## Setting Up Remote Storage For Frigate diff --git a/docs/docs/guides/reverse_proxy.md b/docs/docs/guides/reverse_proxy.md index 479df53e8..798df37bf 100644 --- a/docs/docs/guides/reverse_proxy.md +++ b/docs/docs/guides/reverse_proxy.md @@ -87,7 +87,7 @@ There are many ways to authenticate a website but a straightforward approach is ## Nginx Reverse Proxy -This method shows a working example for subdomain type reverse proxy with SSL enabled. +This method shows a working example for subdomain type reverse proxy with SSL enabled. 
### Setup server and port to reverse proxy @@ -123,7 +123,7 @@ This section points to your SSL files, the example below shows locations to a de ``` -### Setup reverse proxy settings +### Setup reverse proxy settings The settings below enabled connection upgrade, sets up logging (optional) and proxies everything from the `/` context to the docker host and port specified earlier in the configuration diff --git a/docs/docs/integrations/api.md b/docs/docs/integrations/api.md index 20877bb6f..41aba3b41 100644 --- a/docs/docs/integrations/api.md +++ b/docs/docs/integrations/api.md @@ -43,7 +43,7 @@ Accepts the following query string parameters: Example parameters: -- `h=300`: resizes the image to 300 pixes tall +- `h=300`: resizes the image to 300 pixels tall ### `GET /api/stats` diff --git a/docs/docs/troubleshooting/edgetpu.md b/docs/docs/troubleshooting/edgetpu.md index b6cb8d878..5cac6e5af 100644 --- a/docs/docs/troubleshooting/edgetpu.md +++ b/docs/docs/troubleshooting/edgetpu.md @@ -25,7 +25,7 @@ The USB coral can draw up to 900mA and this can be too much for some on-device U The USB coral has different IDs when it is uninitialized and initialized. -- When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped. +- When running Frigate in a VM, Proxmox lxc, etc. you must ensure both device IDs are mapped. - When running HA OS you may need to run the Full Access version of the Frigate addon with the `Protected Mode` switch disabled so that the coral can be accessed. 
## USB Coral Detection Appears to be Stuck diff --git a/docs/docs/troubleshooting/faqs.md b/docs/docs/troubleshooting/faqs.md index ca64974fa..2201fec58 100644 --- a/docs/docs/troubleshooting/faqs.md +++ b/docs/docs/troubleshooting/faqs.md @@ -56,4 +56,4 @@ SQLite does not work well on a network share, if the `/media` folder is mapped t If MQTT isn't working in docker try using the IP of the device hosting the MQTT server instead of `localhost`, `127.0.0.1`, or `mosquitto.ix-mosquitto.svc.cluster.local`. -This is because, by default, Frigate does not run in host mode so localhost points to the Frigate container and not the host device's network. +This is because, by default, Frigate does not run in host mode so localhost points to the Frigate container and not the host device's network. diff --git a/frigate/detectors/plugins/rknn.py b/frigate/detectors/plugins/rknn.py index 01c58b94d..4042fa772 100644 --- a/frigate/detectors/plugins/rknn.py +++ b/frigate/detectors/plugins/rknn.py @@ -105,10 +105,10 @@ class Rknn(DetectionApi): if (config.model.width != 320) or (config.model.height != 320): logger.error( - "Make sure to set the model width and heigth to 320 in your config.yml." + "Make sure to set the model width and height to 320 in your config.yml." ) raise Exception( - "Make sure to set the model width and heigth to 320 in your config.yml." + "Make sure to set the model width and height to 320 in your config.yml." 
) if config.model.input_pixel_format != "bgr": diff --git a/frigate/detectors/plugins/rocm.py b/frigate/detectors/plugins/rocm.py index 51c3a4620..e40febab9 100644 --- a/frigate/detectors/plugins/rocm.py +++ b/frigate/detectors/plugins/rocm.py @@ -25,7 +25,7 @@ def detect_gfx_version(): def auto_override_gfx_version(): - # If environment varialbe already in place, do not override + # If environment variable already in place, do not override gfx_version = detect_gfx_version() old_override = os.getenv("HSA_OVERRIDE_GFX_VERSION") if old_override not in (None, ""): @@ -116,7 +116,7 @@ class ROCmDetector(DetectionApi): # untested self.model = migraphx.parse_tf(path) else: - raise Exception(f"AMD/ROCm: unkown model format {path}") + raise Exception(f"AMD/ROCm: unknown model format {path}") logger.info("AMD/ROCm: compiling the model") self.model.compile( migraphx.get_target("gpu"), offload_copy=True, fast_math=True diff --git a/frigate/test/test_camera_pw.py b/frigate/test/test_camera_pw.py index 137d3aad0..0964f38be 100644 --- a/frigate/test/test_camera_pw.py +++ b/frigate/test/test_camera_pw.py @@ -43,7 +43,7 @@ class TestUserPassMasking(unittest.TestCase): self.rtsp_log_message = "Did you mean file:rtsp://user:password@192.168.1.3:554" def test_rtsp_in_log_message(self): - """Test that the rtsp url in a log message is espaced.""" + """Test that the rtsp url in a log message is escaped.""" escaped = clean_camera_user_pass(self.rtsp_log_message) print(f"The escaped is {escaped}") assert escaped == "Did you mean file:rtsp://*:*@192.168.1.3:554" diff --git a/frigate/util/image.py b/frigate/util/image.py index 4fc3c2fd8..c9da2ae3b 100644 --- a/frigate/util/image.py +++ b/frigate/util/image.py @@ -347,7 +347,7 @@ def yuv_to_3_channel_yuv(yuv_frame): # flatten the image into array yuv_data = yuv_frame.ravel() - # create a numpy array to hold all the 3 chanel yuv data + # create a numpy array to hold all the 3 channel yuv data all_yuv_data = np.empty((height, width, 3), 
dtype=np.uint8) y_count = height * width @@ -575,7 +575,7 @@ def intersection_over_union(box_a, box_b): # compute the intersection over union by taking the intersection # area and dividing it by the sum of prediction + ground-truth - # areas - the interesection area + # areas - the intersection area iou = inter_area / float(box_a_area + box_b_area - inter_area) # return the intersection over union value