	fix typos (#9895)
This commit is contained in:
parent 617c728a88
commit 3cff3a086b
@@ -204,7 +204,7 @@ model:
 
 ### Intel NCS2 VPU and Myriad X Setup
 
-Intel produces a neural net inference accelleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for accelleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device.
+Intel produces a neural net inference acceleration chip called Myriad X. This chip was sold in their Neural Compute Stick 2 (NCS2) which has been discontinued. If intending to use the MYRIAD device for acceleration, additional setup is required to pass through the USB device. The host needs a udev rule installed to handle the NCS2 device.
 
 ```bash
 sudo usermod -a -G users "$(whoami)"
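The paragraph in this hunk only mentions that a udev rule is needed for the NCS2. As a rough, hypothetical sketch of what such a rule can look like (the rules file name is arbitrary, and the vendor ID should be verified against `lsusb` output for your stick):

```bash
# Hypothetical sketch: grant the "users" group access to the Movidius device over USB.
# 03e7 is the Movidius USB vendor ID; confirm your device's IDs with `lsusb`.
echo 'SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", GROUP="users", MODE="0666", ENV{ID_MM_DEVICE_IGNORE}="1"' \
  | sudo tee /etc/udev/rules.d/97-myriad.rules
sudo udevadm control --reload-rules
sudo udevadm trigger
```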
@@ -403,7 +403,7 @@ model: # required
 
 Explanation for rknn specific options:
 
-- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this in only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit coresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples:
+- **core mask** controls which cores of your NPU should be used. This option applies only to SoCs with a multicore NPU (at the time of writing this in only the RK3588/S). The easiest way is to pass the value as a binary number. To do so, use the prefix `0b` and write a `0` to disable a core and a `1` to enable a core, whereas the last digit corresponds to core0, the second last to core1, etc. You also have to use the cores in ascending order (so you can't use core0 and core2; but you can use core0 and core1). Enabling more cores can reduce the inference speed, especially when using bigger models (see section below). Examples:
   - `core_mask: 0b000` or just `core_mask: 0` let the NPU decide which cores should be used. Default and recommended value.
   - `core_mask: 0b001` use only core0.
   - `core_mask: 0b011` use core0 and core1.
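For reference, the option documented in this hunk is set on the detector entry in the Frigate config. A minimal sketch, assuming an rknn detector entry and a two-core mask (the key names are taken as assumptions here, not from this commit):

```yaml
# Hypothetical config sketch: run the rknn detector on core0 and core1 of the NPU.
detectors:
  rknn:
    type: rknn
    core_mask: 0b011
```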
@@ -609,4 +609,3 @@ Other settings available for the rocm detector
 ### Expected performance
 
 On an AMD Ryzen 3 5400U with integrated GPU (gfx90c) the yolov8n runs in around 9ms per image (about 110 detections per second) and 18ms (55 detections per second) for yolov8s (at 320x320 detector resolution).
-
@@ -3,7 +3,7 @@ id: ha_network_storage
 title: Home Assistant network storage
 ---
 
-As of Home Asisstant Core 2023.6, Network Mounted Storage is supported for addons.
+As of Home Assistant Core 2023.6, Network Mounted Storage is supported for addons.
 
 ## Setting Up Remote Storage For Frigate
 
@@ -43,7 +43,7 @@ Accepts the following query string parameters:
 
 Example parameters:
 
-- `h=300`: resizes the image to 300 pixes tall
+- `h=300`: resizes the image to 300 pixels tall
 
 ### `GET /api/stats`
 
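For illustration, a query string like the one documented above is appended to an image endpoint. A usage sketch, assuming the camera snapshot endpoint `latest.jpg`, a host named `frigate` on port 5000, and a camera called `front_door` (all placeholders):

```bash
# Hypothetical usage sketch: fetch a camera snapshot resized to 300 pixels tall.
curl -o snapshot.jpg "http://frigate:5000/api/front_door/latest.jpg?h=300"
```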
@@ -105,10 +105,10 @@ class Rknn(DetectionApi):
 
             if (config.model.width != 320) or (config.model.height != 320):
                 logger.error(
-                    "Make sure to set the model width and heigth to 320 in your config.yml."
+                    "Make sure to set the model width and height to 320 in your config.yml."
                 )
                 raise Exception(
-                    "Make sure to set the model width and heigth to 320 in your config.yml."
+                    "Make sure to set the model width and height to 320 in your config.yml."
                 )
 
             if config.model.input_pixel_format != "bgr":
@@ -25,7 +25,7 @@ def detect_gfx_version():
 
 
 def auto_override_gfx_version():
-    # If environment varialbe already in place, do not override
+    # If environment variable already in place, do not override
     gfx_version = detect_gfx_version()
     old_override = os.getenv("HSA_OVERRIDE_GFX_VERSION")
     if old_override not in (None, ""):
@@ -116,7 +116,7 @@ class ROCmDetector(DetectionApi):
                 # untested
                 self.model = migraphx.parse_tf(path)
             else:
-                raise Exception(f"AMD/ROCm: unkown model format {path}")
+                raise Exception(f"AMD/ROCm: unknown model format {path}")
             logger.info("AMD/ROCm: compiling the model")
             self.model.compile(
                 migraphx.get_target("gpu"), offload_copy=True, fast_math=True
@@ -43,7 +43,7 @@ class TestUserPassMasking(unittest.TestCase):
         self.rtsp_log_message = "Did you mean file:rtsp://user:password@192.168.1.3:554"
 
     def test_rtsp_in_log_message(self):
-        """Test that the rtsp url in a log message is espaced."""
+        """Test that the rtsp url in a log message is escaped."""
         escaped = clean_camera_user_pass(self.rtsp_log_message)
         print(f"The escaped is {escaped}")
         assert escaped == "Did you mean file:rtsp://*:*@192.168.1.3:554"
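The test above exercises credential masking in logged RTSP URLs. A minimal, hypothetical sketch of that kind of masking (this is not the Frigate implementation of `clean_camera_user_pass`, just an illustration of the behavior the test asserts):

```python
import re

def mask_rtsp_credentials(text: str) -> str:
    # Replace "user:password@" in an rtsp:// URL with "*:*@".
    return re.sub(r"(rtsp://)[^:@/]+:[^:@/]+@", r"\1*:*@", text)

print(mask_rtsp_credentials("Did you mean file:rtsp://user:password@192.168.1.3:554"))
# -> Did you mean file:rtsp://*:*@192.168.1.3:554
```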
@@ -347,7 +347,7 @@ def yuv_to_3_channel_yuv(yuv_frame):
     # flatten the image into array
     yuv_data = yuv_frame.ravel()
 
-    # create a numpy array to hold all the 3 chanel yuv data
+    # create a numpy array to hold all the 3 channel yuv data
     all_yuv_data = np.empty((height, width, 3), dtype=np.uint8)
 
     y_count = height * width
@@ -575,7 +575,7 @@ def intersection_over_union(box_a, box_b):
 
     # compute the intersection over union by taking the intersection
     # area and dividing it by the sum of prediction + ground-truth
-    # areas - the interesection area
+    # areas - the intersection area
     iou = inter_area / float(box_a_area + box_b_area - inter_area)
 
     # return the intersection over union value
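The hunk above only touches a comment inside the IoU helper. For context, a minimal self-contained sketch of the standard intersection-over-union computation the surrounding code performs (variable names are illustrative, not copied from the Frigate source):

```python
def intersection_over_union(box_a, box_b):
    # Boxes are (x_min, y_min, x_max, y_max).
    x_a = max(box_a[0], box_b[0])
    y_a = max(box_a[1], box_b[1])
    x_b = min(box_a[2], box_b[2])
    y_b = min(box_a[3], box_b[3])

    # Intersection area (zero when the boxes do not overlap).
    inter_area = max(0.0, x_b - x_a) * max(0.0, y_b - y_a)

    # Individual box areas.
    box_a_area = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    box_b_area = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])

    # IoU: intersection area divided by the union of the two areas.
    return inter_area / float(box_a_area + box_b_area - inter_area)
```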