diff --git a/docker/main/install_deps.sh b/docker/main/install_deps.sh
index 94198d290..c4fe56f03 100755
--- a/docker/main/install_deps.sh
+++ b/docker/main/install_deps.sh
@@ -70,9 +70,16 @@ fi

 # arch specific packages
 if [[ "${TARGETARCH}" == "amd64" ]]; then
+    # Install non-free version of i965 driver
+    sed -i -E "/^Components: main$/s/main/main contrib non-free non-free-firmware/" "/etc/apt/sources.list.d/debian.sources" \
+        && apt-get -qq update \
+        && apt-get install --no-install-recommends --no-install-suggests -y i965-va-driver-shaders \
+        && sed -i -E "/^Components: main contrib non-free non-free-firmware$/s/main contrib non-free non-free-firmware/main/" "/etc/apt/sources.list.d/debian.sources" \
+        && apt-get update
+
     # install amd / intel-i965 driver packages
     apt-get -qq install --no-install-recommends --no-install-suggests -y \
-        i965-va-driver intel-gpu-tools onevpl-tools \
+        intel-gpu-tools onevpl-tools \
         libva-drm2 \
         mesa-va-drivers radeontop
diff --git a/docs/docs/configuration/camera_specific.md b/docs/docs/configuration/camera_specific.md
index 644054d7a..98bb02c17 100644
--- a/docs/docs/configuration/camera_specific.md
+++ b/docs/docs/configuration/camera_specific.md
@@ -144,7 +144,14 @@ WEB Digest Algorithm - MD5

 ### Reolink Cameras

-Reolink has older cameras (ex: 410 & 520) as well as newer camera (ex: 520a & 511wa) which support different subsets of options. In both cases using the http stream is recommended.
+Reolink has many different camera models with inconsistently supported features and behavior. The table below summarizes feature support and stream recommendations for each.
+
+| Camera Resolution | Camera Generation         | Recommended Stream Type           | Additional Notes                                                         |
+| ----------------- | ------------------------- | --------------------------------- | ------------------------------------------------------------------------ |
+| 5MP or lower      | All                       | http-flv                          | Stream is h264                                                           |
+| 6MP or higher     | Latest (ex: Duo3, CX-8##) | http-flv with ffmpeg 8.0, or rtsp | This uses the new http-flv-enhanced over H265, which requires ffmpeg 8.0 |
+| 6MP or higher     | Older (ex: RLC-8##)       | rtsp                              |                                                                          |
+
 Frigate works much better with newer reolink cameras that are setup with the below options:

 If available, recommended settings are:
@@ -157,12 +164,6 @@ According to [this discussion](https://github.com/blakeblackshear/frigate/issues

 Cameras connected via a Reolink NVR can be connected with the http stream, use `channel[0..15]` in the stream url for the additional channels. The setup of main stream can be also done via RTSP, but isn't always reliable on all hardware versions. The example configuration is working with the oldest HW version RLN16-410 device with multiple types of cameras.

-:::warning
-
-The below configuration only works for reolink cameras with stream resolution of 5MP or lower, 8MP+ cameras need to use RTSP as http-flv is not supported in this case.
- -::: - ```yaml go2rtc: streams: @@ -259,7 +259,7 @@ To use a USB camera (webcam) with Frigate, the recommendation is to use go2rtc's go2rtc: streams: usb_camera: - - "ffmpeg:device?video=0&video_size=1024x576#video=h264" + - "ffmpeg:device?video=0&video_size=1024x576#video=h264" cameras: usb_camera: diff --git a/docs/docs/configuration/cameras.md b/docs/docs/configuration/cameras.md index 467e25f4b..57b3be1d4 100644 --- a/docs/docs/configuration/cameras.md +++ b/docs/docs/configuration/cameras.md @@ -111,10 +111,7 @@ The FeatureList on the [ONVIF Conformant Products Database](https://www.onvif.or | Hanwha XNP-6550RH | ✅ | ❌ | | | Hikvision | ✅ | ❌ | Incomplete ONVIF support (MoveStatus won't update even on latest firmware) - reported with HWP-N4215IH-DE and DS-2DE3304W-DE, but likely others | | Hikvision DS-2DE3A404IWG-E/W | ✅ | ✅ | | -| Reolink 511WA | ✅ | ❌ | Zoom only | -| Reolink E1 Pro | ✅ | ❌ | | -| Reolink E1 Zoom | ✅ | ❌ | | -| Reolink RLC-823A 16x | ✅ | ❌ | | +| Reolink | ✅ | ❌ | | | Speco O8P32X | ✅ | ❌ | | | Sunba 405-D20X | ✅ | ❌ | Incomplete ONVIF support reported on original, and 4k models. All models are suspected incompatable. | | Tapo | ✅ | ❌ | Many models supported, ONVIF Service Port: 2020 | diff --git a/docs/docs/configuration/ffmpeg_presets.md b/docs/docs/configuration/ffmpeg_presets.md index 8fd0fd811..8bba62e36 100644 --- a/docs/docs/configuration/ffmpeg_presets.md +++ b/docs/docs/configuration/ffmpeg_presets.md @@ -21,8 +21,7 @@ See [the hwaccel docs](/configuration/hardware_acceleration_video.md) for more i | preset-nvidia | Nvidia GPU | | | preset-jetson-h264 | Nvidia Jetson with h264 stream | | | preset-jetson-h265 | Nvidia Jetson with h265 stream | | -| preset-rk-h264 | Rockchip MPP with h264 stream | Use image with \*-rk suffix and privileged mode | -| preset-rk-h265 | Rockchip MPP with h265 stream | Use image with \*-rk suffix and privileged mode | +| preset-rkmpp | Rockchip MPP | Use image with \*-rk suffix and privileged mode | ### Input Args Presets diff --git a/docs/docs/configuration/hardware_acceleration_video.md b/docs/docs/configuration/hardware_acceleration_video.md index 057ae223d..cb8d7007b 100644 --- a/docs/docs/configuration/hardware_acceleration_video.md +++ b/docs/docs/configuration/hardware_acceleration_video.md @@ -9,7 +9,6 @@ It is highly recommended to use a GPU for hardware acceleration video decoding i Depending on your system, these parameters may not be compatible. More information on hardware accelerated decoding for ffmpeg can be found here: https://trac.ffmpeg.org/wiki/HWAccelIntro -# Object Detection ## Raspberry Pi 3/4 @@ -229,7 +228,7 @@ Additional configuration is needed for the Docker container to be able to access services: frigate: ... - image: ghcr.io/blakeblackshear/frigate:stable + image: ghcr.io/blakeblackshear/frigate:stable-tensorrt deploy: # <------------- Add this section resources: reservations: @@ -247,7 +246,7 @@ docker run -d \ --name frigate \ ... --gpus=all \ - ghcr.io/blakeblackshear/frigate:stable + ghcr.io/blakeblackshear/frigate:stable-tensorrt ``` ### Setup Decoder diff --git a/docs/docs/configuration/object_detectors.md b/docs/docs/configuration/object_detectors.md index 4b13510eb..0e8f0c2a8 100644 --- a/docs/docs/configuration/object_detectors.md +++ b/docs/docs/configuration/object_detectors.md @@ -499,14 +499,13 @@ Also AMD/ROCm does not "officially" support integrated GPUs. 
It still does work. For the rocm frigate build there is some automatic detection:

-- gfx90c -> 9.0.0
 - gfx1031 -> 10.3.0
 - gfx1103 -> 11.0.0

-If you have something else you might need to override the `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `9.0.0`, then you should configure it from command line as:
+If you have something else you might need to override the `HSA_OVERRIDE_GFX_VERSION` at Docker launch. Suppose the version you want is `10.0.0`, then you should configure it from command line as:

 ```bash
-$ docker run -e HSA_OVERRIDE_GFX_VERSION=9.0.0 \
+$ docker run -e HSA_OVERRIDE_GFX_VERSION=10.0.0 \
     ...
 ```
@@ -517,7 +516,7 @@ services:
   frigate:
     environment:
-      HSA_OVERRIDE_GFX_VERSION: "9.0.0"
+      HSA_OVERRIDE_GFX_VERSION: "10.0.0"
 ```

 Figuring out what version you need can be complicated as you can't tell the chipset name and driver from the AMD brand name.
diff --git a/docs/docs/frigate/installation.md b/docs/docs/frigate/installation.md
index 13e66de9e..56f20b775 100644
--- a/docs/docs/frigate/installation.md
+++ b/docs/docs/frigate/installation.md
@@ -43,7 +43,7 @@ The following ports are used by Frigate and can be mapped via docker as required

 | `8971` | Authenticated UI and API access without TLS. Reverse proxies should use this port. |
 | `5000` | Internal unauthenticated UI and API access. Access to this port should be limited. Intended to be used within the docker network for services that integrate with Frigate. |
 | `8554` | RTSP restreaming. By default, these streams are unauthenticated. Authentication can be configured in go2rtc section of config. |
-| `8555` | WebRTC connections for low latency live views. |
+| `8555` | WebRTC connections for cameras with two-way talk support. |

 #### Common Docker Compose storage configurations
diff --git a/docs/docs/frigate/video_pipeline.md b/docs/docs/frigate/video_pipeline.md
index 313e27ed5..ba9365650 100644
--- a/docs/docs/frigate/video_pipeline.md
+++ b/docs/docs/frigate/video_pipeline.md
@@ -15,10 +15,10 @@ At a high level, there are five processing steps that could be applied to a came

 %%{init: {"themeVariables": {"edgeLabelBackground": "transparent"}}}%%
 flowchart LR
-    Feed(Feed\nacquisition) --> Decode(Video\ndecoding)
-    Decode --> Motion(Motion\ndetection)
-    Motion --> Object(Object\ndetection)
-    Feed --> Recording(Recording\nand\nvisualization)
+    Feed(Feed acquisition) --> Decode(Video decoding)
+    Decode --> Motion(Motion detection)
+    Motion --> Object(Object detection)
+    Feed --> Recording(Recording and visualization)
     Motion --> Recording
     Object --> Recording
 ```
diff --git a/docs/docs/guides/configuring_go2rtc.md b/docs/docs/guides/configuring_go2rtc.md
index 652aa3b26..474dde0a2 100644
--- a/docs/docs/guides/configuring_go2rtc.md
+++ b/docs/docs/guides/configuring_go2rtc.md
@@ -114,7 +114,7 @@ section.

 ## Next steps

 1. If the stream you added to go2rtc is also used by Frigate for the `record` or `detect` role, you can migrate your config to pull from the RTSP restream to reduce the number of connections to your camera as shown [here](/configuration/restream#reduce-connections-to-camera).
-2. You may also prefer to [setup WebRTC](/configuration/live#webrtc-extra-configuration) for slightly lower latency than MSE. Note that WebRTC only supports h264 and specific audio formats and may require opening ports on your router.
+2. You can [set up WebRTC](/configuration/live#webrtc-extra-configuration) if your camera supports two-way talk.
Note that WebRTC only supports specific audio formats and may require opening ports on your router. ## Important considerations diff --git a/docs/static/frigate-api.yaml b/docs/static/frigate-api.yaml index 3df025d9f..ca53bdcf7 100644 --- a/docs/static/frigate-api.yaml +++ b/docs/static/frigate-api.yaml @@ -1759,6 +1759,10 @@ paths: - name: include_thumbnails in: query required: false + description: > + Deprecated. Thumbnail data is no longer included in the response. + Use the /api/events/:event_id/thumbnail.:extension endpoint instead. + deprecated: true schema: anyOf: - type: integer @@ -1973,6 +1977,10 @@ paths: - name: include_thumbnails in: query required: false + description: > + Deprecated. Thumbnail data is no longer included in the response. + Use the /api/events/:event_id/thumbnail.:extension endpoint instead. + deprecated: true schema: anyOf: - type: integer diff --git a/frigate/api/classification.py b/frigate/api/classification.py index fd6326e2a..22e0a30f0 100644 --- a/frigate/api/classification.py +++ b/frigate/api/classification.py @@ -218,7 +218,7 @@ async def register_face(request: Request, name: str, file: UploadFile): ) context: EmbeddingsContext = request.app.embeddings - result = context.register_face(name, await file.read()) + result = None if context is None else context.register_face(name, await file.read()) if not isinstance(result, dict): return JSONResponse( diff --git a/frigate/api/defs/query/events_query_parameters.py b/frigate/api/defs/query/events_query_parameters.py index d707ba8cc..187dd3f91 100644 --- a/frigate/api/defs/query/events_query_parameters.py +++ b/frigate/api/defs/query/events_query_parameters.py @@ -1,6 +1,6 @@ from typing import Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field DEFAULT_TIME_RANGE = "00:00,24:00" @@ -21,7 +21,14 @@ class EventsQueryParams(BaseModel): has_clip: Optional[int] = None has_snapshot: Optional[int] = None in_progress: Optional[int] = None - include_thumbnails: Optional[int] = 1 + include_thumbnails: Optional[int] = Field( + 1, + description=( + "Deprecated. Thumbnail data is no longer included in the response. " + "Use the /api/events/:event_id/thumbnail.:extension endpoint instead." + ), + deprecated=True, + ) favorites: Optional[int] = None min_score: Optional[float] = None max_score: Optional[float] = None @@ -40,7 +47,14 @@ class EventsSearchQueryParams(BaseModel): query: Optional[str] = None event_id: Optional[str] = None search_type: Optional[str] = "thumbnail" - include_thumbnails: Optional[int] = 1 + include_thumbnails: Optional[int] = Field( + 1, + description=( + "Deprecated. Thumbnail data is no longer included in the response. " + "Use the /api/events/:event_id/thumbnail.:extension endpoint instead." 
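For context on these hunks: the `include_thumbnails` parameters are flagged through pydantic's `Field(deprecated=True)`, and the `retry_interval` change later in this diff leans on the same `Field` mechanism with `gt=0.0`. A minimal sketch of both behaviors, assuming pydantic v2.7+ (the model and field defaults here are illustrative, not Frigate's actual classes):

```python
from typing import Optional

from pydantic import BaseModel, Field, ValidationError


class ExampleParams(BaseModel):
    include_thumbnails: Optional[int] = Field(
        1,
        description="Deprecated. Use the thumbnail endpoint instead.",
        deprecated=True,  # surfaced in the JSON/OpenAPI schema
    )
    retry_interval: float = Field(10.0, gt=0.0)  # rejects zero and negatives


schema = ExampleParams.model_json_schema()["properties"]["include_thumbnails"]
print(schema.get("deprecated"))  # True

try:
    ExampleParams(retry_interval=0)
except ValidationError as err:
    print(err.errors()[0]["type"])  # "greater_than"
```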
+ ), + deprecated=True, + ) limit: Optional[int] = 50 cameras: Optional[str] = "all" labels: Optional[str] = "all" diff --git a/frigate/api/defs/query/media_query_parameters.py b/frigate/api/defs/query/media_query_parameters.py index fd53af49a..a16f0d53f 100644 --- a/frigate/api/defs/query/media_query_parameters.py +++ b/frigate/api/defs/query/media_query_parameters.py @@ -11,6 +11,11 @@ class Extension(str, Enum): jpg = "jpg" jpeg = "jpeg" + def get_mime_type(self) -> str: + if self in (Extension.jpg, Extension.jpeg): + return "image/jpeg" + return f"image/{self.value}" + class MediaLatestFrameQueryParams(BaseModel): bbox: Optional[int] = None diff --git a/frigate/api/media.py b/frigate/api/media.py index a1ab6648f..8c0943b2e 100644 --- a/frigate/api/media.py +++ b/frigate/api/media.py @@ -145,15 +145,13 @@ def latest_frame( "regions": params.regions, } quality = params.quality - mime_type = extension - if extension == "png": + if extension == Extension.png: quality_params = None - elif extension == "webp": + elif extension == Extension.webp: quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), quality] - else: + else: # jpg or jpeg quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), quality] - mime_type = "jpeg" if camera_name in request.app.frigate_config.cameras: frame = frame_processor.get_current_frame(camera_name, draw_options) @@ -196,18 +194,21 @@ def latest_frame( frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) - _, img = cv2.imencode(f".{extension}", frame, quality_params) + _, img = cv2.imencode(f".{extension.value}", frame, quality_params) return Response( content=img.tobytes(), - media_type=f"image/{mime_type}", + media_type=extension.get_mime_type(), headers={ - "Content-Type": f"image/{mime_type}", "Cache-Control": "no-store" if not params.store else "private, max-age=60", }, ) - elif camera_name == "birdseye" and request.app.frigate_config.birdseye.restream: + elif ( + camera_name == "birdseye" + and request.app.frigate_config.birdseye.enabled + and request.app.frigate_config.birdseye.restream + ): frame = cv2.cvtColor( frame_processor.get_current_frame(camera_name), cv2.COLOR_YUV2BGR_I420, @@ -218,12 +219,11 @@ def latest_frame( frame = cv2.resize(frame, dsize=(width, height), interpolation=cv2.INTER_AREA) - _, img = cv2.imencode(f".{extension}", frame, quality_params) + _, img = cv2.imencode(f".{extension.value}", frame, quality_params) return Response( content=img.tobytes(), - media_type=f"image/{mime_type}", + media_type=extension.get_mime_type(), headers={ - "Content-Type": f"image/{mime_type}", "Cache-Control": "no-store" if not params.store else "private, max-age=60", @@ -812,7 +812,10 @@ def vod_hour(year_month: str, day: int, hour: int, camera_name: str, tz_name: st "/vod/event/{event_id}", description="Returns an HLS playlist for the specified object. 
Append /master.m3u8 or /index.m3u8 for HLS playback.", ) -def vod_event(event_id: str): +def vod_event( + event_id: str, + padding: int = Query(0, description="Padding to apply to the vod."), +): try: event: Event = Event.get(Event.id == event_id) except DoesNotExist: @@ -835,32 +838,23 @@ def vod_event(event_id: str): status_code=404, ) - clip_path = os.path.join(CLIPS_DIR, f"{event.camera}-{event.id}.mp4") - - if not os.path.isfile(clip_path): - end_ts = ( - datetime.now().timestamp() if event.end_time is None else event.end_time - ) - vod_response = vod_ts(event.camera, event.start_time, end_ts) - # If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false - if ( - event.start_time < datetime.now().timestamp() - 300 - and type(vod_response) is tuple - and len(vod_response) == 2 - and vod_response[1] == 404 - ): - Event.update(has_clip=False).where(Event.id == event_id).execute() - return vod_response - - duration = int((event.end_time - event.start_time) * 1000) - return JSONResponse( - content={ - "cache": True, - "discontinuity": False, - "durations": [duration], - "sequences": [{"clips": [{"type": "source", "path": clip_path}]}], - } + end_ts = ( + datetime.now().timestamp() + if event.end_time is None + else (event.end_time + padding) ) + vod_response = vod_ts(event.camera, event.start_time - padding, end_ts) + + # If the recordings are not found and the event started more than 5 minutes ago, set has_clip to false + if ( + event.start_time < datetime.now().timestamp() - 300 + and type(vod_response) is tuple + and len(vod_response) == 2 + and vod_response[1] == 404 + ): + Event.update(has_clip=False).where(Event.id == event_id).execute() + + return vod_response @router.get( @@ -941,7 +935,7 @@ def event_snapshot( def event_thumbnail( request: Request, event_id: str, - extension: str, + extension: Extension, max_cache_age: int = Query( 2592000, description="Max cache age in seconds. Default 30 days in seconds." 
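The new `padding` query parameter on `/vod/event/{event_id}` (and on `/events/{event_id}/clip.mp4` below) widens the requested window on both sides, while events still in progress keep ending at the current time. A small sketch of that window arithmetic, mirroring the handler logic (the function name is illustrative):

```python
from datetime import datetime
from typing import Optional, Tuple


def clip_window(
    start_time: float, end_time: Optional[float], padding: int = 0
) -> Tuple[float, float]:
    # In-progress events end "now"; finished events get padding on both sides.
    end_ts = datetime.now().timestamp() if end_time is None else end_time + padding
    return start_time - padding, end_ts


start, end = clip_window(1700000000.0, 1700000060.0, padding=5)
print(end - start)  # 70.0 -> a 60s event plus 5s of padding on each side
```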
), @@ -966,7 +960,7 @@ def event_thumbnail( if event_id in camera_state.tracked_objects: tracked_obj = camera_state.tracked_objects.get(event_id) if tracked_obj is not None: - thumbnail_bytes = tracked_obj.get_thumbnail(extension) + thumbnail_bytes = tracked_obj.get_thumbnail(extension.value) except Exception: return JSONResponse( content={"success": False, "message": "Event not found"}, @@ -994,23 +988,21 @@ def event_thumbnail( ) quality_params = None - - if extension == "jpg" or extension == "jpeg": + if extension in (Extension.jpg, Extension.jpeg): quality_params = [int(cv2.IMWRITE_JPEG_QUALITY), 70] - elif extension == "webp": + elif extension == Extension.webp: quality_params = [int(cv2.IMWRITE_WEBP_QUALITY), 60] - _, img = cv2.imencode(f".{extension}", thumbnail, quality_params) + _, img = cv2.imencode(f".{extension.value}", thumbnail, quality_params) thumbnail_bytes = img.tobytes() return Response( thumbnail_bytes, - media_type=f"image/{extension}", + media_type=extension.get_mime_type(), headers={ "Cache-Control": f"private, max-age={max_cache_age}" if event_complete else "no-store", - "Content-Type": f"image/{extension}", }, ) @@ -1221,7 +1213,11 @@ def event_snapshot_clean(request: Request, event_id: str, download: bool = False @router.get("/events/{event_id}/clip.mp4") -def event_clip(request: Request, event_id: str): +def event_clip( + request: Request, + event_id: str, + padding: int = Query(0, description="Padding to apply to clip."), +): try: event: Event = Event.get(Event.id == event_id) except DoesNotExist: @@ -1234,8 +1230,12 @@ def event_clip(request: Request, event_id: str): content={"success": False, "message": "Clip not available"}, status_code=404 ) - end_ts = datetime.now().timestamp() if event.end_time is None else event.end_time - return recording_clip(request, event.camera, event.start_time, end_ts) + end_ts = ( + datetime.now().timestamp() + if event.end_time is None + else event.end_time + padding + ) + return recording_clip(request, event.camera, event.start_time - padding, end_ts) @router.get("/events/{event_id}/preview.gif") diff --git a/frigate/config/camera/ffmpeg.py b/frigate/config/camera/ffmpeg.py index 04bbfac7b..dd65fdcd4 100644 --- a/frigate/config/camera/ffmpeg.py +++ b/frigate/config/camera/ffmpeg.py @@ -61,6 +61,7 @@ class FfmpegConfig(FrigateBaseModel): retry_interval: float = Field( default=10.0, title="Time in seconds to wait before FFmpeg retries connecting to the camera.", + gt=0.0, ) apple_compatibility: bool = Field( default=False, diff --git a/frigate/detectors/detector_config.py b/frigate/detectors/detector_config.py index 3893908d2..d7883523d 100644 --- a/frigate/detectors/detector_config.py +++ b/frigate/detectors/detector_config.py @@ -158,6 +158,9 @@ class ModelConfig(BaseModel): self.input_pixel_format = model_info["pixelFormat"] self.model_type = model_info["type"] + if model_info.get("inputDataType"): + self.input_dtype = model_info["inputDataType"] + # generate list of attribute labels self.attributes_map = { **model_info.get("attributes", DEFAULT_ATTRIBUTE_LABEL_MAP), diff --git a/frigate/genai/__init__.py b/frigate/genai/__init__.py index 46c3bd8a0..944178f78 100644 --- a/frigate/genai/__init__.py +++ b/frigate/genai/__init__.py @@ -182,10 +182,15 @@ Rules: event: Event, ) -> Optional[str]: """Generate a description for the frame.""" - prompt = camera_config.objects.genai.object_prompts.get( - event.label, - camera_config.objects.genai.prompt, - ).format(**model_to_dict(event)) + try: + prompt = 
camera_config.genai.object_prompts.get( + event.label, + camera_config.genai.prompt, + ).format(**model_to_dict(event)) + except KeyError as e: + logger.error(f"Invalid key in GenAI prompt: {e}") + return None + logger.debug(f"Sending images to genai provider with prompt: {prompt}") return self._send(prompt, thumbnails) diff --git a/frigate/ptz/autotrack.py b/frigate/ptz/autotrack.py index beecc62ab..6e86ecbf2 100644 --- a/frigate/ptz/autotrack.py +++ b/frigate/ptz/autotrack.py @@ -372,12 +372,13 @@ class PtzAutoTracker: logger.info(f"Camera calibration for {camera} in progress") # zoom levels test + self.zoom_time[camera] = 0 + if ( self.config.cameras[camera].onvif.autotracking.zooming != ZoomingModeEnum.disabled ): logger.info(f"Calibration for {camera} in progress: 0% complete") - self.zoom_time[camera] = 0 for i in range(2): # absolute move to 0 - fully zoomed out @@ -1332,7 +1333,11 @@ class PtzAutoTracker: if camera_config.onvif.autotracking.enabled: if not self.autotracker_init[camera]: - self._autotracker_setup(camera_config, camera) + future = asyncio.run_coroutine_threadsafe( + self._autotracker_setup(camera_config, camera), self.onvif.loop + ) + # Wait for the coroutine to complete + future.result() if self.calibrating[camera]: logger.debug(f"{camera}: Calibrating camera") @@ -1479,7 +1484,8 @@ class PtzAutoTracker: self.tracked_object[camera] = None self.tracked_object_history[camera].clear() - self.ptz_metrics[camera].motor_stopped.wait() + while not self.ptz_metrics[camera].motor_stopped.is_set(): + await self.onvif.get_camera_status(camera) logger.debug( f"{camera}: Time is {self.ptz_metrics[camera].frame_time.value}, returning to preset: {autotracker_config.return_preset}" ) @@ -1489,7 +1495,7 @@ class PtzAutoTracker: ) # update stored zoom level from preset - if not self.ptz_metrics[camera].motor_stopped.is_set(): + while not self.ptz_metrics[camera].motor_stopped.is_set(): await self.onvif.get_camera_status(camera) self.ptz_metrics[camera].tracking_active.clear() diff --git a/frigate/ptz/onvif.py b/frigate/ptz/onvif.py index bd5bef0b0..13faffc97 100644 --- a/frigate/ptz/onvif.py +++ b/frigate/ptz/onvif.py @@ -50,6 +50,8 @@ class OnvifController: self.config = config self.ptz_metrics = ptz_metrics + self.status_locks: dict[str, asyncio.Lock] = {} + # Create a dedicated event loop and run it in a separate thread self.loop = asyncio.new_event_loop() self.loop_thread = threading.Thread(target=self._run_event_loop, daemon=True) @@ -61,6 +63,7 @@ class OnvifController: continue if cam.onvif.host: self.camera_configs[cam_name] = cam + self.status_locks[cam_name] = asyncio.Lock() asyncio.run_coroutine_threadsafe(self._init_cameras(), self.loop) @@ -827,105 +830,110 @@ class OnvifController: return False async def get_camera_status(self, camera_name: str) -> None: - if camera_name not in self.cams.keys(): - logger.error(f"ONVIF is not configured for {camera_name}") - return - - if not self.cams[camera_name]["init"]: - if not await self._init_onvif(camera_name): + async with self.status_locks[camera_name]: + if camera_name not in self.cams.keys(): + logger.error(f"ONVIF is not configured for {camera_name}") return - status_request = self.cams[camera_name]["status_request"] - try: - status = await self.cams[camera_name]["ptz"].GetStatus(status_request) - except Exception: - pass # We're unsupported, that'll be reported in the next check. 
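Two concurrency patterns carry this refactor: the synchronous autotracker hands `_autotracker_setup` to the ONVIF controller's dedicated event loop via `asyncio.run_coroutine_threadsafe` and blocks on `future.result()`, and `get_camera_status` now serializes concurrent polls per camera behind an `asyncio.Lock`. A stripped-down sketch of both, using placeholder names rather than the real controller:

```python
import asyncio
import threading

# Dedicated event loop running in a background thread, as OnvifController does.
loop = asyncio.new_event_loop()
threading.Thread(target=loop.run_forever, daemon=True).start()

status_locks: dict[str, asyncio.Lock] = {"front": asyncio.Lock()}


async def get_camera_status(camera: str) -> str:
    async with status_locks[camera]:  # only one in-flight poll per camera
        await asyncio.sleep(0.01)  # stand-in for the ONVIF GetStatus call
        return "IDLE"


# From synchronous code: hand the coroutine to the loop's thread and block on it.
future = asyncio.run_coroutine_threadsafe(get_camera_status("front"), loop)
print(future.result())  # "IDLE"
```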
+ if not self.cams[camera_name]["init"]: + if not await self._init_onvif(camera_name): + return - try: - pan_tilt_status = getattr(status.MoveStatus, "PanTilt", None) - zoom_status = getattr(status.MoveStatus, "Zoom", None) + status_request = self.cams[camera_name]["status_request"] + try: + status = await self.cams[camera_name]["ptz"].GetStatus(status_request) + except Exception: + pass # We're unsupported, that'll be reported in the next check. - # if it's not an attribute, see if MoveStatus even exists in the status result - if pan_tilt_status is None: - pan_tilt_status = getattr(status, "MoveStatus", None) + try: + pan_tilt_status = getattr(status.MoveStatus, "PanTilt", None) + zoom_status = getattr(status.MoveStatus, "Zoom", None) - # we're unsupported - if pan_tilt_status is None or pan_tilt_status not in [ - "IDLE", - "MOVING", - ]: - raise Exception - except Exception: - logger.warning( - f"Camera {camera_name} does not support the ONVIF GetStatus method. Autotracking will not function correctly and must be disabled in your config." + # if it's not an attribute, see if MoveStatus even exists in the status result + if pan_tilt_status is None: + pan_tilt_status = getattr(status, "MoveStatus", None) + + # we're unsupported + if pan_tilt_status is None or pan_tilt_status not in [ + "IDLE", + "MOVING", + ]: + raise Exception + except Exception: + logger.warning( + f"Camera {camera_name} does not support the ONVIF GetStatus method. Autotracking will not function correctly and must be disabled in your config." + ) + return + + logger.debug( + f"{camera_name}: Pan/tilt status: {pan_tilt_status}, Zoom status: {zoom_status}" ) - return - logger.debug( - f"{camera_name}: Pan/tilt status: {pan_tilt_status}, Zoom status: {zoom_status}" - ) + if pan_tilt_status == "IDLE" and ( + zoom_status is None or zoom_status == "IDLE" + ): + self.cams[camera_name]["active"] = False + if not self.ptz_metrics[camera_name].motor_stopped.is_set(): + self.ptz_metrics[camera_name].motor_stopped.set() - if pan_tilt_status == "IDLE" and (zoom_status is None or zoom_status == "IDLE"): - self.cams[camera_name]["active"] = False - if not self.ptz_metrics[camera_name].motor_stopped.is_set(): - self.ptz_metrics[camera_name].motor_stopped.set() + logger.debug( + f"{camera_name}: PTZ stop time: {self.ptz_metrics[camera_name].frame_time.value}" + ) + self.ptz_metrics[camera_name].stop_time.value = self.ptz_metrics[ + camera_name + ].frame_time.value + else: + self.cams[camera_name]["active"] = True + if self.ptz_metrics[camera_name].motor_stopped.is_set(): + self.ptz_metrics[camera_name].motor_stopped.clear() + + logger.debug( + f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name].frame_time.value}" + ) + + self.ptz_metrics[camera_name].start_time.value = self.ptz_metrics[ + camera_name + ].frame_time.value + self.ptz_metrics[camera_name].stop_time.value = 0 + + if ( + self.config.cameras[camera_name].onvif.autotracking.zooming + != ZoomingModeEnum.disabled + ): + # store absolute zoom level as 0 to 1 interpolated from the values of the camera + self.ptz_metrics[camera_name].zoom_level.value = numpy.interp( + round(status.Position.Zoom.x, 2), + [ + self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Min"], + self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Max"], + ], + [0, 1], + ) logger.debug( - f"{camera_name}: PTZ stop time: {self.ptz_metrics[camera_name].frame_time.value}" + f"{camera_name}: Camera zoom level: {self.ptz_metrics[camera_name].zoom_level.value}" ) + # some hikvision cams won't 
update MoveStatus, so warn if it hasn't changed + if ( + not self.ptz_metrics[camera_name].motor_stopped.is_set() + and not self.ptz_metrics[camera_name].reset.is_set() + and self.ptz_metrics[camera_name].start_time.value != 0 + and self.ptz_metrics[camera_name].frame_time.value + > (self.ptz_metrics[camera_name].start_time.value + 10) + and self.ptz_metrics[camera_name].stop_time.value == 0 + ): + logger.debug( + f"Start time: {self.ptz_metrics[camera_name].start_time.value}, Stop time: {self.ptz_metrics[camera_name].stop_time.value}, Frame time: {self.ptz_metrics[camera_name].frame_time.value}" + ) + # set the stop time so we don't come back into this again and spam the logs self.ptz_metrics[camera_name].stop_time.value = self.ptz_metrics[ camera_name ].frame_time.value - else: - self.cams[camera_name]["active"] = True - if self.ptz_metrics[camera_name].motor_stopped.is_set(): - self.ptz_metrics[camera_name].motor_stopped.clear() - - logger.debug( - f"{camera_name}: PTZ start time: {self.ptz_metrics[camera_name].frame_time.value}" + logger.warning( + f"Camera {camera_name} is still in ONVIF 'MOVING' status." ) - self.ptz_metrics[camera_name].start_time.value = self.ptz_metrics[ - camera_name - ].frame_time.value - self.ptz_metrics[camera_name].stop_time.value = 0 - - if ( - self.config.cameras[camera_name].onvif.autotracking.zooming - != ZoomingModeEnum.disabled - ): - # store absolute zoom level as 0 to 1 interpolated from the values of the camera - self.ptz_metrics[camera_name].zoom_level.value = numpy.interp( - round(status.Position.Zoom.x, 2), - [ - self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Min"], - self.cams[camera_name]["absolute_zoom_range"]["XRange"]["Max"], - ], - [0, 1], - ) - logger.debug( - f"{camera_name}: Camera zoom level: {self.ptz_metrics[camera_name].zoom_level.value}" - ) - - # some hikvision cams won't update MoveStatus, so warn if it hasn't changed - if ( - not self.ptz_metrics[camera_name].motor_stopped.is_set() - and not self.ptz_metrics[camera_name].reset.is_set() - and self.ptz_metrics[camera_name].start_time.value != 0 - and self.ptz_metrics[camera_name].frame_time.value - > (self.ptz_metrics[camera_name].start_time.value + 10) - and self.ptz_metrics[camera_name].stop_time.value == 0 - ): - logger.debug( - f"Start time: {self.ptz_metrics[camera_name].start_time.value}, Stop time: {self.ptz_metrics[camera_name].stop_time.value}, Frame time: {self.ptz_metrics[camera_name].frame_time.value}" - ) - # set the stop time so we don't come back into this again and spam the logs - self.ptz_metrics[camera_name].stop_time.value = self.ptz_metrics[ - camera_name - ].frame_time.value - logger.warning(f"Camera {camera_name} is still in ONVIF 'MOVING' status.") - def close(self) -> None: """Gracefully shut down the ONVIF controller.""" if not hasattr(self, "loop") or self.loop.is_closed(): diff --git a/web/src/components/menu/GeneralSettings.tsx b/web/src/components/menu/GeneralSettings.tsx index f747f75ab..16c6eb9f8 100644 --- a/web/src/components/menu/GeneralSettings.tsx +++ b/web/src/components/menu/GeneralSettings.tsx @@ -346,7 +346,9 @@ export default function GeneralSettings({ className }: GeneralSettingsProps) { diff --git a/web/src/components/overlay/ExportDialog.tsx b/web/src/components/overlay/ExportDialog.tsx index 44b55bfe3..c0c0e4538 100644 --- a/web/src/components/overlay/ExportDialog.tsx +++ b/web/src/components/overlay/ExportDialog.tsx @@ -433,137 +433,139 @@ function CustomTimeSelector({ className={`mt-3 flex items-center rounded-lg bg-secondary 
text-secondary-foreground ${isDesktop ? "mx-8 gap-2 px-2" : "pl-2"}`} > - { - if (!open) { - setStartOpen(false); - } - }} - > - - - - - { - if (!day) { - return; - } - - setRange({ - before: endTime, - after: day.getTime() / 1000 + 1, - }); - }} - /> - - { - const clock = e.target.value; - const [hour, minute, second] = isIOS - ? [...clock.split(":"), "00"] - : clock.split(":"); - - const start = new Date(startTime * 1000); - start.setHours( - parseInt(hour), - parseInt(minute), - parseInt(second ?? 0), - 0, - ); - setRange({ - before: endTime, - after: start.getTime() / 1000, - }); - }} - /> - - - - { - if (!open) { - setEndOpen(false); - } - }} - > - - - - - { - if (!day) { - return; - } + } + }} + > + + + + + { + if (!day) { + return; + } - setRange({ - after: startTime, - before: day.getTime() / 1000, - }); - }} - /> - - { - const clock = e.target.value; - const [hour, minute, second] = isIOS - ? [...clock.split(":"), "00"] - : clock.split(":"); + setRange({ + before: endTime, + after: day.getTime() / 1000 + 1, + }); + }} + /> + + { + const clock = e.target.value; + const [hour, minute, second] = isIOS + ? [...clock.split(":"), "00"] + : clock.split(":"); - const end = new Date(endTime * 1000); - end.setHours( - parseInt(hour), - parseInt(minute), - parseInt(second ?? 0), - 0, - ); - setRange({ - before: end.getTime() / 1000, - after: startTime, - }); - }} - /> - - + const start = new Date(startTime * 1000); + start.setHours( + parseInt(hour), + parseInt(minute), + parseInt(second ?? 0), + 0, + ); + setRange({ + before: endTime, + after: start.getTime() / 1000, + }); + }} + /> + + + + { + if (!open) { + setEndOpen(false); + } + }} + > + + + + + { + if (!day) { + return; + } + + setRange({ + after: startTime, + before: day.getTime() / 1000, + }); + }} + /> + + { + const clock = e.target.value; + const [hour, minute, second] = isIOS + ? [...clock.split(":"), "00"] + : clock.split(":"); + + const end = new Date(endTime * 1000); + end.setHours( + parseInt(hour), + parseInt(minute), + parseInt(second ?? 0), + 0, + ); + setRange({ + before: end.getTime() / 1000, + after: startTime, + }); + }} + /> + + + ); } diff --git a/web/src/components/player/GenericVideoPlayer.tsx b/web/src/components/player/GenericVideoPlayer.tsx index d64d9a736..4d6cb4ee5 100644 --- a/web/src/components/player/GenericVideoPlayer.tsx +++ b/web/src/components/player/GenericVideoPlayer.tsx @@ -1,4 +1,10 @@ -import React, { useState, useRef, useEffect, useCallback } from "react"; +import React, { + useState, + useRef, + useEffect, + useCallback, + useMemo, +} from "react"; import { useVideoDimensions } from "@/hooks/use-video-dimensions"; import HlsVideoPlayer from "./HlsVideoPlayer"; import ActivityIndicator from "../indicators/activity-indicator"; @@ -89,6 +95,12 @@ export function GenericVideoPlayer({ }, ); + const hlsSource = useMemo(() => { + return { + playlist: source, + }; + }, [source]); + return (
@@ -107,9 +119,7 @@ export function GenericVideoPlayer({ > { + // we must destroy the hlsRef every time the source changes + // so that we can create a new HLS instance with startPosition + // set at the optimal point in time + if (hlsRef.current) { + hlsRef.current.destroy(); + } + } }, [videoRef, hlsRef, useHlsCompat, currentSource]); // state handling diff --git a/web/src/components/player/JSMpegPlayer.tsx b/web/src/components/player/JSMpegPlayer.tsx index 3753a9e46..f85535013 100644 --- a/web/src/components/player/JSMpegPlayer.tsx +++ b/web/src/components/player/JSMpegPlayer.tsx @@ -164,7 +164,7 @@ export default function JSMpegPlayer({ statsIntervalRef.current = setInterval(() => { const currentTimestamp = Date.now(); const timeDiff = (currentTimestamp - lastTimestampRef.current) / 1000; // in seconds - const bitrate = (bytesReceivedRef.current * 8) / timeDiff / 1000; // in kbps + const bitrate = bytesReceivedRef.current / timeDiff / 1000; // in kBps setStats?.({ streamType: "jsmpeg", diff --git a/web/src/components/player/LivePlayer.tsx b/web/src/components/player/LivePlayer.tsx index 5d3b6aa7f..f61e544eb 100644 --- a/web/src/components/player/LivePlayer.tsx +++ b/web/src/components/player/LivePlayer.tsx @@ -82,7 +82,7 @@ export default function LivePlayer({ const [stats, setStats] = useState({ streamType: "-", - bandwidth: 0, // in kbps + bandwidth: 0, // in kBps latency: undefined, // in seconds totalFrames: 0, droppedFrames: undefined, diff --git a/web/src/components/player/MsePlayer.tsx b/web/src/components/player/MsePlayer.tsx index f3ef17a24..7c831e596 100644 --- a/web/src/components/player/MsePlayer.tsx +++ b/web/src/components/player/MsePlayer.tsx @@ -338,7 +338,7 @@ function MSEPlayer({ // console.debug("VideoRTC.buffer", b.byteLength, bufLen); } else { try { - sb?.appendBuffer(data); + sb?.appendBuffer(data as ArrayBuffer); } catch (e) { // no-op } @@ -592,7 +592,7 @@ function MSEPlayer({ const now = Date.now(); const bytesLoaded = totalBytesLoaded.current; const timeElapsed = (now - lastTimestamp) / 1000; // seconds - const bandwidth = (bytesLoaded - lastLoadedBytes) / timeElapsed / 1024; // kbps + const bandwidth = (bytesLoaded - lastLoadedBytes) / timeElapsed / 1000; // kBps lastLoadedBytes = bytesLoaded; lastTimestamp = now; diff --git a/web/src/components/player/PlayerStats.tsx b/web/src/components/player/PlayerStats.tsx index baea08b35..6d7e19f5e 100644 --- a/web/src/components/player/PlayerStats.tsx +++ b/web/src/components/player/PlayerStats.tsx @@ -17,7 +17,7 @@ export function PlayerStats({ stats, minimal }: PlayerStatsProps) {

{t("stats.bandwidth.title")}{" "} - {stats.bandwidth.toFixed(2)} kbps + {stats.bandwidth.toFixed(2)} kBps

{stats.latency != undefined && (

@@ -66,7 +66,7 @@ export function PlayerStats({ stats, minimal }: PlayerStatsProps) {

{t("stats.bandwidth.short")}{" "} - {stats.bandwidth.toFixed(2)} kbps + {stats.bandwidth.toFixed(2)} kBps
{stats.latency != undefined && (
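The player changes above consistently relabel the stats overlay from kbps to kBps and fix the math to match: JSMpeg previously multiplied bytes by 8 (bits) before dividing, and MsePlayer divided by 1024 instead of 1000. The conversion, spelled out with illustrative numbers:

```python
bytes_received = 250_000  # bytes observed since the last sample
time_diff = 2.0           # seconds between samples

kBps = bytes_received / time_diff / 1000  # kilobytes per second, as now reported
kbps = kBps * 8                           # the bits-based figure the old label implied

print(kBps, kbps)  # 125.0 kBps == 1000.0 kbps
```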
diff --git a/web/src/components/player/WebRTCPlayer.tsx b/web/src/components/player/WebRTCPlayer.tsx index b4c9ea6b2..81d6a72dd 100644 --- a/web/src/components/player/WebRTCPlayer.tsx +++ b/web/src/components/player/WebRTCPlayer.tsx @@ -266,7 +266,7 @@ export default function WebRtcPlayer({ const bitrate = timeDiff > 0 ? (bytesReceived - lastBytesReceived) / timeDiff / 1000 - : 0; // in kbps + : 0; // in kBps setStats?.({ streamType: "WebRTC", diff --git a/web/src/hooks/use-camera-live-mode.ts b/web/src/hooks/use-camera-live-mode.ts index 238ac70cc..76689b9bc 100644 --- a/web/src/hooks/use-camera-live-mode.ts +++ b/web/src/hooks/use-camera-live-mode.ts @@ -1,5 +1,5 @@ import { CameraConfig, FrigateConfig } from "@/types/frigateConfig"; -import { useCallback, useEffect, useState } from "react"; +import { useCallback, useEffect, useState, useMemo } from "react"; import useSWR from "swr"; import { LivePlayerMode, LiveStreamMetadata } from "@/types/live"; @@ -8,9 +8,54 @@ export default function useCameraLiveMode( windowVisible: boolean, ) { const { data: config } = useSWR("config"); - const { data: allStreamMetadata } = useSWR<{ + + // Get comma-separated list of restreamed stream names for SWR key + const restreamedStreamsKey = useMemo(() => { + if (!cameras || !config) return null; + + const streamNames = new Set(); + cameras.forEach((camera) => { + const isRestreamed = Object.keys(config.go2rtc.streams || {}).includes( + Object.values(camera.live.streams)[0], + ); + + if (isRestreamed) { + Object.values(camera.live.streams).forEach((streamName) => { + streamNames.add(streamName); + }); + } + }); + + return streamNames.size > 0 + ? Array.from(streamNames).sort().join(",") + : null; + }, [cameras, config]); + + const streamsFetcher = useCallback(async (key: string) => { + const streamNames = key.split(","); + const metadata: { [key: string]: LiveStreamMetadata } = {}; + + await Promise.all( + streamNames.map(async (streamName) => { + try { + const response = await fetch(`/api/go2rtc/streams/${streamName}`); + if (response.ok) { + const data = await response.json(); + metadata[streamName] = data; + } + } catch (error) { + // eslint-disable-next-line no-console + console.error(`Failed to fetch metadata for ${streamName}:`, error); + } + }), + ); + + return metadata; + }, []); + + const { data: allStreamMetadata = {} } = useSWR<{ [key: string]: LiveStreamMetadata; - }>(config ? 
"go2rtc/streams" : null, { revalidateOnFocus: false }); + }>(restreamedStreamsKey, streamsFetcher, { revalidateOnFocus: false }); const [preferredLiveModes, setPreferredLiveModes] = useState<{ [key: string]: LivePlayerMode; diff --git a/web/src/hooks/use-video-dimensions.ts b/web/src/hooks/use-video-dimensions.ts index 1fad71dc8..25b8af350 100644 --- a/web/src/hooks/use-video-dimensions.ts +++ b/web/src/hooks/use-video-dimensions.ts @@ -17,7 +17,7 @@ export function useVideoDimensions( }); const videoAspectRatio = useMemo(() => { - return videoResolution.width / videoResolution.height; + return videoResolution.width / videoResolution.height || 16 / 9; }, [videoResolution]); const containerAspectRatio = useMemo(() => { @@ -25,8 +25,8 @@ export function useVideoDimensions( }, [containerWidth, containerHeight]); const videoDimensions = useMemo(() => { - if (!containerWidth || !containerHeight || !videoAspectRatio) - return { width: "100%", height: "100%" }; + if (!containerWidth || !containerHeight) + return { aspectRatio: "16 / 9", width: "100%" }; if (containerAspectRatio > videoAspectRatio) { const height = containerHeight; const width = height * videoAspectRatio; diff --git a/web/src/pages/Settings.tsx b/web/src/pages/Settings.tsx index 47d2d6b35..edbd304d8 100644 --- a/web/src/pages/Settings.tsx +++ b/web/src/pages/Settings.tsx @@ -76,7 +76,11 @@ export default function Settings() { const isAdmin = useIsAdmin(); - const allowedViewsForViewer: SettingsType[] = ["ui", "debug"]; + const allowedViewsForViewer: SettingsType[] = [ + "ui", + "debug", + "notifications", + ]; const visibleSettingsViews = !isAdmin ? allowedViewsForViewer : allSettingsViews; @@ -167,7 +171,7 @@ export default function Settings() { useSearchEffect("page", (page: string) => { if (allSettingsViews.includes(page as SettingsType)) { // Restrict viewer to UI settings - if (!isAdmin && !["ui", "debug"].includes(page)) { + if (!isAdmin && !allowedViewsForViewer.includes(page as SettingsType)) { setPage("ui"); } else { setPage(page as SettingsType); @@ -203,7 +207,7 @@ export default function Settings() { onValueChange={(value: SettingsType) => { if (value) { // Restrict viewer navigation - if (!isAdmin && !["ui", "debug"].includes(value)) { + if (!isAdmin && !allowedViewsForViewer.includes(value)) { setPageToggle("ui"); } else { setPageToggle(value); diff --git a/web/src/views/settings/NotificationsSettingsView.tsx b/web/src/views/settings/NotificationsSettingsView.tsx index 3b2c89f05..37e555dfa 100644 --- a/web/src/views/settings/NotificationsSettingsView.tsx +++ b/web/src/views/settings/NotificationsSettingsView.tsx @@ -46,6 +46,8 @@ import { Trans, useTranslation } from "react-i18next"; import { useDateLocale } from "@/hooks/use-date-locale"; import { useDocDomain } from "@/hooks/use-doc-domain"; import { CameraNameLabel } from "@/components/camera/CameraNameLabel"; +import { useIsAdmin } from "@/hooks/use-is-admin"; +import { cn } from "@/lib/utils"; const NOTIFICATION_SERVICE_WORKER = "notifications-worker.js"; @@ -64,6 +66,10 @@ export default function NotificationView({ const { t } = useTranslation(["views/settings"]); const { getLocaleDocUrl } = useDocDomain(); + // roles + + const isAdmin = useIsAdmin(); + const { data: config, mutate: updateConfig } = useSWR( "config", { @@ -380,7 +386,11 @@ export default function NotificationView({
-
+
{t("notification.notificationSettings.title")} @@ -403,139 +413,152 @@ export default function NotificationView({
-
- - ( - - {t("notification.email.title")} - - - - - {t("notification.email.desc")} - - - - )} - /> + {isAdmin && ( + + + ( + + {t("notification.email.title")} + + + + + {t("notification.email.desc")} + + + + )} + /> - ( - - {allCameras && allCameras?.length > 0 ? ( - <> -
- - {t("notification.cameras.title")} - -
-
- ( + ( + + {allCameras && allCameras?.length > 0 ? ( + <> +
+ + {t("notification.cameras.title")} + +
+
+ ( + { + setChangedValue(true); + if (checked) { + form.setValue("cameras", []); + } + field.onChange(checked); + }} + /> + )} + /> + {allCameras?.map((camera) => ( { setChangedValue(true); + let newCameras; if (checked) { - form.setValue("cameras", []); + newCameras = [ + ...field.value, + camera.name, + ]; + } else { + newCameras = field.value?.filter( + (value) => value !== camera.name, + ); } - field.onChange(checked); + field.onChange(newCameras); + form.setValue("allEnabled", false); }} /> - )} - /> - {allCameras?.map((camera) => ( - { - setChangedValue(true); - let newCameras; - if (checked) { - newCameras = [ - ...field.value, - camera.name, - ]; - } else { - newCameras = field.value?.filter( - (value) => value !== camera.name, - ); - } - field.onChange(newCameras); - form.setValue("allEnabled", false); - }} - /> - ))} + ))} +
+ + ) : ( +
+ {t("notification.cameras.noCameras")}
- - ) : ( -
- {t("notification.cameras.noCameras")} -
- )} + )} - - - {t("notification.cameras.desc")} - -
- )} - /> - -
- - -
- - + /> + +
+ + +
+ + + )}
-
- - +
+ + {t("notification.deviceSpecific")}
- {notificationCameras.length > 0 && ( + {isAdmin && notificationCameras.length > 0 && (
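The settings changes in this diff centralize the viewer allowlist in `allowedViewsForViewer` so the page guard and the view toggle share one source of truth, and NotificationsSettingsView additionally hides admin-only form controls behind `isAdmin`. A minimal sketch of the allowlist pattern, with illustrative role and page names:

```python
ALLOWED_VIEWS_FOR_VIEWER = {"ui", "debug", "notifications"}


def resolve_page(requested: str, is_admin: bool) -> str:
    # Admins may open any settings view; viewers fall back to "ui"
    # unless the requested view is on the shared allowlist.
    if is_admin or requested in ALLOWED_VIEWS_FOR_VIEWER:
        return requested
    return "ui"


print(resolve_page("notifications", is_admin=False))  # "notifications"
print(resolve_page("cameras", is_admin=False))        # "ui"
```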