Mirror of https://github.com/blakeblackshear/frigate.git (synced 2025-08-04 13:47:37 +02:00)
Audio transcription tweaks (#18540)
* use model runner
* unload whisper model when live transcription is complete
This commit is contained in:
parent 645868e099
commit eb1fe9fe20

frigate/data_processing/common/audio_transcription/model.py (new file, 81 lines)
frigate/data_processing/common/audio_transcription/model.py (new file)
@@ -0,0 +1,81 @@
"""Set up audio transcription models based on model size."""

import logging
import os

import sherpa_onnx
from faster_whisper.utils import download_model

from frigate.comms.inter_process import InterProcessRequestor
from frigate.const import MODEL_CACHE_DIR
from frigate.data_processing.types import AudioTranscriptionModel
from frigate.util.downloader import ModelDownloader

logger = logging.getLogger(__name__)


class AudioTranscriptionModelRunner:
    def __init__(
        self,
        device: str = "CPU",
        model_size: str = "small",
    ):
        self.model: AudioTranscriptionModel = None
        self.requestor = InterProcessRequestor()

        if model_size == "large":
            # use the Whisper download function instead of our own
            logger.debug("Downloading Whisper audio transcription model")
            download_model(
                size_or_id="small" if device == "cuda" else "tiny",
                local_files_only=False,
                cache_dir=os.path.join(MODEL_CACHE_DIR, "whisper"),
            )
            logger.debug("Whisper audio transcription model downloaded")

        else:
            # small model as default
            download_path = os.path.join(MODEL_CACHE_DIR, "sherpa-onnx")
            HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
            self.model_files = {
                "encoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/encoder-epoch-99-avg-1-chunk-16-left-128.onnx",
                "decoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/decoder-epoch-99-avg-1-chunk-16-left-128.onnx",
                "joiner.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/joiner-epoch-99-avg-1-chunk-16-left-128.onnx",
                "tokens.txt": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/tokens.txt",
            }

            if not all(
                os.path.exists(os.path.join(download_path, n))
                for n in self.model_files.keys()
            ):
                self.downloader = ModelDownloader(
                    model_name="sherpa-onnx",
                    download_path=download_path,
                    file_names=self.model_files.keys(),
                    download_func=self.__download_models,
                )
                self.downloader.ensure_model_files()
                self.downloader.wait_for_download()

            self.model = sherpa_onnx.OnlineRecognizer.from_transducer(
                tokens=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/tokens.txt"),
                encoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/encoder.onnx"),
                decoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/decoder.onnx"),
                joiner=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/joiner.onnx"),
                num_threads=2,
                sample_rate=16000,
                feature_dim=80,
                enable_endpoint_detection=True,
                rule1_min_trailing_silence=2.4,
                rule2_min_trailing_silence=1.2,
                rule3_min_utterance_length=300,
                decoding_method="greedy_search",
                provider="cpu",
            )

    def __download_models(self, path: str) -> None:
        try:
            file_name = os.path.basename(path)
            ModelDownloader.download_from_url(self.model_files[file_name], path)
        except Exception as e:
            logger.error(f"Failed to download {path}: {e}")
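For orientation, a minimal sketch of how this shared runner is meant to be driven from a camera thread, mirroring the create_stream/accept_waveform/decode calls the real-time processor uses below; the silent numpy buffer stands in for real microphone samples and assumes the sherpa-onnx files are already cached:

import numpy as np

from frigate.data_processing.common.audio_transcription.model import (
    AudioTranscriptionModelRunner,
)

# One runner per audio process; each camera thread creates its own stream.
runner = AudioTranscriptionModelRunner(device="CPU", model_size="small")
stream = runner.model.create_stream()

# 0.5 s of silence at 16 kHz stands in for real audio frames.
samples = np.zeros(8000, dtype=np.float32)
stream.accept_waveform(16000, samples)

# Drain the decoder whenever enough frames are buffered.
while runner.model.is_ready(stream):
    runner.model.decode_stream(stream)

text = runner.model.get_result(stream).strip()
if runner.model.is_endpoint(stream):
    runner.model.reset(stream)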
frigate/data_processing/real_time/audio_transcription.py
@@ -7,16 +7,20 @@ import threading
 from typing import Optional
 
 import numpy as np
-import sherpa_onnx
 
 from frigate.comms.inter_process import InterProcessRequestor
 from frigate.config import CameraConfig, FrigateConfig
 from frigate.const import MODEL_CACHE_DIR
-from frigate.util.downloader import ModelDownloader
+from frigate.data_processing.common.audio_transcription.model import (
+    AudioTranscriptionModelRunner,
+)
+from frigate.data_processing.real_time.whisper_online import (
+    FasterWhisperASR,
+    OnlineASRProcessor,
+)
 
 from ..types import DataProcessorMetrics
 from .api import RealTimeProcessorApi
-from .whisper_online import FasterWhisperASR, OnlineASRProcessor
 
 logger = logging.getLogger(__name__)
@@ -27,6 +31,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
         config: FrigateConfig,
         camera_config: CameraConfig,
         requestor: InterProcessRequestor,
+        model_runner: AudioTranscriptionModelRunner,
         metrics: DataProcessorMetrics,
         stop_event: threading.Event,
     ):
@@ -34,95 +39,55 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
         self.config = config
         self.camera_config = camera_config
         self.requestor = requestor
-        self.recognizer = None
         self.stream = None
+        self.whisper_model = None
+        self.model_runner = model_runner
         self.transcription_segments = []
         self.audio_queue = queue.Queue()
         self.stop_event = stop_event
 
-        if self.config.audio_transcription.model_size == "large":
-            self.asr = FasterWhisperASR(
-                modelsize="tiny",
-                device="cuda"
-                if self.config.audio_transcription.device == "GPU"
-                else "cpu",
-                lan=config.audio_transcription.language,
-                model_dir=os.path.join(MODEL_CACHE_DIR, "whisper"),
-            )
-            self.asr.use_vad()  # Enable Silero VAD for low-RMS audio
-
-        else:
-            # small model as default
-            download_path = os.path.join(MODEL_CACHE_DIR, "sherpa-onnx")
-            HF_ENDPOINT = os.environ.get("HF_ENDPOINT", "https://huggingface.co")
-            self.model_files = {
-                "encoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/encoder-epoch-99-avg-1-chunk-16-left-128.onnx",
-                "decoder.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/decoder-epoch-99-avg-1-chunk-16-left-128.onnx",
-                "joiner.onnx": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/joiner-epoch-99-avg-1-chunk-16-left-128.onnx",
-                "tokens.txt": f"{HF_ENDPOINT}/csukuangfj/sherpa-onnx-streaming-zipformer-en-2023-06-26/resolve/main/tokens.txt",
-            }
-
-            if not all(
-                os.path.exists(os.path.join(download_path, n))
-                for n in self.model_files.keys()
-            ):
-                self.downloader = ModelDownloader(
-                    model_name="sherpa-onnx",
-                    download_path=download_path,
-                    file_names=self.model_files.keys(),
-                    download_func=self.__download_models,
-                    complete_func=self.__build_recognizer,
-                )
-                self.downloader.ensure_model_files()
-
-        self.__build_recognizer()
-
-    def __download_models(self, path: str) -> None:
-        try:
-            file_name = os.path.basename(path)
-            ModelDownloader.download_from_url(self.model_files[file_name], path)
-        except Exception as e:
-            logger.error(f"Failed to download {path}: {e}")
-
     def __build_recognizer(self) -> None:
         try:
             if self.config.audio_transcription.model_size == "large":
-                self.online = OnlineASRProcessor(
-                    asr=self.asr,
+                # Whisper models need to be per-process and can only run one stream at a time
+                # TODO: try parallel: https://github.com/SYSTRAN/faster-whisper/issues/100
+                logger.debug(f"Loading Whisper model for {self.camera_config.name}")
+                self.whisper_model = FasterWhisperASR(
+                    modelsize="tiny",
+                    device="cuda"
+                    if self.config.audio_transcription.device == "GPU"
+                    else "cpu",
+                    lan=self.config.audio_transcription.language,
+                    model_dir=os.path.join(MODEL_CACHE_DIR, "whisper"),
+                )
+                self.whisper_model.use_vad()
+                self.stream = OnlineASRProcessor(
+                    asr=self.whisper_model,
                 )
             else:
-                self.recognizer = sherpa_onnx.OnlineRecognizer.from_transducer(
-                    tokens=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/tokens.txt"),
-                    encoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/encoder.onnx"),
-                    decoder=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/decoder.onnx"),
-                    joiner=os.path.join(MODEL_CACHE_DIR, "sherpa-onnx/joiner.onnx"),
-                    num_threads=2,
-                    sample_rate=16000,
-                    feature_dim=80,
-                    enable_endpoint_detection=True,
-                    rule1_min_trailing_silence=2.4,
-                    rule2_min_trailing_silence=1.2,
-                    rule3_min_utterance_length=300,
-                    decoding_method="greedy_search",
-                    provider="cpu",
-                )
-                self.stream = self.recognizer.create_stream()
-            logger.debug("Audio transcription (live) initialized")
+                logger.debug(f"Loading sherpa stream for {self.camera_config.name}")
+                self.stream = self.model_runner.model.create_stream()
+            logger.debug(
+                f"Audio transcription (live) initialized for {self.camera_config.name}"
+            )
         except Exception as e:
             logger.error(
                 f"Failed to initialize live streaming audio transcription: {e}"
             )
-            self.recognizer = None
 
     def __process_audio_stream(
         self, audio_data: np.ndarray
     ) -> Optional[tuple[str, bool]]:
-        if (not self.recognizer or not self.stream) and not self.online:
-            logger.debug(
-                "Audio transcription (streaming) recognizer or stream not initialized"
-            )
+        if (
+            self.model_runner.model is None
+            and self.config.audio_transcription.model_size == "small"
+        ):
+            logger.debug("Audio transcription (live) model not initialized")
             return None
 
+        if not self.stream:
+            self.__build_recognizer()
+
         try:
             if audio_data.dtype != np.float32:
                 audio_data = audio_data.astype(np.float32)
@@ -135,10 +100,14 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
 
             if self.config.audio_transcription.model_size == "large":
                 # large model
-                self.online.insert_audio_chunk(audio_data)
-                output = self.online.process_iter()
+                self.stream.insert_audio_chunk(audio_data)
+                output = self.stream.process_iter()
                 text = output[2].strip()
-                is_endpoint = text.endswith((".", "!", "?"))
+                is_endpoint = (
+                    text.endswith((".", "!", "?"))
+                    and sum(len(str(lines)) for lines in self.transcription_segments)
+                    > 300
+                )
 
                 if text:
                     self.transcription_segments.append(text)
@@ -150,11 +119,11 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
                 # small model
                 self.stream.accept_waveform(16000, audio_data)
 
-                while self.recognizer.is_ready(self.stream):
-                    self.recognizer.decode_stream(self.stream)
+                while self.model_runner.model.is_ready(self.stream):
+                    self.model_runner.model.decode_stream(self.stream)
 
-                text = self.recognizer.get_result(self.stream).strip()
-                is_endpoint = self.recognizer.is_endpoint(self.stream)
+                text = self.model_runner.model.get_result(self.stream).strip()
+                is_endpoint = self.model_runner.model.is_endpoint(self.stream)
 
                 logger.debug(f"Transcription result: '{text}'")
 
@@ -166,7 +135,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
 
             if is_endpoint and self.config.audio_transcription.model_size == "small":
                 # reset sherpa if we've reached an endpoint
-                self.recognizer.reset(self.stream)
+                self.model_runner.model.reset(self.stream)
 
             return text, is_endpoint
         except Exception as e:
@@ -190,10 +159,17 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
         logger.debug(
             f"Starting audio transcription thread for {self.camera_config.name}"
         )
+
+        # start with an empty transcription
+        self.requestor.send_data(
+            f"{self.camera_config.name}/audio/transcription",
+            "",
+        )
+
         while not self.stop_event.is_set():
             try:
                 # Get audio data from queue with a timeout to check stop_event
-                obj_data, audio = self.audio_queue.get(timeout=0.1)
+                _, audio = self.audio_queue.get(timeout=0.1)
                 result = self.__process_audio_stream(audio)
 
                 if not result:
@@ -209,7 +185,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
                 self.audio_queue.task_done()
 
                 if is_endpoint:
-                    self.reset(obj_data["camera"])
+                    self.reset()
 
             except queue.Empty:
                 continue
@@ -221,23 +197,7 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
             f"Stopping audio transcription thread for {self.camera_config.name}"
         )
 
-    def reset(self, camera: str) -> None:
-        if self.config.audio_transcription.model_size == "large":
-            # get final output from whisper
-            output = self.online.finish()
-            self.transcription_segments = []
-
-            self.requestor.send_data(
-                f"{self.camera_config.name}/audio/transcription",
-                (output[2].strip() + " "),
-            )
-
-            # reset whisper
-            self.online.init()
-        else:
-            # reset sherpa
-            self.recognizer.reset(self.stream)
-
+    def clear_audio_queue(self) -> None:
         # Clear the audio queue
         while not self.audio_queue.empty():
             try:
@@ -246,8 +206,54 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
             except queue.Empty:
                 break
 
+    def reset(self) -> None:
+        if self.config.audio_transcription.model_size == "large":
+            # get final output from whisper
+            output = self.stream.finish()
+            self.transcription_segments = []
+
+            self.requestor.send_data(
+                f"{self.camera_config.name}/audio/transcription",
+                (output[2].strip() + " "),
+            )
+
+            # reset whisper
+            self.stream.init()
+            self.transcription_segments = []
+        else:
+            # reset sherpa
+            self.model_runner.model.reset(self.stream)
+
         logger.debug("Stream reset")
 
+    def check_unload_model(self) -> None:
+        # regularly called in the loop in audio maintainer
+        if (
+            self.config.audio_transcription.model_size == "large"
+            and self.whisper_model is not None
+        ):
+            logger.debug(f"Unloading Whisper model for {self.camera_config.name}")
+            self.clear_audio_queue()
+            self.transcription_segments = []
+            self.stream = None
+            self.whisper_model = None
+
+            self.requestor.send_data(
+                f"{self.camera_config.name}/audio/transcription",
+                "",
+            )
+        if (
+            self.config.audio_transcription.model_size == "small"
+            and self.stream is not None
+        ):
+            logger.debug(f"Clearing sherpa stream for {self.camera_config.name}")
+            self.stream = None
+
+            self.requestor.send_data(
+                f"{self.camera_config.name}/audio/transcription",
+                "",
+            )
+
     def stop(self) -> None:
         """Stop the transcription thread and clean up."""
         self.stop_event.set()
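The unload path above pairs with the lazy "if not self.stream: self.__build_recognizer()" guard in __process_audio_stream. A toy sketch of that load/unload lifecycle, with model loading reduced to placeholder objects (nothing here is Frigate API beyond the method names it mirrors):

class ToyTranscriptionProcessor:
    """Mirrors the load/unload state machine only, not the real processor."""

    def __init__(self) -> None:
        self.stream = None
        self.whisper_model = None

    def process_audio(self) -> None:
        # lazy rebuild, as __process_audio_stream does via __build_recognizer()
        if self.stream is None:
            self.whisper_model = object()  # stand-in for FasterWhisperASR
            self.stream = object()  # stand-in for OnlineASRProcessor

    def check_unload_model(self) -> None:
        # drop references so the model's memory can be reclaimed
        if self.whisper_model is not None:
            self.stream = None
            self.whisper_model = None


p = ToyTranscriptionProcessor()
p.process_audio()  # model loads on the first live audio frame
p.check_unload_model()  # model unloads once live transcription stops
assert p.whisper_model is None and p.stream is None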
@@ -266,7 +272,6 @@ class AudioTranscriptionRealTimeProcessor(RealTimeProcessorApi):
         self, topic: str, request_data: dict[str, any]
     ) -> dict[str, any] | None:
         if topic == "clear_audio_recognizer":
-            self.recognizer = None
             self.stream = None
             self.__build_recognizer()
             return {"message": "Audio recognizer cleared and rebuilt", "success": True}
frigate/data_processing/real_time/whisper_online.py
@@ -139,8 +139,11 @@ class FasterWhisperASR(ASRBase):
         return model
 
     def transcribe(self, audio, init_prompt=""):
+        from faster_whisper import BatchedInferencePipeline
+
         # tested: beam_size=5 is faster and better than 1 (on one 200 second document from En ESIC, min chunk 0.01)
-        segments, info = self.model.transcribe(
+        batched_model = BatchedInferencePipeline(model=self.model)
+        segments, info = batched_model.transcribe(
             audio,
             language=self.original_language,
             initial_prompt=init_prompt,
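For reference, a standalone sketch of the batched faster-whisper pipeline this change switches to; the "tiny"/"int8" settings and "clip.wav" path are illustrative, not what Frigate ships:

from faster_whisper import BatchedInferencePipeline, WhisperModel

# Load a model once, then wrap it; the pipeline batches audio segments
# internally, which generally transcribes long buffers faster than
# calling WhisperModel.transcribe() directly.
model = WhisperModel("tiny", device="cpu", compute_type="int8")
batched = BatchedInferencePipeline(model=model)

segments, info = batched.transcribe(
    "clip.wav",  # illustrative input file
    language="en",
    beam_size=5,
)
for segment in segments:
    print(f"[{segment.start:.2f} -> {segment.end:.2f}] {segment.text}")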
frigate/data_processing/types.py
@@ -4,6 +4,10 @@ import multiprocessing as mp
 from enum import Enum
 from multiprocessing.sharedctypes import Synchronized
 
+import sherpa_onnx
+
+from frigate.data_processing.real_time.whisper_online import FasterWhisperASR
+
 
 class DataProcessorMetrics:
     image_embeddings_speed: Synchronized
@@ -41,3 +45,6 @@ class PostProcessDataEnum(str, Enum):
     recording = "recording"
     review = "review"
     tracked_object = "tracked_object"
+
+
+AudioTranscriptionModel = FasterWhisperASR | sherpa_onnx.OnlineRecognizer | None
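The union type lets callers branch on whichever backend the runner loaded. A hypothetical helper showing that dispatch (describe is not part of the codebase):

import sherpa_onnx

from frigate.data_processing.types import AudioTranscriptionModel


def describe(model: AudioTranscriptionModel) -> str:
    # None: the runner deferred loading (Whisper loads per camera thread)
    if model is None:
        return "no transcription model loaded"
    # small model: shared streaming sherpa-onnx recognizer
    if isinstance(model, sherpa_onnx.OnlineRecognizer):
        return "sherpa-onnx streaming recognizer"
    # large model: FasterWhisperASR wrapper around faster-whisper
    return "faster-whisper ASR"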
frigate/events/audio.py
@@ -30,6 +30,9 @@ from frigate.const import (
     AUDIO_MIN_CONFIDENCE,
     AUDIO_SAMPLE_RATE,
 )
+from frigate.data_processing.common.audio_transcription.model import (
+    AudioTranscriptionModelRunner,
+)
 from frigate.data_processing.real_time.audio_transcription import (
     AudioTranscriptionRealTimeProcessor,
 )
@@ -87,6 +90,10 @@ class AudioProcessor(util.Process):
         self.camera_metrics = camera_metrics
         self.cameras = cameras
         self.config = config
+        self.transcription_model_runner = AudioTranscriptionModelRunner(
+            self.config.audio_transcription.device,
+            self.config.audio_transcription.model_size,
+        )
 
     def run(self) -> None:
         audio_threads: list[AudioEventMaintainer] = []
@@ -101,6 +108,7 @@ class AudioProcessor(util.Process):
                 camera,
                 self.config,
                 self.camera_metrics,
+                self.transcription_model_runner,
                 self.stop_event,
             )
             audio_threads.append(audio_thread)
@@ -130,6 +138,7 @@ class AudioEventMaintainer(threading.Thread):
         camera: CameraConfig,
         config: FrigateConfig,
         camera_metrics: dict[str, CameraMetrics],
+        audio_transcription_model_runner: AudioTranscriptionModelRunner,
         stop_event: threading.Event,
     ) -> None:
         super().__init__(name=f"{camera.name}_audio_event_processor")
@@ -146,6 +155,7 @@ class AudioEventMaintainer(threading.Thread):
         self.ffmpeg_cmd = get_ffmpeg_command(self.camera_config.ffmpeg)
         self.logpipe = LogPipe(f"ffmpeg.{self.camera_config.name}.audio")
         self.audio_listener = None
+        self.audio_transcription_model_runner = audio_transcription_model_runner
         self.transcription_processor = None
         self.transcription_thread = None
 
@@ -168,6 +178,7 @@ class AudioEventMaintainer(threading.Thread):
             config=self.config,
             camera_config=self.camera_config,
             requestor=self.requestor,
+            model_runner=self.audio_transcription_model_runner,
             metrics=self.camera_metrics[self.camera_config.name],
             stop_event=self.stop_event,
         )
@@ -223,18 +234,18 @@ class AudioEventMaintainer(threading.Thread):
         )
 
         # run audio transcription
-        if self.transcription_processor is not None and (
-            self.camera_config.audio_transcription.live_enabled
-        ):
-            self.transcribing = True
-            # process audio until we've reached the endpoint
-            self.transcription_processor.process_audio(
-                {
-                    "id": f"{self.camera_config.name}_audio",
-                    "camera": self.camera_config.name,
-                },
-                audio,
-            )
+        if self.transcription_processor is not None:
+            if self.camera_config.audio_transcription.live_enabled:
+                # process audio until we've reached the endpoint
+                self.transcription_processor.process_audio(
+                    {
+                        "id": f"{self.camera_config.name}_audio",
+                        "camera": self.camera_config.name,
+                    },
+                    audio,
+                )
+            else:
+                self.transcription_processor.check_unload_model()
 
         self.expire_detections()
 
@@ -309,13 +320,6 @@ class AudioEventMaintainer(threading.Thread):
             )
             self.detections[detection["label"]] = None
 
-            # clear real-time transcription
-            if self.transcription_processor is not None:
-                self.transcription_processor.reset(self.camera_config.name)
-                self.requestor.send_data(
-                    f"{self.camera_config.name}/audio/transcription", ""
-                )
-
     def expire_all_detections(self) -> None:
         """Immediately end all current detections"""
         now = datetime.datetime.now().timestamp()