2024-05-09 15:40:36 +02:00
|
|
|
# SPDX-FileCopyrightText: 2022-present deepset GmbH <info@deepset.ai>
|
|
|
|
#
|
|
|
|
# SPDX-License-Identifier: Apache-2.0
|
2023-09-21 00:14:07 +02:00
|
|
|
import sys
|
2023-05-22 18:30:35 +02:00
|
|
|
from pathlib import Path
|
|
|
|
from unittest.mock import patch, MagicMock
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
import torch
|
|
|
|
|
2024-06-27 12:53:41 +02:00
|
|
|
from haystack import Pipeline
|
|
|
|
from haystack.components.fetchers import LinkContentFetcher
|
2023-12-18 10:47:46 +01:00
|
|
|
from haystack.dataclasses import Document, ByteStream
|
2023-11-24 14:48:43 +01:00
|
|
|
from haystack.components.audio import LocalWhisperTranscriber
|
2024-02-16 11:25:53 +01:00
|
|
|
from haystack.utils.device import ComponentDevice, Device
|
2023-05-22 18:30:35 +02:00
|
|
|
|
|
|
|
|
|
|
|
# Root directory holding the shared audio test fixtures (three levels up from this file).
SAMPLES_PATH = Path(__file__).parents[2] / "test_files"
|
|
|
|
|
|
|
|
|
2023-08-23 17:03:37 +02:00
|
|
|
class TestLocalWhisperTranscriber:
    def test_init(self):
        """Constructing the component stores the settings but does not load the model."""
        # Even a very large checkpoint is safe here: __init__ must never load weights.
        transcriber = LocalWhisperTranscriber(model="large-v2")
        assert transcriber.model == "large-v2"
        assert transcriber.device == ComponentDevice.resolve_device(None)
        assert transcriber._model is None
|
|
|
|
|
|
|
|
def test_init_wrong_model(self):
|
|
|
|
with pytest.raises(ValueError, match="Model name 'whisper-1' not recognized"):
|
2024-01-12 14:40:30 +01:00
|
|
|
LocalWhisperTranscriber(model="whisper-1")
|
2023-05-22 18:30:35 +02:00
|
|
|
|
2023-08-29 18:15:07 +02:00
|
|
|
def test_to_dict(self):
|
|
|
|
transcriber = LocalWhisperTranscriber()
|
|
|
|
data = transcriber.to_dict()
|
|
|
|
assert data == {
|
2023-11-24 14:48:43 +01:00
|
|
|
"type": "haystack.components.audio.whisper_local.LocalWhisperTranscriber",
|
2024-02-16 11:25:53 +01:00
|
|
|
"init_parameters": {
|
|
|
|
"model": "large",
|
|
|
|
"device": ComponentDevice.resolve_device(None).to_dict(),
|
|
|
|
"whisper_params": {},
|
|
|
|
},
|
2023-08-29 18:15:07 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
def test_to_dict_with_custom_init_parameters(self):
|
|
|
|
transcriber = LocalWhisperTranscriber(
|
2024-02-16 11:25:53 +01:00
|
|
|
model="tiny",
|
|
|
|
device=ComponentDevice.from_str("cuda:0"),
|
|
|
|
whisper_params={"return_segments": True, "temperature": [0.1, 0.6, 0.8]},
|
2023-08-29 18:15:07 +02:00
|
|
|
)
|
|
|
|
data = transcriber.to_dict()
|
|
|
|
assert data == {
|
2023-11-24 14:48:43 +01:00
|
|
|
"type": "haystack.components.audio.whisper_local.LocalWhisperTranscriber",
|
2023-08-29 18:15:07 +02:00
|
|
|
"init_parameters": {
|
2024-01-12 14:40:30 +01:00
|
|
|
"model": "tiny",
|
2024-02-16 11:25:53 +01:00
|
|
|
"device": ComponentDevice.from_str("cuda:0").to_dict(),
|
2023-08-29 18:15:07 +02:00
|
|
|
"whisper_params": {"return_segments": True, "temperature": [0.1, 0.6, 0.8]},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
2024-02-16 11:25:53 +01:00
|
|
|
def test_from_dict(self):
|
|
|
|
data = {
|
|
|
|
"type": "haystack.components.audio.whisper_local.LocalWhisperTranscriber",
|
|
|
|
"init_parameters": {
|
|
|
|
"model": "tiny",
|
|
|
|
"device": ComponentDevice.from_single(Device.cpu()).to_dict(),
|
|
|
|
"whisper_params": {},
|
|
|
|
},
|
|
|
|
}
|
|
|
|
transcriber = LocalWhisperTranscriber.from_dict(data)
|
|
|
|
assert transcriber.model == "tiny"
|
|
|
|
assert transcriber.device == ComponentDevice.from_single(Device.cpu())
|
|
|
|
assert transcriber.whisper_params == {}
|
|
|
|
assert transcriber._model is None
|
|
|
|
|
2024-05-14 08:36:14 +02:00
|
|
|
def test_from_dict_none_device(self):
|
|
|
|
data = {
|
|
|
|
"type": "haystack.components.audio.whisper_local.LocalWhisperTranscriber",
|
|
|
|
"init_parameters": {"model": "tiny", "device": None, "whisper_params": {}},
|
|
|
|
}
|
|
|
|
transcriber = LocalWhisperTranscriber.from_dict(data)
|
|
|
|
assert transcriber.model == "tiny"
|
|
|
|
assert transcriber.device == ComponentDevice.resolve_device(None)
|
|
|
|
assert transcriber.whisper_params == {}
|
|
|
|
assert transcriber._model is None
|
|
|
|
|
2023-05-22 18:30:35 +02:00
|
|
|
def test_warmup(self):
|
2023-11-24 14:48:43 +01:00
|
|
|
with patch("haystack.components.audio.whisper_local.whisper") as mocked_whisper:
|
2024-02-16 13:01:57 +01:00
|
|
|
transcriber = LocalWhisperTranscriber(model="large-v2", device=ComponentDevice.from_str("cpu"))
|
2023-05-22 18:30:35 +02:00
|
|
|
mocked_whisper.load_model.assert_not_called()
|
|
|
|
transcriber.warm_up()
|
|
|
|
mocked_whisper.load_model.assert_called_once_with("large-v2", device=torch.device(type="cpu"))
|
|
|
|
|
|
|
|
def test_warmup_doesnt_reload(self):
|
2023-11-24 14:48:43 +01:00
|
|
|
with patch("haystack.components.audio.whisper_local.whisper") as mocked_whisper:
|
2024-01-12 14:40:30 +01:00
|
|
|
transcriber = LocalWhisperTranscriber(model="large-v2")
|
2023-05-22 18:30:35 +02:00
|
|
|
transcriber.warm_up()
|
|
|
|
transcriber.warm_up()
|
|
|
|
mocked_whisper.load_model.assert_called_once()
|
|
|
|
|
|
|
|
def test_run_with_path(self):
|
2024-01-12 14:40:30 +01:00
|
|
|
comp = LocalWhisperTranscriber(model="large-v2")
|
2023-05-22 18:30:35 +02:00
|
|
|
comp._model = MagicMock()
|
|
|
|
comp._model.transcribe.return_value = {
|
|
|
|
"text": "test transcription",
|
|
|
|
"other_metadata": ["other", "meta", "data"],
|
|
|
|
}
|
2023-12-18 10:47:46 +01:00
|
|
|
results = comp.run(sources=[SAMPLES_PATH / "audio" / "this is the content of the document.wav"])
|
2023-05-22 18:30:35 +02:00
|
|
|
expected = Document(
|
2023-10-31 12:44:04 +01:00
|
|
|
content="test transcription",
|
|
|
|
meta={
|
2023-05-22 18:30:35 +02:00
|
|
|
"audio_file": SAMPLES_PATH / "audio" / "this is the content of the document.wav",
|
|
|
|
"other_metadata": ["other", "meta", "data"],
|
|
|
|
},
|
|
|
|
)
|
2023-08-09 15:51:32 +02:00
|
|
|
assert results["documents"] == [expected]
|
2023-05-22 18:30:35 +02:00
|
|
|
|
|
|
|
def test_run_with_str(self):
|
2024-01-12 14:40:30 +01:00
|
|
|
comp = LocalWhisperTranscriber(model="large-v2")
|
2023-05-22 18:30:35 +02:00
|
|
|
comp._model = MagicMock()
|
|
|
|
comp._model.transcribe.return_value = {
|
|
|
|
"text": "test transcription",
|
|
|
|
"other_metadata": ["other", "meta", "data"],
|
|
|
|
}
|
|
|
|
results = comp.run(
|
2023-12-18 10:47:46 +01:00
|
|
|
sources=[str((SAMPLES_PATH / "audio" / "this is the content of the document.wav").absolute())]
|
2023-05-22 18:30:35 +02:00
|
|
|
)
|
|
|
|
expected = Document(
|
2023-10-31 12:44:04 +01:00
|
|
|
content="test transcription",
|
|
|
|
meta={
|
2023-12-18 10:47:46 +01:00
|
|
|
"audio_file": (SAMPLES_PATH / "audio" / "this is the content of the document.wav").absolute(),
|
2023-05-22 18:30:35 +02:00
|
|
|
"other_metadata": ["other", "meta", "data"],
|
|
|
|
},
|
|
|
|
)
|
2023-08-09 15:51:32 +02:00
|
|
|
assert results["documents"] == [expected]
|
2023-05-22 18:30:35 +02:00
|
|
|
|
|
|
|
def test_transcribe(self):
|
2024-01-12 14:40:30 +01:00
|
|
|
comp = LocalWhisperTranscriber(model="large-v2")
|
2023-05-22 18:30:35 +02:00
|
|
|
comp._model = MagicMock()
|
|
|
|
comp._model.transcribe.return_value = {
|
|
|
|
"text": "test transcription",
|
|
|
|
"other_metadata": ["other", "meta", "data"],
|
|
|
|
}
|
2023-12-18 10:47:46 +01:00
|
|
|
results = comp.transcribe(sources=[SAMPLES_PATH / "audio" / "this is the content of the document.wav"])
|
2023-05-22 18:30:35 +02:00
|
|
|
expected = Document(
|
2023-10-31 12:44:04 +01:00
|
|
|
content="test transcription",
|
|
|
|
meta={
|
2023-05-22 18:30:35 +02:00
|
|
|
"audio_file": SAMPLES_PATH / "audio" / "this is the content of the document.wav",
|
|
|
|
"other_metadata": ["other", "meta", "data"],
|
|
|
|
},
|
|
|
|
)
|
|
|
|
assert results == [expected]
|
|
|
|
|
|
|
|
def test_transcribe_stream(self):
|
2024-01-12 14:40:30 +01:00
|
|
|
comp = LocalWhisperTranscriber(model="large-v2")
|
2023-05-22 18:30:35 +02:00
|
|
|
comp._model = MagicMock()
|
|
|
|
comp._model.transcribe.return_value = {
|
|
|
|
"text": "test transcription",
|
|
|
|
"other_metadata": ["other", "meta", "data"],
|
|
|
|
}
|
2023-12-18 10:47:46 +01:00
|
|
|
path = SAMPLES_PATH / "audio" / "this is the content of the document.wav"
|
|
|
|
bs = ByteStream.from_file_path(path)
|
2023-12-21 17:09:58 +05:30
|
|
|
bs.meta["file_path"] = path
|
2023-12-18 10:47:46 +01:00
|
|
|
results = comp.transcribe(sources=[bs])
|
2023-05-22 18:30:35 +02:00
|
|
|
expected = Document(
|
2023-12-18 10:47:46 +01:00
|
|
|
content="test transcription", meta={"audio_file": path, "other_metadata": ["other", "meta", "data"]}
|
2023-05-22 18:30:35 +02:00
|
|
|
)
|
|
|
|
assert results == [expected]
|
2023-09-20 14:48:09 +02:00
|
|
|
|
|
|
|
@pytest.mark.integration
|
2023-09-21 00:14:07 +02:00
|
|
|
@pytest.mark.skipif(sys.platform in ["win32", "cygwin"], reason="ffmpeg not installed on Windows CI")
|
2023-11-24 14:48:43 +01:00
|
|
|
def test_whisper_local_transcriber(self, test_files_path):
|
2024-06-27 12:53:41 +02:00
|
|
|
comp = LocalWhisperTranscriber(model="tiny", whisper_params={"language": "english"})
|
2023-09-20 14:48:09 +02:00
|
|
|
comp.warm_up()
|
|
|
|
output = comp.run(
|
2023-12-18 10:47:46 +01:00
|
|
|
sources=[
|
2023-11-24 14:48:43 +01:00
|
|
|
test_files_path / "audio" / "this is the content of the document.wav",
|
|
|
|
str((test_files_path / "audio" / "the context for this answer is here.wav").absolute()),
|
2023-12-18 10:47:46 +01:00
|
|
|
ByteStream.from_file_path(test_files_path / "audio" / "answer.wav", "rb"),
|
2023-09-20 14:48:09 +02:00
|
|
|
]
|
|
|
|
)
|
|
|
|
docs = output["documents"]
|
|
|
|
assert len(docs) == 3
|
|
|
|
|
2024-06-27 12:53:41 +02:00
|
|
|
assert all(
|
|
|
|
word in docs[0].content.strip().lower() for word in {"content", "the", "document"}
|
|
|
|
), f"Expected words not found in: {docs[0].content.strip().lower()}"
|
2023-11-24 14:48:43 +01:00
|
|
|
assert test_files_path / "audio" / "this is the content of the document.wav" == docs[0].meta["audio_file"]
|
2023-09-20 14:48:09 +02:00
|
|
|
|
2024-06-27 12:53:41 +02:00
|
|
|
assert all(
|
|
|
|
word in docs[1].content.strip().lower() for word in {"context", "answer"}
|
|
|
|
), f"Expected words not found in: {docs[1].content.strip().lower()}"
|
2023-12-18 10:47:46 +01:00
|
|
|
path = test_files_path / "audio" / "the context for this answer is here.wav"
|
|
|
|
assert path.absolute() == docs[1].meta["audio_file"]
|
2023-09-20 14:48:09 +02:00
|
|
|
|
2023-10-31 12:44:04 +01:00
|
|
|
assert docs[2].content.strip().lower() == "answer."
|
2023-12-18 10:47:46 +01:00
|
|
|
# meta.audio_file should contain the temp path where we dumped the audio bytes
|
|
|
|
assert docs[2].meta["audio_file"]
|
2024-06-27 12:53:41 +02:00
|
|
|
|
|
|
|
@pytest.mark.integration
|
|
|
|
@pytest.mark.skipif(sys.platform in ["win32", "cygwin"], reason="ffmpeg not installed on Windows CI")
|
|
|
|
def test_whisper_local_transcriber_pipeline_and_url_source(self):
|
|
|
|
pipe = Pipeline()
|
|
|
|
pipe.add_component("fetcher", LinkContentFetcher())
|
|
|
|
pipe.add_component("transcriber", LocalWhisperTranscriber(model="tiny"))
|
|
|
|
|
|
|
|
pipe.connect("fetcher", "transcriber")
|
|
|
|
result = pipe.run(
|
|
|
|
data={
|
|
|
|
"fetcher": {
|
|
|
|
"urls": ["https://ia903102.us.archive.org/19/items/100-Best--Speeches/EK_19690725_64kb.mp3"]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
)
|
|
|
|
assert "Massachusetts" in result["transcriber"]["documents"][0].content
|