Mirror of https://github.com/deepset-ai/haystack.git, synced 2025-07-30 20:31:44 +00:00

* Unify CI tests (from #2466)
* Update Documentation & Code Style
* Change folder names
* Fix markers list
* Remove marker 'slow', replaced with 'integration'
* Soften children check
* Start ES first so it has time to boot while Python is setup
* Run the full workflow
* Try to make pip upgrade on Windows
* Set KG tests as integration
* Update Documentation & Code Style
* typo
* faster pylint
* Make Pylint use the cache
* filter diff files for pylint
* debug pylint statement
* revert pylint changes
* Remove path from asserted log (fails on Windows)
* Skip preprocessor test on Windows
* Tackling Windows specific failures
* Fix pytest command for windows suites
* Remove \ from command
* Move poppler test into integration
* Skip opensearch test on windows
* Add tolerance in reader sas score for Windows
* Another pytorch approx
* Raise time limit for unit tests :(
* Skip poppler test on Windows CI
* Specify to pull with FF only in docs check
* temporarily run the docs check immediately
* Allow merge commit for now
* Try without fetch depth
* Accelerating test
* Accelerating test
* Add repository and ref alongside fetch-depth
* Separate out code&docs check from tests
* Use setup-python cache
* Delete custom action
* Remove the pull step in the docs check, will find a way to run on bot commits
* Add requirements.txt in .github for caching
* Actually install dependencies
* Change deps group for pylint
* Unclear why the requirements.txt is still required :/
* Fix the code check python setup
* Install all deps for pylint
* Make the autoformat check depend on tests and doc updates workflows
* Try installing dependencies in another order
* Try again to install the deps
* quoting the paths
* Ad back the requirements
* Try again to install rest_api and ui
* Change deps group
* Duplicate haystack install line
* See if the cache is the problem
* Disable also in mypy, who knows
* split the install step
* Split install step everywhere
* Revert "Separate out code&docs check from tests" (this reverts commit 1cd59b15ffc5b984e1d642dcbf4c8ccc2bb6c9bd)
* Add back the action
* Proactive support for audio (see text2speech branch)
* Fix label generator tests
* Remove install of libsndfile1 on win temporarily
* exclude audio tests on win
* install ffmpeg for integration tests

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
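Several of the commits above rework how the test suite is selected in CI: the 'slow' marker is replaced with 'integration', knowledge-graph and poppler tests are moved under it, and the generator tests in the file below carry both the 'integration' and 'generator' markers. As a rough, hypothetical sketch only (Haystack registers its markers in its own pytest configuration, which is not shown here), custom markers can be declared in a conftest.py so pytest recognizes them:

def pytest_configure(config):
    # Register the custom markers used below so pytest does not warn about unknown marks.
    config.addinivalue_line("markers", "integration: tests that need external services or large models")
    config.addinivalue_line("markers", "generator: tests for answer generator nodes")

A CI workflow can then run the fast suite with `pytest -m "not integration"` and the slower jobs with `pytest -m integration`.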
130 lines
5.7 KiB
Python
import sys
from typing import List

import numpy as np
import pytest

from haystack.schema import Document
from haystack.nodes.answer_generator import Seq2SeqGenerator
from haystack.pipelines import TranslationWrapperPipeline, GenerativeQAPipeline

from ..conftest import DOCS_WITH_EMBEDDINGS

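# Generative QA wrapped in translation: the German query is translated to English,
# answered by the retriever + RAG generator pipeline, and the answer is translated back.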
# Keep only a few (retriever, document_store) combinations to reduce test time
@pytest.mark.skipif(sys.platform in ["win32", "cygwin"], reason="Causes OOM on windows github runner")
@pytest.mark.integration
@pytest.mark.generator
@pytest.mark.parametrize("retriever,document_store", [("embedding", "memory")], indirect=True)
def test_generator_pipeline_with_translator(
    document_store, retriever, rag_generator, en_to_de_translator, de_to_en_translator
):
    document_store.write_documents(DOCS_WITH_EMBEDDINGS)
    query = "Was ist die Hauptstadt der Bundesrepublik Deutschland?"
    base_pipeline = GenerativeQAPipeline(retriever=retriever, generator=rag_generator)
    pipeline = TranslationWrapperPipeline(
        input_translator=de_to_en_translator, output_translator=en_to_de_translator, pipeline=base_pipeline
    )
    output = pipeline.run(query=query, params={"Generator": {"top_k": 2}, "Retriever": {"top_k": 1}})
    answers = output["answers"]
    assert len(answers) == 2
    assert "berlin" in answers[0].answer

@pytest.mark.integration
@pytest.mark.generator
def test_rag_token_generator(rag_generator):
    query = "What is capital of the Germany?"
    generated_docs = rag_generator.predict(query=query, documents=DOCS_WITH_EMBEDDINGS, top_k=1)
    answers = generated_docs["answers"]
    assert len(answers) == 1
    assert "berlin" in answers[0].answer

@pytest.mark.integration
@pytest.mark.generator
@pytest.mark.parametrize("document_store", ["memory"], indirect=True)
@pytest.mark.parametrize("retriever", ["embedding"], indirect=True)
def test_generator_pipeline(document_store, retriever, rag_generator):
    document_store.write_documents(DOCS_WITH_EMBEDDINGS)
    query = "What is capital of the Germany?"
    pipeline = GenerativeQAPipeline(retriever=retriever, generator=rag_generator)
    output = pipeline.run(query=query, params={"Generator": {"top_k": 2}, "Retriever": {"top_k": 1}})
    answers = output["answers"]
    assert len(answers) == 2
    assert "berlin" in answers[0].answer

@pytest.mark.skipif(sys.platform in ["win32", "cygwin"], reason="Causes OOM on windows github runner")
@pytest.mark.integration
@pytest.mark.generator
@pytest.mark.parametrize("document_store", ["memory"], indirect=True)
@pytest.mark.parametrize("retriever", ["retribert", "dpr_lfqa"], indirect=True)
@pytest.mark.parametrize("lfqa_generator", ["yjernite/bart_eli5", "vblagoje/bart_lfqa"], indirect=True)
@pytest.mark.embedding_dim(128)
def test_lfqa_pipeline(document_store, retriever, lfqa_generator):
    # reuse existing DOCS but regenerate embeddings with retribert
    docs: List[Document] = []
    for idx, d in enumerate(DOCS_WITH_EMBEDDINGS):
        docs.append(Document(d.content, str(idx)))
    document_store.write_documents(docs)
    document_store.update_embeddings(retriever)
    query = "Tell me about Berlin?"
    pipeline = GenerativeQAPipeline(generator=lfqa_generator, retriever=retriever)
    output = pipeline.run(query=query, params={"top_k": 1})
    answers = output["answers"]
    assert len(answers) == 1, answers
    assert "Germany" in answers[0].answer, answers[0].answer

@pytest.mark.integration
@pytest.mark.generator
@pytest.mark.parametrize("document_store", ["memory"], indirect=True)
@pytest.mark.parametrize("retriever", ["retribert"], indirect=True)
@pytest.mark.embedding_dim(128)
def test_lfqa_pipeline_unknown_converter(document_store, retriever):
    # reuse existing DOCS but regenerate embeddings with retribert
    docs: List[Document] = []
    for idx, d in enumerate(DOCS_WITH_EMBEDDINGS):
        docs.append(Document(d.content, str(idx)))
    document_store.write_documents(docs)
    document_store.update_embeddings(retriever)
    seq2seq = Seq2SeqGenerator(model_name_or_path="patrickvonplaten/t5-tiny-random")
    query = "Tell me about Berlin?"
    pipeline = GenerativeQAPipeline(retriever=retriever, generator=seq2seq)

    # raises an exception because Seq2SeqGenerator has no input converter registered for "patrickvonplaten/t5-tiny-random"
    with pytest.raises(Exception) as exception_info:
        output = pipeline.run(query=query, params={"top_k": 1})
    assert "doesn't have input converter registered for patrickvonplaten/t5-tiny-random" in str(exception_info.value)

@pytest.mark.integration
@pytest.mark.generator
@pytest.mark.parametrize("document_store", ["memory"], indirect=True)
@pytest.mark.parametrize("retriever", ["retribert"], indirect=True)
@pytest.mark.embedding_dim(128)
def test_lfqa_pipeline_invalid_converter(document_store, retriever):
    # reuse existing DOCS but regenerate embeddings with retribert
    docs: List[Document] = []
    for idx, d in enumerate(DOCS_WITH_EMBEDDINGS):
        docs.append(Document(d.content, str(idx)))
    document_store.write_documents(docs)
    document_store.update_embeddings(retriever)

    class _InvalidConverter:
        def __call__(self, some_invalid_para: str, another_invalid_param: str) -> None:
            pass

    seq2seq = Seq2SeqGenerator(
        model_name_or_path="patrickvonplaten/t5-tiny-random", input_converter=_InvalidConverter()
    )
    query = "This query will fail due to InvalidConverter used"
    pipeline = GenerativeQAPipeline(retriever=retriever, generator=seq2seq)

    # raises an exception because _InvalidConverter has an invalid __call__ method signature
    with pytest.raises(Exception) as exception_info:
        output = pipeline.run(query=query, params={"top_k": 1})
    assert "does not have a valid __call__ method signature" in str(exception_info.value)

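# NOTE: the fixtures used above (document_store, retriever, rag_generator, lfqa_generator,
# en_to_de_translator, de_to_en_translator) and DOCS_WITH_EMBEDDINGS come from the shared
# test conftest, which is not part of this file. The snippet below is only a hypothetical
# sketch of how the generator and translator fixtures might be built with Haystack 1.x
# nodes; the model choices and construction details are assumptions, not the repository's
# actual conftest code.

import pytest

from haystack.nodes import RAGenerator, TransformersTranslator


@pytest.fixture
def rag_generator():
    # RAG token generator used by the generative QA tests (assumed model).
    return RAGenerator(model_name_or_path="facebook/rag-token-nq")


@pytest.fixture
def de_to_en_translator():
    # Translates the German query into English before it reaches the pipeline (assumed model).
    return TransformersTranslator(model_name_or_path="Helsinki-NLP/opus-mt-de-en")


@pytest.fixture
def en_to_de_translator():
    # Translates the English answer back into German (assumed model).
    return TransformersTranslator(model_name_or_path="Helsinki-NLP/opus-mt-en-de")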