haystack/test/test_extractor.py
Sara Zan d470b9d0bd
Improve dependency management (#1994)
* First attempt at using setup.cfg for dependency management

* Trying the new package on the CI and in Docker too

* Add composite extras_require (sketched after this commit message)

* Add the safe_import function for document store imports and add some try/except statements on rest_api and ui imports (sketched after this commit message)

* Fix a bug in class imports and rephrase the error message

* Introduce typing for optional modules and add type: ignore in sparse.py

* Include importlib_metadata backport for py3.7

* Add colab group to extras_require

* Fix pillow version

* Fix grpcio

* Separate out the crawler as another extra

* Make paths relative in rest_api and ui

* Update the test matrix in the CI

* Add try/except statements around the optional imports too, to account for direct imports

* Never mix direct deps with self-references and add ES deps to the base install

* Refactor several paths in tests to make them insensitive to the execution path

* Incorporate tstadel's review and re-introduce Milvus1 in the test suite

* Wrap pdf conversion utils into safe_import

* Update some tutorials and revert to Milvus1 as default for now, see #2067

* Fix mypy config


Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2022-01-26 18:12:55 +01:00
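
Two quick illustrations of the techniques named above. First, a minimal sketch of the composite-extras pattern, with hypothetical extras names and written in setup.py form for brevity (the PR itself declares dependencies in setup.cfg). A composite extra pulls in other extras by self-reference instead of repeating their pins, which is why direct deps and self-references should never be mixed in one extra:

    # Minimal sketch of composite extras via self-reference. The extras names
    # here are hypothetical; the PR declares these in setup.cfg, not setup.py.
    from setuptools import setup

    setup(
        name="farm-haystack",
        extras_require={
            "crawler": ["selenium"],            # each extra lists its direct deps
            "ocr": ["pytesseract", "pillow"],   # hypothetical grouping
            # the composite extra references the package's own extras instead
            # of duplicating the dependency pins above
            "all": ["farm-haystack[crawler,ocr]"],
        },
    )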
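Second, a sketch of what a safe_import-style helper might look like; the actual function's signature and error message in the PR may differ. The idea is to resolve an optional class if its extra is installed, and otherwise return a stand-in that raises a helpful ImportError only when it is actually used:

    # Hypothetical sketch of a safe_import-style helper: resolve an optional
    # class now if its extra is installed, otherwise return a placeholder
    # that raises a descriptive error only at instantiation time.
    import importlib

    def safe_import(module_name: str, class_name: str, extra: str):
        try:
            module = importlib.import_module(module_name)
            return getattr(module, class_name)
        except ImportError:
            def _missing(*args, **kwargs):
                raise ImportError(
                    f"{class_name} requires the '{extra}' extra. "
                    f"Install it with: pip install farm-haystack[{extra}]"
                )
            return _missing

    # Usage sketch (module path illustrative):
    # MilvusDocumentStore = safe_import(
    #     "haystack.document_stores.milvus", "MilvusDocumentStore", "milvus"
    # )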

57 lines
2.1 KiB
Python

import pytest

from haystack.nodes.retriever.sparse import ElasticsearchRetriever
from haystack.nodes.reader import FARMReader
from haystack.pipelines import Pipeline
from haystack.nodes.extractor import EntityExtractor, simplify_ner_for_qa


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_extractor(document_store_with_docs):
    es_retriever = ElasticsearchRetriever(document_store=document_store_with_docs)
    ner = EntityExtractor()
    reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", num_processes=0)

    # Query -> retriever -> entity extractor -> reader
    pipeline = Pipeline()
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"])
    pipeline.add_node(component=ner, name="NER", inputs=["ESRetriever"])
    pipeline.add_node(component=reader, name="Reader", inputs=["NER"])

    prediction = pipeline.run(
        query="Who lives in Berlin?",
        params={"ESRetriever": {"top_k": 1}, "Reader": {"top_k": 1}},
    )

    # The extractor stores the recognized entities in each answer's meta field
    entities = [entity["word"] for entity in prediction["answers"][0].meta["entities"]]
    assert "Carla" in entities
    assert "Berlin" in entities


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_extractor_output_simplifier(document_store_with_docs):
    es_retriever = ElasticsearchRetriever(document_store=document_store_with_docs)
    ner = EntityExtractor()
    reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", num_processes=0)

    pipeline = Pipeline()
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"])
    pipeline.add_node(component=ner, name="NER", inputs=["ESRetriever"])
    pipeline.add_node(component=reader, name="Reader", inputs=["NER"])

    prediction = pipeline.run(
        query="Who lives in Berlin?",
        params={"ESRetriever": {"top_k": 1}, "Reader": {"top_k": 1}},
    )

    # simplify_ner_for_qa reduces each answer to its text plus the entities it contains
    simplified = simplify_ner_for_qa(prediction)
    assert simplified[0] == {"answer": "Carla", "entities": ["Carla"]}
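
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file): one plausible
# reimplementation of the behaviour test_extractor_output_simplifier asserts.
# For each answer, keep only the entities whose surface form occurs in the
# answer text; the library's actual simplify_ner_for_qa may differ internally.
def _simplify_ner_for_qa_sketch(prediction):
    simplified = []
    for answer in prediction["answers"]:
        entities = [
            entity["word"]
            for entity in answer.meta["entities"]
            if entity["word"] in answer.answer
        ]
        simplified.append({"answer": answer.answer, "entities": entities})
    return simplified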