import pytest

from haystack.nodes.retriever.sparse import BM25Retriever
from haystack.nodes.reader import FARMReader
from haystack.pipelines import Pipeline
from haystack.nodes.extractor import EntityExtractor, simplify_ner_for_qa


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_extractor(document_store_with_docs):
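    """Run a query pipeline with an EntityExtractor node between the retriever and the
    reader, and check that the extracted entities are attached to the answer's meta."""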
    es_retriever = BM25Retriever(document_store=document_store_with_docs)
    ner = EntityExtractor()
    reader = FARMReader(model_name_or_path="deepset/tinyroberta-squad2", num_processes=0)

    pipeline = Pipeline()
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"])
    pipeline.add_node(component=ner, name="NER", inputs=["ESRetriever"])
    pipeline.add_node(component=reader, name="Reader", inputs=["NER"])

    prediction = pipeline.run(
        query="Who lives in Berlin?", params={"ESRetriever": {"top_k": 1}, "Reader": {"top_k": 1}}
    )
    entities = [entity["word"] for entity in prediction["answers"][0].meta["entities"]]
    assert "Carla" in entities
    assert "Berlin" in entities


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_extractor_batch_single_query(document_store_with_docs):
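    """Same pipeline as above, but run via run_batch() with a single query; the
    answers (and their entities) are nested one level deeper in the result."""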
    es_retriever = BM25Retriever(document_store=document_store_with_docs)
    ner = EntityExtractor()
    reader = FARMReader(model_name_or_path="deepset/tinyroberta-squad2", num_processes=0)

    pipeline = Pipeline()
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"])
    pipeline.add_node(component=ner, name="NER", inputs=["ESRetriever"])
    pipeline.add_node(component=reader, name="Reader", inputs=["NER"])

    prediction = pipeline.run_batch(
        queries=["Who lives in Berlin?"], params={"ESRetriever": {"top_k": 1}, "Reader": {"top_k": 1}}
    )
    entities = [entity["word"] for entity in prediction["answers"][0][0].meta["entities"]]
    assert "Carla" in entities
    assert "Berlin" in entities


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_extractor_batch_multiple_queries(document_store_with_docs):
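    """run_batch() with two queries: each query should yield its own answer, carrying
    the entities extracted from its own retrieved document."""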
    es_retriever = BM25Retriever(document_store=document_store_with_docs)
    ner = EntityExtractor()
    reader = FARMReader(model_name_or_path="deepset/tinyroberta-squad2", num_processes=0)

    pipeline = Pipeline()
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"])
    pipeline.add_node(component=ner, name="NER", inputs=["ESRetriever"])
    pipeline.add_node(component=reader, name="Reader", inputs=["NER"])

    prediction = pipeline.run_batch(
        queries=["Who lives in Berlin?", "Who lives in New York?"],
        params={"ESRetriever": {"top_k": 1}, "Reader": {"top_k": 1}},
    )
    entities_carla = [entity["word"] for entity in prediction["answers"][0][0].meta["entities"]]
    entities_paul = [entity["word"] for entity in prediction["answers"][1][0].meta["entities"]]
    assert "Carla" in entities_carla
    assert "Berlin" in entities_carla
    assert "Paul" in entities_paul
    assert "New York" in entities_paul


@pytest.mark.parametrize("document_store_with_docs", ["elasticsearch"], indirect=True)
def test_extractor_output_simplifier(document_store_with_docs):
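    """Check that simplify_ner_for_qa() reduces the full prediction to a list of
    {"answer": ..., "entities": [...]} dicts."""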
    es_retriever = BM25Retriever(document_store=document_store_with_docs)
    ner = EntityExtractor()
    reader = FARMReader(model_name_or_path="deepset/tinyroberta-squad2", num_processes=0)

    pipeline = Pipeline()
    pipeline.add_node(component=es_retriever, name="ESRetriever", inputs=["Query"])
    pipeline.add_node(component=ner, name="NER", inputs=["ESRetriever"])
    pipeline.add_node(component=reader, name="Reader", inputs=["NER"])

    prediction = pipeline.run(
        query="Who lives in Berlin?", params={"ESRetriever": {"top_k": 1}, "Reader": {"top_k": 1}}
    )
    simplified = simplify_ner_for_qa(prediction)
    assert simplified[0] == {"answer": "Carla and I", "entities": ["Carla"]}