mirror of https://github.com/deepset-ai/haystack.git (synced 2025-07-27 19:00:35 +00:00)

* first draft / notes on new primitives
* wip label / feedback refactor
* rename doc.text -> doc.content. add doc.content_type
* add datatype for content
* remove faq_question_field from ES and weaviate. rename text_field -> content_field in docstores. update tutorials for content field
* update converters for . Add warning for empty
* rename label.question -> label.query. Allow sorting of Answers.
* WIP primitives
* update ui/reader for new Answer format
* Improve Label. First refactoring of MultiLabel. Adjust eval code
* fixed workflow conflict with introducing new one (#1472)
* Add latest docstring and tutorial changes
* make add_eval_data() work again
* fix reader formats. WIP fix _extract_docs_and_labels_from_dict
* fix test reader
* Add latest docstring and tutorial changes
* fix another test case for reader
* fix mypy in farm reader.eval()
* fix mypy in farm reader.eval()
* WIP ORM refactor
* Add latest docstring and tutorial changes
* fix mypy weaviate
* make label and multilabel dataclasses
* bump mypy env in CI to python 3.8
* WIP refactor Label ORM
* WIP refactor Label ORM
* simplify tests for individual doc stores
* WIP refactoring markers of tests
* test alternative approach for tests with existing parametrization
* WIP refactor ORMs
* fix skip logic of already parametrized tests
* fix weaviate behaviour in tests - not parametrizing it in our general test cases.
* Add latest docstring and tutorial changes
* fix some tests
* remove sql from document_store_types
* fix markers for generator and pipeline test
* remove inmemory marker
* remove unneeded elasticsearch markers
* add dataclasses-json dependency. adjust ORM to just store JSON repr
* ignore type as dataclasses_json seems to miss functionality here
* update readme and contributing.md
* update contributing
* adjust example
* fix duplicate doc handling for custom index
* Add latest docstring and tutorial changes
* fix some ORM issues. fix get_all_labels_aggregated.
* update drop flags where get_all_labels_aggregated() was used before
* Add latest docstring and tutorial changes
* add to_json(). add + fix tests
* fix no_answer handling in label / multilabel
* fix duplicate docs in memory doc store. change primary key for sql doc table
* fix mypy issues
* fix mypy issues
* haystack/retriever/base.py
* fix test_write_document_meta[elastic]
* fix test_elasticsearch_custom_fields
* fix test_labels[elastic]
* fix crawler
* fix converter
* fix docx converter
* fix preprocessor
* fix test_utils
* fix tfidf retriever. fix selection of docstore in tests with multiple fixtures / parameterizations
* Add latest docstring and tutorial changes
* fix crawler test. fix ocrconverter attribute
* fix test_elasticsearch_custom_query
* fix generator pipeline
* fix ocr converter
* fix ragenerator
* Add latest docstring and tutorial changes
* fix test_load_and_save_yaml for elasticsearch
* fixes for pipeline tests
* fix faq pipeline
* fix pipeline tests
* Add latest docstring and tutorial changes
* fix weaviate
* Add latest docstring and tutorial changes
* trigger CI
* satisfy mypy
* Add latest docstring and tutorial changes
* satisfy mypy
* Add latest docstring and tutorial changes
* trigger CI
* fix question generation test
* fix ray. fix Q-generation
* fix translator test
* satisfy mypy
* wip refactor feedback rest api
* fix rest api feedback endpoint
* fix doc classifier
* remove relation of Labels -> Docs in SQL ORM
* fix faiss/milvus tests
* fix doc classifier test
* fix eval test
* fixing eval issues
* Add latest docstring and tutorial changes
* fix mypy
* WIP replace dataclasses-json with manual serialization
* Add latest docstring and tutorial changes
* revert to dataclass-json serialization for now. remove debug prints.
* update docstrings
* fix extractor. fix Answer Span init
* fix api test
* keep meta data of answers in reader.run()
* fix meta handling
* address review feedback
* Add latest docstring and tutorial changes
* make document=None for open domain labels
* add import
* fix print utils
* fix rest api
* address review feedback
* Add latest docstring and tutorial changes
* fix mypy

Co-authored-by: Markus Paff <markuspaff.mp@gmail.com>
Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
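The renames above land in the core primitives exercised by the test file below. A minimal sketch of the new field names (illustrative values; constructor details are assumptions based on the v1-era schema):

from haystack import Document, Answer

doc = Document(content="My name is Carla and I live in Berlin")  # formerly doc.text
assert doc.content_type == "text"  # new field from this refactor; "text" assumed to be the default

answer = Answer(answer="Carla", type="extractive")  # Answers are now dataclasses, sortable per "Allow sorting of Answers" above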
166 lines
7.0 KiB
Python
import math

import pytest

from haystack import Document, Answer
from haystack.reader.base import BaseReader
from haystack.reader.farm import FARMReader


def test_reader_basic(reader):
    assert reader is not None
    assert isinstance(reader, BaseReader)


def test_output(prediction):
    assert prediction is not None
    assert prediction["query"] == "Who lives in Berlin?"
    assert prediction["answers"][0].answer == "Carla"
    assert prediction["answers"][0].offsets_in_context[0].start == 11
    assert prediction["answers"][0].offsets_in_context[0].end == 16
    assert prediction["answers"][0].score <= 1
    assert prediction["answers"][0].score >= 0
    assert prediction["answers"][0].context == "My name is Carla and I live in Berlin"
    assert len(prediction["answers"]) == 5


@pytest.mark.slow
def test_no_answer_output(no_answer_prediction):
    assert no_answer_prediction is not None
    assert no_answer_prediction["query"] == "What is the meaning of life?"
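    # no_ans_gap is FARM's margin between predicting an answer span and predicting
    # "no answer" (an assumption based on FARM's QA head). The reference value below is
    # specific to the fixture's model and only needs to match within the given tolerance.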
    assert math.isclose(no_answer_prediction["no_ans_gap"], -13.048564434051514, rel_tol=0.0001)
    assert no_answer_prediction["answers"][0].answer == ""
    assert no_answer_prediction["answers"][0].offsets_in_context[0].start == 0
    assert no_answer_prediction["answers"][0].offsets_in_context[0].end == 0
    assert no_answer_prediction["answers"][0].score <= 1
    assert no_answer_prediction["answers"][0].score >= 0
    assert no_answer_prediction["answers"][0].context is None
    assert no_answer_prediction["answers"][0].document_id is None
    answers = [x.answer for x in no_answer_prediction["answers"]]
    assert answers.count("") == 1
    assert len(no_answer_prediction["answers"]) == 5


# TODO Directly compare FARM and Transformers reader outputs
# TODO Check that the model is responsive to input arguments, e.g. context_window_size and top_k


@pytest.mark.slow
def test_prediction_attributes(prediction):
    # TODO FARM's prediction also has no_ans_gap
    attributes_gold = ["query", "answers"]
    for ag in attributes_gold:
        assert ag in prediction


def test_answer_attributes(prediction):
    # TODO Transformers answer also has a meta key
    answer = prediction["answers"][0]
    assert isinstance(answer, Answer)
    attributes_gold = ["answer", "score", "context", "offsets_in_context", "offsets_in_document", "type"]
    for ag in attributes_gold:
        assert getattr(answer, ag, None) is not None


@pytest.mark.slow
@pytest.mark.parametrize("reader", ["farm"], indirect=True)
@pytest.mark.parametrize("window_size", [10, 15, 20])
def test_context_window_size(reader, test_docs_xs, window_size):
    docs = [Document.from_dict(d) if isinstance(d, dict) else d for d in test_docs_xs]

    assert isinstance(reader, FARMReader)

    old_window_size = reader.inferencer.model.prediction_heads[0].context_window_size
    reader.inferencer.model.prediction_heads[0].context_window_size = window_size

    prediction = reader.predict(query="Who lives in Berlin?", documents=docs, top_k=5)
    for answer in prediction["answers"]:
        # If the extracted answer is larger than the context window, the context window is expanded.
        # If the extracted answer is odd in length, the resulting context window is one less than
        # context_window_size due to rounding (see FARM's QACandidate).
        # TODO Currently the behaviour of context_window_size in FARMReader and TransformersReader is different
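        # Illustrative example: "Carla" has length 5 (odd), so with window_size=10 the padding
        # around the answer cannot be split evenly and the context may come out at 9 characters
        # instead of 10 (hence the two accepted lengths below).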
        if len(answer.answer) <= window_size:
            assert len(answer.context) in [window_size, window_size - 1]
        else:
            assert len(answer.answer) == len(answer.context)

    reader.inferencer.model.prediction_heads[0].context_window_size = old_window_size


# TODO Need to test transformers reader
# TODO Currently the behaviour of context_window_size in FARMReader and TransformersReader is different


@pytest.mark.parametrize("reader", ["farm"], indirect=True)
@pytest.mark.parametrize("top_k", [2, 5, 10])
def test_top_k(reader, test_docs_xs, top_k):
    docs = [Document.from_dict(d) if isinstance(d, dict) else d for d in test_docs_xs]

    assert isinstance(reader, FARMReader)
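    # Note (assumption about FARM internals): top_k_per_candidate bounds how many answer spans
    # each candidate passage contributes; n_best is set one higher so a no-answer prediction can
    # be dropped without falling below top_k. Older FARM versions may not expose
    # n_best_per_sample, hence the try/except below.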
    old_top_k_per_candidate = reader.top_k_per_candidate
    reader.top_k_per_candidate = 4
    reader.inferencer.model.prediction_heads[0].n_best = reader.top_k_per_candidate + 1
    try:
        old_top_k_per_sample = reader.inferencer.model.prediction_heads[0].n_best_per_sample
        reader.inferencer.model.prediction_heads[0].n_best_per_sample = 4
    except Exception:
        print("WARNING: Could not set `top_k_per_sample` in FARM. Please update the FARM version.")

    prediction = reader.predict(query="Who lives in Berlin?", documents=docs, top_k=top_k)
    assert len(prediction["answers"]) == top_k

    reader.top_k_per_candidate = old_top_k_per_candidate
    reader.inferencer.model.prediction_heads[0].n_best = reader.top_k_per_candidate + 1
    try:
        reader.inferencer.model.prediction_heads[0].n_best_per_sample = old_top_k_per_sample
    except Exception:
        print("WARNING: Could not set `top_k_per_sample` in FARM. Please update the FARM version.")


def test_farm_reader_update_params(test_docs_xs):
    reader = FARMReader(
        model_name_or_path="deepset/roberta-base-squad2",
        use_gpu=False,
        no_ans_boost=0,
        num_processes=0
    )

    docs = [Document.from_dict(d) if isinstance(d, dict) else d for d in test_docs_xs]

    # original reader
    prediction = reader.predict(query="Who lives in Berlin?", documents=docs, top_k=3)
    assert len(prediction["answers"]) == 3
    assert prediction["answers"][0].answer == "Carla"

    # update no_ans_boost: a strong boost should push the no-answer prediction to the top
    reader.update_parameters(
        context_window_size=100, no_ans_boost=100, return_no_answer=True, max_seq_len=384, doc_stride=128
    )
    prediction = reader.predict(query="Who lives in Berlin?", documents=docs, top_k=3)
    assert len(prediction["answers"]) == 3
    assert prediction["answers"][0].answer == ""

    # disable no_ans_boost and return_no_answer: no empty answers should be returned
    reader.update_parameters(
        context_window_size=100, no_ans_boost=0, return_no_answer=False, max_seq_len=384, doc_stride=128
    )
    prediction = reader.predict(query="Who lives in Berlin?", documents=docs, top_k=3)
    assert len(prediction["answers"]) == 3
    assert None not in [ans.answer for ans in prediction["answers"]]

    # update context_window_size
    reader.update_parameters(context_window_size=6, no_ans_boost=-10, max_seq_len=384, doc_stride=128)
    prediction = reader.predict(query="Who lives in Berlin?", documents=docs, top_k=3)
    assert len(prediction["answers"]) == 3
    assert len(prediction["answers"][0].context) == 6

    # update doc_stride with an invalid value (doc_stride must be smaller than max_seq_len)
    with pytest.raises(Exception):
        reader.update_parameters(context_window_size=100, no_ans_boost=-10, max_seq_len=384, doc_stride=999)
        reader.predict(query="Who lives in Berlin?", documents=docs, top_k=3)

    # update max_seq_len with an invalid value (smaller than the doc_stride of 128)
    with pytest.raises(Exception):
        reader.update_parameters(context_window_size=6, no_ans_boost=-10, max_seq_len=99, doc_stride=128)
        reader.predict(query="Who lives in Berlin?", documents=docs, top_k=3)