import pandas as pd

from haystack.schema import Document
from haystack.pipelines.base import Pipeline

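# NOTE: the `table_reader` fixture used below is expected to come from the test
# suite's conftest.py; it is not defined in this file. A minimal sketch of such
# a fixture (assuming Haystack's TAPAS-based TableReader and the
# "google/tapas-base-finetuned-wtq" model) could look like:
#
#     import pytest
#     from haystack.nodes import TableReader
#
#     @pytest.fixture
#     def table_reader():
#         return TableReader(model_name_or_path="google/tapas-base-finetuned-wtq")
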
def test_table_reader(table_reader):
    data = {
        "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
        "age": ["57", "46", "60"],
        "number of movies": ["87", "53", "69"],
        "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
    }
    table = pd.DataFrame(data)

    query = "When was DiCaprio born?"
    prediction = table_reader.predict(query=query, documents=[Document(content=table, content_type="table")])
    assert prediction["answers"][0].answer == "10 june 1996"
    assert prediction["answers"][0].offsets_in_context[0].start == 7
    assert prediction["answers"][0].offsets_in_context[0].end == 8

    # test aggregation
    query = "How old are DiCaprio and Pitt on average?"
    prediction = table_reader.predict(query=query, documents=[Document(content=table, content_type="table")])
    assert prediction["answers"][0].answer == "51.5"
    assert prediction["answers"][0].meta["answer_cells"] == ["57", "46"]
    assert prediction["answers"][0].meta["aggregation_operator"] == "AVERAGE"
    assert prediction["answers"][0].offsets_in_context[0].start == 1
    assert prediction["answers"][0].offsets_in_context[0].end == 2
    assert prediction["answers"][0].offsets_in_context[1].start == 5
    assert prediction["answers"][0].offsets_in_context[1].end == 6

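# The same reader can also run inside a Pipeline: below it is registered as a
# single node fed directly from the Query input, and Pipeline.run() forwards
# the query and documents to it.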
def test_table_reader_in_pipeline(table_reader):
    pipeline = Pipeline()
    pipeline.add_node(table_reader, "TableReader", ["Query"])
    data = {
        "actors": ["brad pitt", "leonardo di caprio", "george clooney"],
        "age": ["57", "46", "60"],
        "number of movies": ["87", "53", "69"],
        "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"],
    }

    table = pd.DataFrame(data)
    query = "Which actors played in more than 60 movies?"

    prediction = pipeline.run(query=query, documents=[Document(content=table, content_type="table")])

    assert prediction["answers"][0].answer == "brad pitt, george clooney"
    assert prediction["answers"][0].meta["aggregation_operator"] == "NONE"
    assert prediction["answers"][0].offsets_in_context[0].start == 0
    assert prediction["answers"][0].offsets_in_context[0].end == 1
    assert prediction["answers"][0].offsets_in_context[1].start == 8
    assert prediction["answers"][0].offsets_in_context[1].end == 9

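# Aggregation over numeric cells: per the assertions below, the reader is
# expected to parse the differently formatted height strings (e.g. "8,611 m",
# "8 586m") and return an averaged or summed numeric answer together with the
# contributing cells.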
def test_table_reader_aggregation(table_reader):
    data = {
        "Mountain": ["Mount Everest", "K2", "Kangchenjunga", "Lhotse", "Makalu"],
        "Height": ["8848m", "8,611 m", "8 586m", "8 516 m", "8,485m"],
    }
    table = pd.DataFrame(data)

    query = "How tall are all mountains on average?"
    prediction = table_reader.predict(query=query, documents=[Document(content=table, content_type="table")])
    assert prediction["answers"][0].answer == "8609.2 m"
    assert prediction["answers"][0].meta["aggregation_operator"] == "AVERAGE"
    assert prediction["answers"][0].meta["answer_cells"] == ["8848m", "8,611 m", "8 586m", "8 516 m", "8,485m"]

    query = "How tall are all mountains together?"
    prediction = table_reader.predict(query=query, documents=[Document(content=table, content_type="table")])
    assert prediction["answers"][0].answer == "43046.0 m"
    assert prediction["answers"][0].meta["aggregation_operator"] == "SUM"
    assert prediction["answers"][0].meta["answer_cells"] == ["8848m", "8,611 m", "8 586m", "8 516 m", "8,485m"]