mirror of
https://github.com/deepset-ai/haystack.git
synced 2025-08-29 10:56:40 +00:00

* Files moved, imports all broken * Fix most imports and docstrings into * Fix the paths to the modules in the API docs * Add latest docstring and tutorial changes * Add a few pipelines that were lost in the inports * Fix a bunch of mypy warnings * Add latest docstring and tutorial changes * Create a file_classifier module * Add docs for file_classifier * Fixed most circular imports, now the REST API can start * Add latest docstring and tutorial changes * Tackling more mypy issues * Reintroduce from FARM and fix last mypy issues hopefully * Re-enable old-style imports * Fix some more import from the top-level package in an attempt to sort out circular imports * Fix some imports in tests to new-style to prevent failed class equalities from breaking tests * Change document_store into document_stores * Update imports in tutorials * Add latest docstring and tutorial changes * Probably fixes summarizer tests * Improve the old-style import allowing module imports (should work) * Try to fix the docs * Remove dedicated KnowledgeGraph page from autodocs * Remove dedicated GraphRetriever page from autodocs * Fix generate_docstrings.sh with an updated list of yaml files to look for * Fix some more modules in the docs * Fix the document stores docs too * Fix a small issue on Tutorial14 * Add latest docstring and tutorial changes * Add deprecation warning to old-style imports * Remove stray folder and import Dict into dense.py * Change import path for MLFlowLogger * Add old loggers path to the import path aliases * Fix debug output of convert_ipynb.py * Fix circular import on BaseRetriever * Missed one merge block * re-run tutorial 5 * Fix imports in tutorial 5 * Re-enable squad_to_dpr CLI from the root package and move get_batches_from_generator into document_stores.base * Add latest docstring and tutorial changes * Fix typo in utils __init__ * Fix a few more imports * Fix benchmarks too * New-style imports in test_knowledge_graph * Rollback setup.py * Rollback squad_to_dpr 
too Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
91 lines
3.6 KiB
Python
91 lines
3.6 KiB
Python
from utils import get_document_store, index_to_doc_store, get_reader
|
|
from haystack.document_stores.utils import eval_data_from_json
|
|
from haystack.modeling.data_handler.processor import _download_extract_downstream_data
|
|
|
|
from pathlib import Path
|
|
import pandas as pd
|
|
from results_to_json import reader as reader_json
|
|
from templates import READER_TEMPLATE
|
|
import json
|
|
import logging
|
|
|
|
logger = logging.getLogger(__name__)

# Reader models evaluated in a full benchmark run.
reader_models_full = ["deepset/roberta-base-squad2", "deepset/minilm-uncased-squad2",
                      "deepset/bert-base-cased-squad2", "deepset/bert-large-uncased-whole-word-masking-squad2",
                      "deepset/xlm-roberta-large-squad2", "distilbert-base-uncased-distilled-squad"]
# Reduced model list used on CI to keep runs fast.
reader_models_ci = ["deepset/minilm-uncased-squad2"]

# Only the FARM reader implementation is benchmarked here.
reader_types = ["farm"]
# Location of the SQuAD v2.0 dev set used for evaluation.
data_dir = Path("../../data/squad20")
filename = "dev-v2.0.json"
# Note that this number is approximate - it was calculated using Bert Base Cased
# This number could vary when using a different tokenizer
n_total_passages = 12350
n_total_docs = 1204

# CSV output for the raw benchmark results.
results_file = "reader_results.csv"

# JSON file consumed by the docs benchmark page.
reader_json_file = "../../docs/_src/benchmarks/reader_performance.json"

# Document store index names used for evaluation documents and labels.
doc_index = "eval_document"
label_index = "label"
|
|
|
|
def benchmark_reader(ci=False, update_json=False, save_markdown=False, **kwargs):
    """
    Benchmark reader models on the SQuAD v2.0 dev set and write the results to CSV.

    :param ci: If True, evaluate only the small CI model list instead of the full list.
    :param update_json: If True, regenerate the reader-performance JSON used by the docs.
    :param save_markdown: If True, also write the results table as a Markdown file.
    :param kwargs: Unused; accepted for a uniform benchmark-runner interface.
    """
    reader_models = reader_models_ci if ci else reader_models_full
    reader_results = []
    doc_store = get_document_store("elasticsearch")
    # download squad data
    _download_extract_downstream_data(input_file=data_dir / filename)
    docs, labels = eval_data_from_json(data_dir / filename, max_docs=None)

    index_to_doc_store(doc_store, docs, None, labels)
    for reader_name in reader_models:
        for reader_type in reader_types:
            logger.info("##### Start reader run - model:%s, type: %s ##### ", reader_name, reader_type)
            try:
                reader = get_reader(reader_name, reader_type)
                results = reader.eval(document_store=doc_store,
                                      doc_index=doc_index,
                                      label_index=label_index,
                                      device="cuda")
                results["passages_per_second"] = n_total_passages / results["reader_time"]
                results["reader"] = reader_name
                results["error"] = ""
            except Exception as e:
                # Best-effort benchmarking: log the failure, record a zeroed row,
                # and continue with the next model instead of aborting the run.
                logger.exception("Reader run failed for model %s (%s)", reader_name, reader_type)
                results = {'EM': 0.,
                           'f1': 0.,
                           'top_n_accuracy': 0.,
                           'top_n': 0,
                           'reader_time': 0.,
                           "passages_per_second": 0.,
                           "seconds_per_query": 0.,
                           'reader': reader_name,
                           # Store a string, not the exception object, so the CSV stays readable.
                           "error": str(e)}
            reader_results.append(results)
    reader_df = pd.DataFrame.from_records(reader_results)
    reader_df.to_csv(results_file)
    if save_markdown:
        md_file = results_file.replace(".csv", ".md")
        with open(md_file, "w") as f:
            f.write(str(reader_df.to_markdown()))
    # Clean up the evaluation indices so repeated runs start from an empty store.
    doc_store.delete_all_documents(label_index)
    doc_store.delete_all_documents(doc_index)
    if update_json:
        populate_reader_json()
|
|
|
|
|
|
def populate_reader_json():
    """
    Regenerate the reader-performance JSON consumed by the docs benchmark page.

    Fills the shared READER_TEMPLATE's "data" field with the latest reader
    results and writes it to ``reader_json_file``.
    """
    reader_results = reader_json()
    template = READER_TEMPLATE
    # NOTE(review): this mutates the shared READER_TEMPLATE dict in place — confirm
    # no other caller relies on a pristine template within the same process.
    template["data"] = reader_results
    # Use a context manager so the file handle is closed even if json.dump raises
    # (the original left the handle from open() dangling).
    with open(reader_json_file, "w") as f:
        json.dump(template, f, indent=4)
|
|
|
|
|
|
if __name__ == "__main__":
    # CI configuration: small model list, but still refresh the docs JSON and Markdown table.
    benchmark_reader(ci=True, update_json=True, save_markdown=True)