From dd8dc588b1a157a81b77659397097438b0c65aa9 Mon Sep 17 00:00:00 2001
From: tstadel <60758086+tstadel@users.noreply.github.com>
Date: Wed, 25 May 2022 16:26:29 +0200
Subject: [PATCH] fix eval with context matching in table qa use cases (#2597)

---
 haystack/pipelines/base.py      | 4 ++--
 tutorials/Tutorial15_TableQA.py | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/haystack/pipelines/base.py b/haystack/pipelines/base.py
index c61030c3f..c63d29d5a 100644
--- a/haystack/pipelines/base.py
+++ b/haystack/pipelines/base.py
@@ -1323,8 +1323,8 @@ class Pipeline:
         df_answers["gold_contexts_similarity"] = df_answers.map_rows(
             lambda row: [
                 calculate_context_similarity(
-                    gold_context,
-                    row["context"] or "",
+                    str(gold_context),  # could be dataframe
+                    str(row["context"]) if row["context"] is not None else "",  # could be dataframe
                     min_length=context_matching_min_length,
                     boost_split_overlaps=context_matching_boost_split_overlaps,
                 )
diff --git a/tutorials/Tutorial15_TableQA.py b/tutorials/Tutorial15_TableQA.py
index 44a54b2b8..c96e79952 100644
--- a/tutorials/Tutorial15_TableQA.py
+++ b/tutorials/Tutorial15_TableQA.py
@@ -141,6 +141,8 @@ def tutorial15_tableqa():
     passages = read_texts(f"{doc_dir}/texts.json")
     document_store.write_documents(passages)
 
+    document_store.update_embeddings(retriever=retriever, update_existing_embeddings=False)
+
     # Example query whose answer resides in a text passage
     predictions = text_table_qa_pipeline.run(query="Which country does the film Macaroni come from?")
     # We can see both text passages and tables as contexts of the predicted answers.
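
A minimal sketch (not part of the patch) of the idea behind the base.py change: in table QA
evaluation, a row's "context" and the gold contexts can be pandas.DataFrame objects rather
than strings, so both arguments must be rendered as text before string-based context matching
can score them. The normalize_context helper below is hypothetical, introduced only to
illustrate the casting that the patch performs inline:

    import pandas as pd

    def normalize_context(context) -> str:
        # Hypothetical helper mirroring the patch: table contexts may be
        # DataFrames, and string similarity functions need plain text.
        if context is None:
            return ""
        return str(context)  # str() on a DataFrame yields its text rendering

    table = pd.DataFrame({"Film": ["Macaroni"], "Country": ["Italy"]})
    assert isinstance(normalize_context(table), str)
    assert normalize_context(None) == ""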