fix eval with context matching in table qa use cases (#2597)

tstadel 2022-05-25 16:26:29 +02:00 committed by GitHub
parent b6986ea25d
commit dd8dc588b1
2 changed files with 4 additions and 2 deletions


@@ -1323,8 +1323,8 @@ class Pipeline:
         df_answers["gold_contexts_similarity"] = df_answers.map_rows(
             lambda row: [
                 calculate_context_similarity(
-                    gold_context,
-                    row["context"] or "",
+                    str(gold_context),  # could be dataframe
+                    str(row["context"]) if row["context"] is not None else "",  # could be dataframe
                     min_length=context_matching_min_length,
                     boost_split_overlaps=context_matching_boost_split_overlaps,
                 )

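The cast to str is the heart of the fix: in table QA evaluation, both the gold context and an answer's context can be a pandas DataFrame rather than a string, while calculate_context_similarity compares strings. Below is a minimal sketch of the guarded call, assuming the import path haystack.utils.context_matching and using hypothetical sample data; the min_length/boost_split_overlaps values stand in for the pipeline's context_matching_* parameters:

import pandas as pd
from haystack.utils.context_matching import calculate_context_similarity

# Hypothetical table context, as table QA evaluation may produce
table = pd.DataFrame({"Film": ["Macaroni"], "Country": ["Italy"]})
gold_context = table    # gold contexts may be DataFrames...
answer_context = table  # ...and so may a predicted answer's context

# Stringify both sides before matching, mirroring the patched code above;
# passing DataFrames directly would break the string-based similarity.
score = calculate_context_similarity(
    str(gold_context),
    str(answer_context) if answer_context is not None else "",
    min_length=100,
    boost_split_overlaps=True,
)
print(score)  # similarity score; identical stringified contexts score highest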

@@ -141,6 +141,8 @@ def tutorial15_tableqa():
     passages = read_texts(f"{doc_dir}/texts.json")
     document_store.write_documents(passages)
     document_store.update_embeddings(retriever=retriever, update_existing_embeddings=False)
+    # Example query whose answer resides in a text passage
     predictions = text_table_qa_pipeline.run(query="Which country does the film Macaroni come from?")
+    # We can see both text passages and tables as contexts of the predicted answers.
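
Since the mixed pipeline can answer from either source, the answers' contexts end up being either strings (text passages) or DataFrames (tables), which is exactly the case the evaluation fix above handles. A hedged sketch for telling them apart, assuming haystack's Answer objects expose .answer and .context as in the 1.x schema:

import pandas as pd

for answer in predictions["answers"]:
    # Text answers carry a string context; table answers carry a DataFrame.
    kind = "table" if isinstance(answer.context, pd.DataFrame) else "text"
    print(f"[{kind}] {answer.answer}")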