Mirror of https://github.com/deepset-ai/haystack.git, synced 2025-11-01 10:19:23 +00:00
fix eval with context matching in table qa use cases (#2597)
parent b6986ea25d
commit dd8dc588b1
@@ -1323,8 +1323,8 @@ class Pipeline:
         df_answers["gold_contexts_similarity"] = df_answers.map_rows(
             lambda row: [
                 calculate_context_similarity(
-                    gold_context,
-                    row["context"] or "",
+                    str(gold_context),  # could be dataframe
+                    str(row["context"]) if row["context"] is not None else "",  # could be dataframe
                     min_length=context_matching_min_length,
                     boost_split_overlaps=context_matching_boost_split_overlaps,
                 )
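
The str() casts are the substance of the fix: in table QA use cases, both the gold context and a predicted answer's context can be pandas DataFrames rather than strings, while calculate_context_similarity compares plain text. Below is a minimal sketch of the idea; the haystack.utils import path, the example DataFrames, and the parameter values are assumptions for illustration, not taken from this diff:

import pandas as pd
from haystack.utils import calculate_context_similarity  # assumed import path

# A table context, as produced in table QA use cases (illustrative data only)
gold_context = pd.DataFrame({"Film": ["Macaroni"], "Country": ["Italy"]})
predicted_context = pd.DataFrame({"Film": ["Macaroni"], "Country": ["Italy"]})

# Cast both sides to str, mirroring the fix above, before computing the similarity
similarity = calculate_context_similarity(
    str(gold_context),  # could be a dataframe
    str(predicted_context) if predicted_context is not None else "",  # could be a dataframe
    min_length=100,  # illustrative values; the pipeline passes its own settings
    boost_split_overlaps=True,
)
print(similarity)
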
@@ -141,6 +141,8 @@ def tutorial15_tableqa():
     passages = read_texts(f"{doc_dir}/texts.json")
     document_store.write_documents(passages)
 
+    document_store.update_embeddings(retriever=retriever, update_existing_embeddings=False)
+
     # Example query whose answer resides in a text passage
     predictions = text_table_qa_pipeline.run(query="Which country does the film Macaroni come from?")
     # We can see both text passages and tables as contexts of the predicted answers.
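
The added update_embeddings call gives the newly written text passages embeddings without recomputing the table embeddings created earlier; with update_existing_embeddings=False only documents that have no embedding yet are processed. A short sketch of inspecting the mixed contexts afterwards, assuming Haystack's standard pipeline output ("answers" holding Answer objects with .answer and .context); treat it as an illustration, not the tutorial's exact code:

# Run the combined text/table QA pipeline and look at the context type of each answer.
predictions = text_table_qa_pipeline.run(query="Which country does the film Macaroni come from?")
for answer in predictions["answers"]:
    # Text answers carry string contexts; table answers can carry a pandas DataFrame,
    # which is exactly why the eval code above casts contexts to str.
    print(type(answer.context), answer.answer)
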