Add File Upload Functionality in UI (#995)
This commit is contained in: parent 056be3354b · commit 37a72d2f45
@@ -44,6 +44,10 @@ def file_upload(
         file_path = Path(FILE_UPLOAD_PATH) / f"{uuid.uuid4().hex}_{file.filename}"
         with file_path.open("wb") as buffer:
             shutil.copyfileobj(file.file, buffer)
+
+        meta = json.loads(meta) or {}
+        meta["name"] = file.filename
+
         INDEXING_PIPELINE.run(
             file_path=file_path,
             remove_numeric_tables=remove_numeric_tables,
@@ -55,7 +59,7 @@ def file_upload(
             split_length=split_length,
             split_overlap=split_overlap,
             split_respect_sentence_boundary=split_respect_sentence_boundary,
-            meta=json.loads(meta) or {},
+            meta=meta,
         )
     finally:
         file.file.close()
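Note: the net effect of these two hunks is that the `meta` JSON string is parsed once, the original filename is attached, and the enriched dict is what reaches the indexing pipeline (previously `meta` was re-parsed at call time, so the filename was never recorded). A minimal standalone sketch of the behaviour, with a made-up payload:

    import json

    meta = '{"source": "user-upload"}'  # hypothetical JSON string as sent by the client
    filename = "report.pdf"             # hypothetical uploaded filename

    parsed = json.loads(meta) or {}     # falls back to {} when the client sends "null"
    parsed["name"] = filename
    # parsed == {"source": "user-upload", "name": "report.pdf"} is what the pipeline indexes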
@@ -20,6 +20,9 @@ components:    # define all the building-blocks for Pipeline
     type: PDFToTextConverter
   - name: Preprocessor
     type: PreProcessor
+    params:
+      split_by: word
+      split_length: 1000
   - name: FileTypeClassifier
     type: FileTypeClassifier
||||
85
ui/utils.py
85
ui/utils.py
@@ -6,39 +6,58 @@ import streamlit as st
 API_ENDPOINT = os.getenv("API_ENDPOINT", "http://localhost:8000")
 DOC_REQUEST = "query"
 DOC_FEEDBACK = "feedback"
+DOC_UPLOAD = "file-upload"


 @st.cache(show_spinner=False)
-def retrieve_doc(query,filters=None,top_k_reader=5,top_k_retriever=5):
+def retrieve_doc(query, filters=None, top_k_reader=5, top_k_retriever=5):
     # Query Haystack API
     url = f"{API_ENDPOINT}/{DOC_REQUEST}"
     req = {"query": query, "filters": filters, "top_k_retriever": top_k_retriever, "top_k_reader": top_k_reader}
-    response_raw = requests.post(url,json=req).json()
+    response_raw = requests.post(url, json=req).json()

     # Format response
     result = []
     answers = response_raw["answers"]
     for i in range(len(answers)):
-        answer = answers[i]['answer']
+        answer = answers[i]["answer"]
         if answer:
-            context = '...' + answers[i]['context'] + '...'
-            meta_name = answers[i]['meta']['name']
-            relevance = round(answers[i]['probability']*100,2)
-            document_id = answers[i]['document_id']
-            offset_start_in_doc = answers[i]['offset_start_in_doc']
-            result.append({'context':context,'answer':answer,'source':meta_name,'relevance':relevance, 'document_id':document_id,'offset_start_in_doc':offset_start_in_doc})
+            context = "..." + answers[i]["context"] + "..."
+            meta_name = answers[i]["meta"]["name"]
+            relevance = round(answers[i]["probability"] * 100, 2)
+            document_id = answers[i]["document_id"]
+            offset_start_in_doc = answers[i]["offset_start_in_doc"]
+            result.append(
+                {
+                    "context": context,
+                    "answer": answer,
+                    "source": meta_name,
+                    "relevance": relevance,
+                    "document_id": document_id,
+                    "offset_start_in_doc": offset_start_in_doc,
+                }
+            )
     return result, response_raw

-def feedback_doc(question,is_correct_answer,document_id,model_id,is_correct_document,answer,offset_start_in_doc):
+
+def feedback_doc(question, is_correct_answer, document_id, model_id, is_correct_document, answer, offset_start_in_doc):
     # Feedback Haystack API
     url = f"{API_ENDPOINT}/{DOC_FEEDBACK}"
     req = {
         "question": question,
         "is_correct_answer": is_correct_answer,
         "document_id": document_id,
         "model_id": model_id,
         "is_correct_document": is_correct_document,
         "answer": answer,
-        "offset_start_in_doc": offset_start_in_doc
+        "offset_start_in_doc": offset_start_in_doc,
     }
-    response_raw = requests.post(url,json=req).json()
+    response_raw = requests.post(url, json=req).json()
     return response_raw
+
+
+def upload_doc(file):
+    url = f"{API_ENDPOINT}/{DOC_UPLOAD}"
+    files = [("file", file)]
+    response_raw = requests.post(url, files=files).json()
+    return response_raw
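Note: the new `upload_doc` helper is a thin wrapper around a multipart POST, so the endpoint can also be exercised without the UI. A minimal sketch (the filename is a placeholder for any local PDF/TXT/DOCX file):

    import requests

    API_ENDPOINT = "http://localhost:8000"  # default used by ui/utils.py

    with open("sample.pdf", "rb") as f:    # "sample.pdf" is a placeholder
        raw_json = requests.post(f"{API_ENDPOINT}/file-upload", files=[("file", f)]).json()
    print(raw_json)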
ui/webapp.py (129 changed lines)
@@ -1,49 +1,74 @@
 import os
 import sys
-import streamlit as st
-from utils import retrieve_doc
-from utils import feedback_doc
-from annotated_text import annotated_text

 import pandas as pd
+import streamlit as st
+from annotated_text import annotated_text
+
 # streamlit does not support any states out of the box. On every button click, streamlit reload the whole page
 # and every value gets lost. To keep track of our feedback state we use the official streamlit gist mentioned
 # here https://gist.github.com/tvst/036da038ab3e999a64497f42de966a92
 import SessionState
+from utils import feedback_doc
+from utils import retrieve_doc
+from utils import upload_doc


 def annotate_answer(answer, context):
     start_idx = context.find(answer)
-    end_idx = start_idx+len(answer)
-    annotated_text(context[:start_idx],(answer,"ANSWER","#8ef"),context[end_idx:])
+    end_idx = start_idx + len(answer)
+    annotated_text(context[:start_idx], (answer, "ANSWER", "#8ef"), context[end_idx:])


 def random_questions(df):
-  random_row = df.sample(1)
-  random_question = random_row["Question Text"].values[0]
-  random_answer = random_row["Answer"].values[0]
-  return random_question, random_answer
+    random_row = df.sample(1)
+    random_question = random_row["Question Text"].values[0]
+    random_answer = random_row["Answer"].values[0]
+    return random_question, random_answer


 # Define state
-state_question = SessionState.get(random_question='Who is the father of Arya Starck?', random_answer='', next_question='false', run_query='false')
+state_question = SessionState.get(
+    random_question="Who is the father of Arya Starck?", random_answer="", next_question="false", run_query="false"
+)

-# Initalize variables
+# Initialize variables
 eval_mode = False
 random_question = "Who is the father of Arya Starck?"
 eval_labels = os.getenv("EVAL_FILE", "eval_labels_example.csv")

 # UI search bar and sidebar
 st.write("# Haystack Demo")
 st.sidebar.header("Options")
-top_k_reader = st.sidebar.slider("Max. number of answers",min_value=1,max_value=10,value=3,step=1)
-top_k_retriever = st.sidebar.slider("Max. number of documents from retriever",min_value=1,max_value=10,value=3,step=1)
+top_k_reader = st.sidebar.slider("Max. number of answers", min_value=1, max_value=10, value=3, step=1)
+top_k_retriever = st.sidebar.slider(
+    "Max. number of documents from retriever", min_value=1, max_value=10, value=3, step=1
+)
 eval_mode = st.sidebar.checkbox("Evalution mode")
 debug = st.sidebar.checkbox("Show debug info")

+st.sidebar.write("## File Upload:")
+data_file = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"])
+# Upload file
+if data_file:
+    raw_json = upload_doc(data_file)
+    st.write(raw_json)
+    if debug:
+        st.subheader("REST API JSON response")
+        st.write(raw_json)
+
 # load csv into pandas dataframe
 if eval_mode:
     try:
         df = pd.read_csv(eval_labels, sep=";")
     except Exception:
-        sys.exit('The eval file was not found. Please check the README for more information.')
-    if state_question and hasattr(state_question, 'next_question') and hasattr(state_question, 'random_question') and state_question.next_question:
+        sys.exit("The eval file was not found. Please check the README for more information.")
+    if (
+        state_question
+        and hasattr(state_question, "next_question")
+        and hasattr(state_question, "random_question")
+        and state_question.next_question
+    ):
         random_question = state_question.random_question
         random_answer = state_question.random_answer
     else:
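Note: the comment block above explains why the vendored SessionState gist is needed — Streamlit reruns the whole script top to bottom on every interaction, discarding module-level values. A minimal sketch of the pattern the app relies on (the `counter` field is an invented example, not part of this commit):

    import SessionState  # the gist module vendored next to webapp.py
    import streamlit as st

    state = SessionState.get(counter=0)  # defaults apply only on the first run
    if st.button("Increment"):
        state.counter += 1               # mutation survives subsequent reruns
    st.write(state.counter)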
@@ -55,16 +80,16 @@ if eval_mode:
     if eval_mode:
         next_question = st.button("Load new question")
         if next_question:
-          random_question, random_answer = random_questions(df)
-          state_question.random_question = random_question
-          state_question.random_answer = random_answer
-          state_question.next_question = "true"
-          state_question.run_query = "false"
+            random_question, random_answer = random_questions(df)
+            state_question.random_question = random_question
+            state_question.random_answer = random_answer
+            state_question.next_question = "true"
+            state_question.run_query = "false"
         else:
-          state_question.next_question = "false"
+            state_question.next_question = "false"

 # Search bar
-question = st.text_input("Please provide your query:",value=random_question)
+question = st.text_input("Please provide your query:", value=random_question)
 if state_question and state_question.run_query:
     run_query = state_question.run_query
     st.button("Run")
@@ -76,38 +101,52 @@ raw_json_feedback = ""

 # Get results for query
 if run_query:
-    with st.spinner("Performing neural search on documents... 🧠 \n "
-                    "Do you want to optimize speed or accuracy? \n"
-                    "Check out the docs: https://haystack.deepset.ai/docs/latest/optimizationmd "):
-        results,raw_json = retrieve_doc(question,top_k_reader=top_k_reader,top_k_retriever=top_k_retriever)
+    with st.spinner(
+        "Performing neural search on documents... 🧠 \n "
+        "Do you want to optimize speed or accuracy? \n"
+        "Check out the docs: https://haystack.deepset.ai/docs/latest/optimizationmd "
+    ):
+        results, raw_json = retrieve_doc(question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever)

     # Show if we use a question of the given set
     if question == random_question and eval_mode:
         st.write("## Correct answers:")
         random_answer

     st.write("## Retrieved answers:")

     # Make every button key unique
     count = 0

     for result in results:
-        annotate_answer(result['answer'],result['context'])
-        '**Relevance:** ', result['relevance'] , '**Source:** ' , result['source']
+        annotate_answer(result["answer"], result["context"])
+        "**Relevance:** ", result["relevance"], "**Source:** ", result["source"]
         if eval_mode:
             # Define columns for buttons
-            button_col1, button_col2, button_col3, button_col4 = st.beta_columns([1,1,1,6])
-            if button_col1.button("👍", key=(result['answer'] + str(count)), help="Correct answer"):
-                raw_json_feedback = feedback_doc(question,"true",result['document_id'],1,"true",result['answer'],result['offset_start_in_doc'])
-                st.success('Thanks for your feedback')
-            if button_col2.button("👎", key=(result['answer'] + str(count)), help="Wrong answer and wrong passage"):
-                raw_json_feedback = feedback_doc(question,"false",result['document_id'],1,"false",result['answer'],result['offset_start_in_doc'])
-                st.success('Thanks for your feedback!')
-            if button_col3.button("👎👍", key=(result['answer'] + str(count)), help="Wrong answer, but correct passage"):
-                raw_json_feedback = feedback_doc(question,"false",result['document_id'],1,"true",result['answer'],result['offset_start_in_doc'])
-                st.success('Thanks for your feedback!')
-            count+=1
+            button_col1, button_col2, button_col3, button_col4 = st.beta_columns([1, 1, 1, 6])
+            if button_col1.button("👍", key=(result["answer"] + str(count)), help="Correct answer"):
+                raw_json_feedback = feedback_doc(
+                    question, "true", result["document_id"], 1, "true", result["answer"], result["offset_start_in_doc"]
+                )
+                st.success("Thanks for your feedback")
+            if button_col2.button("👎", key=(result["answer"] + str(count)), help="Wrong answer and wrong passage"):
+                raw_json_feedback = feedback_doc(
+                    question,
+                    "false",
+                    result["document_id"],
+                    1,
+                    "false",
+                    result["answer"],
+                    result["offset_start_in_doc"],
+                )
+                st.success("Thanks for your feedback!")
+            if button_col3.button("👎👍", key=(result["answer"] + str(count)), help="Wrong answer, but correct passage"):
+                raw_json_feedback = feedback_doc(
+                    question, "false", result["document_id"], 1, "true", result["answer"], result["offset_start_in_doc"]
+                )
+                st.success("Thanks for your feedback!")
+            count += 1
         st.write("___")
     if debug:
         st.subheader("REST API JSON response")
         st.write(raw_json)
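Note: the three feedback buttons map onto the two flags passed to `feedback_doc` — 👍 sends ("true", "true"), 👎 sends ("false", "false"), and 👎👍 sends ("false", "true") for (is_correct_answer, is_correct_document). A direct call against the feedback endpoint, with placeholder values for everything except the field names, looks like:

    import requests

    API_ENDPOINT = "http://localhost:8000"  # default used by ui/utils.py

    req = {
        "question": "Who is the father of Arya Stark?",  # placeholder query
        "is_correct_answer": "false",
        "document_id": "some-doc-id",                    # placeholder id from a query response
        "model_id": 1,
        "is_correct_document": "true",                   # the 👎👍 case: wrong answer, correct passage
        "answer": "Eddard Stark",                        # placeholder answer text
        "offset_start_in_doc": 0,
    }
    print(requests.post(f"{API_ENDPOINT}/feedback", json=req).json())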