Mirror of https://github.com/deepset-ai/haystack.git, synced 2025-12-30 00:30:09 +00:00
Add "API is loading" message in the UI (#1493)
* Create the /initialized endpoint * Now showing an error message if the connection fails, and a 'Haystack is loading' message while workers are starting up * Improve the appearance of the various messages * Newline at the end of file
parent 1cd17022af
commit 2de5385ac2
@@ -46,6 +46,18 @@ logger.info(f"Loaded pipeline nodes: {PIPELINE.graph.nodes.keys()}")
 concurrency_limiter = RequestLimiter(CONCURRENT_REQUEST_PER_WORKER)
 
 
+@router.get("/initialized")
+def initialized():
+    """
+    This endpoint can be used during startup to understand if the
+    server is ready to take any requests, or is still loading.
+
+    The recommended approach is to call this endpoint with a short timeout,
+    like 500ms, and in case of no reply, consider the server busy.
+    """
+    return True
+
+
 @router.post("/query", response_model=Response)
 def query(request: Request):
     with concurrency_limiter.run():
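The docstring's recommendation translates directly into a client-side readiness probe. A minimal sketch, not part of the commit (the base URL and helper name are illustrative assumptions):

    import requests

    def server_is_ready(base_url: str = "http://localhost:8000") -> bool:
        # Probe /initialized with the short timeout the docstring suggests;
        # a timeout or connection error means the server is still loading.
        try:
            return bool(requests.get(f"{base_url}/initialized", timeout=0.5).json())
        except requests.exceptions.RequestException:
            return False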
ui/utils.py (12)

@@ -1,14 +1,26 @@
 import os
 
+import logging
 import requests
 import streamlit as st
 
 API_ENDPOINT = os.getenv("API_ENDPOINT", "http://localhost:8000")
+STATUS = "initialized"
 DOC_REQUEST = "query"
 DOC_FEEDBACK = "feedback"
 DOC_UPLOAD = "file-upload"
 
 
+def haystack_is_ready():
+    url = f"{API_ENDPOINT}/{STATUS}"
+    try:
+        if requests.get(url).json():
+            return True
+    except Exception as e:
+        logging.exception(e)
+    return False
+
+
 @st.cache(show_spinner=False)
 def retrieve_doc(query, filters=None, top_k_reader=5, top_k_retriever=5):
     # Query Haystack API
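Callers that need to block until the API is up can wrap haystack_is_ready() in a retry loop. A minimal sketch; the retry count and delay are illustrative assumptions, not part of the commit:

    import time

    def wait_until_ready(retries: int = 30, delay: float = 1.0) -> bool:
        # Poll the /initialized endpoint until it answers truthily,
        # or give up after `retries` attempts. (Illustrative helper,
        # not defined in ui/utils.py.)
        for _ in range(retries):
            if haystack_is_ready():
                return True
            time.sleep(delay)
        return False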
ui/webapp.py (260)

@@ -1,6 +1,7 @@
 import os
 import sys
 
+import logging
 import pandas as pd
 import streamlit as st
 from annotated_text import annotated_text
@@ -9,9 +10,7 @@ from annotated_text import annotated_text
 # and every value gets lost. To keep track of our feedback state we use the official streamlit gist mentioned
 # here https://gist.github.com/tvst/036da038ab3e999a64497f42de966a92
 import SessionState
-from utils import feedback_doc
-from utils import retrieve_doc
-from utils import upload_doc
+from utils import feedback_doc, haystack_is_ready, retrieve_doc, upload_doc
 
 # Adjust to a question that you would like users to see in the search bar when they load the UI:
 DEFAULT_QUESTION_AT_STARTUP = "Who is the father of Arya Stark?"
@@ -44,130 +43,143 @@ def random_questions(df):
     return random_question, random_answer
 
 
-# Define state
-state_question = SessionState.get(
-    random_question=DEFAULT_QUESTION_AT_STARTUP, random_answer="", next_question="false", run_query="false"
-)
-
-# Initialize variables
-eval_mode = False
-random_question = DEFAULT_QUESTION_AT_STARTUP
-eval_labels = os.getenv("EVAL_FILE", "eval_labels_example.csv")
-
-# UI search bar and sidebar
-st.write("# Haystack Demo")
-st.sidebar.header("Options")
-top_k_reader = st.sidebar.slider("Max. number of answers", min_value=1, max_value=10, value=3, step=1)
-top_k_retriever = st.sidebar.slider(
-    "Max. number of documents from retriever", min_value=1, max_value=10, value=3, step=1
-)
-eval_mode = st.sidebar.checkbox("Evaluation mode")
-debug = st.sidebar.checkbox("Show debug info")
-
-st.sidebar.write("## File Upload:")
-data_files = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
-for data_file in data_files:
-    # Upload file
-    if data_file:
-        raw_json = upload_doc(data_file)
-        st.sidebar.write(raw_json)
-
-# load csv into pandas dataframe
-if eval_mode:
-    try:
-        df = pd.read_csv(eval_labels, sep=";")
-    except Exception:
-        sys.exit("The eval file was not found. Please check the README for more information.")
-    if (
-        state_question
-        and hasattr(state_question, "next_question")
-        and hasattr(state_question, "random_question")
-        and state_question.next_question
-    ):
-        random_question = state_question.random_question
-        random_answer = state_question.random_answer
-    else:
-        random_question, random_answer = random_questions(df)
-        state_question.random_question = random_question
-        state_question.random_answer = random_answer
-
-# Get next random question from the CSV
-if eval_mode:
-    next_question = st.button("Load new question")
-    if next_question:
-        random_question, random_answer = random_questions(df)
-        state_question.random_question = random_question
-        state_question.random_answer = random_answer
-        state_question.next_question = "true"
-        state_question.run_query = "false"
-    else:
-        state_question.next_question = "false"
-
-# Search bar
-question = st.text_input("Please provide your query:", value=random_question)
-if state_question and state_question.run_query:
-    run_query = state_question.run_query
-    st.button("Run")
-else:
-    run_query = st.button("Run")
-    state_question.run_query = run_query
-
-raw_json_feedback = ""
-
-# Get results for query
-if run_query:
-    with st.spinner(
-        "Performing neural search on documents... 🧠 \n "
-        "Do you want to optimize speed or accuracy? \n"
-        "Check out the docs: https://haystack.deepset.ai/usage/optimization "
-    ):
-        results, raw_json = retrieve_doc(question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever)
-
-    # Show if we use a question of the given set
-    if question == random_question and eval_mode:
-        st.write("## Correct answers:")
-        random_answer
-
-    st.write("## Results:")
-
-    # Make every button key unique
-    count = 0
-
-    for result in results:
-        if result["answer"]:
-            annotate_answer(result["answer"], result["context"])
-        else:
-            show_plain_documents(result["context"])
-        st.write("**Relevance:** ", result["relevance"], "**Source:** ", result["source"])
-        if eval_mode:
-            # Define columns for buttons
-            button_col1, button_col2, button_col3, button_col4 = st.beta_columns([1, 1, 1, 6])
-            if button_col1.button("👍", key=(result["context"] + str(count)), help="Correct answer"):
-                raw_json_feedback = feedback_doc(
-                    question, "true", result["document_id"], 1, "true", result["answer"], result["offset_start_in_doc"]
-                )
-                st.success("Thanks for your feedback")
-            if button_col2.button("👎", key=(result["context"] + str(count)), help="Wrong answer and wrong passage"):
-                raw_json_feedback = feedback_doc(
-                    question,
-                    "false",
-                    result["document_id"],
-                    1,
-                    "false",
-                    result["answer"],
-                    result["offset_start_in_doc"],
-                )
-                st.success("Thanks for your feedback!")
-            if button_col3.button("👎👍", key=(result["context"] + str(count)), help="Wrong answer, but correct passage"):
-                raw_json_feedback = feedback_doc(
-                    question, "false", result["document_id"], 1, "true", result["answer"], result["offset_start_in_doc"]
-                )
-                st.success("Thanks for your feedback!")
-            count += 1
-        st.write("___")
-    if debug:
-        st.subheader("REST API JSON response")
-        st.write(raw_json)
+def main():
+    # Define state
+    state_question = SessionState.get(
+        random_question=DEFAULT_QUESTION_AT_STARTUP, random_answer="", next_question="false", run_query="false"
+    )
+
+    # Initialize variables
+    eval_mode = False
+    random_question = DEFAULT_QUESTION_AT_STARTUP
+    eval_labels = os.getenv("EVAL_FILE", "eval_labels_example.csv")
+
+    # UI search bar and sidebar
+    st.write("# Haystack Demo")
+    st.sidebar.header("Options")
+    top_k_reader = st.sidebar.slider("Max. number of answers", min_value=1, max_value=10, value=3, step=1)
+    top_k_retriever = st.sidebar.slider(
+        "Max. number of documents from retriever", min_value=1, max_value=10, value=3, step=1
+    )
+    eval_mode = st.sidebar.checkbox("Evaluation mode")
+    debug = st.sidebar.checkbox("Show debug info")
+
+    st.sidebar.write("## File Upload:")
+    data_files = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
+    for data_file in data_files:
+        # Upload file
+        if data_file:
+            raw_json = upload_doc(data_file)
+            st.sidebar.write(raw_json)
+            if debug:
+                st.subheader("REST API JSON response")
+                st.sidebar.write(raw_json)
+
+    # load csv into pandas dataframe
+    if eval_mode:
+        try:
+            df = pd.read_csv(eval_labels, sep=";")
+        except Exception:
+            sys.exit("The eval file was not found. Please check the README for more information.")
+        if (
+            state_question
+            and hasattr(state_question, "next_question")
+            and hasattr(state_question, "random_question")
+            and state_question.next_question
+        ):
+            random_question = state_question.random_question
+            random_answer = state_question.random_answer
+        else:
+            random_question, random_answer = random_questions(df)
+            state_question.random_question = random_question
+            state_question.random_answer = random_answer
+
+    # Get next random question from the CSV
+    if eval_mode:
+        next_question = st.button("Load new question")
+        if next_question:
+            random_question, random_answer = random_questions(df)
+            state_question.random_question = random_question
+            state_question.random_answer = random_answer
+            state_question.next_question = True
+            state_question.run_query = False
+        else:
+            state_question.next_question = False
+
+    # Search bar
+    question = st.text_input("Please provide your query:", value=random_question)
+    if state_question and state_question.run_query:
+        run_query = state_question.run_query
+        st.button("Run")
+    else:
+        run_query = st.button("Run")
+        state_question.run_query = run_query
+
+    raw_json_feedback = ""
+
+    with st.spinner("⌛️ Haystack is starting..."):
+        if not haystack_is_ready():
+            st.error("🚫 Connection Error. Is Haystack running?")
+            run_query = False
+
+    # Get results for query
+    if run_query:
+        with st.spinner(
+            "🧠 Performing neural search on documents... \n "
+            "Do you want to optimize speed or accuracy? \n"
+            "Check out the docs: https://haystack.deepset.ai/usage/optimization "
+        ):
+            try:
+                results, raw_json = retrieve_doc(question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever)
+            except Exception as e:
+                logging.exception(e)
+                st.error("🐞 An error occurred during the request. Check the logs in the console to know more.")
+                return
+
+        # Show if we use a question of the given set
+        if question == random_question and eval_mode:
+            st.write("## Correct answers:")
+            random_answer
+
+        st.write("## Results:")
+
+        # Make every button key unique
+        count = 0
+
+        for result in results:
+            if result["answer"]:
+                annotate_answer(result["answer"], result["context"])
+            else:
+                show_plain_documents(result["context"])
+            st.write("**Relevance:** ", result["relevance"], "**Source:** ", result["source"])
+            if eval_mode:
+                # Define columns for buttons
+                button_col1, button_col2, button_col3, button_col4 = st.beta_columns([1, 1, 1, 6])
+                if button_col1.button("👍", key=(result["context"] + str(count)), help="Correct answer"):
+                    raw_json_feedback = feedback_doc(
+                        question, "true", result["document_id"], 1, "true", result["answer"], result["offset_start_in_doc"]
+                    )
+                    st.success("Thanks for your feedback")
+                if button_col2.button("👎", key=(result["context"] + str(count)), help="Wrong answer and wrong passage"):
+                    raw_json_feedback = feedback_doc(
+                        question,
+                        "false",
+                        result["document_id"],
+                        1,
+                        "false",
+                        result["answer"],
+                        result["offset_start_in_doc"],
+                    )
+                    st.success("Thanks for your feedback!")
+                if button_col3.button("👎👍", key=(result["context"] + str(count)), help="Wrong answer, but correct passage"):
+                    raw_json_feedback = feedback_doc(
+                        question, "false", result["document_id"], 1, "true", result["answer"], result["offset_start_in_doc"]
+                    )
+                    st.success("Thanks for your feedback!")
+                count += 1
+            st.write("___")
+        if debug:
+            st.subheader("REST API JSON response")
+            st.sidebar.write(raw_json)
+            st.write(raw_json)
+
+
+main()
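Reduced to its essentials, the startup gate that main() adds looks like this. A minimal standalone sketch, not the commit's exact code (it uses st.stop() instead of the commit's run_query = False flag, assuming a Streamlit version that provides it; haystack_is_ready comes from ui/utils.py):

    import streamlit as st
    from utils import haystack_is_ready

    # Show a spinner while probing the API; if the probe fails, surface
    # a clear error and halt the script run instead of letting the query
    # request fail later with a confusing message.
    with st.spinner("⌛️ Haystack is starting..."):
        if not haystack_is_ready():
            st.error("🚫 Connection Error. Is Haystack running?")
            st.stop()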