import os
import sys
import html
import logging
import pandas as pd
from json import JSONDecodeError
from pathlib import Path
import streamlit as st
from annotated_text import annotation
from markdown import markdown
from htbuilder import H

# streamlit does not support any states out of the box. On every button click, streamlit reloads the whole page
# and every value gets lost. To keep track of our feedback state we use the official streamlit gist mentioned
# here: https://gist.github.com/tvst/036da038ab3e999a64497f42de966a92
import SessionState
from utils import HS_VERSION, feedback_doc, haystack_is_ready, retrieve_doc, upload_doc, haystack_version

# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = "What's the capital of France?"

# Labels for the evaluation
EVAL_LABELS = os.getenv("EVAL_FILE", Path(__file__).parent / "eval_labels_example.csv")

# Whether the file upload should be enabled or not
DISABLE_FILE_UPLOAD = os.getenv("HAYSTACK_UI_DISABLE_FILE_UPLOAD")


def main():

    # Persistent state
    state = SessionState.get(
        random_question=DEFAULT_QUESTION_AT_STARTUP,
        random_answer="",
        results=None,
        raw_json=None,
        get_next_question=True,
    )

    # Small callback to reset the interface in case the text of the question changes
    def reset_results(*args):
        state.results = None
        state.raw_json = None

    # Title
    st.write("# Haystack Demo - Explore the world")
    st.write("""
This demo takes its data from a selection of Wikipedia pages crawled in November 2021 on the topic of
'Countries and capital cities'. Ask any question on this topic and see if Haystack can find the correct
answer to your query!

*Note: do not use keywords, but type full-fledged questions.* The demo is not optimized to deal with
keyword queries and might misunderstand you.
""")

    # Sidebar
    st.sidebar.header("Options")
    top_k_reader = st.sidebar.slider(
        "Max. number of answers", min_value=1, max_value=10, value=3, step=1, on_change=reset_results
    )
    top_k_retriever = st.sidebar.slider(
        "Max. number of documents from retriever", min_value=1, max_value=10, value=3, step=1, on_change=reset_results
    )
    eval_mode = st.sidebar.checkbox("Evaluation mode")
    debug = st.sidebar.checkbox("Show debug info")

    # File upload block
    if not DISABLE_FILE_UPLOAD:
        st.sidebar.write("## File Upload:")
        data_files = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
        for data_file in data_files:
            # Upload file
            if data_file:
                raw_json = upload_doc(data_file)
                st.sidebar.write(str(data_file.name) + "    ✅ ")
                if debug:
                    st.subheader("REST API JSON response")
                    st.sidebar.write(raw_json)

    hs_version = None
    try:
        hs_version = f" (v{haystack_version()})"
    except Exception:
        pass

    st.sidebar.markdown(f"""
    """, unsafe_allow_html=True)

    # Load csv into pandas dataframe
    try:
        df = pd.read_csv(EVAL_LABELS, sep=";")
    except Exception:
        st.error(
            "The eval file was not found. Please check the demo's "
            "[README](https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information."
        )
        sys.exit(
            f"The eval file was not found under `{EVAL_LABELS}`. "
            "Please check the README (https://github.com/deepset-ai/haystack/tree/master/ui/README.md) "
            "for more information."
        )
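    # The "Question Text" and "Answer" columns of the eval CSV drive the "Random question"
    # button and the gold answers shown in evaluation mode below.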
    # Search bar
    question = st.text_input("", value=state.random_question, max_chars=100, on_change=reset_results)
    col1, col2 = st.columns(2)
    col1.markdown("", unsafe_allow_html=True)
    col2.markdown("", unsafe_allow_html=True)

    # Run button
    run_query = col1.button("Run")

    # Get next random question from the CSV
    state.get_next_question = col2.button("Random question")
    if state.get_next_question:
        reset_results()
        new_row = df.sample(1)
        while new_row["Question Text"].values[0] == state.random_question:
            # Avoid picking the same question twice (the change would not be visible in the UI)
            new_row = df.sample(1)
        state.random_question = new_row["Question Text"].values[0]
        state.random_answer = new_row["Answer"].values[0]
        # Re-run the script, setting the random question as the textbox value.
        # Unfortunately necessary, as the Random Question button sits _below_ the textbox.
        raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))

    # Check the connection
    with st.spinner("⌛️    Haystack is starting..."):
        if not haystack_is_ready():
            st.error("🚫    Connection Error. Is Haystack running?")
            run_query = False
            reset_results()

    # Get results for query
    if run_query and question:
        reset_results()
        with st.spinner(
            "🧠    Performing neural search on documents... \n "
            "Do you want to optimize speed or accuracy? \n"
            "Check out the docs: https://haystack.deepset.ai/usage/optimization "
        ):
            try:
                state.results, state.raw_json = retrieve_doc(
                    question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever
                )
            except JSONDecodeError:
                st.error("👓    An error occurred reading the results. Is the document store working?")
                return
            except Exception as e:
                logging.exception(e)
                if "The server is busy processing requests" in str(e):
                    st.error("🧑‍🌾    All our workers are busy! Try again later.")
                else:
                    st.error("🐞    An error occurred during the request. Check the logs in the console for more information.")
                return

    if state.results:
        # Show the gold answer if the question comes from the given set
        if question == state.random_question and eval_mode:
            st.write("## Correct answers:")
            st.write(state.random_answer)

        st.write("## Results:")
        count = 0  # Make every button key unique

        for result in state.results:
            if result["answer"]:
                answer, context = result["answer"], result["context"]
                start_idx = context.find(answer)
                end_idx = start_idx + len(answer)
                # Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
                st.write(
                    markdown(context[:start_idx] + str(annotation(answer, "ANSWER", "#8ef")) + context[end_idx:]),
                    unsafe_allow_html=True,
                )
                st.write("**Relevance:** ", result["relevance"], "**Source:** ", result["source"])
            else:
                st.info(
                    "🤔    Haystack is unsure whether any of the documents contain an answer to your question. "
                    "Try to reformulate it!"
                )
                st.write("**Relevance:** ", result["relevance"])

            if eval_mode:
                # Define columns for buttons
                button_col1, button_col2, button_col3, _ = st.columns([1, 1, 1, 6])
                if button_col1.button("👍", key=f"{result['context']}{count}1", help="Correct answer"):
                    feedback_doc(
                        question=question,
                        is_correct_answer="true",
                        document_id=result.get("document_id", None),
                        model_id=1,
                        is_correct_document="true",
                        answer=result["answer"],
                        offset_start_in_doc=result.get("offset_start_in_doc", None),
                    )
                    st.success("✨    Thanks for your feedback!    ✨")
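                # The two thumbs-down buttons below send the same negative answer feedback;
                # they differ only in is_correct_document, i.e. whether the retrieved
                # passage itself was relevant despite the wrong answer.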
✨") if button_col2.button("👎", key=f"{result['context']}{count}2", help="Wrong answer and wrong passage"): feedback_doc( question=question, is_correct_answer="false", document_id=result.get("document_id", None), model_id=1, is_correct_document="false", answer=result["answer"], offset_start_in_doc=result.get("offset_start_in_doc", None) ) st.success("✨    Thanks for your feedback!    ✨") if button_col3.button("👎👍", key=f"{result['context']}{count}3", help="Wrong answer, but correct passage"): feedback_doc( question=question, is_correct_answer="false", document_id=result.get("document_id", None), model_id=1, is_correct_document="true", answer=result["answer"], offset_start_in_doc=result.get("offset_start_in_doc", None) ) st.success("✨    Thanks for your feedback!    ✨") count += 1 st.write("___") if debug: st.subheader("REST API JSON response") st.write(state.raw_json) main()