haystack/ui/webapp.py


import os
import sys
import logging
import pandas as pd
from json import JSONDecodeError
from pathlib import Path
try:
    import streamlit as st
    from annotated_text import annotation
    from markdown import markdown

    from ui.utils import haystack_is_ready, query, send_feedback, upload_doc, haystack_version, get_backlink
except (ImportError, ModuleNotFoundError) as ie:
    from haystack.utils.import_utils import _optional_component_not_installed

    _optional_component_not_installed(__name__, "ui", ie)

# Adjust to a question that you would like users to see in the search bar when they load the UI:
DEFAULT_QUESTION_AT_STARTUP = os.getenv("DEFAULT_QUESTION_AT_STARTUP", "What's the capital of France?")
DEFAULT_ANSWER_AT_STARTUP = os.getenv("DEFAULT_ANSWER_AT_STARTUP", "Paris")
# Sliders
DEFAULT_DOCS_FROM_RETRIEVER = int(os.getenv("DEFAULT_DOCS_FROM_RETRIEVER", 3))
DEFAULT_NUMBER_OF_ANSWERS = int(os.getenv("DEFAULT_NUMBER_OF_ANSWERS", 3))
# Labels for the evaluation
EVAL_LABELS = os.getenv("EVAL_FILE", Path(__file__).parent / "eval_labels_example.csv")
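# The eval file is expected to be a semicolon-separated CSV with "Question Text" and
# "Answer" columns, as used by the "Random question" button below.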
# Whether the file upload should be enabled or not
DISABLE_FILE_UPLOAD = bool(os.getenv("DISABLE_FILE_UPLOAD"))
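# Note: bool() of an environment variable is True for any non-empty string, so even
# DISABLE_FILE_UPLOAD=0 or DISABLE_FILE_UPLOAD=false disables uploads; unset the
# variable to re-enable the upload block.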


def set_state_if_absent(key, value):
    if key not in st.session_state:
        st.session_state[key] = value
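

# Streamlit re-executes this whole script on every user interaction; st.session_state
# is the only state that survives a rerun, so the question, answer and results are
# kept there.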
def main():

    st.set_page_config(page_title="Haystack Demo", page_icon="https://haystack.deepset.ai/img/HaystackIcon.png")

    # Persistent state
    set_state_if_absent("question", DEFAULT_QUESTION_AT_STARTUP)
    set_state_if_absent("answer", DEFAULT_ANSWER_AT_STARTUP)
    set_state_if_absent("results", None)
    set_state_if_absent("raw_json", None)
    set_state_if_absent("random_question_requested", False)

    # Small callback to reset the interface in case the text of the question changes
    def reset_results(*args):
        st.session_state.answer = None
        st.session_state.results = None
        st.session_state.raw_json = None
    # Title
    st.write("# Haystack Demo - Explore the world")
    st.markdown(
        """
This demo takes its data from a selection of Wikipedia pages crawled in November 2021 on the topic of

<h3 style='text-align:center;padding: 0 0 1rem;'>Countries and capital cities</h3>

Ask any question on this topic and see if Haystack can find the correct answer to your query!

*Note: do not use keywords, but full-fledged questions.* The demo is not optimized to deal with keyword
queries and might misunderstand you.
""",
        unsafe_allow_html=True,
    )
    # Sidebar
    st.sidebar.header("Options")
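    # top_k_retriever limits how many candidate documents the retriever passes on;
    # top_k_reader limits how many answers the reader extracts from those documents.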
    top_k_reader = st.sidebar.slider(
        "Max. number of answers",
        min_value=1,
        max_value=10,
        value=DEFAULT_NUMBER_OF_ANSWERS,
        step=1,
        on_change=reset_results,
    )
    top_k_retriever = st.sidebar.slider(
        "Max. number of documents from retriever",
        min_value=1,
        max_value=10,
        value=DEFAULT_DOCS_FROM_RETRIEVER,
        step=1,
        on_change=reset_results,
    )
    eval_mode = st.sidebar.checkbox("Evaluation mode")
    debug = st.sidebar.checkbox("Show debug info")
    # File upload block
    if not DISABLE_FILE_UPLOAD:
        st.sidebar.write("## File Upload:")
        data_files = st.sidebar.file_uploader("", type=["pdf", "txt", "docx"], accept_multiple_files=True)
        for data_file in data_files:
            # Upload file
            if data_file:
                raw_json = upload_doc(data_file)
                st.sidebar.write(str(data_file.name) + " &nbsp;&nbsp; ✅ ")
                if debug:
                    st.subheader("REST API JSON response")
                    st.sidebar.write(raw_json)
    hs_version = ""
    try:
        hs_version = f" <small>(v{haystack_version()})</small>"
    except Exception:
        pass
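    # Sidebar footer. The `footer {opacity: 0}` rule hides Streamlit's built-in footer
    # so that only this custom footer is shown.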
    st.sidebar.markdown(
        f"""
    <style>
        a {{
            text-decoration: none;
        }}
        .haystack-footer {{
            text-align: center;
        }}
        .haystack-footer h4 {{
            margin: 0.1rem;
            padding: 0;
        }}
        footer {{
            opacity: 0;
        }}
    </style>
    <div class="haystack-footer">
        <hr />
        <h4>Built with <a href="https://www.deepset.ai/haystack">Haystack</a>{hs_version}</h4>
        <p>Get it on <a href="https://github.com/deepset-ai/haystack/">GitHub</a> &nbsp;&nbsp; - &nbsp;&nbsp; Read the <a href="https://haystack.deepset.ai/overview/intro">Docs</a></p>
        <small>Data crawled from <a href="https://en.wikipedia.org/wiki/Category:Lists_of_countries_by_continent">Wikipedia</a> in November 2021.<br />See the <a href="https://creativecommons.org/licenses/by-sa/3.0/">License</a> (CC BY-SA 3.0).</small>
    </div>
    """,
        unsafe_allow_html=True,
    )
    # Load csv into pandas dataframe
    try:
        df = pd.read_csv(EVAL_LABELS, sep=";")
    except Exception:
        st.error(
            "The eval file was not found. Please check the demo's "
            "[README](https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information."
        )
        sys.exit(
            f"The eval file was not found under `{EVAL_LABELS}`. Please check the README "
            "(https://github.com/deepset-ai/haystack/tree/master/ui/README.md) for more information."
        )
    # Search bar
    question = st.text_input("", value=st.session_state.question, max_chars=100, on_change=reset_results)
    col1, col2 = st.columns(2)
    col1.markdown("<style>.stButton button {width:100%;}</style>", unsafe_allow_html=True)
    col2.markdown("<style>.stButton button {width:100%;}</style>", unsafe_allow_html=True)

    # Run button
    run_pressed = col1.button("Run")
    # Get next random question from the CSV
    if col2.button("Random question"):
        reset_results()
        new_row = df.sample(1)
        # Avoid picking the same question twice (the change is not visible on the UI)
        while new_row["Question Text"].values[0] == st.session_state.question:
            new_row = df.sample(1)
        st.session_state.question = new_row["Question Text"].values[0]
        st.session_state.answer = new_row["Answer"].values[0]
        st.session_state.random_question_requested = True
        # Re-runs the script setting the random question as the textbox value
        # Unfortunately necessary as the Random Question button is _below_ the textbox
        raise st.script_runner.RerunException(st.script_request_queue.RerunData(None))
    else:
        st.session_state.random_question_requested = False
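    # Run the query if the Run button was pressed or the question text changed,
    # unless the change came from the "Random question" rerun above.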
    run_query = (
        run_pressed or question != st.session_state.question
    ) and not st.session_state.random_question_requested
    # Check the connection
    with st.spinner("⌛️ &nbsp;&nbsp; Haystack is starting..."):
        if not haystack_is_ready():
            st.error("🚫 &nbsp;&nbsp; Connection Error. Is Haystack running?")
            run_query = False
            reset_results()
    # Get results for query
    if run_query and question:
        reset_results()
        st.session_state.question = question

        with st.spinner(
            "🧠 &nbsp;&nbsp; Performing neural search on documents... \n "
            "Do you want to optimize speed or accuracy? \n"
            "Check out the docs: https://haystack.deepset.ai/usage/optimization "
        ):
            try:
                st.session_state.results, st.session_state.raw_json = query(
                    question, top_k_reader=top_k_reader, top_k_retriever=top_k_retriever
                )
            except JSONDecodeError:
                st.error("👓 &nbsp;&nbsp; An error occurred reading the results. Is the document store working?")
                return
            except Exception as e:
                logging.exception(e)
                if "The server is busy processing requests" in str(e) or "503" in str(e):
                    st.error("🧑‍🌾 &nbsp;&nbsp; All our workers are busy! Try again later.")
                else:
                    st.error("🐞 &nbsp;&nbsp; An error occurred during the request.")
                return
    if st.session_state.results:

        # Show the gold answer if we use a question of the given set
        if eval_mode and st.session_state.answer:
            st.write("## Correct answer:")
            st.write(st.session_state.answer)

        st.write("## Results:")

        for count, result in enumerate(st.session_state.results):
            if result["answer"]:
                answer, context = result["answer"], result["context"]
                start_idx = context.find(answer)
                end_idx = start_idx + len(answer)
                # Hack due to this bug: https://github.com/streamlit/streamlit/issues/3190
                st.write(
                    markdown(context[:start_idx] + str(annotation(answer, "ANSWER", "#8ef")) + context[end_idx:]),
                    unsafe_allow_html=True,
                )
                source = ""
                url, title = get_backlink(result)
                if url and title:
                    source = f"[{result['document']['meta']['title']}]({result['document']['meta']['url']})"
                else:
                    source = f"{result['source']}"
                st.markdown(f"**Relevance:** {result['relevance']} - **Source:** {source}")
            else:
                st.info(
                    "🤔 &nbsp;&nbsp; Haystack is unsure whether any of the documents contain an "
                    "answer to your question. Try to reformulate it!"
                )
                st.write("**Relevance:** ", result["relevance"])
            if eval_mode and result["answer"]:
                # Define columns for buttons
                is_correct_answer = None
                is_correct_document = None

                button_col1, button_col2, button_col3, _ = st.columns([1, 1, 1, 6])
                if button_col1.button("👍", key=f"{result['context']}{count}1", help="Correct answer"):
                    is_correct_answer = True
                    is_correct_document = True
                if button_col2.button("👎", key=f"{result['context']}{count}2", help="Wrong answer and wrong passage"):
                    is_correct_answer = False
                    is_correct_document = False
                if button_col3.button("👎👍", key=f"{result['context']}{count}3", help="Wrong answer, but correct passage"):
                    is_correct_answer = False
                    is_correct_document = True

                if is_correct_answer is not None and is_correct_document is not None:
                    try:
                        send_feedback(
                            query=question,
                            answer_obj=result["_raw"],
                            is_correct_answer=is_correct_answer,
                            is_correct_document=is_correct_document,
                            document=result["document"],
                        )
                        st.success("✨ &nbsp;&nbsp; Thanks for your feedback! &nbsp;&nbsp; ✨")
                    except Exception as e:
                        logging.exception(e)
                        st.error("🐞 &nbsp;&nbsp; An error occurred while submitting your feedback!")

            st.write("___")
        if debug:
            st.subheader("REST API JSON response")
            st.write(st.session_state.raw_json)


main()