diff --git a/.gitignore b/.gitignore
index 52d14980a..0cece31b1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -127,3 +127,6 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+
+# haystack files
+farm_haystack/database/qa.db
diff --git a/example.py b/example.py
deleted file mode 100644
index e69de29bb..000000000
diff --git a/farm_haystack/__init__.py b/farm_haystack/__init__.py
index 98870bbf1..db1f779fd 100644
--- a/farm_haystack/__init__.py
+++ b/farm_haystack/__init__.py
@@ -1,10 +1,17 @@
 from farm_haystack.retriever.tfidf import TfidfRetriever
-from farm_haystack.reader.adaptive_model import FARMReader
+from farm_haystack.reader.farm import FARMReader
 from farm_haystack.database import db
 import logging
+import farm
+
+import pandas as pd
+pd.options.display.max_colwidth = 80
 
 logger = logging.getLogger(__name__)
+logging.getLogger('farm').setLevel(logging.WARNING)
+logging.getLogger('transformers').setLevel(logging.WARNING)
+
 
 class Finder:
     """
@@ -82,8 +89,11 @@ class Finder:
         df_sliced = self.retriever.df.loc[retrieved_scores.keys()]
         if verbose:
             logger.info(
-                f"Identified {df_sliced.shape[0]} candidates via retriever:\n {df_sliced}"
+                f"Identified {df_sliced.shape[0]} candidates via retriever:\n {df_sliced.to_string(col_space=10, index=False)}"
             )
+            logger.info(
+                f"Applying the reader now to look for the answer in detail ..."
+            )
         inference_dicts = []
         for idx, row in df_sliced.iterrows():
             if candidate_doc_ids and row["document_id"] not in candidate_doc_ids:
diff --git a/farm_haystack/api/inference.py b/farm_haystack/api/inference.py
index e17ff65a9..0a42af0de 100644
--- a/farm_haystack/api/inference.py
+++ b/farm_haystack/api/inference.py
@@ -8,7 +8,7 @@ from flask_restplus import Api, Resource
 
 from farm_haystack import Finder
 from farm_haystack.database import app
-from farm_haystack.reader.adaptive_model import FARMReader
+from farm_haystack.reader.farm import FARMReader
 from farm_haystack.retriever.tfidf import TfidfRetriever
 
 CORS(app)
diff --git a/farm_haystack/reader/adaptive_model.py b/farm_haystack/reader/farm.py
similarity index 100%
rename from farm_haystack/reader/adaptive_model.py
rename to farm_haystack/reader/farm.py
diff --git a/farm_haystack/retriever/tfidf.py b/farm_haystack/retriever/tfidf.py
index d371f84af..47f7bb86c 100644
--- a/farm_haystack/retriever/tfidf.py
+++ b/farm_haystack/retriever/tfidf.py
@@ -66,7 +66,7 @@ class TfidfRetriever(BaseRetriever):
                     Paragraph(document_id=doc.id, paragraph_id=p_id, text=(p,))
                 )
                 p_id += 1
-        logger.info(f"Found {len(paragraphs)} candidate passages from {len(documents)} docs in DB")
+        logger.info(f"Found {len(paragraphs)} candidate paragraphs from {len(documents)} docs in DB")
         return paragraphs
 
     def retrieve(self, query, candidate_doc_ids=None, top_k=10):
diff --git a/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb b/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb
index 8226a8c38..025090aac 100644
--- a/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb
+++ b/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb
@@ -19,15 +19,45 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Current working directory is /home/mp/deepset/dev/haystack\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Let's start by adjusting the working directory so that it is the root of the repository\n",
+    "# This should be run just once.\n",
+    "import os\n",
+    "os.chdir('../')\n",
+    "print(\"Current working directory is {}\".format(os.getcwd()))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
    "metadata": {
     "pycharm": {
      "is_executing": false
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "I1125 16:55:41.544814 139975239116608 file_utils.py:39] PyTorch version 1.3.0 available.\n",
+      "I1125 16:55:41.619155 139975239116608 modeling_xlnet.py:194] Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .\n"
+     ]
+    }
+   ],
    "source": [
-    "from farm_haystack.reader.adaptive_model import FARMReader\n",
+    "from farm_haystack.reader.farm import FARMReader\n",
     "from farm_haystack.retriever.tfidf import TfidfRetriever\n",
     "from farm_haystack import Finder\n",
     "from farm_haystack.indexing.io import write_documents_to_db, fetch_archive_from_http\n",
@@ -44,13 +74,23 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "metadata": {
     "pycharm": {
      "is_executing": false
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "I1125 16:55:41.863932 139975239116608 io.py:57] Fetching from https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/wiki_gameofthrones_txt.zip to `data/article_txt_got`\n",
+      "100%|██████████| 1167348/1167348 [00:00<00:00, 9196388.44B/s]\n",
+      "I1125 16:55:47.962270 139975239116608 io.py:30] Wrote 517 docs to DB\n"
+     ]
+    }
+   ],
    "source": [
     "# Init a database (default: sqllite)\n",
     "from farm_haystack.database import db\n",
@@ -77,14 +117,22 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 4,
    "metadata": {
     "pycharm": {
      "is_executing": false,
      "name": "#%%\n"
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "I1125 16:55:48.018222 139975239116608 tfidf.py:69] Found 2811 candidate paragraphs from 517 docs in DB\n"
+     ]
+    }
+   ],
    "source": [
     "# A retriever identifies the k most promising chunks of text that might contain the answer for our question\n",
     "# Retrievers use some simple but fast algorithm, here: TF-IDF\n",
@@ -93,22 +141,30 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 5,
    "metadata": {
     "pycharm": {
      "is_executing": false
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "W1125 16:55:53.613250 139975239116608 processor.py:170] Loading tokenizer from deprecated FARM config. If you used `custom_vocab` or `never_split_chars`, this won't work anymore.\n"
+     ]
+    }
+   ],
    "source": [
     "# A reader scans the text chunks in detail and extracts the k best answers\n",
     "# Reader use more powerful but slower deep learning models, here: a BERT QA model trained via FARM on Squad 2.0\n",
-    "reader = FARMReader(model_dir=\"../FARM/saved_models/bert-english-qa-large\")"
+    "reader = FARMReader(model_dir=\"../FARM/saved_models/bert-english-qa-large\", use_gpu=False)"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 6,
    "metadata": {
     "pycharm": {
      "is_executing": false
@@ -129,13 +185,34 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 7,
    "metadata": {
     "pycharm": {
      "is_executing": false
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "I1125 16:55:54.057870 139975239116608 __init__.py:92] Identified 10 candidates via retriever:\n",
+      " paragraph_id document_id text\n",
+      " 1257 227 \\n===Arya Stark===\\n'''Arya Stark''' portrayed by Maisie Williams. Arya Star...\n",
+      " 1023 169 \\n====Season 8====\\nArya reunites with Jon, Gendry, and the Hound, who have ...\n",
+      " 1016 169 \\n====Season 1====\\nArya accompanies her father Ned and her sister Sansa to ...\n",
+      " 718 144 \\n===''A Game of Thrones''===\\nSansa Stark begins the novel by being betroth...\n",
+      " 161 33 \\n===In Braavos===\\nLady Crane returns to her chambers to find a wounded Ary...\n",
+      " 1846 304 \\n== Characters ==\\nThe tale is told through the eyes of 9 recurring POV cha...\n",
+      " 1009 169 \\n==== ''A Game of Thrones'' ====\\nArya adopts a direwolf cub, which she nam...\n",
+      " 1022 169 \\n====Season 7====\\nTaking the face of Walder Frey, Arya gathers the men of ...\n",
+      " 847 163 \\n=== Arya Stark ===\\nArya Stark is the third child and younger daughter of ...\n",
+      " 562 117 \\n===On the Kingsroad===\\nCity Watchmen search the caravan for Gendry but ar...\n",
+      "I1125 16:55:54.058521 139975239116608 __init__.py:95] Applying the reader now to look for the answer in detail ...\n",
+      "Inferencing: 100%|██████████| 1/1 [00:24<00:00, 24.35s/it]\n"
+     ]
+    }
+   ],
    "source": [
     "# You can configure how many candidates the reader and retriever shall return\n",
     "# The higher top_k_retriever, the better (but also the slower) your answers. \n",
@@ -144,7 +221,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 8,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -154,17 +231,48 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 9,
    "metadata": {
     "pycharm": {
      "is_executing": false,
      "name": "#%%\n"
     }
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[ { 'answer': 'Eddard',\n",
+      " 'context': 'ry warrior queen. She travels with her father, Eddard, to '\n",
+      " \"King's Landing when he is made Hand of the\"},\n",
+      " { 'answer': 'Ned',\n",
+      " 'context': '\\n'\n",
+      " '====Season 1====\\n'\n",
+      " 'Arya accompanies her father Ned and her sister Sansa to '\n",
+      " \"King's Landing. Before the\"},\n",
+      " { 'answer': 'Lord Eddard',\n",
+      " 'context': ' is the younger daughter and third child of Lord Eddard '\n",
+      " 'and Catelyn Stark of Winterfell. Ever the to'},\n",
+      " { 'answer': 'Lord Eddard Stark',\n",
+      " 'context': ' Tourney of the Hand to honour her father Lord Eddard '\n",
+      " 'Stark, Sansa Stark is enchanted by the knights'},\n",
+      " { 'answer': 'Eddard and Catelyn Stark',\n",
+      " 'context': 'e third child and younger daughter of Eddard and Catelyn '\n",
+      " 'Stark. She serves as a POV character for 33'}]\n"
+     ]
+    }
+   ],
    "source": [
     "print_answers(prediction, details=\"minimal\")"
    ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
   }
  ],
  "metadata": {