diff --git a/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb b/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb
index f52734c5b..1fd5a1857 100644
--- a/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb
+++ b/tutorials/Tutorial1_Basic_QA_Pipeline.ipynb
@@ -16,10 +16,39 @@
     "In this tutorial we will work on a slightly different domain: \"Game of Thrones\". \n",
     "\n",
     "Let's see how we can use a bunch of Wikipedia articles to answer a variety of questions about the \n",
-    "marvellous seven kingdoms... \n",
-    "\n"
+    "marvellous seven kingdoms... \n"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Prepare environment\n",
+    "\n",
+    "#### Colab: Enable the GPU runtime\n",
+    "Make sure you enable the GPU runtime to experience decent speed in this tutorial.\n",
+    "**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**\n",
+    "\n",
+    ""
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Make sure you have a GPU running\n",
+    "!nvidia-smi"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -306,7 +335,7 @@
     "# Load a local model or any of the QA models on\n",
     "# Hugging Face's model hub (https://huggingface.co/models)\n",
     "\n",
-    "reader = FARMReader(model_name_or_path=\"deepset/roberta-base-squad2\", use_gpu=False)"
+    "reader = FARMReader(model_name_or_path=\"deepset/roberta-base-squad2\", use_gpu=True)"
    ]
   },
   {
diff --git a/tutorials/Tutorial1_Basic_QA_Pipeline.py b/tutorials/Tutorial1_Basic_QA_Pipeline.py
index c901436b7..1e55bc971 100755
--- a/tutorials/Tutorial1_Basic_QA_Pipeline.py
+++ b/tutorials/Tutorial1_Basic_QA_Pipeline.py
@@ -128,7 +128,7 @@ retriever = ElasticsearchRetriever(document_store=document_store)
 
 # Load a local model or any of the QA models on
 # Hugging Face's model hub (https://huggingface.co/models)
-reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=False)
+reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
 
 # #### TransformersReader
diff --git a/tutorials/Tutorial2_Finetune_a_model_on_your_data.ipynb b/tutorials/Tutorial2_Finetune_a_model_on_your_data.ipynb
index a698a9e61..286f62985 100644
--- a/tutorials/Tutorial2_Finetune_a_model_on_your_data.ipynb
+++ b/tutorials/Tutorial2_Finetune_a_model_on_your_data.ipynb
@@ -15,6 +15,36 @@
     "This tutorial shows you how to fine-tune a pretrained model on your own dataset."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Prepare environment\n",
+    "\n",
+    "#### Colab: Enable the GPU runtime\n",
+    "Make sure you enable the GPU runtime to experience decent speed in this tutorial.\n",
+    "**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**\n",
+    "\n",
+    ""
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Make sure you have a GPU running\n",
+    "!nvidia-smi"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
   {
    "cell_type": "code",
    "execution_count": 1,
diff --git a/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.ipynb b/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.ipynb
index 4be77992f..b771e098a 100644
--- a/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.ipynb
+++ b/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.ipynb
@@ -15,6 +15,36 @@
     "If you are interested in more feature-rich Elasticsearch, then please refer to the Tutorial 1. "
    ]
   },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Prepare environment\n",
+    "\n",
+    "#### Colab: Enable the GPU runtime\n",
+    "Make sure you enable the GPU runtime to experience decent speed in this tutorial.\n",
+    "**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**\n",
+    "\n",
+    ""
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Make sure you have a GPU running\n",
+    "!nvidia-smi"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -224,7 +254,7 @@
     "# Load a local model or any of the QA models on\n",
     "# Hugging Face's model hub (https://huggingface.co/models)\n",
     "\n",
-    "reader = FARMReader(model_name_or_path=\"deepset/roberta-base-squad2\", use_gpu=False)"
+    "reader = FARMReader(model_name_or_path=\"deepset/roberta-base-squad2\", use_gpu=True)"
    ]
   },
   {
diff --git a/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.py b/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.py
index 5fde00c77..6bb7f93f6 100644
--- a/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.py
+++ b/tutorials/Tutorial3_Basic_QA_Pipeline_without_Elasticsearch.py
@@ -83,7 +83,7 @@ retriever = TfidfRetriever(document_store=document_store)
 #
 # Load a local model or any of the QA models on
 # Hugging Face's model hub (https://huggingface.co/models)
-reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=False)
+reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
 
 # #### TransformersReader
diff --git a/tutorials/Tutorial4_FAQ_style_QA.ipynb b/tutorials/Tutorial4_FAQ_style_QA.ipynb
index 2b2800d3c..89249d1de 100644
--- a/tutorials/Tutorial4_FAQ_style_QA.ipynb
+++ b/tutorials/Tutorial4_FAQ_style_QA.ipynb
@@ -20,9 +20,39 @@
     "\n",
     "- Generalizability: We can only answer questions that are similar to existing ones in FAQ\n",
     "\n",
-    "In some use cases, a combination of extractive QA and FAQ-style can also be an interesting option.\n"
+    "In some use cases, a combination of extractive QA and FAQ-style can also be an interesting option."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Prepare environment\n",
+    "\n",
+    "#### Colab: Enable the GPU runtime\n",
+    "Make sure you enable the GPU runtime to experience decent speed in this tutorial.\n",
+    "**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**\n",
+    "\n",
+    ""
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Make sure you have a GPU running\n",
+    "!nvidia-smi"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
   {
    "cell_type": "code",
    "execution_count": null,
diff --git a/tutorials/Tutorial5_Evaluation.ipynb b/tutorials/Tutorial5_Evaluation.ipynb
index d31c41068..cb5e94e5c 100644
--- a/tutorials/Tutorial5_Evaluation.ipynb
+++ b/tutorials/Tutorial5_Evaluation.ipynb
@@ -18,6 +18,36 @@
     "To be able to make a statement about the performance of a question-answering system, it is important to evalute it. Furthermore, evaluation allows to determine which parts of the system can be improved."
    ]
   },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Prepare environment\n",
+    "\n",
+    "#### Colab: Enable the GPU runtime\n",
+    "Make sure you enable the GPU runtime to experience decent speed in this tutorial.\n",
+    "**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**\n",
+    "\n",
+    ""
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Make sure you have a GPU running\n",
+    "!nvidia-smi"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
   {
    "cell_type": "markdown",
    "metadata": {
diff --git a/tutorials/Tutorial7_RAG_Generator.ipynb b/tutorials/Tutorial7_RAG_Generator.ipynb
index fde164cc8..29512c3eb 100644
--- a/tutorials/Tutorial7_RAG_Generator.ipynb
+++ b/tutorials/Tutorial7_RAG_Generator.ipynb
@@ -15,6 +15,36 @@
     "collapsed": false
    }
   },
+  {
+   "cell_type": "markdown",
+   "source": [
+    "### Prepare environment\n",
+    "\n",
+    "#### Colab: Enable the GPU runtime\n",
+    "Make sure you enable the GPU runtime to experience decent speed in this tutorial.\n",
+    "**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**\n",
+    "\n",
+    ""
+   ],
+   "metadata": {
+    "collapsed": false
+   }
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "outputs": [],
+   "source": [
+    "# Make sure you have a GPU running\n",
+    "!nvidia-smi"
+   ],
+   "metadata": {
+    "collapsed": false,
+    "pycharm": {
+     "name": "#%%\n"
+    }
+   }
+  },
   {
    "cell_type": "markdown",
    "source": [
@@ -155,14 +185,14 @@
     "    document_store=document_store,\n",
     "    query_embedding_model=\"facebook/dpr-question_encoder-single-nq-base\",\n",
     "    passage_embedding_model=\"facebook/dpr-ctx_encoder-single-nq-base\",\n",
-    "    use_gpu=False,\n",
+    "    use_gpu=True,\n",
     "    embed_title=True,\n",
     ")\n",
     "\n",
     "# Initialize RAG Generator\n",
     "generator = RAGenerator(\n",
     "    model_name_or_path=\"facebook/rag-token-nq\",\n",
-    "    use_gpu=False,\n",
+    "    use_gpu=True,\n",
     "    top_k_answers=1,\n",
     "    max_length=200,\n",
     "    min_length=2,\n",
diff --git a/tutorials/Tutorial7_RAG_Generator.py b/tutorials/Tutorial7_RAG_Generator.py
index 1d84592da..6c2c5c8dd 100644
--- a/tutorials/Tutorial7_RAG_Generator.py
+++ b/tutorials/Tutorial7_RAG_Generator.py
@@ -48,14 +48,14 @@ retriever = DensePassageRetriever(
     document_store=document_store,
     query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
     passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
-    use_gpu=False,
+    use_gpu=True,
     embed_title=True,
 )
 
 # Initialize RAG Generator
 generator = RAGenerator(
     model_name_or_path="facebook/rag-token-nq",
-    use_gpu=False,
+    use_gpu=True,
     top_k_answers=1,
     max_length=200,
     min_length=2,
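
A note on the use_gpu changes above: the flag is now hard-coded to True, so the scripts assume a CUDA device is present. Below is a minimal sketch of a more defensive variant that derives the flag from the detected hardware instead. It assumes the haystack.reader.farm import path (the actual imports sit outside this diff's context) and uses torch, which Haystack already depends on.

# Sketch only, not part of this diff: enable the GPU when one is visible,
# fall back to CPU otherwise.
import torch

from haystack.reader.farm import FARMReader

use_gpu = torch.cuda.is_available()  # True only when a CUDA device is detected

reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=use_gpu)

The same use_gpu value could also be passed to the DensePassageRetriever and RAGenerator calls in Tutorial 7, both of which accept the flag as shown in the hunks above.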