Fix another self.device/s typo (#1734)

* Fix yet another self.device(s) typo

* Add typing to 'initialize_device_settings' to try to prevent future issues

* Fix bug in Tutorial5

* Fix the same bug in the notebook

Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
This commit is contained in:
Sara Zan 2021-11-11 17:18:06 +01:00 committed by GitHub
parent 8082549663
commit 85a08d671a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 15 additions and 13 deletions

View File

@ -64,7 +64,7 @@ es_server = Popen(['elasticsearch-7.9.2/bin/elasticsearch'],
```python
from haystack.modeling.utils import initialize_device_settings
device, n_gpu = initialize_device_settings(use_cuda=True)
devices, n_gpu = initialize_device_settings(use_cuda=True)
```
@ -189,7 +189,7 @@ the model as the answer span (i.e. SQuAD style)
```python
# Evaluate Reader on its own
reader_eval_results = reader.eval(document_store=document_store, device=device, label_index=label_index, doc_index=doc_index)
reader_eval_results = reader.eval(document_store=document_store, device=devices[0], label_index=label_index, doc_index=doc_index)
# Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
#reader_eval_results = reader.eval_on_file("../data/nq", "nq_dev_subset_v2.json", device=device)

View File

@ -1,4 +1,4 @@
from typing import Any, Iterator, Tuple
from typing import Any, Iterator, Tuple, List
import logging
import os
@ -48,7 +48,7 @@ def set_all_seeds(seed: int, deterministic_cudnn: bool=False) -> None:
torch.backends.cudnn.benchmark = False
def initialize_device_settings(use_cuda: bool, local_rank: int = -1, multi_gpu: bool = True):
def initialize_device_settings(use_cuda: bool, local_rank: int = -1, multi_gpu: bool = True) -> Tuple[List[torch.device], int]:
"""
Returns a list of available devices.

View File

@ -8,9 +8,8 @@
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
"name": "python3",
"display_name": "Python 3.9.5 64-bit ('venv': venv)"
},
"language_info": {
"codemirror_mode": {
@ -22,7 +21,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.9"
"version": "3.9.5"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
@ -14400,6 +14399,9 @@
"collapsed": false
}
}
},
"interpreter": {
"hash": "01829e1eb67c4f5275a41f9336c92adbb77a108c8fc957dfe99d03e96dd1f349"
}
},
"cells": [
@ -14928,7 +14930,7 @@
"source": [
"from haystack.modeling.utils import initialize_device_settings\n",
"\n",
"device, n_gpu = initialize_device_settings(use_cuda=True)"
"devices, n_gpu = initialize_device_settings(use_cuda=True)"
],
"outputs": [
{
@ -15397,7 +15399,7 @@
"execution_count": 10,
"source": [
"# Evaluate Reader on its own\n",
"reader_eval_results = reader.eval(document_store=document_store, device=device, label_index=label_index, doc_index=doc_index)\n",
"reader_eval_results = reader.eval(document_store=document_store, device=devices[0], label_index=label_index, doc_index=doc_index)\n",
"# Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch\n",
"#reader_eval_results = reader.eval_on_file(\"../data/nq\", \"nq_dev_subset_v2.json\", device=device)\n",
"\n",

View File

@ -40,7 +40,7 @@ def tutorial5_evaluation():
# Code
##############################################
launch_es()
device, n_gpu = initialize_device_settings(use_cuda=True)
devices, n_gpu = initialize_device_settings(use_cuda=True)
# Download evaluation data, which is a subset of Natural Questions development set containing 50 documents
doc_dir = "../data/nq"
@ -109,7 +109,7 @@ def tutorial5_evaluation():
eval_retriever = EvalDocuments()
eval_reader = EvalAnswers(sas_model="sentence-transformers/paraphrase-multilingual-mpnet-base-v2")
## Evaluate Retriever on its own in closed domain fashion
# Evaluate Retriever on its own in closed domain fashion
if style == "retriever_closed":
retriever_eval_results = retriever.eval(top_k=10, label_index=label_index, doc_index=doc_index)
## Retriever Recall is the proportion of questions for which the correct document containing the answer is
@ -120,7 +120,7 @@ def tutorial5_evaluation():
# Evaluate Reader on its own in closed domain fashion (i.e. SQuAD style)
elif style == "reader_closed":
reader_eval_results = reader.eval(document_store=document_store, device=device, label_index=label_index, doc_index=doc_index)
reader_eval_results = reader.eval(document_store=document_store, device=devices[0], label_index=label_index, doc_index=doc_index)
# Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
#reader_eval_results = reader.eval_on_file("../data/nq", "nq_dev_subset_v2.json", device=device)