{
|
||
"nbformat": 4,
|
||
"nbformat_minor": 0,
|
||
"metadata": {
|
||
"kernelspec": {
|
||
"display_name": "Python 3",
|
||
"language": "python",
|
||
"name": "python3"
|
||
},
|
||
"language_info": {
|
||
"codemirror_mode": {
|
||
"name": "ipython",
|
||
"version": 2
|
||
},
|
||
"file_extension": ".py",
|
||
"mimetype": "text/x-python",
|
||
"name": "python",
|
||
"nbconvert_exporter": "python",
|
||
"pygments_lexer": "ipython2",
|
||
"version": "2.7.6"
|
||
},
|
||
"colab": {
|
||
"name": "Tutorial5_Evaluation.ipynb",
|
||
"provenance": []
|
||
},
|
||
"accelerator": "GPU",
|
||
"widgets": {
|
||
"application/vnd.jupyter.widget-state+json": {
|
||
"398e8dc496594a5f8e65daecc3ddad4a": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HBoxModel",
|
||
"state": {
|
||
"_view_name": "HBoxView",
|
||
"_dom_classes": [],
|
||
"_model_name": "HBoxModel",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"box_style": "",
|
||
"layout": "IPY_MODEL_ee436c1e0fb24802b5d0706299ce7e81",
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"children": [
|
||
"IPY_MODEL_9f5b31e32c5c4398a6eede32431ad55e",
|
||
"IPY_MODEL_479f015ad2e8412d96a34ddb59a054d1"
|
||
]
|
||
}
|
||
},
|
||
"ee436c1e0fb24802b5d0706299ce7e81": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"9f5b31e32c5c4398a6eede32431ad55e": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "FloatProgressModel",
|
||
"state": {
|
||
"_view_name": "ProgressView",
|
||
"style": "IPY_MODEL_eb25be6e7fcc49cfbf9815d69f849637",
|
||
"_dom_classes": [],
|
||
"description": "Downloading: 100%",
|
||
"_model_name": "FloatProgressModel",
|
||
"bar_style": "success",
|
||
"max": 559,
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": 559,
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"orientation": "horizontal",
|
||
"min": 0,
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_f7ebc8ee77094382a2b26f879cdf8613"
|
||
}
|
||
},
|
||
"479f015ad2e8412d96a34ddb59a054d1": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HTMLModel",
|
||
"state": {
|
||
"_view_name": "HTMLView",
|
||
"style": "IPY_MODEL_93e868a4b6384840b3d245391ee2915a",
|
||
"_dom_classes": [],
|
||
"description": "",
|
||
"_model_name": "HTMLModel",
|
||
"placeholder": "",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": " 559/559 [00:54<00:00, 10.3B/s]",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_e4a33236e1954bbf83e57ff15c40ff7f"
|
||
}
|
||
},
|
||
"eb25be6e7fcc49cfbf9815d69f849637": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "ProgressStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "ProgressStyleModel",
|
||
"description_width": "initial",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"bar_color": null,
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"f7ebc8ee77094382a2b26f879cdf8613": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"93e868a4b6384840b3d245391ee2915a": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "DescriptionStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "DescriptionStyleModel",
|
||
"description_width": "",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"e4a33236e1954bbf83e57ff15c40ff7f": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"18250ec6840147658d1e038138f8aba0": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HBoxModel",
|
||
"state": {
|
||
"_view_name": "HBoxView",
|
||
"_dom_classes": [],
|
||
"_model_name": "HBoxModel",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"box_style": "",
|
||
"layout": "IPY_MODEL_a8ba7f1398f2401b8d0139aabdf6ce1f",
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"children": [
|
||
"IPY_MODEL_49daa700f016433e88b492490b1a8d89",
|
||
"IPY_MODEL_08adb4accbe649fd9d21da2246c00d63"
|
||
]
|
||
}
|
||
},
|
||
"a8ba7f1398f2401b8d0139aabdf6ce1f": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"49daa700f016433e88b492490b1a8d89": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "FloatProgressModel",
|
||
"state": {
|
||
"_view_name": "ProgressView",
|
||
"style": "IPY_MODEL_c840113abcc44f84a88e6120fe198fba",
|
||
"_dom_classes": [],
|
||
"description": "Downloading: 100%",
|
||
"_model_name": "FloatProgressModel",
|
||
"bar_style": "success",
|
||
"max": 498637366,
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": 498637366,
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"orientation": "horizontal",
|
||
"min": 0,
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_47e0a33131714f31bb8b4a456b54d060"
|
||
}
|
||
},
|
||
"08adb4accbe649fd9d21da2246c00d63": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HTMLModel",
|
||
"state": {
|
||
"_view_name": "HTMLView",
|
||
"style": "IPY_MODEL_75ed8c01b4ed4c37a024a6c24036e35a",
|
||
"_dom_classes": [],
|
||
"description": "",
|
||
"_model_name": "HTMLModel",
|
||
"placeholder": "",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": " 499M/499M [00:41<00:00, 11.9MB/s]",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_7675cb36a32e49b1b14d301bf29fa668"
|
||
}
|
||
},
|
||
"c840113abcc44f84a88e6120fe198fba": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "ProgressStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "ProgressStyleModel",
|
||
"description_width": "initial",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"bar_color": null,
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"47e0a33131714f31bb8b4a456b54d060": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"75ed8c01b4ed4c37a024a6c24036e35a": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "DescriptionStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "DescriptionStyleModel",
|
||
"description_width": "",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"7675cb36a32e49b1b14d301bf29fa668": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"ec3c00e615164fb488ebfb51d8ac9d9e": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HBoxModel",
|
||
"state": {
|
||
"_view_name": "HBoxView",
|
||
"_dom_classes": [],
|
||
"_model_name": "HBoxModel",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"box_style": "",
|
||
"layout": "IPY_MODEL_9da0f76f26294b27982e080b8af6e28b",
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"children": [
|
||
"IPY_MODEL_ef78454fe89347ea8f6f3148e23440db",
|
||
"IPY_MODEL_bbbeec7c73c24059bffd602af5dcbd62"
|
||
]
|
||
}
|
||
},
|
||
"9da0f76f26294b27982e080b8af6e28b": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"ef78454fe89347ea8f6f3148e23440db": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "FloatProgressModel",
|
||
"state": {
|
||
"_view_name": "ProgressView",
|
||
"style": "IPY_MODEL_cb53b988f7df44d29ff7efcb0a236fce",
|
||
"_dom_classes": [],
|
||
"description": "Downloading: 100%",
|
||
"_model_name": "FloatProgressModel",
|
||
"bar_style": "success",
|
||
"max": 898822,
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": 898822,
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"orientation": "horizontal",
|
||
"min": 0,
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_8b9a20a51500438c9d6e84d9f71e8cbd"
|
||
}
|
||
},
|
||
"bbbeec7c73c24059bffd602af5dcbd62": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HTMLModel",
|
||
"state": {
|
||
"_view_name": "HTMLView",
|
||
"style": "IPY_MODEL_082e03f9f75c4546bd5b2326b33e8017",
|
||
"_dom_classes": [],
|
||
"description": "",
|
||
"_model_name": "HTMLModel",
|
||
"placeholder": "",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": " 899k/899k [00:02<00:00, 334kB/s]",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_9c78a3ef3d804f89b4102a5029415e35"
|
||
}
|
||
},
|
||
"cb53b988f7df44d29ff7efcb0a236fce": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "ProgressStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "ProgressStyleModel",
|
||
"description_width": "initial",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"bar_color": null,
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"8b9a20a51500438c9d6e84d9f71e8cbd": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"082e03f9f75c4546bd5b2326b33e8017": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "DescriptionStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "DescriptionStyleModel",
|
||
"description_width": "",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"9c78a3ef3d804f89b4102a5029415e35": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"9597bad322c34d02a0c10dd66b21813e": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HBoxModel",
|
||
"state": {
|
||
"_view_name": "HBoxView",
|
||
"_dom_classes": [],
|
||
"_model_name": "HBoxModel",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"box_style": "",
|
||
"layout": "IPY_MODEL_62661d74e0ea462cb6b8e574aa17dc2f",
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"children": [
|
||
"IPY_MODEL_71416028306340ee8987d703b506245b",
|
||
"IPY_MODEL_100352c2499749e48ed10f3dee4f569d"
|
||
]
|
||
}
|
||
},
|
||
"62661d74e0ea462cb6b8e574aa17dc2f": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"71416028306340ee8987d703b506245b": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "FloatProgressModel",
|
||
"state": {
|
||
"_view_name": "ProgressView",
|
||
"style": "IPY_MODEL_dd518bb2a8d141f58118e2cf7e140ffb",
|
||
"_dom_classes": [],
|
||
"description": "Downloading: 100%",
|
||
"_model_name": "FloatProgressModel",
|
||
"bar_style": "success",
|
||
"max": 456318,
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": 456318,
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"orientation": "horizontal",
|
||
"min": 0,
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_5957d73fcef34606ac8eac654b9584d4"
|
||
}
|
||
},
|
||
"100352c2499749e48ed10f3dee4f569d": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HTMLModel",
|
||
"state": {
|
||
"_view_name": "HTMLView",
|
||
"style": "IPY_MODEL_5c8d0662ec12422a83525292cc6a51ac",
|
||
"_dom_classes": [],
|
||
"description": "",
|
||
"_model_name": "HTMLModel",
|
||
"placeholder": "",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": " 456k/456k [00:04<00:00, 92.6kB/s]",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_45469f8ddb3b4df8aab3e7ca3fbe6921"
|
||
}
|
||
},
|
||
"dd518bb2a8d141f58118e2cf7e140ffb": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "ProgressStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "ProgressStyleModel",
|
||
"description_width": "initial",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"bar_color": null,
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"5957d73fcef34606ac8eac654b9584d4": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"5c8d0662ec12422a83525292cc6a51ac": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "DescriptionStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "DescriptionStyleModel",
|
||
"description_width": "",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"45469f8ddb3b4df8aab3e7ca3fbe6921": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"6c5fb91a498840a1a2cae914660f8684": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HBoxModel",
|
||
"state": {
|
||
"_view_name": "HBoxView",
|
||
"_dom_classes": [],
|
||
"_model_name": "HBoxModel",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"box_style": "",
|
||
"layout": "IPY_MODEL_1314e2ff61264f81acaf0042efd0cf5a",
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"children": [
|
||
"IPY_MODEL_fce574414e154590a4a642362db98421",
|
||
"IPY_MODEL_7937f15a8f874591bb693ce1546c7c5b"
|
||
]
|
||
}
|
||
},
|
||
"1314e2ff61264f81acaf0042efd0cf5a": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"fce574414e154590a4a642362db98421": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "FloatProgressModel",
|
||
"state": {
|
||
"_view_name": "ProgressView",
|
||
"style": "IPY_MODEL_706c079c7d484975ad69a91b3f95e9f3",
|
||
"_dom_classes": [],
|
||
"description": "Downloading: 100%",
|
||
"_model_name": "FloatProgressModel",
|
||
"bar_style": "success",
|
||
"max": 150,
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": 150,
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"orientation": "horizontal",
|
||
"min": 0,
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_4473fa7bf766425bb704bb9772c021fd"
|
||
}
|
||
},
|
||
"7937f15a8f874591bb693ce1546c7c5b": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HTMLModel",
|
||
"state": {
|
||
"_view_name": "HTMLView",
|
||
"style": "IPY_MODEL_ef088eda6fe04abe958059620da9b5d0",
|
||
"_dom_classes": [],
|
||
"description": "",
|
||
"_model_name": "HTMLModel",
|
||
"placeholder": "",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": " 150/150 [00:01<00:00, 89.2B/s]",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_e1819597e4d840b19cfa8aba00ef3ef1"
|
||
}
|
||
},
|
||
"706c079c7d484975ad69a91b3f95e9f3": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "ProgressStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "ProgressStyleModel",
|
||
"description_width": "initial",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"bar_color": null,
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"4473fa7bf766425bb704bb9772c021fd": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"ef088eda6fe04abe958059620da9b5d0": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "DescriptionStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "DescriptionStyleModel",
|
||
"description_width": "",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"e1819597e4d840b19cfa8aba00ef3ef1": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"19173b1d00314580a4f40efa0df4b174": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HBoxModel",
|
||
"state": {
|
||
"_view_name": "HBoxView",
|
||
"_dom_classes": [],
|
||
"_model_name": "HBoxModel",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"box_style": "",
|
||
"layout": "IPY_MODEL_d7ce404f2f784b0f93abf8f3ba592e18",
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"children": [
|
||
"IPY_MODEL_9da1585dcdd440fa8b2e8351ffeba348",
|
||
"IPY_MODEL_8880e647ca4a4796b571f5b983b541b4"
|
||
]
|
||
}
|
||
},
|
||
"d7ce404f2f784b0f93abf8f3ba592e18": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"9da1585dcdd440fa8b2e8351ffeba348": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "FloatProgressModel",
|
||
"state": {
|
||
"_view_name": "ProgressView",
|
||
"style": "IPY_MODEL_01d9cd6656494aef83a3947945c0acdd",
|
||
"_dom_classes": [],
|
||
"description": "Downloading: 100%",
|
||
"_model_name": "FloatProgressModel",
|
||
"bar_style": "success",
|
||
"max": 189,
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": 189,
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"orientation": "horizontal",
|
||
"min": 0,
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_b01601a11a324141b2d88190a87ef950"
|
||
}
|
||
},
|
||
"8880e647ca4a4796b571f5b983b541b4": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "HTMLModel",
|
||
"state": {
|
||
"_view_name": "HTMLView",
|
||
"style": "IPY_MODEL_c7db7b055dbe4361965ca4ab8c028536",
|
||
"_dom_classes": [],
|
||
"description": "",
|
||
"_model_name": "HTMLModel",
|
||
"placeholder": "",
|
||
"_view_module": "@jupyter-widgets/controls",
|
||
"_model_module_version": "1.5.0",
|
||
"value": " 189/189 [00:00<00:00, 1.94kB/s]",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.5.0",
|
||
"description_tooltip": null,
|
||
"_model_module": "@jupyter-widgets/controls",
|
||
"layout": "IPY_MODEL_c1852f69951e4d80aecb5a6c942b2ca6"
|
||
}
|
||
},
|
||
"01d9cd6656494aef83a3947945c0acdd": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "ProgressStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "ProgressStyleModel",
|
||
"description_width": "initial",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"bar_color": null,
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"b01601a11a324141b2d88190a87ef950": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
},
|
||
"c7db7b055dbe4361965ca4ab8c028536": {
|
||
"model_module": "@jupyter-widgets/controls",
|
||
"model_name": "DescriptionStyleModel",
|
||
"state": {
|
||
"_view_name": "StyleView",
|
||
"_model_name": "DescriptionStyleModel",
|
||
"description_width": "",
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"_model_module_version": "1.5.0",
|
||
"_view_count": null,
|
||
"_view_module_version": "1.2.0",
|
||
"_model_module": "@jupyter-widgets/controls"
|
||
}
|
||
},
|
||
"c1852f69951e4d80aecb5a6c942b2ca6": {
|
||
"model_module": "@jupyter-widgets/base",
|
||
"model_name": "LayoutModel",
|
||
"state": {
|
||
"_view_name": "LayoutView",
|
||
"grid_template_rows": null,
|
||
"right": null,
|
||
"justify_content": null,
|
||
"_view_module": "@jupyter-widgets/base",
|
||
"overflow": null,
|
||
"_model_module_version": "1.2.0",
|
||
"_view_count": null,
|
||
"flex_flow": null,
|
||
"width": null,
|
||
"min_width": null,
|
||
"border": null,
|
||
"align_items": null,
|
||
"bottom": null,
|
||
"_model_module": "@jupyter-widgets/base",
|
||
"top": null,
|
||
"grid_column": null,
|
||
"overflow_y": null,
|
||
"overflow_x": null,
|
||
"grid_auto_flow": null,
|
||
"grid_area": null,
|
||
"grid_template_columns": null,
|
||
"flex": null,
|
||
"_model_name": "LayoutModel",
|
||
"justify_items": null,
|
||
"grid_row": null,
|
||
"max_height": null,
|
||
"align_content": null,
|
||
"visibility": null,
|
||
"align_self": null,
|
||
"height": null,
|
||
"min_height": null,
|
||
"padding": null,
|
||
"grid_auto_rows": null,
|
||
"grid_gap": null,
|
||
"max_width": null,
|
||
"order": null,
|
||
"_view_module_version": "1.2.0",
|
||
"grid_template_areas": null,
|
||
"object_position": null,
|
||
"object_fit": null,
|
||
"grid_auto_columns": null,
|
||
"margin": null,
|
||
"display": null,
|
||
"left": null
|
||
}
|
||
}
|
||
}
|
||
}
|
||
},
|
||
"cells": [
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {
|
||
"collapsed": true,
|
||
"pycharm": {
|
||
"name": "#%% md\n"
|
||
},
|
||
"id": "MGSXn0USOhtu",
|
||
"colab_type": "text"
|
||
},
|
||
"source": [
"# Evaluation\n",
"To be able to make a statement about the performance of a question-answering system, it is important to evaluate it. Furthermore, evaluation makes it possible to determine which parts of the system can be improved."
|
||
]
|
||
},
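{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before building anything, here is a minimal sketch of what this tutorial works towards: evaluating the retriever and the reader that are initialized in the cells below. This preview is illustrative only and is not meant to be executed at this point; the exact keyword arguments of the `eval()` calls (for example `label_index`, `doc_index`, `top_k` or `device`) are assumptions and may differ between Haystack versions.\n",
"\n",
"```python\n",
"# Hypothetical preview -- document_store, retriever and reader are created further below.\n",
"\n",
"# Retriever evaluation: did the relevant documents end up in the top-k results (recall)?\n",
"retriever_results = retriever.eval()\n",
"\n",
"# Reader evaluation: exact-match / F1 of the extracted answers on the indexed eval data.\n",
"reader_results = reader.eval(document_store=document_store, device=device)\n",
"\n",
"print(retriever_results)\n",
"print(reader_results)\n",
"```"
]
},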
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {
|
||
"collapsed": false,
|
||
"id": "E6H_7lAmOht8",
|
||
"colab_type": "text"
|
||
},
|
||
"source": [
|
||
"## Start an Elasticsearch server\n",
"You can start Elasticsearch on your local machine using Docker. If Docker is not readily available in your environment (e.g., in Colab notebooks), you can manually download and run Elasticsearch from source."
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "vgmFOp82Oht_",
|
||
"colab_type": "code",
|
||
"colab": {}
|
||
},
|
||
"source": [
|
||
"# Recommended: Start Elasticsearch using Docker\n",
|
||
"#! docker run -d -p 9200:9200 -e \"discovery.type=single-node\" elasticsearch:7.6.2"
|
||
],
|
||
"execution_count": 0,
|
||
"outputs": []
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "tNoaWcDKOhuL",
|
||
"colab_type": "code",
|
||
"colab": {}
|
||
},
|
||
"source": [
|
||
"# In Colab / No Docker environments: Start Elasticsearch from source\n",
|
||
"! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.6.2-linux-x86_64.tar.gz -q\n",
|
||
"! tar -xzf elasticsearch-7.6.2-linux-x86_64.tar.gz\n",
|
||
"! chown -R daemon:daemon elasticsearch-7.6.2\n",
|
||
"\n",
|
||
"import os\n",
|
||
"from subprocess import Popen, PIPE, STDOUT\n",
|
||
"es_server = Popen(['elasticsearch-7.6.2/bin/elasticsearch'],\n",
|
||
" stdout=PIPE, stderr=STDOUT,\n",
" preexec_fn=lambda: os.setuid(1)  # run as the 'daemon' user (uid 1), since Elasticsearch refuses to start as root\n",
|
||
" )\n",
|
||
"# wait until ES has started\n",
|
||
"! sleep 30"
|
||
],
|
||
"execution_count": 0,
|
||
"outputs": []
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "w7zkLhqXOhuX",
|
||
"colab_type": "code",
|
||
"colab": {}
|
||
},
|
||
"source": [
|
||
"# install haystack\n",
"! pip install git+https://github.com/deepset-ai/haystack.git@ef9e4f4467a2e265bad72b048a1a3186e40969b1"
|
||
],
|
||
"execution_count": 0,
|
||
"outputs": []
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "w0MHgxrYOhur",
|
||
"colab_type": "code",
|
||
"colab": {
|
||
"base_uri": "https://localhost:8080/",
|
||
"height": 54
|
||
},
|
||
"outputId": "9e530bf3-44b1-4ea1-86e2-8be0bb9163ad"
|
||
},
|
||
"source": [
|
||
"from farm.utils import initialize_device_settings\n",
|
||
"\n",
|
||
"device, n_gpu = initialize_device_settings(use_cuda=True)"
|
||
],
|
||
"execution_count": 6,
|
||
"outputs": [
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:11:23 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n"
|
||
],
|
||
"name": "stderr"
|
||
}
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "tTXxr6TAOhuz",
|
||
"colab_type": "code",
|
||
"colab": {
|
||
"base_uri": "https://localhost:8080/",
|
||
"height": 87
|
||
},
|
||
"outputId": "99a4e32b-e0ec-4c94-dab3-1a09c53d4dc1"
|
||
},
|
||
"source": [
|
||
"\n",
|
||
"from haystack.indexing.utils import fetch_archive_from_http\n",
|
||
"\n",
"# Download evaluation data, which is a subset of the Natural Questions development set containing 50 documents\n",
|
||
"doc_dir = \"../data/nq\"\n",
|
||
"s3_url = \"https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/nq_dev_subset.json.zip\"\n",
|
||
"fetch_archive_from_http(url=s3_url, output_dir=doc_dir)"
|
||
],
|
||
"execution_count": 7,
|
||
"outputs": [
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:11:26 - INFO - haystack.indexing.io - Fetching from https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/nq_dev_subset.json.zip to `../data/nq`\n",
|
||
"100%|██████████| 621983/621983 [00:01<00:00, 477723.47B/s]\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "execute_result",
|
||
"data": {
|
||
"text/plain": [
|
||
"True"
|
||
]
|
||
},
|
||
"metadata": {
|
||
"tags": []
|
||
},
|
||
"execution_count": 7
|
||
}
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "B_NEtezLOhu5",
|
||
"colab_type": "code",
|
||
"colab": {}
|
||
},
|
||
"source": [
|
||
"# Connect to Elasticsearch\n",
|
||
"from haystack.database.elasticsearch import ElasticsearchDocumentStore\n",
|
||
"\n",
|
||
"document_store = ElasticsearchDocumentStore(host=\"localhost\", username=\"\", password=\"\", create_index=False)"
|
||
],
|
||
"execution_count": 0,
|
||
"outputs": []
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "bRFsQUAJOhu_",
|
||
"colab_type": "code",
|
||
"colab": {
|
||
"base_uri": "https://localhost:8080/",
|
||
"height": 71
|
||
},
|
||
"outputId": "56b84800-c524-4418-9664-e2720b66a1af"
|
||
},
|
||
"source": [
|
||
"# Add evaluation data to Elasticsearch database\n",
|
||
"document_store.add_eval_data(\"../data/nq/nq_dev_subset.json\")"
|
||
],
|
||
"execution_count": 9,
|
||
"outputs": [
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:11:30 - INFO - elasticsearch - POST http://localhost:9200/_bulk [status:200 request:1.613s]\n",
|
||
"06/05/2020 16:11:31 - INFO - elasticsearch - POST http://localhost:9200/_bulk [status:200 request:0.453s]\n"
|
||
],
|
||
"name": "stderr"
|
||
}
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "markdown",
|
||
"metadata": {
|
||
"collapsed": false,
|
||
"pycharm": {
|
||
"name": "#%% md\n"
|
||
},
|
||
"id": "gy8YwmSYOhvE",
|
||
"colab_type": "text"
|
||
},
|
||
"source": [
"## Initialize the components of the QA system"
|
||
]
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "JkhaPMIJOhvF",
|
||
"colab_type": "code",
|
||
"colab": {}
|
||
},
|
||
"source": [
|
||
"# Initialize Retriever\n",
|
||
"from haystack.retriever.elasticsearch import ElasticsearchRetriever\n",
|
||
"\n",
|
||
"retriever = ElasticsearchRetriever(document_store=document_store)"
|
||
],
|
||
"execution_count": 0,
|
||
"outputs": []
|
||
},
|
||
{
|
||
"cell_type": "code",
|
||
"metadata": {
|
||
"pycharm": {
|
||
"name": "#%%\n"
|
||
},
|
||
"id": "cW3Ypn_gOhvK",
|
||
"colab_type": "code",
|
||
"colab": {
|
||
"base_uri": "https://localhost:8080/",
|
||
"height": 725,
|
||
"referenced_widgets": [
|
||
"398e8dc496594a5f8e65daecc3ddad4a",
|
||
"ee436c1e0fb24802b5d0706299ce7e81",
|
||
"9f5b31e32c5c4398a6eede32431ad55e",
|
||
"479f015ad2e8412d96a34ddb59a054d1",
|
||
"eb25be6e7fcc49cfbf9815d69f849637",
|
||
"f7ebc8ee77094382a2b26f879cdf8613",
|
||
"93e868a4b6384840b3d245391ee2915a",
|
||
"e4a33236e1954bbf83e57ff15c40ff7f",
|
||
"18250ec6840147658d1e038138f8aba0",
|
||
"a8ba7f1398f2401b8d0139aabdf6ce1f",
|
||
"49daa700f016433e88b492490b1a8d89",
|
||
"08adb4accbe649fd9d21da2246c00d63",
|
||
"c840113abcc44f84a88e6120fe198fba",
|
||
"47e0a33131714f31bb8b4a456b54d060",
|
||
"75ed8c01b4ed4c37a024a6c24036e35a",
|
||
"7675cb36a32e49b1b14d301bf29fa668",
|
||
"ec3c00e615164fb488ebfb51d8ac9d9e",
|
||
"9da0f76f26294b27982e080b8af6e28b",
|
||
"ef78454fe89347ea8f6f3148e23440db",
|
||
"bbbeec7c73c24059bffd602af5dcbd62",
|
||
"cb53b988f7df44d29ff7efcb0a236fce",
|
||
"8b9a20a51500438c9d6e84d9f71e8cbd",
|
||
"082e03f9f75c4546bd5b2326b33e8017",
|
||
"9c78a3ef3d804f89b4102a5029415e35",
|
||
"9597bad322c34d02a0c10dd66b21813e",
|
||
"62661d74e0ea462cb6b8e574aa17dc2f",
|
||
"71416028306340ee8987d703b506245b",
|
||
"100352c2499749e48ed10f3dee4f569d",
|
||
"dd518bb2a8d141f58118e2cf7e140ffb",
|
||
"5957d73fcef34606ac8eac654b9584d4",
|
||
"5c8d0662ec12422a83525292cc6a51ac",
|
||
"45469f8ddb3b4df8aab3e7ca3fbe6921",
|
||
"6c5fb91a498840a1a2cae914660f8684",
|
||
"1314e2ff61264f81acaf0042efd0cf5a",
|
||
"fce574414e154590a4a642362db98421",
|
||
"7937f15a8f874591bb693ce1546c7c5b",
|
||
"706c079c7d484975ad69a91b3f95e9f3",
|
||
"4473fa7bf766425bb704bb9772c021fd",
|
||
"ef088eda6fe04abe958059620da9b5d0",
|
||
"e1819597e4d840b19cfa8aba00ef3ef1",
|
||
"19173b1d00314580a4f40efa0df4b174",
|
||
"d7ce404f2f784b0f93abf8f3ba592e18",
|
||
"9da1585dcdd440fa8b2e8351ffeba348",
|
||
"8880e647ca4a4796b571f5b983b541b4",
|
||
"01d9cd6656494aef83a3947945c0acdd",
|
||
"b01601a11a324141b2d88190a87ef950",
|
||
"c7db7b055dbe4361965ca4ab8c028536",
|
||
"c1852f69951e4d80aecb5a6c942b2ca6"
|
||
]
|
||
},
|
||
"outputId": "89ad5598-1017-499f-c986-72bba2a3a6cb"
|
||
},
|
||
"source": [
|
||
"# Initialize Reader\n",
|
||
"from haystack.reader.farm import FARMReader\n",
|
||
"\n",
|
||
"reader = FARMReader(\"deepset/roberta-base-squad2\")"
|
||
],
|
||
"execution_count": 11,
|
||
"outputs": [
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:11:31 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n",
|
||
"06/05/2020 16:11:31 - INFO - farm.infer - Could not find `deepset/roberta-base-squad2` locally. Try to download from model hub ...\n",
|
||
"06/05/2020 16:11:32 - INFO - filelock - Lock 140574308859240 acquired on /root/.cache/torch/transformers/f7d4b9379a9c487fa03ccf3d8e00058faa9d664cf01fc03409138246f48760da.c6288e0f84ec797ba5c525c923a5bbc479b47c761aded9734a5f6a473b044c8d.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "display_data",
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "398e8dc496594a5f8e65daecc3ddad4a",
|
||
"version_minor": 0,
|
||
"version_major": 2
|
||
},
|
||
"text/plain": [
|
||
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=559.0, style=ProgressStyle(description_…"
|
||
]
|
||
},
|
||
"metadata": {
|
||
"tags": []
|
||
}
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:11:33 - INFO - filelock - Lock 140574308859240 released on /root/.cache/torch/transformers/f7d4b9379a9c487fa03ccf3d8e00058faa9d664cf01fc03409138246f48760da.c6288e0f84ec797ba5c525c923a5bbc479b47c761aded9734a5f6a473b044c8d.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"\n"
|
||
],
|
||
"name": "stdout"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:11:33 - INFO - filelock - Lock 140574717619952 acquired on /root/.cache/torch/transformers/5600193782e3a4c414cddf8f0e52bf650d4d6c4c022094532d275ee730cef8f5.d045adc91e17ecdf7dc3eeff4c875df94bdf2eb749d72b3ae47ae93f8e85213c.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "display_data",
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "18250ec6840147658d1e038138f8aba0",
|
||
"version_minor": 0,
|
||
"version_major": 2
|
||
},
|
||
"text/plain": [
|
||
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=498637366.0, style=ProgressStyle(descri…"
|
||
]
|
||
},
|
||
"metadata": {
|
||
"tags": []
|
||
}
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:16 - INFO - filelock - Lock 140574717619952 released on /root/.cache/torch/transformers/5600193782e3a4c414cddf8f0e52bf650d4d6c4c022094532d275ee730cef8f5.d045adc91e17ecdf7dc3eeff4c875df94bdf2eb749d72b3ae47ae93f8e85213c.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"\n"
|
||
],
|
||
"name": "stdout"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:20 - WARNING - farm.modeling.language_model - Could not automatically detect from language model name what language it is. \n",
|
||
"\t We guess it's an *ENGLISH* model ... \n",
|
||
"\t If not: Init the language model by supplying the 'language' param.\n",
|
||
"06/05/2020 16:12:27 - WARNING - farm.modeling.prediction_head - Some unused parameters are passed to the QuestionAnsweringHead. Might not be a problem. Params: {\"loss_ignore_index\": -1}\n",
|
||
"06/05/2020 16:12:37 - INFO - filelock - Lock 140574306905112 acquired on /root/.cache/torch/transformers/1e3af82648d7190d959a9d76d727ef629b1ca51b3da6ad04039122453cb56307.6a4061e8fc00057d21d80413635a86fdcf55b6e7594ad9e25257d2f99a02f4be.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "display_data",
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "ec3c00e615164fb488ebfb51d8ac9d9e",
|
||
"version_minor": 0,
|
||
"version_major": 2
|
||
},
|
||
"text/plain": [
|
||
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=898822.0, style=ProgressStyle(descripti…"
|
||
]
|
||
},
|
||
"metadata": {
|
||
"tags": []
|
||
}
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:39 - INFO - filelock - Lock 140574306905112 released on /root/.cache/torch/transformers/1e3af82648d7190d959a9d76d727ef629b1ca51b3da6ad04039122453cb56307.6a4061e8fc00057d21d80413635a86fdcf55b6e7594ad9e25257d2f99a02f4be.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"\n"
|
||
],
|
||
"name": "stdout"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:40 - INFO - filelock - Lock 140574306905112 acquired on /root/.cache/torch/transformers/b901c69e8e7da4a24c635ad81d016d274f174261f4f5c144e43f4b00e242c3b0.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "display_data",
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "9597bad322c34d02a0c10dd66b21813e",
|
||
"version_minor": 0,
|
||
"version_major": 2
|
||
},
|
||
"text/plain": [
|
||
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=456318.0, style=ProgressStyle(descripti…"
|
||
]
|
||
},
|
||
"metadata": {
|
||
"tags": []
|
||
}
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:42 - INFO - filelock - Lock 140574306905112 released on /root/.cache/torch/transformers/b901c69e8e7da4a24c635ad81d016d274f174261f4f5c144e43f4b00e242c3b0.70bec105b4158ed9a1747fea67a43f5dee97855c64d62b6ec3742f4cfdb5feda.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"\n"
|
||
],
|
||
"name": "stdout"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:43 - INFO - filelock - Lock 140574306905112 acquired on /root/.cache/torch/transformers/2d9b03b59a8af464bf4238025a3cf0e5a340b9d0ba77400011e23c130b452510.16f949018cf247a2ea7465a74ca9a292212875e5fd72f969e0807011e7f192e4.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "display_data",
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "6c5fb91a498840a1a2cae914660f8684",
|
||
"version_minor": 0,
|
||
"version_major": 2
|
||
},
|
||
"text/plain": [
|
||
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=150.0, style=ProgressStyle(description_…"
|
||
]
|
||
},
|
||
"metadata": {
|
||
"tags": []
|
||
}
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:44 - INFO - filelock - Lock 140574306905112 released on /root/.cache/torch/transformers/2d9b03b59a8af464bf4238025a3cf0e5a340b9d0ba77400011e23c130b452510.16f949018cf247a2ea7465a74ca9a292212875e5fd72f969e0807011e7f192e4.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"\n"
|
||
],
|
||
"name": "stdout"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:45 - INFO - filelock - Lock 140574306905112 acquired on /root/.cache/torch/transformers/507984f2e28c7dfed5db9a20acd68beb969c7f2833abc9e582e967fa0291f3dc.100c88dbe27dbd73822c575274ade4eb2427596ac56e96769249b7512341654d.lock\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "display_data",
|
||
"data": {
|
||
"application/vnd.jupyter.widget-view+json": {
|
||
"model_id": "19173b1d00314580a4f40efa0df4b174",
|
||
"version_minor": 0,
|
||
"version_major": 2
|
||
},
|
||
"text/plain": [
|
||
"HBox(children=(FloatProgress(value=0.0, description='Downloading', max=189.0, style=ProgressStyle(description_…"
|
||
]
|
||
},
|
||
"metadata": {
|
||
"tags": []
|
||
}
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:46 - INFO - filelock - Lock 140574306905112 released on /root/.cache/torch/transformers/507984f2e28c7dfed5db9a20acd68beb969c7f2833abc9e582e967fa0291f3dc.100c88dbe27dbd73822c575274ade4eb2427596ac56e96769249b7512341654d.lock\n",
|
||
"06/05/2020 16:12:46 - INFO - farm.utils - device: cuda n_gpu: 1, distributed training: False, automatic mixed precision training: None\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"\n"
|
||
],
|
||
"name": "stdout"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:46 - INFO - farm.infer - Got ya 1 parallel workers to do inference ...\n",
|
||
"06/05/2020 16:12:46 - INFO - farm.infer - 0 \n",
|
||
"06/05/2020 16:12:46 - INFO - farm.infer - /w\\\n",
|
||
"06/05/2020 16:12:46 - INFO - farm.infer - /'\\\n",
|
||
"06/05/2020 16:12:46 - INFO - farm.infer - \n"
|
||
],
|
||
"name": "stderr"
|
||
}
|
||
]
|
||
},
{
"cell_type": "code",
"metadata": {
"pycharm": {
"name": "#%%\n"
},
"id": "gOs7qy4xOhvO",
"colab_type": "code",
"colab": {}
},
"source": [
"# Initialize the Finder, which sticks together the Reader and the Retriever\n",
"from haystack.finder import Finder\n",
"\n",
"finder = Finder(reader, retriever)"
],
"execution_count": 0,
"outputs": []
},
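{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before running the evaluations below, you can optionally sanity-check the assembled Finder on a single query. The next cell is only a sketch: it assumes the Finder's `get_answers` method and uses a placeholder question, which you would replace with a real question from your evaluation set."
]
},
{
"cell_type": "code",
"metadata": {},
"source": [
"# Optional sanity check (sketch, assuming Finder.get_answers is available):\n",
"# replace the placeholder question with one from your evaluation set, then uncomment.\n",
"# prediction = finder.get_answers(question=\"<a question from your eval set>\", top_k_retriever=10, top_k_reader=3)\n",
"# print(prediction[\"answers\"][0][\"answer\"])"
],
"execution_count": 0,
"outputs": []
},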
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
},
"id": "qwkBgzh5OhvR",
"colab_type": "text"
},
"source": [
"## Evaluation of Retriever"
]
},
{
"cell_type": "code",
"metadata": {
"pycharm": {
"name": "#%%\n"
},
"id": "YzvLhnx3OhvS",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
},
"outputId": "1d45f072-0ae0-4864-8ccc-aa12303a8d04"
},
"source": [
"# Evaluate Retriever on its own\n",
"retriever_eval_results = retriever.eval()\n",
"\n",
"## Retriever Recall is the proportion of questions for which the correct document containing the answer is\n",
"## among the retrieved documents\n",
"print(\"Retriever Recall:\", retriever_eval_results[\"recall\"])\n",
"## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank\n",
"print(\"Retriever Mean Avg Precision:\", retriever_eval_results[\"map\"])"
],
"execution_count": 13,
"outputs": [
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.170s]\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.069s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.022s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.021s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.019s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.027s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.026s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.024s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.014s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.016s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.016s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.014s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.014s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.016s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.019s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.018s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:12:47 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.007s]\n",
|
||
"06/05/2020 16:12:47 - INFO - haystack.retriever.elasticsearch - For 54 out of 54 questions (100.00%), the answer was in the top-10 candidate passages selected by the retriever.\n"
],
"name": "stderr"
},
{
"output_type": "stream",
"text": [
"Retriever Recall: 1.0\n",
"Retriever Mean Avg Precision: 0.9367283950617283\n"
],
"name": "stdout"
}
]
},
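{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small worked example of how these two metrics differ (standard definitions, one relevant document per question): if the correct document is ranked 1st for one question and 3rd for another, recall within the top 10 is 2/2 = 1.0, while mean average precision is (1/1 + 1/3) / 2 = 0.67. Recall only asks whether the correct document was retrieved at all; MAP also rewards ranking it near the top."
]
},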
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
},
"id": "fjZRnB6bOhvW",
"colab_type": "text"
},
"source": [
"## Evaluation of Reader"
]
},
{
"cell_type": "code",
"metadata": {
"pycharm": {
"name": "#%%\n"
},
"id": "Lgsgf4KaOhvY",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 203
},
"outputId": "24d3755e-bf2e-4396-f1a2-59c925cc54d3"
},
"source": [
"# Evaluate Reader on its own\n",
"reader_eval_results = reader.eval(document_store=document_store, device=device)\n",
"\n",
"# Evaluation of Reader can also be done directly on a SQuAD-formatted file\n",
"# without passing the data to Elasticsearch\n",
"#reader_eval_results = reader.eval_on_file(\"../data/natural_questions\", \"dev_subset.json\", device=device)\n",
"\n",
"## Reader Top-N-Recall is the proportion of questions for which at least one of the top N predicted answers overlaps with the correct answer\n",
"print(\"Reader Top-N-Recall:\", reader_eval_results[\"top_n_recall\"])\n",
"## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer\n",
"print(\"Reader Exact Match:\", reader_eval_results[\"EM\"])\n",
"## Reader F1-Score is the average overlap between the predicted answers and the correct answers\n",
"print(\"Reader F1-Score:\", reader_eval_results[\"f1\"])"
],
"execution_count": 14,
"outputs": [
{
"output_type": "stream",
"text": [
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.022s]\n",
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.005s]\n",
"06/05/2020 16:12:47 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.003s]\n",
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search?scroll=5m&size=1000 [status:200 request:0.039s]\n",
"06/05/2020 16:12:47 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.010s]\n",
"06/05/2020 16:12:47 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.003s]\n",
"Evaluating: 100%|██████████| 78/78 [00:31<00:00, 2.50it/s]\n"
],
"name": "stderr"
},
{
"output_type": "stream",
"text": [
"Reader Top-N-Recall: 0.6111111111111112\n",
"Reader Exact Match: 0.4074074074074074\n",
"Reader F1-Score: 0.4340132402934336\n"
],
"name": "stdout"
}
]
},
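{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick illustration of EM vs. F1 (standard SQuAD-style token overlap): if the correct answer is \"Albert Einstein\" and the Reader predicts \"Einstein\", Exact Match is 0, but token precision is 1/1, token recall is 1/2, and F1 = 2 * (1.0 * 0.5) / (1.0 + 0.5) = 0.67. F1 therefore gives partial credit for answers that overlap the gold span without matching it exactly."
]
},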
{
"cell_type": "markdown",
"metadata": {
"collapsed": false,
"pycharm": {
"name": "#%% md\n"
},
"id": "7i84KXONOhvc",
"colab_type": "text"
},
"source": [
"## Evaluation of Finder"
]
},
{
"cell_type": "code",
"metadata": {
"pycharm": {
"name": "#%%\n"
},
"id": "yLpMHAexOhvd",
"colab_type": "code",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 1000
},
"outputId": "fd74be7d-5c8e-4eb9-a653-062427b74347"
},
"source": [
"# Evaluate the combination of Reader and Retriever through the Finder\n",
"finder_eval_results = finder.eval()\n",
"\n",
"print(\"\\n___Retriever Metrics in Finder___\")\n",
"print(\"Retriever Recall:\", finder_eval_results[\"retriever_recall\"])\n",
"print(\"Retriever Mean Avg Precision:\", finder_eval_results[\"retriever_map\"])\n",
"\n",
"# The Reader is evaluated only on those questions for which the correct document is among the retrieved ones\n",
"print(\"\\n___Reader Metrics in Finder___\")\n",
"print(\"Reader Top-1 accuracy:\", finder_eval_results[\"reader_top1_accuracy\"])\n",
"print(\"Reader Top-1 accuracy (has answer):\", finder_eval_results[\"reader_top1_accuracy_has_answer\"])\n",
"print(\"Reader Top-k accuracy:\", finder_eval_results[\"reader_top_k_accuracy\"])\n",
"print(\"Reader Top-k accuracy (has answer):\", finder_eval_results[\"reader_topk_accuracy_has_answer\"])\n",
"print(\"Reader Top-1 EM:\", finder_eval_results[\"reader_top1_em\"])\n",
"print(\"Reader Top-1 EM (has answer):\", finder_eval_results[\"reader_top1_em_has_answer\"])\n",
"print(\"Reader Top-k EM:\", finder_eval_results[\"reader_topk_em\"])\n",
"print(\"Reader Top-k EM (has answer):\", finder_eval_results[\"reader_topk_em_has_answer\"])\n",
"print(\"Reader Top-1 F1:\", finder_eval_results[\"reader_top1_f1\"])\n",
"print(\"Reader Top-1 F1 (has answer):\", finder_eval_results[\"reader_top1_f1_has_answer\"])\n",
"print(\"Reader Top-k F1:\", finder_eval_results[\"reader_topk_f1\"])\n",
"print(\"Reader Top-k F1 (has answer):\", finder_eval_results[\"reader_topk_f1_has_answer\"])\n",
"print(\"Reader Top-1 no-answer accuracy:\", finder_eval_results[\"reader_top1_no_answer_accuracy\"])\n",
"print(\"Reader Top-k no-answer accuracy:\", finder_eval_results[\"reader_topk_no_answer_accuracy\"])\n",
"\n",
"# Time measurements\n",
"print(\"\\n___Time Measurements___\")\n",
"print(\"Total retrieve time:\", finder_eval_results[\"total_retrieve_time\"])\n",
"print(\"Avg retrieve time per question:\", finder_eval_results[\"avg_retrieve_time\"])\n",
"print(\"Total reader time:\", finder_eval_results[\"total_reader_time\"])\n",
"print(\"Avg read time per question:\", finder_eval_results[\"avg_reader_time\"])\n",
"print(\"Total Finder time:\", finder_eval_results[\"total_finder_time\"])"
],
"execution_count": 15,
"outputs": [
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"06/05/2020 16:13:44 - INFO - elasticsearch - POST http://localhost:9200/feedback/_search?scroll=5m&size=1000 [status:200 request:0.014s]\n",
|
||
"06/05/2020 16:13:44 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:13:44 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.021s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.016s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.016s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.038s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.019s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.014s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.017s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.012s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.019s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.008s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.016s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.014s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:45 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.015s]\n",
|
||
"06/05/2020 16:13:45 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.009s]\n",
|
||
"06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.013s]\n",
|
||
"06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.011s]\n",
|
||
"06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/eval_document/_search [status:200 request:0.010s]\n",
|
||
"06/05/2020 16:13:46 - INFO - haystack.retriever.elasticsearch - Got 10 candidates from retriever\n",
|
||
"06/05/2020 16:13:46 - INFO - elasticsearch - POST http://localhost:9200/_search/scroll [status:200 request:0.004s]\n",
|
||
"06/05/2020 16:13:46 - INFO - elasticsearch - DELETE http://localhost:9200/_search/scroll [status:200 request:0.002s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.88 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.11 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.92 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.46 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.81 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.82 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.35 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.96 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.40 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.37 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.04 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.12 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.03 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.30 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.59 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.48 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.48 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.99 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.86 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.76 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.90 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.93 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.86 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.77 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.83 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.18 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.20 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.23 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.88 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.13 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 15.60 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.69 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.08 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.36 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.90 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.14 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.33 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.84 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.14 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.39 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.56 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.96 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.07 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.35 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.48 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.76 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.23 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.57 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.48 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.74 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.07 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.16 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.98 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.11 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.68 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.64 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.53 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.64 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.32 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 16.33 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.06 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.18 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.91 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.89 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.36 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.11 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 7.83 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.66 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.21 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.13 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.22 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.12 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.45 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.67 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.64 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.07 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.49 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.64 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.51 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.25 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.11 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.62 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.53 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.05 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.88 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.43 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.68 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.97 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.70 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.67 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.52 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.17 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.42 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.25 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.84 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.35 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.52 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.57 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.97 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.17 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.58 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 1.86 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.70 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.50 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.02 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 1.86 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.10 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.92 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.78 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.77 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.43 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 2.20 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 1.64 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.94 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.65 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:01<00:00, 1.96 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.67 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.13 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.21 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.10 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.76 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.62 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.58 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.04 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.81 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.96 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.96 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.51 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.61 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.58 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.74 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.36 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.18 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.94 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.53 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.07 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.34 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 6.86 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 5.24 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 4.07 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.69 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 9/9 [00:04<00:00, 2.23 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.16 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.78 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.39 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 1.96 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.06 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.11 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.52 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.40 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.52 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:01<00:00, 2.36 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.60 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.36 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.65 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.90 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 4.13 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 2.95 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.84 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:01<00:00, 2.69 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 2.72 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.43 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 5/5 [00:02<00:00, 2.38 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 9/9 [00:03<00:00, 2.30 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.07 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 1/1 [00:00<00:00, 3.42 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 3/3 [00:00<00:00, 3.48 Batches/s]\n",
|
||
"Inferencing Samples: 100%|██████████| 2/2 [00:00<00:00, 3.65 Batches/s]\n",
"06/05/2020 16:25:44 - INFO - haystack.finder - 37 out of 54 questions were correctly answered (68.52%).\n",
|
||
"06/05/2020 16:25:44 - INFO - haystack.finder - 0 questions could not be answered due to the retriever.\n",
|
||
"06/05/2020 16:25:44 - INFO - haystack.finder - 17 questions could not be answered due to the reader.\n"
|
||
],
|
||
"name": "stderr"
|
||
},
|
||
{
|
||
"output_type": "stream",
|
||
"text": [
|
||
"\n",
|
||
"___Retriever Metrics in Finder___\n",
|
||
"Retriever Recall: 1.0\n",
|
||
"Retriever Mean Avg Precision: 0.9367283950617283\n",
|
||
"\n",
|
||
"___Reader Metrics in Finder___\n",
|
||
"Reader Top-1 accuracy: 0.3333333333333333\n",
|
||
"Reader Top-1 accuracy (has answer): 0.12\n",
|
||
"Reader Top-k accuracy: 0.6851851851851852\n",
|
||
"Reader Top-k accuracy (has answer): 0.36\n",
|
||
"Reader Top-1 EM: 0.2777777777777778\n",
|
||
"Reader Top-1 EM (has answer): 0.0\n",
|
||
"Reader Top-k EM: 0.5370370370370371\n",
|
||
"Reader Top-k EM (has answer): 0.04\n",
|
||
"Reader Top-1 F1: 0.3891157185894027\n",
|
||
"Reader Top-1 F1 (has answer): 0.24048995215311006\n",
|
||
"Reader Top-k F1: 0.6400575387839845\n",
|
||
"Reader Top-k F1 (has answer): 0.2625242837734066\n",
|
||
"Reader Top-1 no-answer accuracy: 0.5172413793103449\n",
|
||
"Reader Top-k no-answer accuracy: 0.9655172413793104\n",
|
||
"\n",
|
||
"___Time Measurements___\n",
|
||
"Total retrieve time: 1.1358914375305176\n",
|
||
"Avg retrieve time per question: 0.02049741480085585\n",
|
||
"Total reader timer: 717.9651441574097\n",
|
||
"Avg read time per question: 13.295561874354327\n",
|
||
"Total Finder time: 719.1010527610779\n"
|
||
],
|
||
"name": "stdout"
|
||
}
|
||
]
|
||
},
|
||
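  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The next cell is a small editorial sanity-check sketch and is not part of the original tutorial: it recomputes the reader's top-k accuracy from the raw counts reported in the log above (37 of 54 questions answered correctly) and should match the `Reader Top-k accuracy` value printed in the evaluation report."
   ]
  },
  {
   "cell_type": "code",
   "metadata": {},
   "source": [
    "# Sanity-check sketch (assumption: the counts below are copied from the log output\n",
    "# shown above rather than returned by a Haystack API call).\n",
    "correct, total = 37, 54  # \"37 out of 54 questions were correctly answered\"\n",
    "top_k_accuracy = correct / total\n",
    "print(f\"Reader Top-k accuracy (recomputed): {top_k_accuracy:.4f}\")  # approx. 0.6852"
   ],
   "execution_count": 0,
   "outputs": []
  },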
  {
   "cell_type": "code",
   "metadata": {
    "pycharm": {
     "name": "#%%\n"
    },
    "id": "DD57b_LkOhvg",
    "colab_type": "code",
    "colab": {}
   },
   "source": [
    ""
   ],
   "execution_count": 0,
   "outputs": []
  }
 ]
}