haystack/test/test_utils.py
Sara Zan d470b9d0bd
Improve dependency management (#1994)
* First attempt at using setup.cfg for dependency management

* Trying the new package on the CI and in Docker too

* Add composite extras_require

* Add the safe_import function for document store imports and add some try/except statements around the rest_api and ui imports (pattern sketched below, after the commit message)

* Fix bug on class import and rephrase error message

* Introduce typing for optional modules and add type: ignore in sparse.py

* Include importlib_metadata backport for py3.7

* Add colab group to extras_require

* Fix pillow version

* Fix grpcio

* Separate out the crawler as another extra

* Make paths relative in rest_api and ui

* Update the test matrix in the CI

* Add try/except statements around the optional imports as well, to account for direct imports

* Never mix direct deps with self-references and add ES deps to the base install

* Refactor several paths in tests to make them insensitive to the execution path

* Incorporate tstadel's review and re-introduce Milvus1 in the test suite, to fix

* Wrap pdf conversion utils into safe_import

* Update some tutorials and revert to Milvus1 as default for now, see #2067

* Fix mypy config


Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
2022-01-26 18:12:55 +01:00
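The safe_import pattern referenced in the commit message defers optional-dependency failures from import time to first use: the import either succeeds, or the name is bound to a placeholder that raises a helpful error only when the class is actually instantiated. Below is a minimal sketch of that pattern; the class, signature, and extras names are illustrative assumptions, not the exact haystack implementation.

```python
# Illustrative sketch of an optional-import guard in the spirit of safe_import.
# NOT the exact haystack code; names, signature, and extras groups are assumptions.
import importlib


class MissingDependency:
    """Placeholder that raises a helpful error only when the optional class is actually used."""

    def __init__(self, classname: str, dep_group: str):
        self.classname = classname
        self.dep_group = dep_group

    def __call__(self, *args, **kwargs):
        raise ImportError(
            f"{self.classname} requires extra dependencies. "
            f"Install them with: pip install 'farm-haystack[{self.dep_group}]'"
        )


def safe_import(module_path: str, classname: str, dep_group: str):
    """Try to import `classname` from `module_path`; return a placeholder if the extra is missing."""
    try:
        module = importlib.import_module(module_path)
        return getattr(module, classname)
    except (ImportError, ModuleNotFoundError):
        return MissingDependency(classname, dep_group)


# Hypothetical usage: bind an optional document store class at import time.
# MilvusDocumentStore = safe_import("haystack.document_stores.milvus1", "Milvus1DocumentStore", "milvus1")
```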


import pytest
from pathlib import Path

from haystack.utils.preprocessing import convert_files_to_dicts, tika_convert_files_to_dicts
from haystack.utils.cleaning import clean_wiki_text
from haystack.utils.augment_squad import augment_squad
from haystack.utils.squad_data import SquadData

from conftest import SAMPLES_PATH  # resolved relative to the test directory; see the sketch after this file


def test_convert_files_to_dicts():
    documents = convert_files_to_dicts(
        dir_path=(SAMPLES_PATH).absolute(), clean_func=clean_wiki_text, split_paragraphs=True
    )
    assert documents and len(documents) > 0


@pytest.mark.tika
def test_tika_convert_files_to_dicts():
    documents = tika_convert_files_to_dicts(dir_path=SAMPLES_PATH, clean_func=clean_wiki_text, split_paragraphs=True)
    assert documents and len(documents) > 0


def test_squad_augmentation():
    input_ = SAMPLES_PATH / "squad" / "tiny.json"
    output = SAMPLES_PATH / "squad" / "tiny_augmented.json"
    glove_path = SAMPLES_PATH / "glove" / "tiny.txt"  # dummy glove file, will not even be used when augmenting tiny.json
    multiplication_factor = 5
    augment_squad(
        model="distilbert-base-uncased",
        tokenizer="distilbert-base-uncased",
        squad_path=input_,
        output_path=output,
        glove_path=glove_path,
        multiplication_factor=multiplication_factor,
    )
    original_squad = SquadData.from_file(input_)
    augmented_squad = SquadData.from_file(output)
    assert original_squad.count(unit="paragraph") == augmented_squad.count(unit="paragraph") * multiplication_factor
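The tests import SAMPLES_PATH from conftest.py, which, per the commit note about making test paths insensitive to the execution path, presumably resolves the samples directory relative to the test file rather than the current working directory. A minimal sketch of how such a constant could be defined, assuming a "samples" directory next to the tests:

```python
# conftest.py (sketch): resolve the samples directory relative to this file,
# so tests pass regardless of the directory pytest is invoked from.
# The "samples" directory name is an assumption based on the SAMPLES_PATH name.
from pathlib import Path

SAMPLES_PATH = Path(__file__).parent / "samples"
```

The `@pytest.mark.tika` marker lets the Tika-dependent converter test be selected or excluded at the command line, e.g. `pytest -m tika` to run only it or `pytest -m "not tika"` to skip it, assuming the marker is registered in the project's pytest configuration.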