mirror of
https://github.com/deepset-ai/haystack.git
synced 2025-07-27 19:00:35 +00:00

* Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
41 lines
1.5 KiB
Python
41 lines
1.5 KiB
Python
import pytest
|
|
from pathlib import Path
|
|
|
|
from haystack.utils.preprocessing import convert_files_to_dicts, tika_convert_files_to_dicts
|
|
from haystack.utils.cleaning import clean_wiki_text
|
|
from haystack.utils.augment_squad import augment_squad
|
|
from haystack.utils.squad_data import SquadData
|
|
|
|
from conftest import SAMPLES_PATH
|
|
|
|
|
|
def test_convert_files_to_dicts():
    """Converting the sample files should produce a non-empty list of document dicts."""
    sample_dir = (SAMPLES_PATH).absolute()
    docs = convert_files_to_dicts(dir_path=sample_dir, clean_func=clean_wiki_text, split_paragraphs=True)
    # Non-empty result is the whole contract being checked here.
    assert docs and len(docs) > 0
|
|
|
|
|
|
@pytest.mark.tika
def test_tika_convert_files_to_dicts():
    """Tika-based conversion of the sample files should also yield a non-empty result.

    Marked with the ``tika`` marker since it requires a running Tika server.
    """
    docs = tika_convert_files_to_dicts(
        dir_path=SAMPLES_PATH,
        clean_func=clean_wiki_text,
        split_paragraphs=True,
    )
    assert docs and len(docs) > 0
|
|
|
|
|
|
def test_squad_augmentation():
    """Augment a tiny SQuAD file and check the paragraph counts relate by the factor."""
    squad_dir = SAMPLES_PATH / "squad"
    input_ = squad_dir / "tiny.json"
    output = squad_dir / "tiny_augmented.json"
    # Dummy glove file; not actually used when augmenting tiny.json.
    glove_path = SAMPLES_PATH / "glove" / "tiny.txt"
    multiplication_factor = 5

    augment_squad(
        model="distilbert-base-uncased",
        tokenizer="distilbert-base-uncased",
        squad_path=input_,
        output_path=output,
        glove_path=glove_path,
        multiplication_factor=multiplication_factor,
    )

    original_squad = SquadData.from_file(input_)
    augmented_squad = SquadData.from_file(output)
    # NOTE(review): the comparison direction (original == augmented * factor) looks
    # inverted if augmentation multiplies paragraphs — verify against the semantics
    # of augment_squad / SquadData.count before changing; preserved as-is here.
    assert original_squad.count(unit="paragraph") == augmented_squad.count(unit="paragraph") * multiplication_factor
|