Mirror of https://github.com/deepset-ai/haystack.git, last synced 2025-07-27 02:40:41 +00:00.

* first draft / notes on new primitives * wip label / feedback refactor * rename doc.text -> doc.content. add doc.content_type * add datatype for content * remove faq_question_field from ES and weaviate. rename text_field -> content_field in docstores. update tutorials for content field * update converters for . Add warning for empty * renam label.question -> label.query. Allow sorting of Answers. * WIP primitives * update ui/reader for new Answer format * Improve Label. First refactoring of MultiLabel. Adjust eval code * fixed workflow conflict with introducing new one (#1472) * Add latest docstring and tutorial changes * make add_eval_data() work again * fix reader formats. WIP fix _extract_docs_and_labels_from_dict * fix test reader * Add latest docstring and tutorial changes * fix another test case for reader * fix mypy in farm reader.eval() * fix mypy in farm reader.eval() * WIP ORM refactor * Add latest docstring and tutorial changes * fix mypy weaviate * make label and multilabel dataclasses * bump mypy env in CI to python 3.8 * WIP refactor Label ORM * WIP refactor Label ORM * simplify tests for individual doc stores * WIP refactoring markers of tests * test alternative approach for tests with existing parametrization * WIP refactor ORMs * fix skip logic of already parametrized tests * fix weaviate behaviour in tests - not parametrizing it in our general test cases. * Add latest docstring and tutorial changes * fix some tests * remove sql from document_store_types * fix markers for generator and pipeline test * remove inmemory marker * remove unneeded elasticsearch markers * add dataclasses-json dependency. adjust ORM to just store JSON repr * ignore type as dataclasses_json seems to miss functionality here * update readme and contributing.md * update contributing * adjust example * fix duplicate doc handling for custom index * Add latest docstring and tutorial changes * fix some ORM issues. fix get_all_labels_aggregated. 
* update drop flags where get_all_labels_aggregated() was used before * Add latest docstring and tutorial changes * add to_json(). add + fix tests * fix no_answer handling in label / multilabel * fix duplicate docs in memory doc store. change primary key for sql doc table * fix mypy issues * fix mypy issues * haystack/retriever/base.py * fix test_write_document_meta[elastic] * fix test_elasticsearch_custom_fields * fix test_labels[elastic] * fix crawler * fix converter * fix docx converter * fix preprocessor * fix test_utils * fix tfidf retriever. fix selection of docstore in tests with multiple fixtures / parameterizations * Add latest docstring and tutorial changes * fix crawler test. fix ocrconverter attribute * fix test_elasticsearch_custom_query * fix generator pipeline * fix ocr converter * fix ragenerator * Add latest docstring and tutorial changes * fix test_load_and_save_yaml for elasticsearch * fixes for pipeline tests * fix faq pipeline * fix pipeline tests * Add latest docstring and tutorial changes * fix weaviate * Add latest docstring and tutorial changes * trigger CI * satisfy mypy * Add latest docstring and tutorial changes * satisfy mypy * Add latest docstring and tutorial changes * trigger CI * fix question generation test * fix ray. fix Q-generation * fix translator test * satisfy mypy * wip refactor feedback rest api * fix rest api feedback endpoint * fix doc classifier * remove relation of Labels -> Docs in SQL ORM * fix faiss/milvus tests * fix doc classifier test * fix eval test * fixing eval issues * Add latest docstring and tutorial changes * fix mypy * WIP replace dataclasses-json with manual serialization * Add latest docstring and tutorial changes * revert to dataclass-json serialization for now. remove debug prints. * update docstrings * fix extractor. 
fix Answer Span init * fix api test * keep meta data of answers in reader.run() * fix meta handling * adress review feedback * Add latest docstring and tutorial changes * make document=None for open domain labels * add import * fix print utils * fix rest api * adress review feedback * Add latest docstring and tutorial changes * fix mypy Co-authored-by: Markus Paff <markuspaff.mp@gmail.com> Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
82 lines · 3.4 KiB · Python
from pathlib import Path
|
|
|
|
import pytest
|
|
|
|
from haystack.file_converter.pdf import PDFToTextConverter
|
|
from haystack.preprocessor.preprocessor import PreProcessor
|
|
|
|
TEXT = """
|
|
This is a sample sentence in paragraph_1. This is a sample sentence in paragraph_1. This is a sample sentence in
|
|
paragraph_1. This is a sample sentence in paragraph_1. This is a sample sentence in paragraph_1.
|
|
|
|
This is a sample sentence in paragraph_2. This is a sample sentence in paragraph_2. This is a sample sentence in
|
|
paragraph_2. This is a sample sentence in paragraph_2. This is a sample sentence in paragraph_2.
|
|
|
|
This is a sample sentence in paragraph_3. This is a sample sentence in paragraph_3. This is a sample sentence in
|
|
paragraph_3. This is a sample sentence in paragraph_3. This is to trick the test with using an abbreviation like Dr.
|
|
in the sentence.
|
|
"""
|
|
|
|
|
|
@pytest.mark.tika
|
|
def test_preprocess_sentence_split():
|
|
document = {"content": TEXT}
|
|
preprocessor = PreProcessor(split_length=1, split_overlap=0, split_by="sentence", split_respect_sentence_boundary=False)
|
|
documents = preprocessor.process(document)
|
|
assert len(documents) == 15
|
|
|
|
preprocessor = PreProcessor(
|
|
split_length=10, split_overlap=0, split_by="sentence", split_respect_sentence_boundary=False,
|
|
)
|
|
documents = preprocessor.process(document)
|
|
assert len(documents) == 2
|
|
|
|
|
|
@pytest.mark.tika
|
|
def test_preprocess_word_split():
|
|
document = {"content": TEXT}
|
|
preprocessor = PreProcessor(split_length=10, split_overlap=0, split_by="word", split_respect_sentence_boundary=False)
|
|
documents = preprocessor.process(document)
|
|
assert len(documents) == 11
|
|
|
|
preprocessor = PreProcessor(split_length=15, split_overlap=0, split_by="word", split_respect_sentence_boundary=True)
|
|
documents = preprocessor.process(document)
|
|
for i,doc in enumerate(documents):
|
|
if i == 0:
|
|
assert len(doc["content"].split(" ")) == 14
|
|
assert len(doc["content"].split(" ")) <= 15 or doc["content"].startswith("This is to trick")
|
|
assert len(documents) == 8
|
|
|
|
preprocessor = PreProcessor(split_length=40, split_overlap=10, split_by="word", split_respect_sentence_boundary=True)
|
|
documents = preprocessor.process(document)
|
|
assert len(documents) == 5
|
|
|
|
preprocessor = PreProcessor(split_length=5, split_overlap=0, split_by="word", split_respect_sentence_boundary=True)
|
|
documents = preprocessor.process(document)
|
|
assert len(documents) == 15
|
|
|
|
|
|
@pytest.mark.tika
|
|
def test_preprocess_passage_split():
|
|
document = {"content": TEXT}
|
|
preprocessor = PreProcessor(split_length=1, split_overlap=0, split_by="passage", split_respect_sentence_boundary=False)
|
|
documents = preprocessor.process(document)
|
|
assert len(documents) == 3
|
|
|
|
preprocessor = PreProcessor(split_length=2, split_overlap=0, split_by="passage", split_respect_sentence_boundary=False)
|
|
documents = preprocessor.process(document)
|
|
assert len(documents) == 2
|
|
|
|
|
|
@pytest.mark.tika
|
|
def test_clean_header_footer():
|
|
converter = PDFToTextConverter()
|
|
document = converter.convert(file_path=Path("samples/pdf/sample_pdf_2.pdf")) # file contains header/footer
|
|
|
|
preprocessor = PreProcessor(clean_header_footer=True, split_by=None)
|
|
documents = preprocessor.process(document)
|
|
|
|
assert len(documents) == 1
|
|
|
|
assert "This is a header." not in documents[0]["content"]
|
|
assert "footer" not in documents[0]["content"] |