mirror of
https://github.com/deepset-ai/haystack.git
synced 2025-08-20 06:28:39 +00:00

* Add BasePipeline.validate_config, BasePipeline.validate_yaml, and some new custom exception classes * Make error composition work properly * Clarify typing * Help mypy a bit more * Update Documentation & Code Style * Enable autogenerated docs for Milvus1 and 2 separately * Revert "Enable autogenerated docs for Milvus1 and 2 separately" This reverts commit 282be4a78a6e95862a9b4c924fc3dea5ca71e28d. * Update Documentation & Code Style * Re-enable 'additionalProperties: False' * Add pipeline.type to JSON Schema, was somehow forgotten * Disable additionalProperties on the pipeline properties too * Fix json-schemas for 1.1.0 and 1.2.0 (should not do it again in the future) * Cal super in PipelineValidationError * Improve _read_pipeline_config_from_yaml's error handling * Fix generate_json_schema.py to include document stores * Fix json schemas (retro-fix 1.1.0 again) * Improve custom errors printing, add link to docs * Add function in BaseComponent to list its subclasses in a module * Make some document stores base classes abstract * Add marker 'integration' in pytest flags * Slighly improve validation of pipelines at load * Adding tests for YAML loading and validation * Make custom_query Optional for validation issues * Fix bug in _read_pipeline_config_from_yaml * Improve error handling in BasePipeline and Pipeline and add DAG check * Move json schema generation into haystack/nodes/_json_schema.py (useful for tests) * Simplify errors slightly * Add some YAML validation tests * Remove load_from_config from BasePipeline, it was never used anyway * Improve tests * Include json-schemas in package * Fix conftest imports * Make BasePipeline abstract * Improve mocking by making the test independent from the YAML version * Add exportable_to_yaml decorator to forget about set_config on mock nodes * Fix mypy errors * Comment out one monkeypatch * Fix typing again * Improve error message for validation * Add required properties to pipelines * Fix YAML version for REST API YAMLs 
to 1.2.0 * Fix load_from_yaml call in load_from_deepset_cloud * fix HaystackError.__getattr__ * Add super().__init__()in most nodes and docstore, comment set_config * Remove type from REST API pipelines * Remove useless init from doc2answers * Call super in Seq3SeqGenerator * Typo in deepsetcloud.py * Fix rest api indexing error mismatch and mock version of JSON schema in all tests * Working on pipeline tests * Improve errors printing slightly * Add back test_pipeline.yaml * _json_schema.py supports different versions with identical schemas * Add type to 0.7 schema for backwards compatibility * Fix small bug in _json_schema.py * Try alternative to generate json schemas on the CI * Update Documentation & Code Style * Make linux CI match autoformat CI * Fix super-init-not-called * Accidentally committed file * Update Documentation & Code Style * fix test_summarizer_translation.py's import * Mock YAML in a few suites, split and simplify test_pipeline_debug_and_validation.py::test_invalid_run_args * Fix json schema for ray tests too * Update Documentation & Code Style * Reintroduce validation * Usa unstable version in tests and rest api * Make unstable support the latest versions * Update Documentation & Code Style * Remove needless fixture * Make type in pipeline optional in the strings validation * Fix schemas * Fix string validation for pipeline type * Improve validate_config_strings * Remove type from test p[ipelines * Update Documentation & Code Style * Fix test_pipeline * Removing more type from pipelines * Temporary CI patc * Fix issue with exportable_to_yaml never invoking the wrapped init * rm stray file * pipeline tests are green again * Linux CI now needs .[all] to generate the schema * Bugfixes, pipeline tests seems to be green * Typo in version after merge * Implement missing methods in Weaviate * Trying to avoid FAISS tests from running in the Milvus1 test suite * Fix some stray test paths and faiss index dumping * Fix pytest markers list * Temporarily 
disable cache to be able to see tests failures * Fix pyproject.toml syntax * Use only tmp_path * Fix preprocessor signature after merge * Fix faiss bug * Fix Ray test * Fix documentation issue by removing quotes from faiss type * Update Documentation & Code Style * use document properly in preprocessor tests * Update Documentation & Code Style * make preprocessor capable of handling documents * import document * Revert support for documents in preprocessor, do later * Fix bug in _json_schema.py that was breaking validation * re-enable cache * Update Documentation & Code Style * Simplify calling _json_schema.py from the CI * Remove redundant ABC inheritance * Ensure exportable_to_yaml works only on implementations * Rename subclass to class_ in Meta * Make run() and get_config() abstract in BasePipeline * Revert unintended change in preprocessor * Move outgoing_edges_input_node check inside try block * Rename VALID_CODE_GEN_INPUT_REGEX into VALID_INPUT_REGEX * Add check for a RecursionError on validate_config_strings * Address usages of _pipeline_config in data silo and elasticsearch * Rename _pipeline_config into _init_parameters * Fix pytest marker and remove unused imports * Remove most redundant ABCs * Rename _init_parameters into _component_configuration * Remove set_config and type from _component_configuration's dict * Remove last instances of set_config and replace with super().__init__() * Implement __init_subclass__ approach * Simplify checks on the existence of _component_configuration * Fix faiss issue * Dynamic generation of node schemas & weed out old schemas * Add debatable test * Add docstring to debatable test * Positive diff between schemas implemented * Improve diff printing * Rename REST API YAML files to trigger IDE validation * Fix typing issues * Fix more typing * Typo in YAML filename * Remove needless type:ignore * Add tests * Fix tests & validation feedback for accessory classes in custom nodes * Refactor RAGeneratorType out * Fix broken 
import in conftest * Improve source error handling * Remove unused import in test_eval.py breaking tests * Fix changed error message in tests matches too * Normalize generate_openapi_specs.py and generate_json_schema.py in the actions * Fix path to generate_openapi_specs.py in autoformat.yml * Update Documentation & Code Style * Add test for FAISSDocumentStore-like situations (superclass with init params) * Update Documentation & Code Style * Fix indentation * Remove commented set_config * Store model_name_or_path in FARMReader to use in DistillationDataSilo * Rename _component_configuration into _component_config * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
122 lines
5.0 KiB
Python
122 lines
5.0 KiB
Python
from pathlib import Path
|
|
import os
|
|
|
|
import pytest
|
|
|
|
from haystack.nodes import (
|
|
MarkdownConverter,
|
|
DocxToTextConverter,
|
|
PDFToTextConverter,
|
|
PDFToTextOCRConverter,
|
|
TikaConverter,
|
|
AzureConverter,
|
|
ParsrConverter,
|
|
)
|
|
|
|
from .conftest import SAMPLES_PATH
|
|
|
|
|
|
@pytest.mark.tika
@pytest.mark.parametrize(
    # NOTE: PDFToTextConverter and TikaConverter are currently excluded here;
    # the original parametrization was [PDFToTextConverter, TikaConverter, PDFToTextOCRConverter].
    "Converter",
    [PDFToTextOCRConverter],
)
def test_convert(Converter):
    """The converted sample PDF keeps its page structure and its text content."""
    node = Converter()
    doc = node.convert(file_path=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")[0]

    # Pages are separated by form-feed characters in the extracted content.
    pages = doc["content"].split("\f")
    assert len(pages) == 4  # the sample PDF file has four pages.
    assert pages[0] != ""  # the page 1 of PDF contains text.
    assert pages[2] == ""  # the page 3 of PDF file is empty.

    # Check that the text survived the conversion. Whitespace may differ
    # (\n, " ", etc.), so collapse all runs of whitespace to single spaces first.
    normalized_page = " ".join(pages[0].split())
    assert "Adobe Systems made the PDF specification available free of charge in 1993." in normalized_page
@pytest.mark.tika
@pytest.mark.parametrize("Converter", [PDFToTextConverter, TikaConverter])
def test_table_removal(Converter):
    """With remove_numeric_tables=True, numeric table rows do not appear in the output."""
    node = Converter(remove_numeric_tables=True)
    doc = node.convert(file_path=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")[0]
    first_page = doc["content"].split("\f")[0]

    # Numeric rows of the table on page 1 must have been removed.
    assert "324" not in first_page
    assert "54x growth" not in first_page
@pytest.mark.tika
@pytest.mark.parametrize("Converter", [PDFToTextConverter, TikaConverter])
def test_language_validation(Converter, caplog):
    """valid_languages logs a warning only when the document language does not match."""
    pdf_path = SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf"

    # English document with English expected: no warning should be emitted.
    Converter(valid_languages=["en"]).convert(file_path=pdf_path)
    assert "samples/pdf/sample_pdf_1.pdf is not one of ['en']." not in caplog.text

    # English document with German expected: the mismatch warning must be emitted.
    Converter(valid_languages=["de"]).convert(file_path=pdf_path)
    assert "samples/pdf/sample_pdf_1.pdf is not one of ['de']." in caplog.text
def test_docx_converter():
    """DocxToTextConverter extracts the text of the sample .docx file."""
    doc = DocxToTextConverter().convert(file_path=SAMPLES_PATH / "docx" / "sample_docx.docx")[0]
    assert doc["content"].startswith("Sample Docx File")
def test_markdown_converter():
    """MarkdownConverter extracts the text of the sample markdown file."""
    doc = MarkdownConverter().convert(file_path=SAMPLES_PATH / "markdown" / "sample.md")[0]
    assert doc["content"].startswith("What to build with Haystack")
def test_azure_converter():
    """Run AzureConverter on the sample PDF and check the extracted table and text.

    Requires Azure Form Recognizer credentials in the environment
    (AZURE_FORMRECOGNIZER_ENDPOINT and AZURE_FORMRECOGNIZER_KEY).
    """
    # Skip explicitly when the credentials are missing. Previously the whole body
    # was wrapped in an `if`, so the test silently PASSED without running anything;
    # skipping makes the missing-credentials case visible in the test report.
    if "AZURE_FORMRECOGNIZER_ENDPOINT" not in os.environ or "AZURE_FORMRECOGNIZER_KEY" not in os.environ:
        pytest.skip("AZURE_FORMRECOGNIZER_ENDPOINT and/or AZURE_FORMRECOGNIZER_KEY not set in the environment")

    converter = AzureConverter(
        endpoint=os.environ["AZURE_FORMRECOGNIZER_ENDPOINT"],
        credential_key=os.environ["AZURE_FORMRECOGNIZER_KEY"],
        save_json=True,
    )

    docs = converter.convert(file_path=SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf")
    assert len(docs) == 2

    # First document: the table from page 1.
    assert docs[0]["content_type"] == "table"
    assert len(docs[0]["content"]) == 5  # number of rows
    assert len(docs[0]["content"][0]) == 5  # number of columns, Form Recognizer assumes there are 5 columns
    assert docs[0]["content"][0] == ["", "Column 1", "", "Column 2", "Column 3"]
    assert docs[0]["content"][4] == ["D", "$54.35", "", "$6345.", ""]
    assert (
        docs[0]["meta"]["preceding_context"] == "specification. These proprietary technologies are not "
        "standardized and their\nspecification is published only on "
        "Adobe's website. Many of them are also not\nsupported by "
        "popular third-party implementations of PDF."
    )
    assert docs[0]["meta"]["following_context"] == ""

    # Second document: the running text.
    assert docs[1]["content_type"] == "text"
    assert docs[1]["content"].startswith("A sample PDF file")
def test_parsr_converter():
    """ParsrConverter splits the sample PDF into one table document and one text document."""
    pdf_path = str((SAMPLES_PATH / "pdf" / "sample_pdf_1.pdf").absolute())
    docs = ParsrConverter().convert(file_path=pdf_path)

    assert len(docs) == 2

    # First document: the table from page 1.
    table_doc = docs[0]
    assert table_doc["content_type"] == "table"
    table = table_doc["content"]
    assert len(table) == 5  # number of rows
    assert len(table[0]) == 4  # number of columns
    assert table[0] == ["", "Column 1", "Column 2", "Column 3"]
    assert table[4] == ["D", "$54.35", "$6345.", ""]
    assert (
        table_doc["meta"]["preceding_context"] == "specification. These proprietary technologies are not "
        "standardized and their\nspecification is published only on "
        "Adobe's website. Many of them are also not\nsupported by popular "
        "third-party implementations of PDF."
    )
    assert table_doc["meta"]["following_context"] == ""

    # Second document: the running text.
    text_doc = docs[1]
    assert text_doc["content_type"] == "text"
    assert text_doc["content"].startswith("A sample PDF file")
    assert text_doc["content"].endswith("Page 4 of Sample PDF\n… the page 3 is empty.")