mirror of
https://github.com/docling-project/docling.git
synced 2025-06-27 05:20:05 +00:00

* add the pytests Signed-off-by: Peter Staar <taa@zurich.ibm.com> * renamed the test folder and added the toplevel test Signed-off-by: Peter Staar <taa@zurich.ibm.com> * updated the toplevel function test Signed-off-by: Peter Staar <taa@zurich.ibm.com> * need to start running all tests successfully Signed-off-by: Peter Staar <taa@zurich.ibm.com> * added the reference converted documents Signed-off-by: Peter Staar <taa@zurich.ibm.com> * added first test for json and md output Signed-off-by: Peter Staar <taa@zurich.ibm.com> * ran pre-commit Signed-off-by: Peter Staar <taa@zurich.ibm.com> * replaced deprecated json function with model_dump_json Signed-off-by: Peter Staar <taa@zurich.ibm.com> * replaced deprecated json function with model_dump_json Signed-off-by: Peter Staar <taa@zurich.ibm.com> * reformatted code Signed-off-by: Peter Staar <taa@zurich.ibm.com> * Fix backend tests Signed-off-by: Christoph Auer <cau@zurich.ibm.com> * commented out the drawing Signed-off-by: Peter Staar <taa@zurich.ibm.com> * ci: avoid duplicate runs Signed-off-by: Michele Dolfi <97102151+dolfim-ibm@users.noreply.github.com> * commented out json verification for now Signed-off-by: Peter Staar <taa@zurich.ibm.com> * added verification of input cells Signed-off-by: Peter Staar <taa@zurich.ibm.com> * reformat code Signed-off-by: Peter Staar <taa@zurich.ibm.com> * added test to verify the cells in the pages Signed-off-by: Peter Staar <taa@zurich.ibm.com> * added test to verify the cells in the pages (2) Signed-off-by: Peter Staar <taa@zurich.ibm.com> * added test to verify the cells in the pages (3) Signed-off-by: Peter Staar <taa@zurich.ibm.com> * run all examples in CI Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * make sure examples return failures Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * raise a failure if examples fail Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * fix examples Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * run examples after tests 
Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * Add tests and update top_level_tests using only datamodels Signed-off-by: Christoph Auer <cau@zurich.ibm.com> * Remove unnecessary code Signed-off-by: Christoph Auer <cau@zurich.ibm.com> * Validate conversion status on e2e test Signed-off-by: Christoph Auer <cau@zurich.ibm.com> * package verify utils and add more tests Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * reduce docs in example, since they are already in the tests Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * skip batch_convert Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * pin docling-parse 1.1.2 Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> * updated the error messages Signed-off-by: Peter Staar <taa@zurich.ibm.com> * commented out the json verification for now Signed-off-by: Peter Staar <taa@zurich.ibm.com> * bumped GLM version Signed-off-by: Peter Staar <taa@zurich.ibm.com> * Fix lockfile Signed-off-by: Christoph Auer <cau@zurich.ibm.com> * Pin new docling-parse v1.1.3 Signed-off-by: Christoph Auer <cau@zurich.ibm.com> --------- Signed-off-by: Peter Staar <taa@zurich.ibm.com> Signed-off-by: Christoph Auer <cau@zurich.ibm.com> Signed-off-by: Michele Dolfi <97102151+dolfim-ibm@users.noreply.github.com> Signed-off-by: Michele Dolfi <dol@zurich.ibm.com> Co-authored-by: Christoph Auer <cau@zurich.ibm.com> Co-authored-by: Michele Dolfi <97102151+dolfim-ibm@users.noreply.github.com> Co-authored-by: Michele Dolfi <dol@zurich.ibm.com>
133 lines
4.1 KiB
Python
133 lines
4.1 KiB
Python
import json
|
|
import logging
|
|
import time
|
|
from pathlib import Path
|
|
from typing import Iterable
|
|
|
|
from docling.backend.docling_parse_backend import DoclingParseDocumentBackend
|
|
from docling.backend.pypdfium2_backend import PyPdfiumDocumentBackend
|
|
from docling.datamodel.base_models import ConversionStatus, PipelineOptions
|
|
from docling.datamodel.document import ConversionResult, DocumentConversionInput
|
|
from docling.document_converter import DocumentConverter
|
|
|
|
_log = logging.getLogger(__name__)
|
|
|
|
|
|
def export_documents(
    conv_results: Iterable[ConversionResult],
    output_dir: Path,
):
    """Export successful conversion results as JSON and Markdown files.

    Args:
        conv_results: Conversion results to export (consumed lazily).
        output_dir: Target directory; created (with parents) if missing.

    Returns:
        Tuple ``(success_count, failure_count)`` of processed documents.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    success_count = 0
    failure_count = 0

    for conv_res in conv_results:
        if conv_res.status == ConversionStatus.SUCCESS:
            success_count += 1
            doc_filename = conv_res.input.file.stem

            # Export Deep Search document JSON format.
            # encoding="utf-8" avoids platform-default encodings that can
            # corrupt or reject non-ASCII document content (e.g. on Windows).
            with (output_dir / f"{doc_filename}.json").open(
                "w", encoding="utf-8"
            ) as fp:
                # json.dump streams straight to the file instead of building
                # the whole serialized string in memory first.
                json.dump(conv_res.render_as_dict(), fp)

            # Export Markdown format:
            with (output_dir / f"{doc_filename}.md").open(
                "w", encoding="utf-8"
            ) as fp:
                fp.write(conv_res.render_as_markdown())
        else:
            _log.info(f"Document {conv_res.input.file} failed to convert.")
            failure_count += 1

    _log.info(
        f"Processed {success_count + failure_count} docs, of which {failure_count} failed"
    )

    return success_count, failure_count
|
|
|
|
|
|
def main():
    """Run the example: convert the sample PDF(s) and export to ./scratch.

    Raises:
        RuntimeError: If any of the input documents fails to convert.
    """
    logging.basicConfig(level=logging.INFO)

    input_doc_paths = [
        Path("./tests/data/2206.01062.pdf"),
    ]

    ###########################################################################

    # The following sections contain a combination of PipelineOptions
    # and PDF Backends for various configurations.
    # Uncomment one section at the time to see the differences in the output.

    # PyPdfium without OCR
    # --------------------
    # pipeline_options = PipelineOptions()
    # pipeline_options.do_ocr = False
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = False

    # doc_converter = DocumentConverter(
    #     pipeline_options=pipeline_options,
    #     pdf_backend=PyPdfiumDocumentBackend,
    # )

    # PyPdfium with OCR
    # -----------------
    # pipeline_options = PipelineOptions()
    # pipeline_options.do_ocr = True  # was False, contradicting the section title
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True

    # doc_converter = DocumentConverter(
    #     pipeline_options=pipeline_options,
    #     pdf_backend=PyPdfiumDocumentBackend,
    # )

    # Docling Parse without OCR
    # -------------------------
    pipeline_options = PipelineOptions()
    pipeline_options.do_ocr = False
    pipeline_options.do_table_structure = True
    pipeline_options.table_structure_options.do_cell_matching = True

    doc_converter = DocumentConverter(
        pipeline_options=pipeline_options,
        pdf_backend=DoclingParseDocumentBackend,
    )

    # Docling Parse with OCR
    # ----------------------
    # pipeline_options = PipelineOptions()
    # pipeline_options.do_ocr = True
    # pipeline_options.do_table_structure = True
    # pipeline_options.table_structure_options.do_cell_matching = True

    # doc_converter = DocumentConverter(
    #     pipeline_options=pipeline_options,
    #     pdf_backend=DoclingParseDocumentBackend,
    # )

    ###########################################################################

    # Define input files (named conv_input to avoid shadowing the builtin `input`).
    conv_input = DocumentConversionInput.from_paths(input_doc_paths)

    start_time = time.time()

    conv_results = doc_converter.convert(conv_input)
    success_count, failure_count = export_documents(
        conv_results, output_dir=Path("./scratch")
    )

    # Elapsed wall-clock time (the original name `end_time` was misleading).
    elapsed = time.time() - start_time

    _log.info(f"All documents were converted in {elapsed:.2f} seconds.")

    if failure_count > 0:
        raise RuntimeError(
            f"The example failed converting {failure_count} out of "
            f"{len(input_doc_paths)} documents."
        )
|
|
|
|
|
|
# Script entry point: run the example only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()