Mirror of https://github.com/docling-project/docling.git, synced 2025-11-02 03:42:59 +00:00
feat: Add granite-docling model (#2272)
* adding granite-docling preview
  Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* updated the model specs
  Signed-off-by: Peter Staar <taa@zurich.ibm.com>
* typo
  Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* use granite-docling and add to the model downloader
  Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* update docs and README
  Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
* Update final repo_ids for GraniteDocling
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
* Update final repo_ids for GraniteDocling
  Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
* Fix model name in CLI usage example
  Signed-off-by: Christoph Auer <60343111+cau-git@users.noreply.github.com>
* Fix VLM model name in README.md
  Signed-off-by: Christoph Auer <60343111+cau-git@users.noreply.github.com>

---------

Signed-off-by: Peter Staar <taa@zurich.ibm.com>
Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
Signed-off-by: Christoph Auer <cau@zurich.ibm.com>
Signed-off-by: Christoph Auer <60343111+cau-git@users.noreply.github.com>
Co-authored-by: Peter Staar <taa@zurich.ibm.com>
Co-authored-by: Michele Dolfi <dol@zurich.ibm.com>
parent ff351fd40c
commit 17afb664d0
@@ -36,7 +36,7 @@ Docling simplifies document processing, parsing diverse formats — including ad
 * 🔒 Local execution capabilities for sensitive data and air-gapped environments
 * 🤖 Plug-and-play [integrations][integrations] incl. LangChain, LlamaIndex, Crew AI & Haystack for agentic AI
 * 🔍 Extensive OCR support for scanned PDFs and images
-* 👓 Support of several Visual Language Models ([SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview))
+* 👓 Support of several Visual Language Models ([GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M))
 * 🎙️ Audio support with Automatic Speech Recognition (ASR) models
 * 🔌 Connect to any agent using the [MCP server](https://docling-project.github.io/docling/usage/mcp/)
 * 💻 Simple and convenient CLI
@@ -88,9 +88,9 @@ Docling has a built-in CLI to run conversions.
 docling https://arxiv.org/pdf/2206.01062
 ```
 
-You can also use 🥚[SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview) and other VLMs via Docling CLI:
+You can also use 🥚[GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M) and other VLMs via Docling CLI:
 ```bash
-docling --pipeline vlm --vlm-model smoldocling https://arxiv.org/pdf/2206.01062
+docling --pipeline vlm --vlm-model granite_docling https://arxiv.org/pdf/2206.01062
 ```
 This will use MLX acceleration on supported Apple Silicon hardware.
 
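For reference, the same VLM conversion can be driven from Python. The sketch below follows the pattern of the `minimal_vlm_pipeline.py` example touched later in this commit; the `PdfFormatOption`/`VlmPipeline` wiring is assumed from the existing Docling API, and with no explicit `vlm_options` the pipeline picks up the GraniteDocling default introduced here.

```python
from docling.datamodel.base_models import InputFormat
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

source = "https://arxiv.org/pdf/2206.01062"

# Route PDFs through the VLM pipeline; without explicit vlm_options this now
# resolves to the GraniteDocling default set in VlmPipelineOptions (see below).
converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(pipeline_cls=VlmPipeline),
    }
)

doc = converter.convert(source=source).document
print(doc.export_to_markdown())
```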
@@ -64,6 +64,8 @@ from docling.datamodel.vlm_model_specs import (
     GOT2_TRANSFORMERS,
     GRANITE_VISION_OLLAMA,
     GRANITE_VISION_TRANSFORMERS,
+    GRANITEDOCLING_MLX,
+    GRANITEDOCLING_TRANSFORMERS,
     SMOLDOCLING_MLX,
     SMOLDOCLING_TRANSFORMERS,
     SMOLDOCLING_VLLM,
@@ -334,7 +336,7 @@ def convert( # noqa: C901
     vlm_model: Annotated[
         VlmModelType,
         typer.Option(..., help="Choose the VLM model to use with PDF or image files."),
-    ] = VlmModelType.SMOLDOCLING,
+    ] = VlmModelType.GRANITEDOCLING,
     asr_model: Annotated[
         AsrModelType,
         typer.Option(..., help="Choose the ASR model to use with audio/video files."),
@@ -684,6 +686,18 @@ def convert( # noqa: C901
                         "To run SmolDocling faster, please install mlx-vlm:\n"
                         "pip install mlx-vlm"
                     )
+        elif vlm_model == VlmModelType.GRANITEDOCLING:
+            pipeline_options.vlm_options = GRANITEDOCLING_TRANSFORMERS
+            if sys.platform == "darwin":
+                try:
+                    import mlx_vlm
+
+                    pipeline_options.vlm_options = GRANITEDOCLING_MLX
+                except ImportError:
+                    _log.warning(
+                        "To run GraniteDocling faster, please install mlx-vlm:\n"
+                        "pip install mlx-vlm"
+                    )
         elif vlm_model == VlmModelType.SMOLDOCLING_VLLM:
             pipeline_options.vlm_options = SMOLDOCLING_VLLM
 
@@ -33,6 +33,8 @@ class _AvailableModels(str, Enum):
     CODE_FORMULA = "code_formula"
     PICTURE_CLASSIFIER = "picture_classifier"
     SMOLVLM = "smolvlm"
+    GRANITEDOCLING = "granitedocling"
+    GRANITEDOCLING_MLX = "granitedocling_mlx"
     SMOLDOCLING = "smoldocling"
     SMOLDOCLING_MLX = "smoldocling_mlx"
     GRANITE_VISION = "granite_vision"
@@ -108,6 +110,8 @@ def download(
         with_code_formula=_AvailableModels.CODE_FORMULA in to_download,
         with_picture_classifier=_AvailableModels.PICTURE_CLASSIFIER in to_download,
         with_smolvlm=_AvailableModels.SMOLVLM in to_download,
+        with_granitedocling=_AvailableModels.GRANITEDOCLING in to_download,
+        with_granitedocling_mlx=_AvailableModels.GRANITEDOCLING_MLX in to_download,
         with_smoldocling=_AvailableModels.SMOLDOCLING in to_download,
         with_smoldocling_mlx=_AvailableModels.SMOLDOCLING_MLX in to_download,
         with_granite_vision=_AvailableModels.GRANITE_VISION in to_download,
@@ -12,7 +12,7 @@ from pydantic import (
 )
 from typing_extensions import deprecated
 
-from docling.datamodel import asr_model_specs
+from docling.datamodel import asr_model_specs, vlm_model_specs
 
 # Import the following for backwards compatibility
 from docling.datamodel.accelerator_options import AcceleratorDevice, AcceleratorOptions
@@ -290,7 +290,7 @@ class VlmPipelineOptions(PaginatedPipelineOptions):
     )
     # If True, text from backend will be used instead of generated text
     vlm_options: Union[InlineVlmOptions, ApiVlmOptions] = (
-        smoldocling_vlm_conversion_options
+        vlm_model_specs.GRANITEDOCLING_TRANSFORMERS
     )
 
 
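A quick way to see the effect of this default swap is to instantiate the options and inspect the resulting spec. A minimal sketch, assuming the `repo_id` field of the spec objects defined further down in this diff:

```python
from docling.datamodel.pipeline_options import VlmPipelineOptions

# The default vlm_options now resolve to the GraniteDocling Transformers spec
# instead of the previous SmolDocling conversion options.
opts = VlmPipelineOptions()
print(opts.vlm_options.repo_id)  # expected: ibm-granite/granite-docling-258M
```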
@@ -18,6 +18,35 @@ from docling.datamodel.pipeline_options_vlm_model import (
 _log = logging.getLogger(__name__)
 
 
+# Granite-Docling
+GRANITEDOCLING_TRANSFORMERS = InlineVlmOptions(
+    repo_id="ibm-granite/granite-docling-258M",
+    prompt="Convert this page to docling.",
+    response_format=ResponseFormat.DOCTAGS,
+    inference_framework=InferenceFramework.TRANSFORMERS,
+    transformers_model_type=TransformersModelType.AUTOMODEL_IMAGETEXTTOTEXT,
+    supported_devices=[
+        AcceleratorDevice.CPU,
+        AcceleratorDevice.CUDA,
+    ],
+    scale=2.0,
+    temperature=0.0,
+    max_new_tokens=8192,
+    stop_strings=["</doctag>", "<|end_of_text|>"],
+)
+
+GRANITEDOCLING_MLX = InlineVlmOptions(
+    repo_id="ibm-granite/granite-docling-258M-mlx",
+    prompt="Convert this page to docling.",
+    response_format=ResponseFormat.DOCTAGS,
+    inference_framework=InferenceFramework.MLX,
+    supported_devices=[AcceleratorDevice.MPS],
+    scale=2.0,
+    temperature=0.0,
+    max_new_tokens=8192,
+    stop_strings=["</doctag>", "<|end_of_text|>"],
+)
+
 # SmolDocling
 SMOLDOCLING_MLX = InlineVlmOptions(
     repo_id="ds4sd/SmolDocling-256M-preview-mlx-bf16",
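Either spec can also be selected explicitly instead of relying on the new default. A minimal sketch, mirroring the updated `minimal_vlm_pipeline.py` example below; note that `GRANITEDOCLING_MLX` lists only MPS under `supported_devices`, so it is intended for Apple Silicon, while `GRANITEDOCLING_TRANSFORMERS` covers CPU and CUDA:

```python
from docling.datamodel import vlm_model_specs
from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import VlmPipelineOptions
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline

# Pick the MLX variant on Apple Silicon; use GRANITEDOCLING_TRANSFORMERS elsewhere.
pipeline_options = VlmPipelineOptions(
    vlm_options=vlm_model_specs.GRANITEDOCLING_MLX,
)

converter = DocumentConverter(
    format_options={
        InputFormat.PDF: PdfFormatOption(
            pipeline_cls=VlmPipeline,
            pipeline_options=pipeline_options,
        ),
    }
)

doc = converter.convert("https://arxiv.org/pdf/2501.17887").document
print(doc.export_to_markdown())
```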
@@ -272,3 +301,4 @@ class VlmModelType(str, Enum):
     GRANITE_VISION_VLLM = "granite_vision_vllm"
     GRANITE_VISION_OLLAMA = "granite_vision_ollama"
     GOT_OCR_2 = "got_ocr_2"
+    GRANITEDOCLING = "granite_docling"
@@ -10,6 +10,8 @@ from docling.datamodel.pipeline_options import (
 )
 from docling.datamodel.settings import settings
 from docling.datamodel.vlm_model_specs import (
+    GRANITEDOCLING_MLX,
+    GRANITEDOCLING_TRANSFORMERS,
     SMOLDOCLING_MLX,
     SMOLDOCLING_TRANSFORMERS,
 )
@@ -34,6 +36,8 @@ def download_models(
     with_code_formula: bool = True,
     with_picture_classifier: bool = True,
     with_smolvlm: bool = False,
+    with_granitedocling: bool = False,
+    with_granitedocling_mlx: bool = False,
     with_smoldocling: bool = False,
     with_smoldocling_mlx: bool = False,
     with_granite_vision: bool = False,
@@ -86,6 +90,24 @@ def download_models(
             progress=progress,
         )
 
+    if with_granitedocling:
+        _log.info("Downloading GraniteDocling model...")
+        download_hf_model(
+            repo_id=GRANITEDOCLING_TRANSFORMERS.repo_id,
+            local_dir=output_dir / GRANITEDOCLING_TRANSFORMERS.repo_cache_folder,
+            force=force,
+            progress=progress,
+        )
+
+    if with_granitedocling_mlx:
+        _log.info("Downloading GraniteDocling MLX model...")
+        download_hf_model(
+            repo_id=GRANITEDOCLING_MLX.repo_id,
+            local_dir=output_dir / GRANITEDOCLING_MLX.repo_cache_folder,
+            force=force,
+            progress=progress,
+        )
+
     if with_smoldocling:
         _log.info("Downloading SmolDocling model...")
         download_hf_model(
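The new flags also make the weights prefetchable for offline or air-gapped use. A hedged sketch; the module path `docling.utils.model_downloader` is an assumption about where `download_models` lives, inferred from the hunks above:

```python
from pathlib import Path

# Assumed module path for the download_models helper shown in the diff above.
from docling.utils.model_downloader import download_models

output_dir = Path("./docling-models")

# Prefetch the GraniteDocling checkpoints so later conversions can run offline.
download_models(
    output_dir=output_dir,
    with_granitedocling=True,
    with_granitedocling_mlx=False,  # set True on Apple Silicon with mlx-vlm installed
    progress=True,
)
print(f"Models downloaded to {output_dir}")
```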
docs/examples/minimal_vlm_pipeline.py (4 changes, vendored)
@@ -32,7 +32,7 @@ from docling.pipeline.vlm_pipeline import VlmPipeline
 source = "https://arxiv.org/pdf/2501.17887"
 
 ###### USING SIMPLE DEFAULT VALUES
-# - SmolDocling model
+# - GraniteDocling model
 # - Using the transformers framework
 
 converter = DocumentConverter(
@@ -53,7 +53,7 @@ print(doc.export_to_markdown())
 # For more options see the `compare_vlm_models.py` example.
 
 pipeline_options = VlmPipelineOptions(
-    vlm_options=vlm_model_specs.SMOLDOCLING_MLX,
+    vlm_options=vlm_model_specs.GRANITEDOCLING_MLX,
 )
 
 converter = DocumentConverter(
docs/index.md (2 changes, vendored)
@@ -28,7 +28,7 @@ Docling simplifies document processing, parsing diverse formats — including ad
 * 🔒 Local execution capabilities for sensitive data and air-gapped environments
 * 🤖 Plug-and-play [integrations][integrations] incl. LangChain, LlamaIndex, Crew AI & Haystack for agentic AI
 * 🔍 Extensive OCR support for scanned PDFs and images
-* 👓 Support of several Visual Language Models ([SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview))
+* 👓 Support of several Visual Language Models ([GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M))
 * 🎙️ Support for Audio with Automatic Speech Recognition (ASR) models
 * 🔌 Connect to any agent using the [Docling MCP](https://docling-project.github.io/docling/usage/mcp/) server
 * 💻 Simple and convenient CLI
docs/usage/index.md (4 changes, vendored)
@@ -31,9 +31,9 @@ You can additionally use Docling directly from your terminal, for instance:
 docling https://arxiv.org/pdf/2206.01062
 ```
 
-The CLI provides various options, such as 🥚[SmolDocling](https://huggingface.co/ds4sd/SmolDocling-256M-preview) (incl. MLX acceleration) & other VLMs:
+The CLI provides various options, such as 🥚[GraniteDocling](https://huggingface.co/ibm-granite/granite-docling-258M) (incl. MLX acceleration) & other VLMs:
 ```bash
-docling --pipeline vlm --vlm-model smoldocling https://arxiv.org/pdf/2206.01062
+docling --pipeline vlm --vlm-model granite_docling https://arxiv.org/pdf/2206.01062
 ```
 
 For all available options, run `docling --help` or check the [CLI reference](../reference/cli.md).
docs/usage/vision_models.md (2 changes, vendored)
@@ -45,6 +45,8 @@ The following table reports the models currently available out-of-the-box.
 
 | Model instance | Model | Framework | Device | Num pages | Inference time (sec) |
 | ---------------|------ | --------- | ------ | --------- | ---------------------|
+| `vlm_model_specs.GRANITEDOCLING_TRANSFORMERS` | [ibm-granite/granite-docling-258M](https://huggingface.co/ibm-granite/granite-docling-258M) | `Transformers/AutoModelForVision2Seq` | MPS | 1 | - |
+| `vlm_model_specs.GRANITEDOCLING_MLX` | [ibm-granite/granite-docling-258M-mlx-bf16](https://huggingface.co/ibm-granite/granite-docling-258M-mlx-bf16) | `MLX`| MPS | 1 | - |
 | `vlm_model_specs.SMOLDOCLING_TRANSFORMERS` | [ds4sd/SmolDocling-256M-preview](https://huggingface.co/ds4sd/SmolDocling-256M-preview) | `Transformers/AutoModelForVision2Seq` | MPS | 1 | 102.212 |
 | `vlm_model_specs.SMOLDOCLING_MLX` | [ds4sd/SmolDocling-256M-preview-mlx-bf16](https://huggingface.co/ds4sd/SmolDocling-256M-preview-mlx-bf16) | `MLX`| MPS | 1 | 6.15453 |
 | `vlm_model_specs.QWEN25_VL_3B_MLX` | [mlx-community/Qwen2.5-VL-3B-Instruct-bf16](https://huggingface.co/mlx-community/Qwen2.5-VL-3B-Instruct-bf16) | `MLX`| MPS | 1 | 23.4951 |
@@ -83,7 +83,7 @@ nav:
     - "Custom conversion": examples/custom_convert.py
     - "Batch conversion": examples/batch_convert.py
     - "Multi-format conversion": examples/run_with_formats.py
-    - "VLM pipeline with SmolDocling": examples/minimal_vlm_pipeline.py
+    - "VLM pipeline with GraniteDocling": examples/minimal_vlm_pipeline.py
    - "VLM pipeline with remote model": examples/vlm_pipeline_api_model.py
     - "VLM comparison": examples/compare_vlm_models.py
     - "ASR pipeline with Whisper": examples/minimal_asr_pipeline.py