mirror of https://github.com/docling-project/docling.git, synced 2025-06-27 05:20:05 +00:00

* feat: adding new vlm-models support
* fixed the transformers
* got microsoft/Phi-4-multimodal-instruct to work
* working on vlm's
* refactoring the VLM part
* all working, now serious refactoring necessary
* refactoring the download_model
* added the formulate_prompt
* pixtral 12b runs via MLX and native transformers
* added the VlmPredictionToken
* refactoring minimal_vlm_pipeline
* fixed the MyPy
* added pipeline_model_specializations file
* need to get Phi4 working again ...
* finalising last points for vlms support
* fixed the pipeline for Phi4
* streamlining all code
* reformatted the code
* fixing the tests
* added the html backend to the VLM pipeline
* fixed the static load_from_doctags
* restore stable imports
* use AutoModelForVision2Seq for Pixtral and review example (including rename)
* remove unused value
* refactor instances of VLM models
* skip compare example in CI
* use lowercase and uppercase only
* add new minimal_vlm example and refactor pipeline_options_vlm_model for cleaner import
* rename pipeline_vlm_model_spec
* move more argument to options and simplify model init
* add supported_devices
* remove not-needed function
* exclude minimal_vlm
* missing file
* add message for transformers version
* rename to specs
* use module import and remove MLX from non-darwin
* remove hf_vlm_model and add extra_generation_args
* use single HF VLM model class
* remove torch type
* add docs for vision models

Signed-off-by: Peter Staar <taa@zurich.ibm.com>
Signed-off-by: Michele Dolfi <dol@zurich.ibm.com>
Co-authored-by: Michele Dolfi <dol@zurich.ibm.com>
110 lines
3.4 KiB
Python
Vendored
import logging
import os
from pathlib import Path

import requests
from dotenv import load_dotenv

from docling.datamodel.base_models import InputFormat
from docling.datamodel.pipeline_options import (
    VlmPipelineOptions,
)
from docling.datamodel.pipeline_options_vlm_model import ApiVlmOptions, ResponseFormat
from docling.document_converter import DocumentConverter, PdfFormatOption
from docling.pipeline.vlm_pipeline import VlmPipeline


def ollama_vlm_options(model: str, prompt: str):
    options = ApiVlmOptions(
        url="http://localhost:11434/v1/chat/completions",  # the default Ollama endpoint
        params=dict(
            model=model,
        ),
        prompt=prompt,
        timeout=90,
        scale=1.0,
        response_format=ResponseFormat.MARKDOWN,
    )
    return options


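# --- Illustrative addition (not part of the original example) ---
# The same ApiVlmOptions pattern should also work for any other server exposing an
# OpenAI-compatible /v1/chat/completions endpoint, e.g. a locally running vLLM or
# LM Studio instance. This is a minimal sketch: the function name, the url argument,
# and the parameter values below are assumptions to adapt to your own deployment.
def openai_compatible_vlm_options(model: str, prompt: str, url: str):
    options = ApiVlmOptions(
        url=url,  # e.g. "http://localhost:8000/v1/chat/completions" for a default vLLM server
        params=dict(
            model=model,
        ),
        prompt=prompt,
        timeout=90,
        scale=1.0,
        response_format=ResponseFormat.MARKDOWN,
    )
    return options

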
def watsonx_vlm_options(model: str, prompt: str):
    load_dotenv()
    api_key = os.environ.get("WX_API_KEY")
    project_id = os.environ.get("WX_PROJECT_ID")

    # Exchange the IBM Cloud API key for a short-lived IAM access token.
    def _get_iam_access_token(api_key: str) -> str:
        res = requests.post(
            url="https://iam.cloud.ibm.com/identity/token",
            headers={
                "Content-Type": "application/x-www-form-urlencoded",
            },
            data=f"grant_type=urn:ibm:params:oauth:grant-type:apikey&apikey={api_key}",
        )
        res.raise_for_status()
        api_out = res.json()
        print(f"{api_out=}")
        return api_out["access_token"]

    options = ApiVlmOptions(
        url="https://us-south.ml.cloud.ibm.com/ml/v1/text/chat?version=2023-05-29",
        params=dict(
            model_id=model,
            project_id=project_id,
            parameters=dict(
                max_new_tokens=400,
            ),
        ),
        headers={
            "Authorization": "Bearer " + _get_iam_access_token(api_key=api_key),
        },
        prompt=prompt,
        timeout=60,
        response_format=ResponseFormat.MARKDOWN,
    )
    return options


def main():
    logging.basicConfig(level=logging.INFO)

    # input_doc_path = Path("./tests/data/pdf/2206.01062.pdf")
    input_doc_path = Path("./tests/data/pdf/2305.03393v1-pg9.pdf")

    pipeline_options = VlmPipelineOptions(
        enable_remote_services=True  # <-- this is required!
    )

    # ApiVlmOptions() allows interfacing with APIs that support the multi-modal chat
    # interface. Below are a few examples of how to configure them.

    # One possibility is self-hosting a model, e.g. via Ollama.
    # Example using the Granite Vision model (the following lines are active by default):
    pipeline_options.vlm_options = ollama_vlm_options(
        model="granite3.2-vision:2b",
        prompt="OCR the full page to markdown.",
    )

    # Another possibility is using online services, e.g. watsonx.ai.
    # Using it requires setting the env variables WX_API_KEY and WX_PROJECT_ID.
    # Uncomment the following lines for this option:
    # pipeline_options.vlm_options = watsonx_vlm_options(
    #     model="ibm/granite-vision-3-2-2b", prompt="OCR the full page to markdown."
    # )

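    # Illustrative addition (not part of the original example): the generic
    # OpenAI-compatible helper sketched above can be pointed at your own endpoint.
    # The model name and URL below are placeholders.
    # pipeline_options.vlm_options = openai_compatible_vlm_options(
    #     model="your-vision-model",
    #     prompt="OCR the full page to markdown.",
    #     url="http://localhost:8000/v1/chat/completions",
    # )
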
    # Create the DocumentConverter and launch the conversion.
    doc_converter = DocumentConverter(
        format_options={
            InputFormat.PDF: PdfFormatOption(
                pipeline_options=pipeline_options,
                pipeline_cls=VlmPipeline,
            )
        }
    )
    result = doc_converter.convert(input_doc_path)
    print(result.document.export_to_markdown())


if __name__ == "__main__":
    main()
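# Note (illustrative, not part of the original file): to try the Ollama option you
# would typically have a local Ollama server running and pull the model first, e.g.
#   ollama pull granite3.2-vision:2b
# For the watsonx.ai option, set WX_API_KEY and WX_PROJECT_ID in the environment or
# in a .env file, as read by load_dotenv() above.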