"""Image Reader.

A parser for image files.

"""
|
|
|
|
|
|
|
|
import re
|
|
|
|
from pathlib import Path
|
|
|
|
from typing import Dict, List, Optional
|
|
|
|
|
|
|
|
from gpt_index.readers.base import BaseReader
|
|
|
|
from gpt_index.readers.schema.base import Document
|
|
|
|
|
|
|
|
|
|
|
|
class ImageReader(BaseReader):
    """Image parser.

    Extract text from images using DONUT, or pytesseract for plain text.
    """

    def __init__(self, text_type: str = "plain_text") -> None:
        """Init parser.

        Args:
            text_type: ``"plain_text"`` selects the pytesseract OCR backend;
                any other value selects the DONUT
                ``naver-clova-ix/donut-base-finetuned-cord-v2`` model.
        """
        if text_type == "plain_text":
            import pytesseract

            # pytesseract needs no preprocessor; the module itself serves as
            # the "model" (it exposes image_to_string directly).
            processor = None
            model = pytesseract
        else:
            from transformers import DonutProcessor, VisionEncoderDecoderModel

            processor = DonutProcessor.from_pretrained(
                "naver-clova-ix/donut-base-finetuned-cord-v2"
            )
            model = VisionEncoderDecoderModel.from_pretrained(
                "naver-clova-ix/donut-base-finetuned-cord-v2"
            )
        self.parser_config = {"processor": processor, "model": model}

    def load_data(
        self, file: Path, extra_info: Optional[Dict] = None
    ) -> List[Document]:
        """Parse an image file into a single Document holding its text.

        Args:
            file: Path to the image file to parse.
            extra_info: Optional metadata attached to the resulting Document.

        Returns:
            A one-element list containing the extracted-text Document.
        """
        model = self.parser_config["model"]
        processor = self.parser_config["processor"]

        # A processor is only present for the DONUT backend (see __init__);
        # the pytesseract backend stores processor=None.
        if processor:
            text = self._parse_with_donut(file, model, processor)
        else:
            text = self._parse_with_tesseract(file, model)

        return [Document(text, extra_info=extra_info)]

    def _parse_with_donut(self, file: Path, model, processor) -> str:
        """Run DONUT generation on the image and return the decoded text."""
        import torch
        from PIL import Image

        device = "cuda" if torch.cuda.is_available() else "cpu"
        model.to(device)

        # Load the document image; the context manager closes the underlying
        # file handle once the tensors have been prepared (the original code
        # leaked it).
        with Image.open(file) as raw:
            image = raw.convert("RGB") if raw.mode != "RGB" else raw

            # Prepare decoder inputs from the CORD-v2 task prompt.
            task_prompt = "<s_cord-v2>"
            decoder_input_ids = processor.tokenizer(
                task_prompt, add_special_tokens=False, return_tensors="pt"
            ).input_ids
            pixel_values = processor(image, return_tensors="pt").pixel_values

        # Inference only: disable autograd bookkeeping to save memory/time.
        with torch.no_grad():
            outputs = model.generate(
                pixel_values.to(device),
                decoder_input_ids=decoder_input_ids.to(device),
                max_length=model.decoder.config.max_position_embeddings,
                early_stopping=True,
                pad_token_id=processor.tokenizer.pad_token_id,
                eos_token_id=processor.tokenizer.eos_token_id,
                use_cache=True,
                num_beams=1,
                bad_words_ids=[[processor.tokenizer.unk_token_id]],
                return_dict_in_generate=True,
            )

        sequence = processor.batch_decode(outputs.sequences)[0]
        sequence = sequence.replace(processor.tokenizer.eos_token, "").replace(
            processor.tokenizer.pad_token, ""
        )
        # Remove the first task start token (e.g. "<s_cord-v2>").
        return re.sub(r"<.*?>", "", sequence, count=1).strip()

    def _parse_with_tesseract(self, file: Path, model) -> str:
        """OCR the image with pytesseract and return the raw string."""
        from PIL import Image

        # Context manager closes the image file handle after OCR (the
        # original code leaked it).
        with Image.open(file) as image:
            return model.image_to_string(image)
|