Refactoring prompts into their own new folder

This commit is contained in:
Jake Poznanski 2024-09-30 18:48:17 +00:00
parent d74f9a352b
commit da1982acb8
4 changed files with 29 additions and 26 deletions

View File

@ -0,0 +1 @@
from .prompts import *

View File

@ -0,0 +1,24 @@
# This is the prompt we use for getting GPT-4o to convert documents into our silver training data
def build_openai_silver_data_prompt(base_text: str) -> str:
    """Build the GPT-4o user prompt used to generate silver training transcriptions.

    Args:
        base_text: Raw text previously extracted for the page; it is embedded
            verbatim between the RAW_TEXT_START / RAW_TEXT_END sentinels.

    Returns:
        The complete prompt string (instructions followed by the raw text).
    """
    instructions = "\n".join(
        [
            # First two sentences sit on one output line, separated by a space.
            "Below is the image of one page of a PDF document, as well as some raw textual content that was previously extracted for it. "
            "Just return the plain text representation of this document as if you were reading it naturally.",
            "Turn equations into a LaTeX representation, and tables into markdown format. Remove the headers and footers, but keep references and footnotes.",
            "Read any natural handwriting.",
            "This is likely one page out of several in the document, so be sure to preserve any sentences that come from the previous page, or continue onto the next page, exactly as they are.",
            "If there is no text at all that you think you should read, just output [NO TEXT].",
            "If the page has no English text on it at all, just output [NO ENGLISH TEXT].",
            "Do not hallucinate.",
        ]
    )
    return f"{instructions}\nRAW_TEXT_START\n{base_text}\nRAW_TEXT_END"
# This is a base prompt that will be used for training and running the fine tuned model
# It's simplified from the prompt which was used to generate the silver data, and can change from dataset to dataset
def build_finetuning_prompt(base_text: str) -> str:
    """Build the simplified prompt used to train and run the fine-tuned model.

    Intentionally shorter than the silver-data generation prompt; the wording
    may vary from dataset to dataset.

    Args:
        base_text: Raw extracted page text, inserted between the
            RAW_TEXT_START / RAW_TEXT_END sentinels.

    Returns:
        The complete prompt string.
    """
    preamble = (
        "Below is the image of one page of a document, as well as some raw textual content that was previously extracted for it. "
        "Just return the plain text representation of this document as if you were reading it naturally.\n"
        "Do not hallucinate.\n"
    )
    return preamble + f"RAW_TEXT_START\n{base_text}\nRAW_TEXT_END"

View File

@ -12,23 +12,11 @@ from typing import Generator
from concurrent.futures import ThreadPoolExecutor, as_completed
from urllib.parse import urlparse
# reuse mise pdf filtering base code
from pdelfin.prompts import build_openai_silver_data_prompt
from pdelfin.filter import PdfFilter
# NOTE(review): presumably the maximum pixel dimension for rendered page
# images — confirm against the resize/rasterization call site.
TARGET_IMAGE_DIM = 2048
def _build_prompt(base_text: str) -> str:
return (
f"Below is the image of one page of a PDF document, as well as some raw textual content that was previously extracted for it. "
f"Just return the plain text representation of this document as if you were reading it naturally.\n"
f"Turn equations into a LaTeX representation, and tables into markdown format. Remove the headers and footers, but keep references and footnotes.\n"
f"Read any natural handwriting.\n"
f"This is likely one page out of several in the document, so be sure to preserve any sentences that come from the previous page, or continue onto the next page, exactly as they are.\n"
f"If there is no text at all that you think you should read, just output [NO TEXT].\n"
f"If the page has no English text on it at all, just output [NO ENGLISH TEXT].\n"
f"Do not hallucinate.\n"
f"RAW_TEXT_START\n{base_text}\nRAW_TEXT_END"
)
# Module-level PdfFilter instance created once at import time.
# NOTE(review): shared by all callers in this module — confirm PdfFilter is
# safe to reuse across threads if used with the ThreadPoolExecutor above.
pdf_filter = PdfFilter()
@ -78,7 +66,7 @@ def build_page_query(local_pdf_path: str, pretty_pdf_path: str, page: int) -> di
{
"role": "user",
"content": [
{"type": "text", "text": _build_prompt(base_text)},
{"type": "text", "text": build_openai_silver_data_prompt(base_text)},
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{image_base64}"}}
],
}

View File

@ -4,6 +4,7 @@ from PIL import Image
import base64
import torch # Make sure to import torch as it's used in the DataCollator
from pdelfin.prompts import build_finetuning_prompt
def filter_by_max_seq_len(example, processor, max_prompt_len: int=2000, max_response_len: int=2000):
if len(processor.tokenizer.tokenize(example["input_prompt_text"])) > max_prompt_len:
@ -15,17 +16,6 @@ def filter_by_max_seq_len(example, processor, max_prompt_len: int=2000, max_resp
return True
# This is a base prompt that will be used for training and running the fine tuned model
# It's simplified from the prompt which was used to generate the silver data, and can change from dataset to dataset
def _build_finetuning_prompt(base_text: str) -> str:
return (
f"Below is the image of one page of a document, as well as some raw textual content that was previously extracted for it. "
f"Just return the plain text representation of this document as if you were reading it naturally.\n"
f"Do not hallucinate.\n"
f"RAW_TEXT_START\n{base_text}\nRAW_TEXT_END"
)
def prepare_data_for_qwen2_training(example, processor, add_batch_dim=False):
# Prepare messages
messages = [
@ -36,7 +26,7 @@ def prepare_data_for_qwen2_training(example, processor, add_batch_dim=False):
"type": "image",
"image": example["input_prompt_image_base64"] # Placeholder
},
{"type": "text", "text": _build_finetuning_prompt(example["raw_page_text"])},
{"type": "text", "text": build_finetuning_prompt(example["raw_page_text"])},
],
}
]