128batch config, wsd config

Jake Poznanski 2025-07-11 17:19:02 +00:00
parent 0c773c40af
commit 24a3fb87e8
2 changed files with 96 additions and 1 deletion


@@ -74,7 +74,6 @@ training:
  learning_rate: 2e-5
  lr_scheduler_type: warmup_stable_decay
  lr_scheduler_kwargs:
    num_warmup_steps: 1000
    num_decay_steps: 2000
  warmup_ratio: 0.1
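For context on the warmup_stable_decay change above: instead of decaying continuously the way a linear schedule does, WSD ramps the learning rate up, holds it flat, and only decays over a final window. A minimal sketch of the multiplier applied to learning_rate, assuming linear ramps on both ends and a hypothetical total_steps; the exact curve depends on the scheduler implementation the trainer resolves this name to:

def wsd_multiplier(step: int, total_steps: int,
                   num_warmup_steps: int = 1000,
                   num_decay_steps: int = 2000) -> float:
    """Warmup-stable-decay factor applied to the base learning rate."""
    if step < num_warmup_steps:
        # Linear warmup from 0 to the base LR.
        return step / max(1, num_warmup_steps)
    if step < total_steps - num_decay_steps:
        # Stable plateau at the base LR.
        return 1.0
    # Linear decay to 0 over the final num_decay_steps.
    return max(0.0, (total_steps - step) / max(1, num_decay_steps))

With these kwargs, a hypothetical 10,000-step run would warm up over the first 1,000 steps, hold 2e-5 until step 8,000, and then decay to zero.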


@@ -0,0 +1,96 @@
# Example OlmOCR Training Configuration

# Project metadata
project_name: olmocr-qwen-vl-training
run_name: qwen2.5-vl-7b-finetune-day3-yaml-1280-noanchor-128batch

# Model configuration
model:
  name: Qwen/Qwen2.5-VL-7B-Instruct
  trust_remote_code: true
  torch_dtype: bfloat16
  use_flash_attention: true
  attn_implementation: flash_attention_2

  # LoRA settings (disabled by default)
  use_lora: false
  # lora_rank: 8
  # lora_alpha: 32
  # lora_dropout: 0.1
  # lora_target_modules:
  #   - q_proj
  #   - v_proj
  #   - k_proj
  #   - o_proj

# Dataset configuration
dataset:
  train:
    - name: processed_01_books_train_iabooks
      root_dir: /weka/oe-data-default/jakep/olmOCR-mix-0225/processed_01_books_train_iabooks/
      pipeline: &basic_pipeline
        - name: FrontMatterParser
          front_matter_class: PageResponse
        - name: PDFRenderer
          target_longest_image_dim: 1280
        - name: StaticLengthDocumentAnchoring
          target_anchor_text_len: -1
        - name: FinetuningPrompt
        - name: FrontMatterOutputFormat
        - name: InstructUserMessages
        - name: Tokenizer
          masking_index: -100
          end_of_message_token: "<|im_end|>"
    - name: processed_00_documents_train_s2pdf
      root_dir: /weka/oe-data-default/jakep/olmOCR-mix-0225/processed_00_documents_train_s2pdf/
      pipeline: *basic_pipeline

  eval:
    - name: processed_00_documents_eval_s2pdf
      root_dir: /weka/oe-data-default/jakep/olmOCR-mix-0225/processed_00_documents_eval_s2pdf/
      pipeline: *basic_pipeline
    - name: processed_01_books_eval_iabooks
      root_dir: /weka/oe-data-default/jakep/olmOCR-mix-0225/processed_01_books_eval_iabooks/
      pipeline: *basic_pipeline

# Training configuration
training:
  output_dir: /weka/oe-data-default/jakep/olmocr-trainer/
  num_train_epochs: 1

  # Batch size and accumulation
  per_device_train_batch_size: 1
  per_device_eval_batch_size: 1
  gradient_accumulation_steps: 128
  gradient_checkpointing: false

  collator_max_token_len: 8192

  # Learning rate
  learning_rate: 5e-5
  lr_scheduler_type: linear
  warmup_ratio: 0.1

  # Optimization
  optim: adamw_torch
  weight_decay: 0.01
  max_grad_norm: 1.0

  # Evaluation and checkpointing
  evaluation_strategy: steps
  eval_steps: 500
  save_strategy: steps
  save_steps: 500
  save_total_limit: 5
  load_best_model_at_end: false  # Needs to be false because it has a problem restoring checkpoints for some reason
  metric_for_best_model: eval_processed_00_documents_eval_s2pdf_loss
  greater_is_better: false

  report_to:
    - wandb
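
One detail worth noting in the dataset section: pipeline: &basic_pipeline defines the preprocessing steps once on the first train dataset, and the *basic_pipeline aliases reuse that exact definition on the other three, so every split is preprocessed identically. A minimal sketch of how a YAML loader resolves this, using PyYAML on a hypothetical two-entry excerpt:

import yaml

excerpt = """
train:
  - name: first_dataset
    pipeline: &basic_pipeline
      - name: PDFRenderer
        target_longest_image_dim: 1280
  - name: second_dataset
    pipeline: *basic_pipeline
"""

doc = yaml.safe_load(excerpt)
# The alias resolves to the same step list, so both entries share one pipeline.
assert doc["train"][0]["pipeline"] == doc["train"][1]["pipeline"]

Editing the anchored list therefore changes preprocessing for all four datasets at once, which is presumably the point of the shared definition.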
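As for the 128batch in the run name: the per-device batch size stays at 1, and the effective batch comes entirely from gradient accumulation. A quick check of the arithmetic; note the global batch additionally scales with the number of data-parallel workers, which this config does not pin down:

per_device_train_batch_size = 1
gradient_accumulation_steps = 128

# Examples contributing to each optimizer step on a single device.
effective_per_device_batch = per_device_train_batch_size * gradient_accumulation_steps
print(effective_per_device_batch)  # 128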