Ugh, lost some training runs because files got saved to the wrong place

Jake Poznanski 2025-07-09 17:57:34 +00:00
parent 02f0706edc
commit ca8e503870
4 changed files with 12 additions and 12 deletions

View File

@@ -2,7 +2,7 @@
 # Project metadata
 project_name: olmocr-qwen-vl-training
-run_name: qwen2.5-vl-7b-finetune-day2-json
+run_name: qwen2.5-vl-7b-finetune-day3-json

 # Model configuration
 model:
@@ -58,7 +58,7 @@ dataset:
 # Training configuration
 training:
-  output_dir: /home/ubuntu/olmocr-trainer/
+  output_dir: /weka/oe-data-default/jakep/olmocr-trainer/
   num_train_epochs: 1

   # Batch size and accumulation
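The output_dir move is the actual fix for the lost runs: checkpoints were landing on the node's local disk under /home/ubuntu, which doesn't survive the instance, instead of the persistent /weka mount. A pre-flight guard in the launch script can catch this class of mistake early; below is a minimal sketch (the guard function and the assumption that /weka is the only durable prefix are hypothetical, not part of this repo):

```python
import os
import sys

# Assumption: /weka is the durable network mount; anything else (e.g. the
# instance's /home) is treated as ephemeral and rejected before training starts.
PERSISTENT_PREFIXES = ("/weka/",)

def check_output_dir(output_dir: str) -> None:
    path = os.path.abspath(output_dir)
    if not path.startswith(PERSISTENT_PREFIXES):
        sys.exit(
            f"Refusing to train: output_dir {path!r} is not under "
            f"{PERSISTENT_PREFIXES}; checkpoints would die with the node."
        )
    # Fail early if the mount isn't attached, rather than mid-run.
    os.makedirs(path, exist_ok=True)

check_output_dir("/weka/oe-data-default/jakep/olmocr-trainer/")
```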
@@ -72,7 +72,7 @@ training:
   # Learning rate
   learning_rate: 2e-5
-  lr_scheduler_type: cosine
+  lr_scheduler_type: linear
   warmup_ratio: 0.1

   # Optimization
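The scheduler change from cosine to linear is independent of the storage fix. With warmup_ratio: 0.1 both schedules ramp up over the first 10% of steps; after that, linear decays the learning rate to zero at a constant rate, while cosine decays gently at first and flattens near zero at the end, the two curves crossing at the midpoint of the decay. A quick way to compare them, sketched with transformers.get_scheduler (the same machinery the HF Trainer resolves lr_scheduler_type through; the step count and dummy parameter are placeholders):

```python
import torch
from transformers import get_scheduler

TOTAL_STEPS = 1000                     # placeholder; really steps_per_epoch * epochs
WARMUP_STEPS = int(0.1 * TOTAL_STEPS)  # mirrors warmup_ratio: 0.1

for name in ("cosine", "linear"):
    # Dummy parameter so the optimizer has something to schedule.
    opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=2e-5)
    sched = get_scheduler(
        name,
        optimizer=opt,
        num_warmup_steps=WARMUP_STEPS,
        num_training_steps=TOTAL_STEPS,
    )
    lrs = []
    for _ in range(TOTAL_STEPS):
        opt.step()
        sched.step()
        lrs.append(sched.get_last_lr()[0])
    # Cosine sits higher than linear at 25% of the run and lower at 75%.
    q1, q3 = TOTAL_STEPS // 4, 3 * TOTAL_STEPS // 4
    print(f"{name}: lr@25% = {lrs[q1]:.2e}, lr@75% = {lrs[q3]:.2e}")
```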

View File

@@ -2,7 +2,7 @@
 # Project metadata
 project_name: olmocr-qwen-vl-training
-run_name: qwen2.5-vl-7b-finetune-day2-1280
+run_name: qwen2.5-vl-7b-finetune-day3-1280

 # Model configuration
 model:
@@ -58,7 +58,7 @@ dataset:
 # Training configuration
 training:
-  output_dir: /home/ubuntu/olmocr-trainer/
+  output_dir: /weka/oe-data-default/jakep/olmocr-trainer/
   num_train_epochs: 1

   # Batch size and accumulation
@@ -72,7 +72,7 @@ training:
   # Learning rate
   learning_rate: 2e-5
-  lr_scheduler_type: cosine
+  lr_scheduler_type: linear
   warmup_ratio: 0.1

   # Optimization

View File

@@ -2,7 +2,7 @@
 # Project metadata
 project_name: olmocr-qwen-vl-training
-run_name: qwen2.5-vl-7b-finetune-day2-1280-noanchor
+run_name: qwen2.5-vl-7b-finetune-day3-1280-noanchor

 # Model configuration
 model:
@@ -58,7 +58,7 @@ dataset:
 # Training configuration
 training:
-  output_dir: /home/ubuntu/olmocr-trainer/
+  output_dir: /weka/oe-data-default/jakep/olmocr-trainer/
   num_train_epochs: 1

   # Batch size and accumulation
@@ -72,7 +72,7 @@ training:
   # Learning rate
   learning_rate: 2e-5
-  lr_scheduler_type: cosine
+  lr_scheduler_type: linear
   warmup_ratio: 0.1

   # Optimization

View File

@@ -2,7 +2,7 @@
 # Project metadata
 project_name: olmocr-qwen-vl-training
-run_name: qwen2.5-vl-7b-finetune-day2-1600
+run_name: qwen2.5-vl-7b-finetune-day3-1600

 # Model configuration
 model:
@@ -58,7 +58,7 @@ dataset:
 # Training configuration
 training:
-  output_dir: /home/ubuntu/olmocr-trainer/
+  output_dir: /weka/oe-data-default/jakep/olmocr-trainer/
   num_train_epochs: 1

   # Batch size and accumulation
@@ -72,7 +72,7 @@ training:
   # Learning rate
   learning_rate: 2e-5
-  lr_scheduler_type: cosine
+  lr_scheduler_type: linear
   warmup_ratio: 0.1

   # Optimization