Script adjustment

This commit is contained in:
Jake Poznanski 2024-09-23 14:41:35 -07:00
parent 79feb986a6
commit a30ca16e1f
2 changed files with 16 additions and 15 deletions

View File

@@ -62,19 +62,20 @@ hparams:
warmup_ratio: 0.03
# From https://github.com/QwenLM/Qwen2/blob/main/examples/sft/finetune.py
- lora:
-   rank: 32
-   alpha: 32
-   dropout: 0.05
-   task_type: causal_lm
-   target_modules:
-     - q_proj
-     - k_proj
-     - v_proj
-     - o_proj
-     - gate_proj
-     - up_proj
-     - down_proj
+ # Disable LORA for now, because we want the visual network to get trained too
+ # lora:
+ #   rank: 32
+ #   alpha: 32
+ #   dropout: 0.05
+ #   task_type: causal_lm
+ #   target_modules:
+ #     - q_proj
+ #     - k_proj
+ #     - v_proj
+ #     - o_proj
+ #     - gate_proj
+ #     - up_proj
+ #     - down_proj
save:
path: s3://ai2-oe-data/jakep/experiments/qwen2vl-pdf/v1/models/

View File

@@ -29,8 +29,8 @@ gantry run \
--workspace ai2/oe-data-pdf \
--beaker-image 'lucas/refine-axelot-vllm' \
--venv 'base' \
- --priority high \
- --gpus 8 \
+ --priority normal \
+ --gpus 4 \
--preemptible \
--cluster "ai2/${CLUSTER}*" \
--budget ai2/oe-data \