Adding support for smaller error bars in benchmarking

Jake Poznanski 2025-10-24 16:49:49 +00:00
parent 15496b973f
commit 10ab6a60e0
2 changed files with 72 additions and 33 deletions
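
Why repeats shrink the error bars, as a back-of-the-envelope sketch: each repeat is an independent pass of the pipeline over the same PDFs, so averaging N repeat scores cuts the standard error of the mean by roughly a factor of sqrt(N). A minimal illustration in Python (the scores below are hypothetical, not real benchmark output):

import math
import statistics

# Hypothetical per-repeat scores for one benchmark category,
# e.g. from a run with --repeats 3.
scores = [0.812, 0.797, 0.805]

mean = statistics.mean(scores)
# Standard error of the mean falls as 1/sqrt(n), so three repeats
# tighten the error bar by about 1.7x versus a single run.
stderr = statistics.stdev(scores) / math.sqrt(len(scores))
print(f"score: {mean:.3f} +/- {stderr:.3f}")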

olmocr/bench/scripts/workspace_to_bench.py

@@ -68,7 +68,7 @@ def get_subdir_and_pdf_name(source_file_path):
     return None, None


-def create_markdown_files(entries, output_dir):
+def create_markdown_files(entries, output_dir, repeat_index=1):
     """Create markdown files from JSONL entries in subdir/{pdf_name}.md format."""
     output_path = Path(output_dir)
@@ -87,7 +87,7 @@ def create_markdown_files(entries, output_dir):
         subdir_path = output_path / subdir
         subdir_path.mkdir(parents=True, exist_ok=True)

-        md_filename = f"{pdf_name}_pg1_repeat1.md"
+        md_filename = f"{pdf_name}_pg1_repeat{repeat_index}.md"
         md_filepath = subdir_path / md_filename

         assert len(pdf_entries) == 1, "Expecting just one entry mapping to each file, otherwise something is wrong"
@@ -100,7 +100,7 @@ def create_markdown_files(entries, output_dir):
             blank_files += 1

         created_files.add((subdir, pdf_name))
-        print(f"Created: {subdir}/{md_filename}_pg1_repeat1")
+        print(f"Created: {subdir}/{md_filename}_pg1_repeat{repeat_index}")

     print(f"Created {len(created_files)} markdown files from JSONL data")
     print(f"{blank_files} of those had empty content")
@@ -143,7 +143,7 @@ def find_missing_pdfs(pdf_sources, created_files, base_bench_path):
     return missing_pdfs


-def create_blank_markdown_files(missing_pdfs, output_dir):
+def create_blank_markdown_files(missing_pdfs, output_dir, repeat_index=1):
     """Create blank markdown files for missing PDFs in subdir/{pdf_name}.md format."""
     output_path = Path(output_dir)
@@ -154,7 +154,7 @@ def create_blank_markdown_files(missing_pdfs, output_dir):
         subdir_path = output_path / subdir
         subdir_path.mkdir(parents=True, exist_ok=True)

-        md_filename = f"{pdf_name}_pg1_repeat1.md"
+        md_filename = f"{pdf_name}_pg1_repeat{repeat_index}.md"
         md_filepath = subdir_path / md_filename

         content = ""
@@ -162,7 +162,7 @@ def create_blank_markdown_files(missing_pdfs, output_dir):
         with open(md_filepath, "w", encoding="utf-8") as f:
             f.write(content)

-        print(f"Created blank: {subdir}/{md_filename}_pg1_repeat1")
+        print(f"Created blank: {subdir}/{md_filename}_pg1_repeat{repeat_index}")

     print(f"Created {len(missing_pdfs)} blank markdown files for missing PDFs")
@@ -172,6 +172,9 @@ def main():
     parser.add_argument("workspace_dir", help="Your workspace directory")
     parser.add_argument("output_dir", nargs="?", default="./markdown_output", help="Output directory for markdown files (default: ./markdown_output)")
     parser.add_argument("--bench-path", default="../olmOCR-bench", help="Path to olmOCR-bench directory (default: ../olmOCR-bench)")
+    parser.add_argument(
+        "--repeat-index", default=1, type=int, help="If you want to run multiple workspaces as different repeats to get a better average, set this"
+    )
     args = parser.parse_args()

     input_dir = args.workspace_dir + "/results"
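
With the new flag, the converter can be invoked once per pipeline workspace so that each run lands in the benchmark tree under its own repeat index. A sketch of that per-repeat invocation (mirroring the loop added to run_benchmark.sh below; the workspace names follow the script's localworkspace{i} convention):

import subprocess

# One conversion per repeat workspace; output files come out as
# {subdir}/{pdf_name}_pg1_repeat{i}.md instead of always repeat1.
for i in (1, 2, 3):
    subprocess.run(
        [
            "python", "olmocr/bench/scripts/workspace_to_bench.py",
            f"localworkspace{i}/", "olmOCR-bench/bench_data/olmocr",
            "--bench-path", "./olmOCR-bench/",
            "--repeat-index", str(i),
        ],
        check=True,
    )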

scripts/run_benchmark.sh

@@ -5,25 +5,30 @@
 # ./scripts/run_benchmark.sh
 # With model parameter: for testing custom models
 # ./scripts/run_benchmark.sh --model your-model-name
+# With cluster parameter: specify a specific cluster to use
+# ./scripts/run_benchmark.sh --cluster ai2/titan-cirrascale
 # With beaker image: skip Docker build and use provided Beaker image
 # ./scripts/run_benchmark.sh --beaker-image jakep/olmocr-benchmark-0.3.3-780bc7d934
+# With repeats parameter: run the pipeline multiple times for increased accuracy (default: 1)
+# ./scripts/run_benchmark.sh --repeats 3

 set -e

 # Parse command line arguments
 MODEL=""
-B200_MODE=""
+CLUSTER=""
 BENCH_BRANCH=""
 BEAKER_IMAGE=""
+REPEATS="1"

 while [[ $# -gt 0 ]]; do
     case $1 in
         --model)
             MODEL="$2"
             shift 2
             ;;
-        --b200)
-            B200_MODE="true"
-            shift
+        --cluster)
+            CLUSTER="$2"
+            shift 2
             ;;
         --benchbranch)
             BENCH_BRANCH="$2"
@@ -33,9 +38,13 @@ while [[ $# -gt 0 ]]; do
             BEAKER_IMAGE="$2"
             shift 2
             ;;
+        --repeats)
+            REPEATS="$2"
+            shift 2
+            ;;
         *)
             echo "Unknown option: $1"
-            echo "Usage: $0 [--model MODEL_NAME] [--b200] [--benchbranch BRANCH_NAME] [--beaker-image IMAGE_NAME]"
+            echo "Usage: $0 [--model MODEL_NAME] [--cluster CLUSTER_NAME] [--benchbranch BRANCH_NAME] [--beaker-image IMAGE_NAME] [--repeats NUMBER]"
             exit 1
             ;;
     esac
@@ -105,24 +114,28 @@ cat << 'EOF' > /tmp/run_benchmark_experiment.py
 import sys
 from beaker import Beaker, ExperimentSpec, TaskSpec, TaskContext, ResultSpec, TaskResources, ImageSource, Priority, Constraints, EnvVar

-# Get image tag, beaker user, git branch, git hash, optional model, b200 mode, and bench branch from command line
+# Get image tag, beaker user, git branch, git hash, optional model, cluster, bench branch, and repeats from command line
 image_tag = sys.argv[1]
 beaker_user = sys.argv[2]
 git_branch = sys.argv[3]
 git_hash = sys.argv[4]
 model = None
-b200_mode = False
+cluster = None
 bench_branch = None
+repeats = 1

 # Parse remaining arguments
 arg_idx = 5
 while arg_idx < len(sys.argv):
-    if sys.argv[arg_idx] == "--b200":
-        b200_mode = True
-        arg_idx += 1
+    if sys.argv[arg_idx] == "--cluster":
+        cluster = sys.argv[arg_idx + 1]
+        arg_idx += 2
     elif sys.argv[arg_idx] == "--benchbranch":
         bench_branch = sys.argv[arg_idx + 1]
         arg_idx += 2
+    elif sys.argv[arg_idx] == "--repeats":
+        repeats = int(sys.argv[arg_idx + 1])
+        arg_idx += 2
     else:
         model = sys.argv[arg_idx]
         arg_idx += 1
@@ -130,10 +143,7 @@ while arg_idx < len(sys.argv):
 # Initialize Beaker client
 b = Beaker.from_env(default_workspace="ai2/olmocr")

-# Build the pipeline command with optional model parameter
-pipeline_cmd = "python -m olmocr.pipeline ./localworkspace --markdown --pdfs ./olmOCR-bench/bench_data/pdfs/**/*.pdf"
-if model:
-    pipeline_cmd += f" --model {model}"
+# Note: pipeline commands will be built in the loop based on repeats

 # Check if AWS credentials secret exists
 aws_creds_secret = f"{beaker_user}-AWS_CREDENTIALS_FILE"
@@ -162,13 +172,34 @@ if bench_branch:
 commands.extend([
     git_clone_cmd,
     "cd olmOCR-bench && git lfs pull && cd ..",
-    pipeline_cmd,
-    "python olmocr/bench/scripts/workspace_to_bench.py localworkspace/ olmOCR-bench/bench_data/olmocr --bench-path ./olmOCR-bench/",
-    "pip install s5cmd",
-    "s5cmd cp localworkspace/ s3://ai2-oe-data/jakep/olmocr-bench-runs/$BEAKER_WORKLOAD_ID/",
-    "python -m olmocr.bench.benchmark --dir ./olmOCR-bench/bench_data"
 ])

+# Run pipeline multiple times based on repeats
+for i in range(1, repeats + 1):
+    workspace_dir = f"./localworkspace{i}"
+    pipeline_cmd = f"python -m olmocr.pipeline {workspace_dir} --markdown --pdfs ./olmOCR-bench/bench_data/pdfs/**/*.pdf"
+    if model:
+        pipeline_cmd += f" --model {model}"
+    commands.append(pipeline_cmd)
+
+# Process all workspaces with workspace_to_bench.py
+for i in range(1, repeats + 1):
+    workspace_dir = f"localworkspace{i}/"
+    workspace_to_bench_cmd = f"python olmocr/bench/scripts/workspace_to_bench.py {workspace_dir} olmOCR-bench/bench_data/olmocr --bench-path ./olmOCR-bench/ --repeat-index {i}"
+    commands.append(workspace_to_bench_cmd)
+
+# Copy all workspaces to S3 and run benchmark
+commands.extend([
+    "pip install s5cmd",
+])
+
+# Copy each workspace to S3
+for i in range(1, repeats + 1):
+    workspace_dir = f"localworkspace{i}/"
+    commands.append(f"s5cmd cp {workspace_dir} s3://ai2-oe-data/jakep/olmocr-bench-runs/$BEAKER_WORKLOAD_ID/workspace{i}/")
+
+commands.append("python -m olmocr.bench.benchmark --dir ./olmOCR-bench/bench_data")

 # Build task spec with optional env vars
 # If image_tag contains '/', it's already a full beaker image reference
 if '/' in image_tag:
@@ -188,7 +219,7 @@ task_spec_args = {
         preemptible=True,
     ),
     "resources": TaskResources(gpu_count=1),
-    "constraints": Constraints(cluster=["ai2/titan-cirrascale"] if b200_mode else ["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
+    "constraints": Constraints(cluster=[cluster] if cluster else ["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
     "result": ResultSpec(path="/noop-results"),
 }
@@ -213,7 +244,7 @@ print("-------")
 print("")

 # Second experiment: Performance test job
-perf_pipeline_cmd = "python -m olmocr.pipeline ./localworkspace --markdown --pdfs s3://ai2-oe-data/jakep/olmocr/olmOCR-mix-0225/benchmark_set/*.pdf"
+perf_pipeline_cmd = "python -m olmocr.pipeline ./localworkspace1 --markdown --pdfs s3://ai2-oe-data/jakep/olmocr/olmOCR-mix-0225/benchmark_set/*.pdf"
 if model:
     perf_pipeline_cmd += f" --model {model}"
@@ -237,9 +268,9 @@ perf_task_spec_args = {
         priority=Priority.normal,
         preemptible=True,
     ),
-    # Need to reserve all 8 gpus for performance spec or else benchmark results can be off (1 for b200 mode)
-    "resources": TaskResources(gpu_count=1 if b200_mode else 8),
-    "constraints": Constraints(cluster=["ai2/titan-cirrascale"] if b200_mode else ["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
+    # Need to reserve all 8 gpus for performance spec or else benchmark results can be off (1 for titan-cirrascale)
+    "resources": TaskResources(gpu_count=1 if cluster == "ai2/titan-cirrascale" else 8),
+    "constraints": Constraints(cluster=[cluster] if cluster else ["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
     "result": ResultSpec(path="/noop-results"),
 }
@@ -273,9 +304,9 @@ if [ -n "$MODEL" ]; then
     CMD="$CMD $MODEL"
 fi

-if [ -n "$B200_MODE" ]; then
-    echo "Using B200 mode: ai2/titan-cirrascale cluster with 1 GPU for perf task"
-    CMD="$CMD --b200"
+if [ -n "$CLUSTER" ]; then
+    echo "Using cluster: $CLUSTER"
+    CMD="$CMD --cluster $CLUSTER"
 fi

 if [ -n "$BENCH_BRANCH" ]; then
if [ -n "$BENCH_BRANCH" ]; then if [ -n "$BENCH_BRANCH" ]; then
@ -283,6 +314,11 @@ if [ -n "$BENCH_BRANCH" ]; then
CMD="$CMD --benchbranch $BENCH_BRANCH" CMD="$CMD --benchbranch $BENCH_BRANCH"
fi fi
if [ "$REPEATS" != "1" ]; then
echo "Using repeats: $REPEATS"
CMD="$CMD --repeats $REPEATS"
fi
eval $CMD eval $CMD
# Clean up temporary file # Clean up temporary file
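
Taken together, for --repeats 2 the Beaker task would execute a command sequence like the one sketched below. This is assembled from the loops in the diff above with illustrative values for repeats and model, not output captured from a real run:

repeats, model = 2, None  # illustrative values

commands = []
# One pipeline pass per repeat, each writing to its own workspace
for i in range(1, repeats + 1):
    cmd = f"python -m olmocr.pipeline ./localworkspace{i} --markdown --pdfs ./olmOCR-bench/bench_data/pdfs/**/*.pdf"
    if model:
        cmd += f" --model {model}"
    commands.append(cmd)
# Tag each workspace's markdown output with its repeat index
for i in range(1, repeats + 1):
    commands.append(
        f"python olmocr/bench/scripts/workspace_to_bench.py localworkspace{i}/ "
        f"olmOCR-bench/bench_data/olmocr --bench-path ./olmOCR-bench/ --repeat-index {i}"
    )
# Archive every workspace, then score all repeats in one benchmark pass
commands.append("pip install s5cmd")
for i in range(1, repeats + 1):
    commands.append(f"s5cmd cp localworkspace{i}/ s3://ai2-oe-data/jakep/olmocr-bench-runs/$BEAKER_WORKLOAD_ID/workspace{i}/")
commands.append("python -m olmocr.bench.benchmark --dir ./olmOCR-bench/bench_data")

print("\n".join(commands))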