mirror of
				https://github.com/allenai/olmocr.git
				synced 2025-11-03 19:45:41 +00:00 
			
		
		
		
	Merge branch 'main' into jakep/v0.5
This commit is contained in:
		
						commit
						2a47b7067d
					
				
							
								
								
									
										13
									
								
								README.md
									
									
									
									
									
								
							
							
						
						
									
										13
									
								
								README.md
									
									
									
									
									
								
							@ -270,17 +270,18 @@ vllm serve allenai/olmOCR-2-7B-1025-FP8 --served-model-name olmocr --max-model-l
 | 
			
		||||
 | 
			
		||||
We have tested `olmOCR-2-7B-1025-FP8` on these external model providers and confirmed that they work
 | 
			
		||||
 | 
			
		||||
| Provider  | $/1M Input tokens | $/1M Output tokens | Example Command                                                                                                                                                            |
 | 
			
		||||
|-----------|-------------------|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | 
			
		||||
| [DeepInfra](https://deepinfra.com/) | $0.14             | $0.80              | `python -m olmocr.pipeline ./localworkspace1 --server https://api.deepinfra.com/v1/openai --api_key DfXXXXXXX --model allenai/olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf` |
 | 
			
		||||
| [Parasail](https://www.saas.parasail.io/serverless?name=olmocr-7b-1025-fp8)  | $0.10             | $0.20              | `python -m olmocr.pipeline ./localworkspace1 --server https://api.parasail.io/v1 --api_key psk-XXXXX --model allenai/olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf`              |
 | 
			
		||||
|           |                   |                    |                                                                                                                                                                            |
 | 
			
		||||
|                                                                             | $/1M Input tokens | $/1M Output tokens | Example Command                                                                                                                                                                |
 | 
			
		||||
|-----------------------------------------------------------------------------|-------------------|--------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | 
			
		||||
| [DeepInfra](https://deepinfra.com/)                                         | $0.09             | $0.19              | `python -m olmocr.pipeline ./localworkspace1 --server https://api.deepinfra.com/v1/openai --api_key DfXXXXXXX --model allenai/olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf` |
 | 
			
		||||
| [Parasail](https://www.saas.parasail.io/serverless?name=olmocr-7b-1025-fp8) | $0.10             | $0.20              | `python -m olmocr.pipeline ./localworkspace1 --server https://api.parasail.io/v1 --api_key psk-XXXXX --model allenai/olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf`          |
 | 
			
		||||
| [Cirrascale](https://ai2endpoints.cirrascale.ai/models/overview)            | $0.07             | $0.15              | `python -m olmocr.pipeline ./localworkspace1 --server https://ai2endpoints.cirrascale.ai/api --api_key sk-XXXXXXX --model olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf`     |
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Notes on arguments
 | 
			
		||||
- `--server`: Defines the OpenAI-compatible endpoint: ex `https://api.deepinfra.com/v1/openai`
 | 
			
		||||
- `--api_key`: Your API key, passed in via the Authorization Bearer HTTP header
 | 
			
		||||
- `--pages_per_group`: You may want a smaller number of pages per group, as many external providers have lower concurrent request limits
 | 
			
		||||
- `--model`: The model identifier, ex. `allenai/olmOCR-7B-1025`, different providers have different names, and if you run locally, you can use `olmocr`
 | 
			
		||||
- `--model`: The model identifier, ex. `allenai/olmOCR-2-7B-1025`, different providers have different names, and if you run locally, you can use `olmocr`
 | 
			
		||||
- Other arguments work the same as with local inference
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -68,7 +68,7 @@ def get_subdir_and_pdf_name(source_file_path):
 | 
			
		||||
    return None, None
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_markdown_files(entries, output_dir):
 | 
			
		||||
def create_markdown_files(entries, output_dir, repeat_index=1):
 | 
			
		||||
    """Create markdown files from JSONL entries in subdir/{pdf_name}.md format."""
 | 
			
		||||
    output_path = Path(output_dir)
 | 
			
		||||
 | 
			
		||||
@ -87,7 +87,7 @@ def create_markdown_files(entries, output_dir):
 | 
			
		||||
        subdir_path = output_path / subdir
 | 
			
		||||
        subdir_path.mkdir(parents=True, exist_ok=True)
 | 
			
		||||
 | 
			
		||||
        md_filename = f"{pdf_name}_pg1_repeat1.md"
 | 
			
		||||
        md_filename = f"{pdf_name}_pg1_repeat{repeat_index}.md"
 | 
			
		||||
        md_filepath = subdir_path / md_filename
 | 
			
		||||
 | 
			
		||||
        assert len(pdf_entries) == 1, "Expecting just one entry mapping to each file, otherwise something is wrong"
 | 
			
		||||
@ -100,7 +100,7 @@ def create_markdown_files(entries, output_dir):
 | 
			
		||||
            blank_files += 1
 | 
			
		||||
 | 
			
		||||
        created_files.add((subdir, pdf_name))
 | 
			
		||||
        print(f"Created: {subdir}/{md_filename}_pg1_repeat1")
 | 
			
		||||
        print(f"Created: {subdir}/{md_filename}_pg1_repeat{repeat_index}")
 | 
			
		||||
 | 
			
		||||
    print(f"Created {len(created_files)} markdown files from JSONL data")
 | 
			
		||||
    print(f"{blank_files} of those had empty content")
 | 
			
		||||
@ -143,7 +143,7 @@ def find_missing_pdfs(pdf_sources, created_files, base_bench_path):
 | 
			
		||||
    return missing_pdfs
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_blank_markdown_files(missing_pdfs, output_dir):
 | 
			
		||||
def create_blank_markdown_files(missing_pdfs, output_dir, repeat_index=1):
 | 
			
		||||
    """Create blank markdown files for missing PDFs in subdir/{pdf_name}.md format."""
 | 
			
		||||
    output_path = Path(output_dir)
 | 
			
		||||
 | 
			
		||||
@ -154,7 +154,7 @@ def create_blank_markdown_files(missing_pdfs, output_dir):
 | 
			
		||||
        subdir_path = output_path / subdir
 | 
			
		||||
        subdir_path.mkdir(parents=True, exist_ok=True)
 | 
			
		||||
 | 
			
		||||
        md_filename = f"{pdf_name}_pg1_repeat1.md"
 | 
			
		||||
        md_filename = f"{pdf_name}_pg1_repeat{repeat_index}.md"
 | 
			
		||||
        md_filepath = subdir_path / md_filename
 | 
			
		||||
 | 
			
		||||
        content = ""
 | 
			
		||||
@ -162,7 +162,7 @@ def create_blank_markdown_files(missing_pdfs, output_dir):
 | 
			
		||||
        with open(md_filepath, "w", encoding="utf-8") as f:
 | 
			
		||||
            f.write(content)
 | 
			
		||||
 | 
			
		||||
        print(f"Created blank: {subdir}/{md_filename}_pg1_repeat1")
 | 
			
		||||
        print(f"Created blank: {subdir}/{md_filename}_pg1_repeat{repeat_index}")
 | 
			
		||||
 | 
			
		||||
    print(f"Created {len(missing_pdfs)} blank markdown files for missing PDFs")
 | 
			
		||||
 | 
			
		||||
@ -172,6 +172,9 @@ def main():
 | 
			
		||||
    parser.add_argument("workspace_dir", help="Your workspace directory")
 | 
			
		||||
    parser.add_argument("output_dir", nargs="?", default="./markdown_output", help="Output directory for markdown files (default: ./markdown_output)")
 | 
			
		||||
    parser.add_argument("--bench-path", default="../olmOCR-bench", help="Path to olmOCR-bench directory (default: ../olmOCR-bench)")
 | 
			
		||||
    parser.add_argument(
 | 
			
		||||
        "--repeat-index", default=1, type=int, help="If you want to run multiple workspaces as different repeats to get a better average, set this"
 | 
			
		||||
    )
 | 
			
		||||
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    input_dir = args.workspace_dir + "/results"
 | 
			
		||||
 | 
			
		||||
@ -11,8 +11,6 @@
 | 
			
		||||
#   ./scripts/run_benchmark.sh --beaker-image jakep/olmocr-benchmark-0.3.3-780bc7d934
 | 
			
		||||
#  With repeats parameter: run the pipeline multiple times for increased accuracy (default: 1)
 | 
			
		||||
#   ./scripts/run_benchmark.sh --repeats 3
 | 
			
		||||
#  With noperf parameter: skip the performance test job
 | 
			
		||||
#   ./scripts/run_benchmark.sh --noperf
 | 
			
		||||
 | 
			
		||||
set -e
 | 
			
		||||
 | 
			
		||||
@ -22,7 +20,6 @@ CLUSTER=""
 | 
			
		||||
BENCH_BRANCH=""
 | 
			
		||||
BEAKER_IMAGE=""
 | 
			
		||||
REPEATS="1"
 | 
			
		||||
NOPERF="0"
 | 
			
		||||
while [[ $# -gt 0 ]]; do
 | 
			
		||||
    case $1 in
 | 
			
		||||
        --model)
 | 
			
		||||
@ -45,13 +42,9 @@ while [[ $# -gt 0 ]]; do
 | 
			
		||||
            REPEATS="$2"
 | 
			
		||||
            shift 2
 | 
			
		||||
            ;;
 | 
			
		||||
        --noperf)
 | 
			
		||||
            NOPERF="1"
 | 
			
		||||
            shift
 | 
			
		||||
            ;;
 | 
			
		||||
        *)
 | 
			
		||||
            echo "Unknown option: $1"
 | 
			
		||||
            echo "Usage: $0 [--model MODEL_NAME] [--cluster CLUSTER_NAME] [--benchbranch BRANCH_NAME] [--beaker-image IMAGE_NAME] [--repeats NUMBER] [--noperf]"
 | 
			
		||||
            echo "Usage: $0 [--model MODEL_NAME] [--cluster CLUSTER_NAME] [--benchbranch BRANCH_NAME] [--beaker-image IMAGE_NAME] [--repeats NUMBER]"
 | 
			
		||||
            exit 1
 | 
			
		||||
            ;;
 | 
			
		||||
    esac
 | 
			
		||||
@ -121,7 +114,7 @@ cat << 'EOF' > /tmp/run_benchmark_experiment.py
 | 
			
		||||
import sys
 | 
			
		||||
from beaker import Beaker, ExperimentSpec, TaskSpec, TaskContext, ResultSpec, TaskResources, ImageSource, Priority, Constraints, EnvVar
 | 
			
		||||
 | 
			
		||||
# Get image tag, beaker user, git branch, git hash, optional model, cluster, bench branch, repeats, and noperf from command line
 | 
			
		||||
# Get image tag, beaker user, git branch, git hash, optional model, cluster, bench branch, and repeats from command line
 | 
			
		||||
image_tag = sys.argv[1]
 | 
			
		||||
beaker_user = sys.argv[2]
 | 
			
		||||
git_branch = sys.argv[3]
 | 
			
		||||
@ -130,7 +123,6 @@ model = None
 | 
			
		||||
cluster = None
 | 
			
		||||
bench_branch = None
 | 
			
		||||
repeats = 1
 | 
			
		||||
noperf = False
 | 
			
		||||
 | 
			
		||||
# Parse remaining arguments
 | 
			
		||||
arg_idx = 5
 | 
			
		||||
@ -144,9 +136,6 @@ while arg_idx < len(sys.argv):
 | 
			
		||||
    elif sys.argv[arg_idx] == "--repeats":
 | 
			
		||||
        repeats = int(sys.argv[arg_idx + 1])
 | 
			
		||||
        arg_idx += 2
 | 
			
		||||
    elif sys.argv[arg_idx] == "--noperf":
 | 
			
		||||
        noperf = True
 | 
			
		||||
        arg_idx += 1
 | 
			
		||||
    else:
 | 
			
		||||
        model = sys.argv[arg_idx]
 | 
			
		||||
        arg_idx += 1
 | 
			
		||||
@ -254,57 +243,54 @@ print(f"View at: https://beaker.org/ex/{experiment.id}")
 | 
			
		||||
print("-------")
 | 
			
		||||
print("")
 | 
			
		||||
 | 
			
		||||
# Second experiment: Performance test job (skip if --noperf is set)
 | 
			
		||||
if not noperf:
 | 
			
		||||
    perf_pipeline_cmd = "python -m olmocr.pipeline ./localworkspace1 --markdown --pdfs s3://ai2-oe-data/jakep/olmocr/olmOCR-mix-0225/benchmark_set/*.pdf"
 | 
			
		||||
    if model:
 | 
			
		||||
        perf_pipeline_cmd += f" --model {model}"
 | 
			
		||||
# Second experiment: Performance test job
 | 
			
		||||
perf_pipeline_cmd = "python -m olmocr.pipeline ./localworkspace1 --markdown --pdfs s3://ai2-oe-data/jakep/olmocr/olmOCR-mix-0225/benchmark_set/*.pdf"
 | 
			
		||||
if model:
 | 
			
		||||
    perf_pipeline_cmd += f" --model {model}"
 | 
			
		||||
 | 
			
		||||
    perf_commands = []
 | 
			
		||||
    if has_aws_creds:
 | 
			
		||||
        perf_commands.extend([
 | 
			
		||||
            "mkdir -p ~/.aws",
 | 
			
		||||
            'echo "$AWS_CREDENTIALS_FILE" > ~/.aws/credentials'
 | 
			
		||||
        ])
 | 
			
		||||
    perf_commands.append(perf_pipeline_cmd)
 | 
			
		||||
perf_commands = []
 | 
			
		||||
if has_aws_creds:
 | 
			
		||||
    perf_commands.extend([
 | 
			
		||||
        "mkdir -p ~/.aws",
 | 
			
		||||
        'echo "$AWS_CREDENTIALS_FILE" > ~/.aws/credentials'
 | 
			
		||||
    ])
 | 
			
		||||
perf_commands.append(perf_pipeline_cmd)
 | 
			
		||||
 | 
			
		||||
    # Build performance task spec
 | 
			
		||||
    perf_task_spec_args = {
 | 
			
		||||
        "name": "olmocr-performance",
 | 
			
		||||
        "image": ImageSource(beaker=image_ref),
 | 
			
		||||
        "command": [
 | 
			
		||||
            "bash", "-c",
 | 
			
		||||
            " && ".join(perf_commands)
 | 
			
		||||
        ],
 | 
			
		||||
        "context": TaskContext(
 | 
			
		||||
            priority=Priority.normal,
 | 
			
		||||
            preemptible=True,
 | 
			
		||||
        ),
 | 
			
		||||
        # Need to reserve all 8 gpus for performance spec or else benchmark results can be off (1 for titan-cirrascale)
 | 
			
		||||
        "resources": TaskResources(gpu_count=1 if cluster == "ai2/titan-cirrascale" else 8),
 | 
			
		||||
        "constraints": Constraints(cluster=[cluster] if cluster else ["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
 | 
			
		||||
        "result": ResultSpec(path="/noop-results"),
 | 
			
		||||
    }
 | 
			
		||||
# Build performance task spec
 | 
			
		||||
perf_task_spec_args = {
 | 
			
		||||
    "name": "olmocr-performance",
 | 
			
		||||
    "image": ImageSource(beaker=image_ref),
 | 
			
		||||
    "command": [
 | 
			
		||||
        "bash", "-c",
 | 
			
		||||
        " && ".join(perf_commands)
 | 
			
		||||
    ],
 | 
			
		||||
    "context": TaskContext(
 | 
			
		||||
        priority=Priority.normal,
 | 
			
		||||
        preemptible=True,
 | 
			
		||||
    ),
 | 
			
		||||
    # Need to reserve all 8 gpus for performance spec or else benchmark results can be off (1 for titan-cirrascale)
 | 
			
		||||
    "resources": TaskResources(gpu_count=1 if cluster == "ai2/titan-cirrascale" else 8),
 | 
			
		||||
    "constraints": Constraints(cluster=[cluster] if cluster else ["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
 | 
			
		||||
    "result": ResultSpec(path="/noop-results"),
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
    # Add env vars if AWS credentials exist
 | 
			
		||||
    if has_aws_creds:
 | 
			
		||||
        perf_task_spec_args["env_vars"] = [
 | 
			
		||||
            EnvVar(name="AWS_CREDENTIALS_FILE", secret=aws_creds_secret)
 | 
			
		||||
        ]
 | 
			
		||||
# Add env vars if AWS credentials exist
 | 
			
		||||
if has_aws_creds:
 | 
			
		||||
    perf_task_spec_args["env_vars"] = [
 | 
			
		||||
        EnvVar(name="AWS_CREDENTIALS_FILE", secret=aws_creds_secret)
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
    # Create performance experiment spec
 | 
			
		||||
    perf_experiment_spec = ExperimentSpec(
 | 
			
		||||
        description=f"OlmOCR Performance Test - Branch: {git_branch}, Commit: {git_hash}",
 | 
			
		||||
        budget="ai2/oe-base",
 | 
			
		||||
        tasks=[TaskSpec(**perf_task_spec_args)],
 | 
			
		||||
    )
 | 
			
		||||
# Create performance experiment spec
 | 
			
		||||
perf_experiment_spec = ExperimentSpec(
 | 
			
		||||
    description=f"OlmOCR Performance Test - Branch: {git_branch}, Commit: {git_hash}",
 | 
			
		||||
    budget="ai2/oe-base",
 | 
			
		||||
    tasks=[TaskSpec(**perf_task_spec_args)],
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
    # Create the performance experiment
 | 
			
		||||
    perf_experiment = b.experiment.create(spec=perf_experiment_spec, workspace="ai2/olmocr")
 | 
			
		||||
    print(f"Created performance experiment: {perf_experiment.id}")
 | 
			
		||||
    print(f"View at: https://beaker.org/ex/{perf_experiment.id}")
 | 
			
		||||
else:
 | 
			
		||||
    print("Skipping performance experiment (--noperf flag set)")
 | 
			
		||||
# Create the performance experiment
 | 
			
		||||
perf_experiment = b.experiment.create(spec=perf_experiment_spec, workspace="ai2/olmocr")
 | 
			
		||||
print(f"Created performance experiment: {perf_experiment.id}")
 | 
			
		||||
print(f"View at: https://beaker.org/ex/{perf_experiment.id}")
 | 
			
		||||
EOF
 | 
			
		||||
 | 
			
		||||
# Run the Python script to create the experiments
 | 
			
		||||
@ -333,11 +319,6 @@ if [ "$REPEATS" != "1" ]; then
 | 
			
		||||
    CMD="$CMD --repeats $REPEATS"
 | 
			
		||||
fi
 | 
			
		||||
 | 
			
		||||
if [ "$NOPERF" == "1" ]; then
 | 
			
		||||
    echo "Skipping performance test (--noperf flag set)"
 | 
			
		||||
    CMD="$CMD --noperf"
 | 
			
		||||
fi
 | 
			
		||||
 | 
			
		||||
eval $CMD
 | 
			
		||||
 | 
			
		||||
# Clean up temporary file
 | 
			
		||||
 | 
			
		||||
@ -1,12 +1,14 @@
 | 
			
		||||
#!/bin/bash
 | 
			
		||||
 | 
			
		||||
# Runs an olmocr-bench run using the full pipeline (no fallback) for infrapartner testing
 | 
			
		||||
# This version skips the performance task and adds support for --server, --model, and --beaker-secret arguments
 | 
			
		||||
#
 | 
			
		||||
# Usage examples:
 | 
			
		||||
#   ./scripts/run_infrapartner_benchmark.sh --server http://example.com --model your-model-name --beaker-secret my-api-key-secret
 | 
			
		||||
#   ./scripts/run_infrapartner_benchmark.sh --beaker-image jakep/olmocr-benchmark-0.3.3-780bc7d934 --server http://example.com
 | 
			
		||||
 | 
			
		||||
# Just make a beaker secret in the ai2/olmocr workspace with your API key
 | 
			
		||||
#
 | 
			
		||||
# Testing parasail
 | 
			
		||||
# scripts/run_infrapartner_benchmark.sh --server https://api.parasail.io/v1 --model allenai/olmOCR-2-7B-1025 --beaker-secret jakep-parasail-api-key 
 | 
			
		||||
#
 | 
			
		||||
# Testing deepinfra
 | 
			
		||||
# scripts/run_infrapartner_benchmark.sh --server https://api.deepinfra.com/v1/openai --model allenai/olmOCR-2-7B-1025 --beaker-secret jakep-deepinfra-api-key
 | 
			
		||||
set -e
 | 
			
		||||
 | 
			
		||||
# Parse command line arguments
 | 
			
		||||
 | 
			
		||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user