Mirror of https://github.com/allenai/olmocr.git (synced 2025-11-08 06:29:29 +00:00)
Merge branch 'main' into jakep/v0.5

Commit 2a47b7067d
README.md

@@ -270,17 +270,18 @@ vllm serve allenai/olmOCR-2-7B-1025-FP8 --served-model-name olmocr --max-model-l

 We have tested `olmOCR-2-7B-1025-FP8` on these external model providers and confirmed that they work
 | Provider | $/1M Input tokens | $/1M Output tokens | Example Command |
 |----------|-------------------|--------------------|-----------------|
-| [DeepInfra](https://deepinfra.com/) | $0.14 | $0.80 | `python -m olmocr.pipeline ./localworkspace1 --server https://api.deepinfra.com/v1/openai --api_key DfXXXXXXX --model allenai/olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf` |
+| [DeepInfra](https://deepinfra.com/) | $0.09 | $0.19 | `python -m olmocr.pipeline ./localworkspace1 --server https://api.deepinfra.com/v1/openai --api_key DfXXXXXXX --model allenai/olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf` |
 | [Parasail](https://www.saas.parasail.io/serverless?name=olmocr-7b-1025-fp8) | $0.10 | $0.20 | `python -m olmocr.pipeline ./localworkspace1 --server https://api.parasail.io/v1 --api_key psk-XXXXX --model allenai/olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf` |
+| [Cirrascale](https://ai2endpoints.cirrascale.ai/models/overview) | $0.07 | $0.15 | `python -m olmocr.pipeline ./localworkspace1 --server https://ai2endpoints.cirrascale.ai/api --api_key sk-XXXXXXX --model olmOCR-2-7B-1025 --pdfs tests/gnarly_pdfs/*.pdf` |
 Notes on arguments

 - `--server`: Defines the OpenAI-compatible endpoint, e.g. `https://api.deepinfra.com/v1/openai`
 - `--api_key`: Your API key, passed in via the Authorization Bearer HTTP header
 - `--pages_per_group`: You may want a smaller number of pages per group, since many external providers have lower concurrent request limits
-- `--model`: The model identifier, e.g. `allenai/olmOCR-7B-1025`; different providers use different names, and if you run locally, you can use `olmocr`
+- `--model`: The model identifier, e.g. `allenai/olmOCR-2-7B-1025`; different providers use different names, and if you run locally, you can use `olmocr`
 - Other arguments work the same as with local inference
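As an illustrative aside (not part of the README diff above): the note on `--pages_per_group` can be combined with any of the table's example commands when a provider enforces tight concurrency limits. A minimal sketch, reusing the DeepInfra command; the group size of 50 is an arbitrary placeholder, not a tested recommendation:

# Sketch only: flags come from the README above, the value 50 is illustrative
python -m olmocr.pipeline ./localworkspace1 \
    --server https://api.deepinfra.com/v1/openai \
    --api_key DfXXXXXXX \
    --model allenai/olmOCR-2-7B-1025 \
    --pages_per_group 50 \
    --pdfs tests/gnarly_pdfs/*.pdf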
@@ -68,7 +68,7 @@ def get_subdir_and_pdf_name(source_file_path):
     return None, None


-def create_markdown_files(entries, output_dir):
+def create_markdown_files(entries, output_dir, repeat_index=1):
     """Create markdown files from JSONL entries in subdir/{pdf_name}.md format."""
     output_path = Path(output_dir)
@@ -87,7 +87,7 @@ def create_markdown_files(entries, output_dir):
         subdir_path = output_path / subdir
         subdir_path.mkdir(parents=True, exist_ok=True)

-        md_filename = f"{pdf_name}_pg1_repeat1.md"
+        md_filename = f"{pdf_name}_pg1_repeat{repeat_index}.md"
         md_filepath = subdir_path / md_filename

         assert len(pdf_entries) == 1, "Expecting just one entry mapping to each file, otherwise something is wrong"
@@ -100,7 +100,7 @@ def create_markdown_files(entries, output_dir):
             blank_files += 1

         created_files.add((subdir, pdf_name))
-        print(f"Created: {subdir}/{md_filename}_pg1_repeat1")
+        print(f"Created: {subdir}/{md_filename}_pg1_repeat{repeat_index}")

     print(f"Created {len(created_files)} markdown files from JSONL data")
     print(f"{blank_files} of those had empty content")
@@ -143,7 +143,7 @@ def find_missing_pdfs(pdf_sources, created_files, base_bench_path):
     return missing_pdfs


-def create_blank_markdown_files(missing_pdfs, output_dir):
+def create_blank_markdown_files(missing_pdfs, output_dir, repeat_index=1):
     """Create blank markdown files for missing PDFs in subdir/{pdf_name}.md format."""
     output_path = Path(output_dir)
@@ -154,7 +154,7 @@ def create_blank_markdown_files(missing_pdfs, output_dir):
         subdir_path = output_path / subdir
         subdir_path.mkdir(parents=True, exist_ok=True)

-        md_filename = f"{pdf_name}_pg1_repeat1.md"
+        md_filename = f"{pdf_name}_pg1_repeat{repeat_index}.md"
         md_filepath = subdir_path / md_filename

         content = ""
@@ -162,7 +162,7 @@ def create_blank_markdown_files(missing_pdfs, output_dir):
         with open(md_filepath, "w", encoding="utf-8") as f:
             f.write(content)

-        print(f"Created blank: {subdir}/{md_filename}_pg1_repeat1")
+        print(f"Created blank: {subdir}/{md_filename}_pg1_repeat{repeat_index}")

     print(f"Created {len(missing_pdfs)} blank markdown files for missing PDFs")
@@ -172,6 +172,9 @@ def main():
     parser.add_argument("workspace_dir", help="Your workspace directory")
     parser.add_argument("output_dir", nargs="?", default="./markdown_output", help="Output directory for markdown files (default: ./markdown_output)")
     parser.add_argument("--bench-path", default="../olmOCR-bench", help="Path to olmOCR-bench directory (default: ../olmOCR-bench)")
+    parser.add_argument(
+        "--repeat-index", default=1, type=int, help="If you want to run multiple workspaces as different repeats to get a better average, set this"
+    )

     args = parser.parse_args()
     input_dir = args.workspace_dir + "/results"
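The `--repeat-index` argument added above pairs with the `--repeats` option of the benchmark script that follows: each pipeline workspace can be converted into the same markdown output directory under a different repeat number. A rough sketch of that workflow, using a hypothetical script name since the file's real path is not shown in this view:

# Hypothetical filename: substitute the repo's actual conversion script
python convert_workspace_to_md.py ./localworkspace1 ./markdown_output --repeat-index 1
python convert_workspace_to_md.py ./localworkspace2 ./markdown_output --repeat-index 2
# Yields subdir/<pdf_name>_pg1_repeat1.md and subdir/<pdf_name>_pg1_repeat2.md,
# so benchmark scores can be averaged over repeats.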
scripts/run_benchmark.sh

@@ -11,8 +11,6 @@
 # ./scripts/run_benchmark.sh --beaker-image jakep/olmocr-benchmark-0.3.3-780bc7d934
 # With repeats parameter: run the pipeline multiple times for increased accuracy (default: 1)
 # ./scripts/run_benchmark.sh --repeats 3
-# With noperf parameter: skip the performance test job
-# ./scripts/run_benchmark.sh --noperf

 set -e
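For orientation, the flags documented in these comments can be combined in a single call. This is a sketch assembled from the usage string further down, with the model name borrowed from the README table above, not a command quoted from the repo:

./scripts/run_benchmark.sh --model allenai/olmOCR-2-7B-1025 --repeats 3 --beaker-image jakep/olmocr-benchmark-0.3.3-780bc7d934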
@@ -22,7 +20,6 @@ CLUSTER=""
 BENCH_BRANCH=""
 BEAKER_IMAGE=""
 REPEATS="1"
-NOPERF="0"
 while [[ $# -gt 0 ]]; do
   case $1 in
     --model)
@@ -45,13 +42,9 @@ while [[ $# -gt 0 ]]; do
       REPEATS="$2"
       shift 2
       ;;
-    --noperf)
-      NOPERF="1"
-      shift
-      ;;
     *)
       echo "Unknown option: $1"
-      echo "Usage: $0 [--model MODEL_NAME] [--cluster CLUSTER_NAME] [--benchbranch BRANCH_NAME] [--beaker-image IMAGE_NAME] [--repeats NUMBER] [--noperf]"
+      echo "Usage: $0 [--model MODEL_NAME] [--cluster CLUSTER_NAME] [--benchbranch BRANCH_NAME] [--beaker-image IMAGE_NAME] [--repeats NUMBER]"
       exit 1
       ;;
   esac
@@ -121,7 +114,7 @@ cat << 'EOF' > /tmp/run_benchmark_experiment.py
 import sys
 from beaker import Beaker, ExperimentSpec, TaskSpec, TaskContext, ResultSpec, TaskResources, ImageSource, Priority, Constraints, EnvVar

-# Get image tag, beaker user, git branch, git hash, optional model, cluster, bench branch, repeats, and noperf from command line
+# Get image tag, beaker user, git branch, git hash, optional model, cluster, bench branch, and repeats from command line
 image_tag = sys.argv[1]
 beaker_user = sys.argv[2]
 git_branch = sys.argv[3]
@@ -130,7 +123,6 @@ model = None
 cluster = None
 bench_branch = None
 repeats = 1
-noperf = False

 # Parse remaining arguments
 arg_idx = 5
@@ -144,9 +136,6 @@ while arg_idx < len(sys.argv):
     elif sys.argv[arg_idx] == "--repeats":
         repeats = int(sys.argv[arg_idx + 1])
         arg_idx += 2
-    elif sys.argv[arg_idx] == "--noperf":
-        noperf = True
-        arg_idx += 1
     else:
         model = sys.argv[arg_idx]
         arg_idx += 1
@@ -254,8 +243,7 @@ print(f"View at: https://beaker.org/ex/{experiment.id}")
 print("-------")
 print("")

-# Second experiment: Performance test job (skip if --noperf is set)
-if not noperf:
+# Second experiment: Performance test job
 perf_pipeline_cmd = "python -m olmocr.pipeline ./localworkspace1 --markdown --pdfs s3://ai2-oe-data/jakep/olmocr/olmOCR-mix-0225/benchmark_set/*.pdf"
 if model:
     perf_pipeline_cmd += f" --model {model}"
@@ -303,8 +291,6 @@ if not noperf:
 perf_experiment = b.experiment.create(spec=perf_experiment_spec, workspace="ai2/olmocr")
 print(f"Created performance experiment: {perf_experiment.id}")
 print(f"View at: https://beaker.org/ex/{perf_experiment.id}")
-else:
-    print("Skipping performance experiment (--noperf flag set)")
 EOF

 # Run the Python script to create the experiments
@@ -333,11 +319,6 @@ if [ "$REPEATS" != "1" ]; then
     CMD="$CMD --repeats $REPEATS"
 fi

-if [ "$NOPERF" == "1" ]; then
-    echo "Skipping performance test (--noperf flag set)"
-    CMD="$CMD --noperf"
-fi
-
 eval $CMD

 # Clean up temporary file
scripts/run_infrapartner_benchmark.sh

@@ -1,12 +1,14 @@
 #!/bin/bash

 # Runs an olmocr-bench run using the full pipeline (no fallback) for infrapartner testing
-# This version skips the performance task and adds support for --server, --model, and --beaker-secret arguments
 #
-# Usage examples:
-# ./scripts/run_infrapartner_benchmark.sh --server http://example.com --model your-model-name --beaker-secret my-api-key-secret
-# ./scripts/run_infrapartner_benchmark.sh --beaker-image jakep/olmocr-benchmark-0.3.3-780bc7d934 --server http://example.com
+# Just make a beaker secret in the ai2/olmocr workspace with your API key
+#
+# Testing parasail
+# scripts/run_infrapartner_benchmark.sh --server https://api.parasail.io/v1 --model allenai/olmOCR-2-7B-1025 --beaker-secret jakep-parasail-api-key
+#
+# Testing deepinfra
+# scripts/run_infrapartner_benchmark.sh --server https://api.deepinfra.com/v1/openai --model allenai/olmOCR-2-7B-1025 --beaker-secret jakep-deepinfra-api-key
 set -e

 # Parse command line arguments
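The comments above assume your API key already exists as a Beaker secret in the ai2/olmocr workspace. A hedged sketch of creating one with the Beaker CLI follows; the exact `beaker secret write` syntax is an assumption here and should be verified against `beaker secret --help`, and the key value is a placeholder:

# Assumed Beaker CLI syntax; confirm locally before relying on it
beaker secret write --workspace ai2/olmocr jakep-parasail-api-key "psk-XXXXX"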