Mirror of https://github.com/allenai/olmocr.git, synced 2025-10-12 16:52:20 +00:00
Merge remote-tracking branch 'origin/main' into jakep/vllm_perf
Commit 8c62072832
.gitignore (vendored): 1 line changed
@@ -20,6 +20,7 @@ olmOCR-bench/*
 table_data*/
 /synth*/
 dolma_samples/*
 old_train/
 /*.html
 scoreelo.csv
+debug.log
README.md: 26 lines changed
@@ -61,18 +61,6 @@ We also ship a comprehensive benchmark suite covering over 7,000 test cases acro
 </tr>
 </thead>
 <tbody>
-<tr>
-<td align="left">Marker v1.6.2</td>
-<td align="center">24.3</td>
-<td align="center">22.1</td>
-<td align="center">69.8</td>
-<td align="center">24.3</td>
-<td align="center">87.1</td>
-<td align="center">71.0</td>
-<td align="center">76.9</td>
-<td align="center"><strong>99.5</strong></td>
-<td align="center">59.4 ± 1.1</td>
-</tr>
 <tr>
 <td align="left">MinerU v1.3.10</td>
 <td align="center">75.4</td>
@@ -87,7 +75,7 @@ We also ship a comprehensive benchmark suite covering over 7,000 test cases acro
 </tr>
 <tr>
 <td align="left">Mistral OCR API</td>
-<td align="center"><strong>77.2</strong></td>
+<td align="center">77.2</td>
 <td align="center">67.5</td>
 <td align="center">60.6</td>
 <td align="center">29.3</td>
@@ -97,6 +85,18 @@ We also ship a comprehensive benchmark suite covering over 7,000 test cases acro
 <td align="center">99.4</td>
 <td align="center">72.0 ± 1.1</td>
 </tr>
+<tr>
+<td align="left">Marker v1.7.4 (hybrid)</td>
+<td align="center"><strong>77.7</strong></td>
+<td align="center">71.2</td>
+<td align="center"><strong>78.1</strong></td>
+<td align="center">32.3</td>
+<td align="center">83.4</td>
+<td align="center">73.8</td>
+<td align="center">79.0</td>
+<td align="center">99.2</td>
+<td align="center">74.3 ± 1.1</td>
+</tr>
 <tr>
 <td align="left">olmOCR v0.1.68 (pipeline.py)</td>
 <td align="center">75.6</td>
@@ -37,7 +37,7 @@ to run it against your own OCR tools. Your tool just needs to support Markdown o
 <td align="left">GOT OCR</td>
 <td align="center">52.7</td>
 <td align="center">52.0</td>
-<td align="center">0.2</td>
+<td align="center">0.20</td>
 <td align="center">22.1</td>
 <td align="center">93.6</td>
 <td align="center">42.0</td>
@@ -46,16 +46,16 @@ to run it against your own OCR tools. Your tool just needs to support Markdown o
 <td align="center">48.3 ± 1.1</td>
 </tr>
 <tr>
-<td align="left">Marker v1.6.2</td>
-<td align="center">24.3</td>
-<td align="center">22.1</td>
-<td align="center">69.8</td>
-<td align="center">24.3</td>
-<td align="center">87.1</td>
-<td align="center">71.0</td>
-<td align="center">76.9</td>
-<td align="center"><strong>99.5</strong></td>
-<td align="center">59.4 ± 1.1</td>
+<td align="left">Marker v1.7.5 (base)</td>
+<td align="center">76.0</td>
+<td align="center">57.9</td>
+<td align="center">57.6</td>
+<td align="center">27.8</td>
+<td align="center">84.9</td>
+<td align="center">72.9</td>
+<td align="center">84.6</td>
+<td align="center">99.1</td>
+<td align="center">70.1 ± 1.1</td>
 </tr>
 <tr>
 <td align="left">MinerU v1.3.10</td>
@@ -78,7 +78,7 @@ to run it against your own OCR tools. Your tool just needs to support Markdown o
 <td align="center">93.6</td>
 <td align="center">71.3</td>
 <td align="center">77.1</td>
-<td align="center">99.4</td>
+<td align="center"><strong>99.4</strong></td>
 <td align="center">72.0 ± 1.1</td>
 </tr>
 <tr>
@@ -121,7 +121,7 @@ to run it against your own OCR tools. Your tool just needs to support Markdown o
 <td align="left">Gemini Flash 2 (Anchored)</td>
 <td align="center">54.5</td>
 <td align="center">56.1</td>
-<td align="center"><strong>72.1</strong></td>
+<td align="center">72.1</td>
 <td align="center">34.2</td>
 <td align="center">64.7</td>
 <td align="center">61.5</td>
@@ -157,7 +157,7 @@ to run it against your own OCR tools. Your tool just needs to support Markdown o
 <td align="left">olmOCR v0.1.68 (No Anchor)</td>
 <td align="center">72.1</td>
 <td align="center">74.7</td>
-<td align="center">71.5</td>
+<td align="center"><strong>71.5</strong></td>
 <td align="center">43.7</td>
 <td align="center">91.6</td>
 <td align="center">78.5</td>
@@ -288,6 +288,3 @@ We have an internal data annotation tool that can be used to review the question
-```bash
-python -m olmocr.bench.review_app --port 5000 --debug ./olmOCR-bench/bench_data/multi_column.jsonl --force
-```
@@ -4,6 +4,7 @@ import tempfile
 from marker.converters.pdf import PdfConverter
 from marker.models import create_model_dict
 from marker.output import text_from_rendered
+from marker.config.parser import ConfigParser
 from pypdf import PdfReader, PdfWriter
 
 _marker_converter = None
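The `_marker_converter = None` at module scope is a lazy-initialization cache, so marker's model weights load once per process and are reused across calls. A minimal sketch of that pattern (names here are illustrative stand-ins, not from the diff):

```python
_converter = None  # illustrative stand-in for the real PdfConverter cache

def get_converter():
    # Expensive setup runs on the first call only; later calls reuse it.
    global _converter
    if _converter is None:
        _converter = object()  # stand-in for create_model_dict() + PdfConverter(...)
    return _converter

assert get_converter() is get_converter()  # same instance every call
```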
@@ -15,10 +16,22 @@ def run_marker(pdf_path: str, page_num: int = 1) -> str:
     if _marker_converter is None:
         # Create a configuration dictionary with the necessary settings
         config = {
             "texify_inline_spans": True,  # This enables conversion of inline math to LaTeX
+            "force_ocr": True,  # This forces OCR to run on every page
+            "use_llm": False,  # We would prefer to run just plain marker for reporting bench results, not hybrid mode
+            "disable_tqdm": True,  # Disable tqdm for cleaner output
+            "recognition_batch_size": 256,
+            "layout_batch_size": 48,
+            "detection_batch_size": 48,
+            "equation_batch_size": 64,
+            "table_rec_batch_size": 48,
+            "ocr_error_batch_size": 64,
         }
+        config_parser = ConfigParser(config)
 
-        _marker_converter = PdfConverter(artifact_dict=create_model_dict(), config=config)
+        _marker_converter = PdfConverter(
+            artifact_dict=create_model_dict(),
+            config=config_parser.generate_config_dict(),
+        )
 
     # Extract the specific page from the PDF
     pdf_to_process = pdf_path
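For orientation, here is a minimal sketch of driving the reworked converter end to end; `sample.pdf` and the page index are assumptions, and the single-page copy mirrors the "extract the specific page" step that run_marker performs next:

```python
import tempfile

from marker.config.parser import ConfigParser
from marker.converters.pdf import PdfConverter
from marker.models import create_model_dict
from marker.output import text_from_rendered
from pypdf import PdfReader, PdfWriter

# Build the converter once, as in the diff: ConfigParser turns the raw
# dict into marker's resolved configuration.
config_parser = ConfigParser({"texify_inline_spans": True, "force_ocr": True, "use_llm": False})
converter = PdfConverter(
    artifact_dict=create_model_dict(),
    config=config_parser.generate_config_dict(),
)

# Copy a single page into its own temporary PDF before converting.
reader = PdfReader("sample.pdf")  # assumed input path
writer = PdfWriter()
writer.add_page(reader.pages[0])
with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as tf:
    writer.write(tf)

rendered = converter(tf.name)
text, _, _ = text_from_rendered(rendered)
print(text[:200])
```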
@@ -123,6 +123,8 @@ def normalize_text(md_content: str) -> str:
     # Remove markdown bold formatting (** or __ for bold)
     md_content = re.sub(r"\*\*(.*?)\*\*", r"\1", md_content)
     md_content = re.sub(r"__(.*?)__", r"\1", md_content)
+    md_content = re.sub(r"</?b>", "", md_content)  # Remove <b> tags if they exist
+    md_content = re.sub(r"</?i>", "", md_content)  # Remove <i> tags if they exist
 
     # Remove markdown italics formatting (* or _ for italics)
     md_content = re.sub(r"\*(.*?)\*", r"\1", md_content)
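Note the ordering in normalize_text: the double-marker bold patterns must be stripped before the single-asterisk italics pattern, or `\*(.*?)\*` would eat half of each `**` pair; the new lines extend the same cleanup to stray HTML tags. A self-contained sketch of the behavior (the standalone function name is illustrative):

```python
import re

def strip_emphasis(md: str) -> str:
    # Bold first (** and __), then stray HTML tags, then single-marker italics.
    md = re.sub(r"\*\*(.*?)\*\*", r"\1", md)
    md = re.sub(r"__(.*?)__", r"\1", md)
    md = re.sub(r"</?b>", "", md)  # matches both <b> and </b>
    md = re.sub(r"</?i>", "", md)  # matches both <i> and </i>
    md = re.sub(r"\*(.*?)\*", r"\1", md)
    return md

assert strip_emphasis("**bold** and <b>tags</b> and *italics*") == "bold and tags and italics"
```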
@@ -329,7 +329,7 @@ async def process_page(args, worker_id: int, pdf_orig_path: str, pdf_local_path:
 
 
 async def process_pdf(args, worker_id: int, pdf_orig_path: str):
-    with tempfile.NamedTemporaryFile("wb+", suffix=".pdf") as tf:
+    with tempfile.NamedTemporaryFile("wb+", suffix=".pdf", delete=False) as tf:
         try:
             data = await asyncio.to_thread(lambda: get_s3_bytes_with_backoff(pdf_s3, pdf_orig_path))
             tf.write(data)
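A minimal sketch of why `delete=False` matters here: with the default `delete=True` the file vanishes as soon as the `with` block exits, but process_pdf needs the file to stay on disk so later steps can reopen it by name; removal is instead done explicitly (see the finally block added below).

```python
import os
import tempfile

with tempfile.NamedTemporaryFile("wb+", suffix=".pdf", delete=False) as tf:
    tf.write(b"%PDF-1.4\n")  # placeholder bytes
    path = tf.name

assert os.path.exists(path)  # still present after the handle is closed
os.unlink(path)              # explicit cleanup, as in the finally block below
```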
@@ -347,6 +347,7 @@ async def process_pdf(args, worker_id: int, pdf_orig_path: str):
             tf.write(convert_image_to_pdf_bytes(tf.name))
             tf.flush()
 
+        try:
             try:
                 reader = PdfReader(tf.name)
                 num_pages = reader.get_num_pages()
@@ -398,6 +399,9 @@ async def process_pdf(args, worker_id: int, pdf_orig_path: str):
             # You can't build a dolma doc with even 1 failed page, so just get out of here
             # However, you don't want to propagate an exception higher up and cancel the entire work_group
             return None
+        finally:
+            if os.path.exists(tf.name):
+                os.unlink(tf.name)
 
 
 def build_dolma_document(pdf_orig_path, page_results):
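Taken together, the two hunks above give process_pdf a nested-try shape: the inner try isolates parse failures, while the new outer try/finally guarantees the temp file is removed on every exit path, including early returns. A condensed, runnable sketch of that shape (the helper name is illustrative):

```python
import os
from typing import Optional

from pypdf import PdfReader

def page_count_then_cleanup(path: str) -> Optional[int]:
    try:
        try:
            return PdfReader(path).get_num_pages()
        except Exception:
            return None  # corrupt or unreadable PDF: skip this document
    finally:
        # Runs whether we returned a count, returned None, or raised.
        if os.path.exists(path):
            os.unlink(path)
```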
@@ -698,19 +702,31 @@ async def vllm_server_ready():
     raise Exception("vllm server did not become ready after waiting.")
 
 
-async def download_model(model_name_or_path: str):
-    if model_name_or_path.startswith("s3://") or model_name_or_path.startswith("gs://") or model_name_or_path.startswith("weka://"):
-        logger.info(f"Downloading model directory from '{model_name_or_path}'")
-        model_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "olmocr", "model")
-        download_directory([model_name_or_path], model_cache_dir)
-        return model_cache_dir
-    elif os.path.isabs(model_name_or_path) and os.path.isdir(model_name_or_path):
-        logger.info(f"Using local model path at '{model_name_or_path}'")
-        return model_name_or_path
-    else:
-        logger.info(f"Downloading model with hugging face '{model_name_or_path}'")
-        snapshot_download(repo_id=model_name_or_path)
-        return model_name_or_path
+async def download_model(model_name_or_path: str, max_retries: int = 5):
+    for retry in range(max_retries):
+        try:
+            if model_name_or_path.startswith("s3://") or model_name_or_path.startswith("gs://") or model_name_or_path.startswith("weka://"):
+                logger.info(f"Downloading model directory from '{model_name_or_path}'")
+                model_cache_dir = os.path.join(os.path.expanduser("~"), ".cache", "olmocr", "model")
+                # Delete existing model cache directory if it exists
+                if os.path.exists(model_cache_dir):
+                    shutil.rmtree(model_cache_dir)
+                download_directory([model_name_or_path], model_cache_dir)
+                return model_cache_dir
+            elif os.path.isabs(model_name_or_path) and os.path.isdir(model_name_or_path):
+                logger.info(f"Using local model path at '{model_name_or_path}'")
+                return model_name_or_path
+            else:
+                logger.info(f"Downloading model with hugging face '{model_name_or_path}'")
+                snapshot_download(repo_id=model_name_or_path)
+                return model_name_or_path
+        except Exception:
+            if retry == max_retries - 1:
+                raise  # Raise on final attempt and fail the job
+
+            sleep_time = random.randrange(10, 30) * 2**retry
+            logger.exception(f"Could not download model, sleeping for {sleep_time} seconds to retry ({retry + 1}/{max_retries})")
+            await asyncio.sleep(sleep_time)
 
 
 async def metrics_reporter(work_queue):
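The new retry loop is a standard randomized exponential backoff: a base delay of roughly 10 to 30 seconds, doubled on each attempt, so attempt 0 waits ~10-30s, attempt 1 ~20-60s, attempt 2 ~40-120s, and so on. A condensed, reusable sketch of the same shape (the `with_backoff` helper name is illustrative):

```python
import asyncio
import random

async def with_backoff(op, max_retries: int = 5):
    # Randomized base delay, doubled per retry, as in download_model above.
    for retry in range(max_retries):
        try:
            return await op()
        except Exception:
            if retry == max_retries - 1:
                raise  # surface the error after the final attempt
            await asyncio.sleep(random.randrange(10, 30) * 2**retry)
```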
@@ -899,6 +915,7 @@ def print_stats(args, root_work_queue):
             logger.warning(f"Error processing {s3_path}: {e}")
             return 0, 0, 0, 0, 0, set(), 0, 0
 
+    print(f"\nCompleted work items {completed_items:,} out of {total_items:,}: {completed_items/total_items*100:.2f}%")
     print("\nProcessing output files...")
     docs_total = 0
     input_tokens_total = 0
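The new summary line leans on two format specs: `:,` for thousands separators and `:.2f` for a fixed-precision percentage. A quick illustration with made-up counts:

```python
completed_items, total_items = 15_000, 20_000  # made-up counts
print(f"Completed work items {completed_items:,} out of {total_items:,}: {completed_items/total_items*100:.2f}%")
# -> Completed work items 15,000 out of 20,000: 75.00%
```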
@@ -1026,8 +1043,8 @@ async def main():
 
     # Wait a little bit so that not all beaker jobs in a task start at the same time and download the model at the same time
     replica_count = int(os.environ.get("BEAKER_REPLICA_COUNT", "1"))
-    interval = 10 if (replica_count - 1) * 10 <= 240 else 240 / max(1, replica_count - 1)
-    sleep_time = int(int(os.environ.get("BEAKER_REPLICA_RANK", "0")) * interval)
+    interval = 10 if (replica_count - 1) * 10 <= 30 else 30 / max(1, replica_count - 1)
+    sleep_time = int(os.environ.get("BEAKER_REPLICA_RANK", "0")) * interval
     logger.info(f"Beaker job sleeping for {sleep_time} seconds to stagger model downloads")
     await asyncio.sleep(sleep_time)
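The effect of tightening the cap from 240s to 30s: the total stagger across all replicas never exceeds 30 seconds, however large the job. A worked sketch of the arithmetic (replica counts are hypothetical):

```python
def stagger_sleep_seconds(rank: int, replica_count: int) -> float:
    # Mirrors the logic above: 10s per rank until the total spread would
    # exceed 30s, after which the interval shrinks so the last replica
    # still starts within 30s of the first.
    interval = 10 if (replica_count - 1) * 10 <= 30 else 30 / max(1, replica_count - 1)
    return rank * interval

assert stagger_sleep_seconds(2, 3) == 20       # small job: full 10s spacing
assert stagger_sleep_seconds(15, 16) == 30.0   # big job: capped at 30s total
```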
@@ -64,7 +64,7 @@ data = {
     "MinerU",
     "Gemini Flash 2",
     "Gemini Flash 2 (Batch)",
-    "Marker v1.6.2",
+    "Marker v1.7.5",
     "Ours",
     "Qwen 2 VL",
     "Qwen 2.5 VL",
@@ -77,7 +77,7 @@ data = {
     61.5,  # MinerU
     63.8,  # Gemini Flash 2 (Anchored)
     63.8,  # Same performance for batch
-    59.4,  # marker v1.6.2
+    70.1,  # marker v1.7.5 base
     77.4,  # Ours (performance is the same across hardware)
     31.5,  # Qwen2VL
     65.5,  # Qwen2.5VL
@@ -94,7 +94,7 @@ model_categories = {
     "MinerU": "Open Source Tool",
     "Gemini Flash 2": "Commercial VLM",
     "Gemini Flash 2 (Batch)": "Commercial VLM",
-    "Marker v1.6.2": "Open Source Tool",
+    "Marker v1.7.5": "Open Source Tool",
     "Ours": "Ours",
     "Qwen 2 VL": "Open VLM",
     "Qwen 2.5 VL": "Open VLM",
@@ -132,7 +132,7 @@ model_label_offsets = {
     "MinerU": [-15, -20],
     "Gemini Flash 2": [-10, 10],
     "Gemini Flash 2 (Batch)": [-50, -15],
-    "Marker v1.6.2": [-35, -20],
+    "Marker v1.7.5": [-20, 15],
     "Ours": [-20, 10],
     "Qwen 2 VL": [-35, 10],
     "Qwen 2.5 VL": [-35, 10],
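For context, per-model offsets like these are typically fed to matplotlib's annotate() as point offsets from each marker. A minimal sketch (the plotted x values and axis labels here are assumptions; the scores and offsets come from the data above):

```python
import matplotlib.pyplot as plt

points = {"Marker v1.7.5": (84.6, 70.1), "Ours": (99.0, 77.4)}  # x values illustrative
offsets = {"Marker v1.7.5": [-20, 15], "Ours": [-20, 10]}

fig, ax = plt.subplots()
for name, (x, y) in points.items():
    ax.scatter(x, y)
    # textcoords="offset points" nudges the label (dx, dy) points from its marker
    ax.annotate(name, (x, y), xytext=offsets[name], textcoords="offset points")
ax.set_xlabel("x (assumed: throughput or cost)")
ax.set_ylabel("olmOCR-bench overall score")
fig.savefig("benchmark_scatter.png")
```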
@@ -104,7 +104,7 @@ except:
     has_aws_creds = False
     print(f"AWS credentials secret not found: {aws_creds_secret}")
 
-# Build commands list
+# First experiment: Original benchmark job
 commands = []
 if has_aws_creds:
     commands.extend([
@@ -142,21 +142,71 @@ if has_aws_creds:
         EnvVar(name="AWS_CREDENTIALS_FILE", secret=aws_creds_secret)
     ]
 
-# Create experiment spec
+# Create first experiment spec
 experiment_spec = ExperimentSpec(
     description=f"OlmOCR Benchmark Run - Branch: {git_branch}, Commit: {git_hash}",
     budget="ai2/oe-data",
     tasks=[TaskSpec(**task_spec_args)],
 )
 
-# Create the experiment
+# Create the first experiment
 experiment = b.experiment.create(spec=experiment_spec, workspace="ai2/olmocr")
-print(f"Created experiment: {experiment.id}")
+print(f"Created benchmark experiment: {experiment.id}")
 print(f"View at: https://beaker.org/ex/{experiment.id}")
+print("-------")
+print("")
+
+# Second experiment: Performance test job
+perf_pipeline_cmd = "python -m olmocr.pipeline ./localworkspace --markdown --pdfs s3://ai2-oe-data/jakep/olmocr/olmOCR-mix-0225/benchmark_set/*.pdf"
+if model:
+    perf_pipeline_cmd += f" --model {model}"
+
+perf_commands = []
+if has_aws_creds:
+    perf_commands.extend([
+        "mkdir -p ~/.aws",
+        'echo "$AWS_CREDENTIALS_FILE" > ~/.aws/credentials'
+    ])
+perf_commands.append(perf_pipeline_cmd)
+
+# Build performance task spec
+perf_task_spec_args = {
+    "name": "olmocr-performance",
+    "image": ImageSource(beaker=f"{beaker_user}/{image_tag}"),
+    "command": [
+        "bash", "-c",
+        " && ".join(perf_commands)
+    ],
+    "context": TaskContext(
+        priority=Priority.normal,
+        preemptible=True,
+    ),
+    "resources": TaskResources(gpu_count=1),
+    "constraints": Constraints(cluster=["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
+    "result": ResultSpec(path="/noop-results"),
+}
+
+# Add env vars if AWS credentials exist
+if has_aws_creds:
+    perf_task_spec_args["env_vars"] = [
+        EnvVar(name="AWS_CREDENTIALS_FILE", secret=aws_creds_secret)
+    ]
+
+# Create performance experiment spec
+perf_experiment_spec = ExperimentSpec(
+    description=f"OlmOCR Performance Test - Branch: {git_branch}, Commit: {git_hash}",
+    budget="ai2/oe-data",
+    tasks=[TaskSpec(**perf_task_spec_args)],
+)
+
+# Create the performance experiment
+perf_experiment = b.experiment.create(spec=perf_experiment_spec, workspace="ai2/olmocr")
+print(f"Created performance experiment: {perf_experiment.id}")
+print(f"View at: https://beaker.org/ex/{perf_experiment.id}")
 EOF
 
-# Run the Python script to create the experiment
-echo "Creating Beaker experiment..."
+# Run the Python script to create the experiments
+echo "Creating Beaker experiments..."
 if [ -n "$MODEL" ]; then
     echo "Using model: $MODEL"
     $PYTHON /tmp/run_benchmark_experiment.py $IMAGE_TAG $BEAKER_USER $GIT_BRANCH $GIT_HASH "$MODEL"

@@ -167,4 +217,4 @@ fi
 # Clean up temporary file
 rm /tmp/run_benchmark_experiment.py
 
-echo "Benchmark experiment submitted successfully!"
+echo "Benchmark experiments submitted successfully!"
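A note on the `" && ".join(...)` pattern used in both task specs: chaining with `&&` makes the `bash -c` task exit at the first failing step instead of continuing with a broken environment. A tiny illustration:

```python
commands = [
    "mkdir -p ~/.aws",
    'echo "$AWS_CREDENTIALS_FILE" > ~/.aws/credentials',
    "python -m olmocr.pipeline ./localworkspace --markdown",  # abbreviated
]
# If any command fails, every command after it is skipped.
print(" && ".join(commands))
```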
scripts/run_marker_benchmark.sh (new executable file): 202 lines
@@ -0,0 +1,202 @@
#!/bin/bash

# Runs the marker benchmark, measuring both olmOCR-bench performance and per-document processing performance
# ./scripts/run_marker_benchmark.sh
# ./scripts/run_marker_benchmark.sh 1.7.5

set -e

# Parse command line arguments
MARKER_VERSION="${1:-1.7.5}"
echo "Using marker version: $MARKER_VERSION"

# Check for uncommitted changes
if ! git diff-index --quiet HEAD --; then
    echo "Error: There are uncommitted changes in the repository."
    echo "Please commit or stash your changes before running the benchmark."
    echo ""
    echo "Uncommitted changes:"
    git status --short
    exit 1
fi

# Use conda environment Python if available, otherwise use system Python
if [ -n "$CONDA_PREFIX" ]; then
    PYTHON="$CONDA_PREFIX/bin/python"
    echo "Using conda Python from: $CONDA_PREFIX"
else
    PYTHON="python"
    echo "Warning: No conda environment detected, using system Python"
fi

# Get version from version.py
VERSION=$($PYTHON -c 'import olmocr.version; print(olmocr.version.VERSION)')
echo "OlmOCR version: $VERSION"

# Get first 10 characters of git hash
GIT_HASH=$(git rev-parse HEAD | cut -c1-10)
echo "Git hash: $GIT_HASH"

# Get current git branch name
GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
echo "Git branch: $GIT_BRANCH"

# Create full image tag
IMAGE_TAG="olmocr-benchmark-${VERSION}-${GIT_HASH}"
echo "Building Docker image with tag: $IMAGE_TAG"

# Build the Docker image
echo "Building Docker image..."
docker build --platform linux/amd64 -f ./Dockerfile -t $IMAGE_TAG .

# Get Beaker username
BEAKER_USER=$(beaker account whoami --format json | jq -r '.[0].name')
echo "Beaker user: $BEAKER_USER"

# Push image to beaker
echo "Trying to push image to Beaker..."
if ! beaker image create --workspace ai2/oe-data-pdf --name $IMAGE_TAG $IMAGE_TAG 2>/dev/null; then
    echo "Warning: Beaker image with tag $IMAGE_TAG already exists. Using existing image."
fi

# Create Python script to run beaker experiment
cat << 'EOF' > /tmp/run_benchmark_experiment.py
import sys
from beaker import Beaker, ExperimentSpec, TaskSpec, TaskContext, ResultSpec, TaskResources, ImageSource, Priority, Constraints, EnvVar

# Get image tag, beaker user, git branch, git hash, and marker version from command line
image_tag = sys.argv[1]
beaker_user = sys.argv[2]
git_branch = sys.argv[3]
git_hash = sys.argv[4]
marker_version = sys.argv[5]

# Initialize Beaker client
b = Beaker.from_env(default_workspace="ai2/olmocr")

# Check if AWS credentials secret exists
aws_creds_secret = f"{beaker_user}-AWS_CREDENTIALS_FILE"
try:
    # Try to get the secret to see if it exists
    b.secret.get(aws_creds_secret, workspace="ai2/olmocr")
    has_aws_creds = True
    print(f"Found AWS credentials secret: {aws_creds_secret}")
except:
    has_aws_creds = False
    print(f"AWS credentials secret not found: {aws_creds_secret}")

# First experiment: Original benchmark job
commands = []
if has_aws_creds:
    commands.extend([
        "mkdir -p ~/.aws",
        'echo "$AWS_CREDENTIALS_FILE" > ~/.aws/credentials'
    ])
commands.extend([
    "git clone https://huggingface.co/datasets/allenai/olmOCR-bench",
    "cd olmOCR-bench && git lfs pull && cd ..",
    f"pip install marker-pdf=={marker_version}",
    "pip install --upgrade torchvision",
    "python -m olmocr.bench.convert marker --dir ./olmOCR-bench/bench_data",
    "python -m olmocr.bench.benchmark --dir ./olmOCR-bench/bench_data"
])

# Build task spec with optional env vars
task_spec_args = {
    "name": "marker-benchmark",
    "image": ImageSource(beaker=f"{beaker_user}/{image_tag}"),
    "command": [
        "bash", "-c",
        " && ".join(commands)
    ],
    "context": TaskContext(
        priority=Priority.normal,
        preemptible=True,
    ),
    "resources": TaskResources(gpu_count=1),
    "constraints": Constraints(cluster=["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
    "result": ResultSpec(path="/noop-results"),
}

# Add env vars if AWS credentials exist
if has_aws_creds:
    task_spec_args["env_vars"] = [
        EnvVar(name="AWS_CREDENTIALS_FILE", secret=aws_creds_secret)
    ]

# Create first experiment spec
experiment_spec = ExperimentSpec(
    description=f"Marker {marker_version} Benchmark Run - Branch: {git_branch}, Commit: {git_hash}",
    budget="ai2/oe-data",
    tasks=[TaskSpec(**task_spec_args)],
)

# Create the first experiment
experiment = b.experiment.create(spec=experiment_spec, workspace="ai2/olmocr")
print(f"Created benchmark experiment: {experiment.id}")
print(f"View at: https://beaker.org/ex/{experiment.id}")
print("-------")
print("")

# Second experiment: Performance test job
perf_commands = []
if has_aws_creds:
    perf_commands.extend([
        "mkdir -p ~/.aws",
        'echo "$AWS_CREDENTIALS_FILE" > ~/.aws/credentials'
    ])
perf_commands.extend([
    f"pip install marker-pdf=={marker_version}",
    "pip install --upgrade torchvision",
    "pip install awscli",
    "aws s3 cp --recursive s3://ai2-oe-data/jakep/olmocr/olmOCR-mix-0225/benchmark_set/ /root/olmOCR-mix-0225_benchmark_set/",
    # Tried with --workers 8, but it was taking a very long time
    # "time marker --force_ocr /root/olmOCR-mix-0225_benchmark_set/ --output_dir /root/olmOCR-mix-0225_benchmark_set_marker --workers 8"
    "time marker --force_ocr /root/olmOCR-mix-0225_benchmark_set/ --output_dir /root/olmOCR-mix-0225_benchmark_set_marker"
])

# Build performance task spec
perf_task_spec_args = {
    "name": "marker-performance",
    "image": ImageSource(beaker=f"{beaker_user}/{image_tag}"),
    "command": [
        "bash", "-c",
        " && ".join(perf_commands)
    ],
    "context": TaskContext(
        priority=Priority.normal,
        preemptible=True,
    ),
    "resources": TaskResources(gpu_count=1),
    "constraints": Constraints(cluster=["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
    "result": ResultSpec(path="/noop-results"),
}

# Add env vars if AWS credentials exist
if has_aws_creds:
    perf_task_spec_args["env_vars"] = [
        EnvVar(name="AWS_CREDENTIALS_FILE", secret=aws_creds_secret)
    ]

# Create performance experiment spec
perf_experiment_spec = ExperimentSpec(
    description=f"Marker {marker_version} Performance Test - Branch: {git_branch}, Commit: {git_hash}",
    budget="ai2/oe-data",
    tasks=[TaskSpec(**perf_task_spec_args)],
)

# Create the performance experiment
perf_experiment = b.experiment.create(spec=perf_experiment_spec, workspace="ai2/olmocr")
print(f"Created performance experiment: {perf_experiment.id}")
print(f"View at: https://beaker.org/ex/{perf_experiment.id}")
EOF

# Run the Python script to create the experiments
echo "Creating Beaker experiments..."
$PYTHON /tmp/run_benchmark_experiment.py $IMAGE_TAG $BEAKER_USER $GIT_BRANCH $GIT_HASH $MARKER_VERSION

# Clean up temporary file
rm /tmp/run_benchmark_experiment.py

echo "Benchmark experiments submitted successfully!"
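For a quick local spot-check of the performance leg, the same measurement can be driven from Python; this is a sketch, not part of the committed script, and both directory paths are assumptions carried over from the commands above:

```python
import subprocess
import time

# Mirrors the `time marker --force_ocr ... --output_dir ...` step above.
pdf_dir = "/root/olmOCR-mix-0225_benchmark_set/"        # assumed input dir
out_dir = "/root/olmOCR-mix-0225_benchmark_set_marker"  # assumed output dir

start = time.monotonic()
subprocess.run(["marker", "--force_ocr", pdf_dir, "--output_dir", out_dir], check=True)
elapsed = time.monotonic() - start
print(f"wall clock: {elapsed:.1f}s total")
```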