Merge pull request #218 from allenai/amanr/benchmark_automation

Updated Dockerfile and added a workspace_to_bench.py script

Commit faddf44897
Dockerfile

@@ -10,12 +10,14 @@ RUN apt-get update -y && apt-get install -y poppler-utils ttf-mscorefonts-installer
 
 RUN apt-get update -y && apt-get install -y --no-install-recommends \
     git \
+    git-lfs \
     python3.11 \
     python3.11-dev \
     python3.11-distutils \
     ca-certificates \
     build-essential \
     curl \
+    wget \
     unzip
 
 RUN rm -rf /var/lib/apt/lists/* \
@@ -30,6 +32,7 @@ ADD --chmod=755 https://astral.sh/uv/install.sh /install.sh
 RUN /install.sh && rm /install.sh
 
 ENV PYTHONUNBUFFERED=1
+
 WORKDIR /root
 COPY pyproject.toml pyproject.toml
 COPY olmocr/version.py olmocr/version.py
@@ -40,9 +43,7 @@ RUN /root/.local/bin/uv pip install --system --no-cache ".[bench]"
 RUN playwright install-deps
 RUN playwright install chromium
 COPY olmocr olmocr
-
-WORKDIR /root
-COPY olmocr olmocr
+COPY scripts scripts
 
 RUN python3 -m sglang.launch_server --help
 RUN python3 -m olmocr.pipeline --help
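The additions support the benchmark flow added below: git-lfs lets the container pull the LFS-hosted olmOCR-bench data, wget is added alongside the existing download tools, and COPY scripts scripts bakes the new benchmark scripts into the image. The duplicate WORKDIR /root / COPY olmocr olmocr block is dropped.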
olmocr/bench/scripts/workspace_to_bench.py  (new file, 217 lines)

@@ -0,0 +1,217 @@
|  | """ | ||||||
|  | Convert JSONL files to Markdown files and handle missing PDFs | ||||||
|  | Usage: | ||||||
|  |     python workspace_to_benchmark.py localworkspace ./markdown_output --bench-path ../olmOCR-bench/ | ||||||
|  | """ | ||||||

import argparse
import json
import sys
from collections import defaultdict
from pathlib import Path


def load_jsonl_files(input_dir):
    """Load all JSONL files from the input directory."""
    jsonl_files = list(Path(input_dir).glob("*.jsonl"))
    if not jsonl_files:
        print(f"No JSONL files found in {input_dir}")
        return []

    print(f"Found {len(jsonl_files)} JSONL files: {[f.name for f in jsonl_files]}")
    return jsonl_files


def parse_jsonl_entries(jsonl_files):
    """Parse all JSONL files and extract entries with text and metadata."""
    all_entries = []
    pdf_sources = set()

    for jsonl_file in jsonl_files:
        print(f"Processing {jsonl_file.name}...")

        with open(jsonl_file, "r", encoding="utf-8") as f:
            for line_num, line in enumerate(f, 1):
                line = line.strip()
                if not line:
                    continue

                try:
                    entry = json.loads(line)
                    text = entry.get("text", "")
                    metadata = entry.get("metadata", {})
                    source_file = metadata.get("Source-File", "")

                    if source_file:
                        pdf_sources.add(source_file)

                    all_entries.append({"text": text, "source_file": source_file, "metadata": metadata, "entry": entry})

                except json.JSONDecodeError as e:
                    print(f"Error parsing line {line_num} in {jsonl_file.name}: {e}")
                    continue

    print(f"Loaded {len(all_entries)} entries from JSONL files")
    print(f"Found {len(pdf_sources)} unique PDF sources")

    return all_entries, pdf_sources


def get_subdir_and_pdf_name(source_file_path):
    """Extract subdirectory and PDF filename from source file path."""
    if not source_file_path:
        return None, None

    path_parts = Path(source_file_path).parts

    try:
        pdfs_index = path_parts.index("pdfs")
        if pdfs_index + 1 < len(path_parts):
            subdir = path_parts[pdfs_index + 1]
            pdf_name = Path(source_file_path).stem
            return subdir, pdf_name
    except ValueError:
        pass

    return None, None
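
# Example (illustrative path): a Source-File of
# "olmOCR-bench/bench_data/pdfs/old_scans/doc1.pdf" yields ("old_scans", "doc1");
# paths without a "pdfs" component yield (None, None).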


def create_markdown_files(entries, output_dir):
    """Create markdown files from JSONL entries in subdir/{pdf_name}_pg1_repeat1.md format."""
    output_path = Path(output_dir)

    subdir_pdf_to_entries = defaultdict(list)

    for entry in entries:
        subdir, pdf_name = get_subdir_and_pdf_name(entry["source_file"])
        if subdir and pdf_name:
            key = (subdir, pdf_name)
            subdir_pdf_to_entries[key].append(entry)

    created_files = set()

    for (subdir, pdf_name), pdf_entries in subdir_pdf_to_entries.items():
        subdir_path = output_path / subdir
        subdir_path.mkdir(parents=True, exist_ok=True)

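        # Candidate naming follows olmOCR-bench's expected
        # {pdf_name}_pg{page}_repeat{n}.md pattern; this converter always
        # writes page 1, repeat 1.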
        md_filename = f"{pdf_name}_pg1_repeat1.md"
        md_filepath = subdir_path / md_filename
        combined_text = []

        for entry in pdf_entries:
            text = entry["text"]
            if text.strip():
                combined_text.append(text)

        with open(md_filepath, "w", encoding="utf-8") as f:
            f.write("\n".join(combined_text))

        created_files.add((subdir, pdf_name))
        print(f"Created: {subdir}/{md_filename}")

    print(f"Created {len(created_files)} markdown files from JSONL data")
    return created_files


def find_missing_pdfs(pdf_sources, created_files, base_bench_path):
    """Find PDFs that exist in directories but are missing from JSONL data."""
    subdirs = set()

    for source_file in pdf_sources:
        if not source_file:
            continue

        subdir, _ = get_subdir_and_pdf_name(source_file)
        if subdir:
            subdirs.add(subdir)

    print(f"Found PDF subdirectories: {sorted(subdirs)}")

    missing_pdfs = []

    for subdir in subdirs:
        pdf_dir = Path(base_bench_path) / "bench_data" / "pdfs" / subdir

        if not pdf_dir.exists():
            print(f"Warning: Directory {pdf_dir} does not exist")
            continue

        pdf_files = list(pdf_dir.glob("*.pdf"))
        print(f"Found {len(pdf_files)} PDF files in {subdir}/")

        for pdf_file in pdf_files:
            pdf_name = pdf_file.stem

            if (subdir, pdf_name) not in created_files:
                missing_pdfs.append({"pdf_name": pdf_name, "full_path": pdf_file, "subdir": subdir})

    print(f"Found {len(missing_pdfs)} missing PDFs")
    return missing_pdfs


def create_blank_markdown_files(missing_pdfs, output_dir):
    """Create blank markdown files for missing PDFs in subdir/{pdf_name}_pg1_repeat1.md format."""
    output_path = Path(output_dir)

    for missing_pdf in missing_pdfs:
        subdir = missing_pdf["subdir"]
        pdf_name = missing_pdf["pdf_name"]

        subdir_path = output_path / subdir
        subdir_path.mkdir(parents=True, exist_ok=True)

        md_filename = f"{pdf_name}_pg1_repeat1.md"
        md_filepath = subdir_path / md_filename

        with open(md_filepath, "w", encoding="utf-8") as f:
            f.write("")

        print(f"Created blank: {subdir}/{md_filename}")

    print(f"Created {len(missing_pdfs)} blank markdown files for missing PDFs")


def main():
    parser = argparse.ArgumentParser(description="Convert JSONL files to Markdown and handle missing PDFs")
    parser.add_argument("workspace_dir", help="Your workspace directory")
    parser.add_argument("output_dir", nargs="?", default="./markdown_output", help="Output directory for markdown files (default: ./markdown_output)")
    parser.add_argument("--bench-path", default="../olmOCR-bench", help="Path to olmOCR-bench directory (default: ../olmOCR-bench)")

    args = parser.parse_args()
    input_dir = Path(args.workspace_dir) / "results"
    output_dir = Path(args.output_dir)
    bench_path = Path(args.bench_path)

    if not input_dir.exists():
        print(f"Error: Input directory {input_dir} does not exist")
        sys.exit(1)

    jsonl_files = load_jsonl_files(input_dir)
    if not jsonl_files:
        sys.exit(1)

    entries, pdf_sources = parse_jsonl_entries(jsonl_files)
    if not entries:
        print("No entries found in JSONL files")
        sys.exit(1)

    created_files = create_markdown_files(entries, output_dir)

    missing_pdfs = find_missing_pdfs(pdf_sources, created_files, bench_path)

    if missing_pdfs:
        create_blank_markdown_files(missing_pdfs, output_dir)

    print("\nSummary:")
    print(f"Created {len(created_files)} markdown files from JSONL data")
    print(f"Created {len(missing_pdfs)} blank markdown files for missing PDFs")
    print(f"Total markdown files: {len(created_files) + len(missing_pdfs)}")
    print(f"Output directory: {output_dir.absolute()}")


if __name__ == "__main__":
    main()
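For orientation, here is a minimal sketch of the mapping the converter applies to each pipeline entry; the bench subdirectory and file names are illustrative:

from pathlib import Path

# Hypothetical Source-File value taken from a pipeline JSONL entry.
source = Path("olmOCR-bench/bench_data/pdfs/old_scans/doc1.pdf")

parts = source.parts
subdir = parts[parts.index("pdfs") + 1]    # "old_scans"
md_name = f"{source.stem}_pg1_repeat1.md"  # "doc1_pg1_repeat1.md"

print(Path("markdown_output") / subdir / md_name)
# markdown_output/old_scans/doc1_pg1_repeat1.md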
scripts/run_benchmark.sh  (new executable file, 92 lines)

@@ -0,0 +1,92 @@
#!/bin/bash

set -e

# Use conda environment Python if available, otherwise use system Python
if [ -n "$CONDA_PREFIX" ]; then
    PYTHON="$CONDA_PREFIX/bin/python"
    echo "Using conda Python from: $CONDA_PREFIX"
else
    PYTHON="python"
    echo "Warning: No conda environment detected, using system Python"
fi

# Get version from version.py
VERSION=$($PYTHON -c 'import olmocr.version; print(olmocr.version.VERSION)')
echo "OlmOCR version: $VERSION"

# Get the first 10 characters of the git hash
GIT_HASH=$(git rev-parse HEAD | cut -c1-10)
echo "Git hash: $GIT_HASH"

# Create the full image tag
IMAGE_TAG="olmocr-benchmark-${VERSION}-${GIT_HASH}"
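# e.g. olmocr-benchmark-0.1.0-1a2b3c4d5e (version and hash here are illustrative)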
|  | echo "Building Docker image with tag: $IMAGE_TAG" | ||||||
|  | 
 | ||||||
|  | # Build the Docker image | ||||||
|  | echo "Building Docker image..." | ||||||
|  | docker build --platform linux/amd64 -f ./Dockerfile -t $IMAGE_TAG . | ||||||
|  | 
 | ||||||
|  | # Get Beaker username | ||||||
|  | BEAKER_USER=$(beaker account whoami --format json | jq -r '.[0].name') | ||||||
|  | echo "Beaker user: $BEAKER_USER" | ||||||
|  | 
 | ||||||
|  | # Push image to beaker | ||||||
|  | echo "Pushing image to Beaker..." | ||||||
|  | beaker image create --workspace ai2/oe-data-pdf --name $IMAGE_TAG $IMAGE_TAG | ||||||
|  | 
 | ||||||
|  | # Create Python script to run beaker experiment | ||||||
|  | cat << 'EOF' > /tmp/run_benchmark_experiment.py | ||||||
|  | import sys | ||||||
|  | from beaker import Beaker, ExperimentSpec, TaskSpec, TaskContext, ResultSpec, TaskResources, ImageSource, Priority, Constraints | ||||||
|  | 
 | ||||||
|  | # Get image tag and beaker user from command line | ||||||
|  | image_tag = sys.argv[1] | ||||||
|  | beaker_user = sys.argv[2] | ||||||
|  | 
 | ||||||
|  | # Initialize Beaker client | ||||||
|  | b = Beaker.from_env(default_workspace="ai2/olmocr") | ||||||
|  | 
 | ||||||
|  | # Create experiment spec | ||||||
|  | experiment_spec = ExperimentSpec( | ||||||
|  |     description="OlmOCR Benchmark Run", | ||||||
|  |     budget="ai2/oe-data", | ||||||
|  |     tasks=[ | ||||||
|  |         TaskSpec( | ||||||
|  |             name="olmocr-benchmark", | ||||||
|  |             image=ImageSource(beaker=f"{beaker_user}/{image_tag}"), | ||||||
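            # The in-container job: fetch the LFS-backed bench data (this is why
            # git-lfs was added to the Dockerfile), OCR every bench PDF, convert
            # the workspace to the bench markdown layout, then score it.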
            command=[
                "bash", "-c",
                " && ".join([
                    "git clone https://huggingface.co/datasets/allenai/olmOCR-bench",
                    "cd olmOCR-bench && git lfs pull && cd ..",
                    "python -m olmocr.pipeline ./localworkspace --markdown --pdfs ./olmOCR-bench/bench_data/pdfs/**/*.pdf",
                    "python olmocr/bench/scripts/workspace_to_bench.py localworkspace/ olmOCR-bench/bench_data/markdown_output --bench-path ./olmOCR-bench/",
                    "python -m olmocr.bench.benchmark --dir ./olmOCR-bench/bench_data"
                ])
            ],
            context=TaskContext(
                priority=Priority.normal,
                preemptible=True,
            ),
            resources=TaskResources(gpu_count=1),
            constraints=Constraints(cluster=["ai2/ceres-cirrascale", "ai2/jupiter-cirrascale-2"]),
            result=ResultSpec(path="/noop-results"),
        )
    ],
)

# Create the experiment
experiment = b.experiment.create(spec=experiment_spec, workspace="ai2/olmocr")
print(f"Created experiment: {experiment.id}")
print(f"View at: https://beaker.org/ex/{experiment.id}")
EOF

# Run the Python script to create the experiment
echo "Creating Beaker experiment..."
$PYTHON /tmp/run_benchmark_experiment.py "$IMAGE_TAG" "$BEAKER_USER"

# Clean up temporary file
rm /tmp/run_benchmark_experiment.py

echo "Benchmark experiment submitted successfully!"