import argparse
import base64
import csv
import datetime
import json
import os
import random
import re
import sqlite3
import tempfile
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

import boto3
import requests
import tinyhost
from tqdm import tqdm

from olmocr.data.renderpdf import render_pdf_to_base64webp
from olmocr.s3_utils import get_s3_bytes, parse_s3_path


def parse_args():
    parser = argparse.ArgumentParser(description="Scan OLMO OCR workspace results and create visual samples")
    parser.add_argument("workspace", help="OLMO OCR workspace path (s3://bucket/workspace)")
    parser.add_argument("--pages_per_output", type=int, default=30, help="Number of pages per output file")
    parser.add_argument("--repeats", type=int, default=1, help="Number of output files to generate")
    parser.add_argument("--pdf_profile", help="AWS profile for accessing PDFs")
    parser.add_argument("--output_dir", default="dolma_samples", help="Directory to save output HTML files")
    parser.add_argument("--max_workers", type=int, default=4, help="Maximum number of worker threads")
    parser.add_argument(
        "--db_path",
        default="~/s2pdf_url_data/d65142df-6588-4b68-a12c-d468b3761189.csv.db",
        help="Path to the SQLite database containing PDF hash to URL mapping",
    )
    parser.add_argument(
        "--prolific_code",
        required=True,
        help="Fixed completion code to use for all outputs",
    )
    parser.add_argument(
        "--prolific_csv",
        default="prolific_codes.csv",
        help="Path to save the file with tinyhost links (one URL per line)",
    )
    parser.add_argument(
        "--read_results",
        help="Path to a CSV file containing previously generated tinyhost links to extract annotations",
    )
    return parser.parse_args()


# The fixed Prolific completion code is passed in as a command line argument.


def obfuscate_code(code):
    """Lightly obfuscate the Prolific code so it's not immediately visible in the page source."""
    # Base64-encode the code, then reverse the encoded string.
    encoded = base64.b64encode(code.encode()).decode()
    return encoded[::-1]


def deobfuscate_code(obfuscated_code):
    """Deobfuscate the code (the same logic runs client-side in JavaScript)."""
    # Reverse the string, then decode it from base64.
    reversed_encoded = obfuscated_code[::-1]
    try:
        return base64.b64decode(reversed_encoded).decode()
    except Exception:
        return "ERROR_DECODING"


def parse_pdf_hash(pretty_pdf_path: str) -> Optional[str]:
    """Extract the PDF hash from an s3://ai2-s2-pdfs/ path.

    For example, "s3://ai2-s2-pdfs/1a2b/3c4d5e.pdf" yields "1a2b3c4d5e".
    """
    pattern = r"s3://ai2-s2-pdfs/([a-f0-9]{4})/([a-f0-9]+)\.pdf"
    match = re.match(pattern, pretty_pdf_path)
    if match:
        return match.group(1) + match.group(2)
    return None


def get_original_url(pdf_hash: str, db_path: str) -> Optional[str]:
    """Look up the original URL for a PDF hash in the SQLite database."""
    if not pdf_hash:
        return None

    try:
        sqlite_db_path = os.path.expanduser(db_path)
        if not os.path.exists(sqlite_db_path):
            print(f"SQLite database not found at {sqlite_db_path}")
            return None

        conn = sqlite3.connect(sqlite_db_path)
        cursor = conn.cursor()
        cursor.execute("SELECT uri FROM pdf_mapping WHERE pdf_hash = ?", (pdf_hash,))
        result = cursor.fetchone()
        conn.close()

        if result:
            return result[0]
        return None
    except Exception as e:
        print(f"Error looking up URL for PDF hash {pdf_hash}: {e}")
        return None


def list_result_files(s3_client, workspace_path):
    """List all JSON result files in the workspace results directory."""
    bucket, prefix = parse_s3_path(workspace_path)
    results_prefix = os.path.join(prefix, "results").rstrip("/") + "/"

    all_files = []
    paginator = s3_client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket, Prefix=results_prefix):
        if "Contents" in page:
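            # Each page of list_objects_v2 results holds at most 1,000 keys;
            # keep only the .json/.jsonl result shards and ignore everything else.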
all_files.extend([f"s3://{bucket}/{obj['Key']}" for obj in page["Contents"] if obj["Key"].endswith(".jsonl") or obj["Key"].endswith(".json")]) # if len(all_files) > 1000: # break return all_files def get_random_pages(s3_client, result_files, count=30): """Get random pages from the result files.""" random_pages = [] # Try to collect the requested number of pages attempts = 0 max_attempts = count * 3 # Allow extra attempts to handle potential failures while len(random_pages) < count and attempts < max_attempts: attempts += 1 # Pick a random result file if not result_files: print("No result files found!") break result_file = random.choice(result_files) try: # Get the content of the file content = get_s3_bytes(s3_client, result_file) lines = content.decode("utf-8").strip().split("\n") if not lines: continue # Pick a random line (which contains a complete document) line = random.choice(lines) doc = json.loads(line) # A Dolma document has "text", "metadata", and "attributes" fields if "text" not in doc or "metadata" not in doc or "attributes" not in doc: print(f"Document in {result_file} is not a valid Dolma document") continue # Get the original PDF path from metadata pdf_path = doc["metadata"].get("Source-File") if not pdf_path: continue # Get page spans from attributes page_spans = doc["attributes"].get("pdf_page_numbers", []) if not page_spans: continue # Pick a random page span page_span = random.choice(page_spans) if len(page_span) >= 3: # Page spans are [start_pos, end_pos, page_num] page_num = page_span[2] # Extract text for this page start_pos, end_pos = page_span[0], page_span[1] page_text = doc["text"][start_pos:end_pos].strip() # Include the text snippet with the page info random_pages.append((pdf_path, page_num, page_text, result_file)) if len(random_pages) >= count: break except Exception as e: print(f"Error processing {result_file}: {e}") continue print(f"Found {len(random_pages)} random pages from Dolma documents") return random_pages def create_presigned_url(s3_client, pdf_path, expiration=3600 * 24 * 7): """Create a presigned URL for the given S3 path.""" try: bucket, key = parse_s3_path(pdf_path) url = s3_client.generate_presigned_url("get_object", Params={"Bucket": bucket, "Key": key}, ExpiresIn=expiration) return url except Exception as e: print(f"Error creating presigned URL for {pdf_path}: {e}") return None def create_html_output(random_pages, pdf_s3_client, output_path, workspace_path, db_path, prolific_code, resolution=2048): """Create an HTML file with rendered PDF pages.""" # Obfuscate the provided Prolific code obfuscated_code = obfuscate_code(prolific_code) # Get current date and time for the report current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") html_content = f"""
In this task, you will review {len(random_pages)} document pages and determine whether they contain any Personally Identifiable Information (PII). For each page, please follow the decision flow outlined in the "How to Annotate" section below.
Carefully but efficiently inspect each page and select the appropriate response. You do not need to read every word. Instead, focus on ascertaining the document's intended use and spotting information that would qualify as PII.
The entire task should take about 20-25 minutes.
The current annotation will be highlighted with a blue outline, and a set of response buttons will be displayed directly below the page preview. If you are having trouble viewing the displayed page, click the “View Cached PDF” link for a closer look. However, DO NOT examine the entire document; ONLY review the single page being previewed (also indicated in the parentheses after “View Cached PDF”).
For each page, complete the following steps:
Step 1: Determine whether the document is intended for public release.
Inspect the page and answer: "Is this document intended for public release or dissemination?"
If you selected "Yes," "Cannot Read," or "Report Content," you will automatically move to the next document. If you selected "No," proceed to Step 2.
Step 2: Identify the kind of PII found in the private document (if any).
You will be shown a checklist of PII options; check every type of PII that appears on the page.
Press the blue Continue button to complete your annotation.
You will automatically be moved to the next annotation.
Note: If you cannot confidently tell that a page is private, treat it as public and do not mark any PII you are unsure about. We anticipate very few private pages or instances of PII in these documents, so erring towards public and no PII minimizes false positives and keeps the review process consistent.
You may review and edit your previous annotations at any time. To do so, press the green Edit button directly above the page preview for the annotation you want to edit.
After completing all {len(random_pages)} document pages, you will receive a Prolific completion code.
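<!--
  The completion code is embedded in this page in obfuscated form: it is base64-encoded and
  then reversed (see obfuscate_code() above). A minimal client-side sketch of the decoder,
  assuming the page's script mirrors deobfuscate_code(); the name below is illustrative, and
  the line is kept brace-free so it can sit inside this Python f-string:
    const deobfuscateCode = s => atob(s.split('').reverse().join(''));
-->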
Some personal information needs to be accompanied by an identifier to be considered PII. Identifiers that trigger PII include:
Note that the reverse is also true: an identifier must be accompanied by additional personal information or another identifier (e.g., name + email address) to be considered PII.
The following types of information should only be marked as PII if they occur alongside an identifier (commonly, a person's name):
For example, a street address might be personal information, but is not PII by itself; however, a street address associated with a name is regulated PII.
Certain types of sensitive information should always be classified as PII because the information is inherently self-identifying. The following should always be marked as PII even if they do not occur alongside an identifier:
{f'View Cached PDF (page {page_num})' if presigned_url else pdf_path}
Status: Pending
Is this document intended for public release or dissemination?