Commit 9701750
Parent(s): 789f1a5

Add long-context PDF extraction script

long-context-pdfs.py (ADDED, +136 -0)

# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "polars",
#     "datasets",
# ]
# ///
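# Running with `uv run` picks up the inline metadata above and installs
# polars and datasets into a temporary environment before executing.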
"""
Extract long-context, high-quality PDFs from finepdfs.

Creates a curated subset of long documents with high OCR quality -
useful for long-context model training.

Examples:
    # Quick test (Welsh)
    uv run long-context-pdfs.py --lang cym_Latn --limit 100

    # English long docs
    uv run long-context-pdfs.py --lang eng_Latn --output user/finepdfs-eng-long

    # All Latin scripts
    uv run long-context-pdfs.py --lang "*_Latn" --output user/finepdfs-long-context

    # HF Jobs
    hf jobs run uv --flavor cpu-basic -- run \\
        https://huggingface.co/.../long-context-pdfs.py \\
        --lang eng_Latn --output user/finepdfs-eng-long
"""

import argparse
import polars as pl
from datasets import Dataset


def polars_to_generator(lf: pl.LazyFrame):
    """Stream LazyFrame as row generator."""
    for batch_df in lf.collect_batches():
        yield from batch_df.iter_rows(named=True)


def main():
    parser = argparse.ArgumentParser(description="Extract long-context high-quality PDFs")
    parser.add_argument("--lang", type=str, default="cym_Latn",
                        help="Language code or glob pattern (default: cym_Latn, use '*_Latn' for all Latin)")
    parser.add_argument("--min-tokens", type=int, default=10000,
                        help="Minimum token count (default: 10000)")
    parser.add_argument("--min-lid-score", type=float, default=0.8,
                        help="Minimum language ID score (default: 0.8)")
    parser.add_argument("--limit", type=int, help="Limit rows")
    parser.add_argument("--output", type=str, help="Output dataset repo")
    parser.add_argument("--private", action="store_true")

    args = parser.parse_args()

    source = f"hf://datasets/HuggingFaceFW/finepdfs/data/{args.lang}/train/*.parquet"

    print("=" * 60)
    print("Long-Context High-Quality PDF Extraction")
    print("=" * 60)
    print(f"Source: {source}")
    print(f"Filters:")
    print(f" - token_count >= {args.min_tokens}")
    print(f" - page_average_lid_score >= {args.min_lid_score}")
    print(f" - extractor == 'docling'")
    if args.limit:
        print(f" - limit: {args.limit}")
    print("=" * 60)

    # Build query - simpler filters first, OCR quality filter can be tricky
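    # scan_parquet on an hf:// path is lazy: rows are only read from the Hub when the
    # query is collected, so the whole language dump is never held in memory at once.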
    lf = (
        pl.scan_parquet(source)
        .filter(
            (pl.col("token_count") >= args.min_tokens)
            & (pl.col("page_average_lid_score") >= args.min_lid_score)
            & (pl.col("extractor") == "docling")
        )
        .select([
            "id",
            "url",
            "text",
            "language",
            "token_count",
            "dump",
            "page_average_lid_score",
        ])
    )

    if args.limit:
        lf = lf.limit(args.limit)

    # Preview
    print("\nPreviewing...")
    preview = lf.limit(5).collect()
    print(f"Sample rows: {len(preview)}")
    if len(preview) > 0:
        print(preview.select(["language", "token_count", "page_average_lid_score"]))
    else:
        print("No rows matched! Try lowering thresholds.")
        return

    if not args.output:
        print("\nNo --output specified. Use --output to push to Hub.")
        return

    # Rebuild query for streaming
    lf = (
        pl.scan_parquet(source)
        .filter(
            (pl.col("token_count") >= args.min_tokens)
            & (pl.col("page_average_lid_score") >= args.min_lid_score)
            & (pl.col("extractor") == "docling")
        )
        .select([
            "id",
            "url",
            "text",
            "language",
            "token_count",
            "dump",
            "page_average_lid_score",
        ])
    )
    if args.limit:
        lf = lf.limit(args.limit)

    print(f"\nStreaming to dataset...")
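    # Dataset.from_generator consumes the polars batches row by row and writes them
    # to an Arrow cache on disk before the push, so memory use stays bounded.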
    ds = Dataset.from_generator(lambda: polars_to_generator(lf))
    print(f"Created dataset with {len(ds)} rows")

    print(f"\nPushing to {args.output}...")
    ds.push_to_hub(args.output, private=args.private)
    print(f"\nDone! https://huggingface.co/datasets/{args.output}")


if __name__ == "__main__":
    main()
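
For a quick sanity check after the push, the resulting subset can be loaded back with datasets. A minimal sketch, assuming the script was run with the docstring's example repo id user/finepdfs-eng-long (substitute whatever was passed to --output):

    from datasets import load_dataset

    ds = load_dataset("user/finepdfs-eng-long", split="train")
    print(ds)                      # columns and row count
    print(min(ds["token_count"]))  # should be >= --min-tokens (default 10000)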