Commit · 8e83419
Parent(s): 9701750
fix: Add memory controls to prevent OOM on HF Jobs
- Add pl.Config.set_streaming_chunk_size(1000) for large text docs
- Add chunk_size parameter to polars_to_generator (default: 100)
- Add --chunk-size CLI argument for user tuning
- Update HF Jobs command syntax in docstring
Fixes exit code 137 (OOM) when processing large text documents.
🤖 Generated with [Claude Code](https://claude.com/claude-code)
Co-Authored-By: Claude Opus 4.5 <[email protected]>
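
For context, the memory-control pattern these changes introduce looks roughly like the sketch below. It is illustrative only: the input path and the `rows` helper are placeholders, and `LazyFrame.collect_batches` is simply the batching call the script itself uses.

import polars as pl
from datasets import Dataset

# Cap how many rows the Polars streaming engine materialises per chunk.
pl.Config.set_streaming_chunk_size(1000)

def rows(lf: pl.LazyFrame, chunk_size: int = 100):
    # Collect small DataFrame batches and yield plain dict rows from each,
    # so only ~chunk_size large text documents are held in memory at once.
    for batch in lf.collect_batches(chunk_size=chunk_size):
        yield from batch.iter_rows(named=True)

lf = pl.scan_parquet("data/*.parquet")  # placeholder source
ds = Dataset.from_generator(lambda: rows(lf, chunk_size=100))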
- long-context-pdfs.py  +71 -36

long-context-pdfs.py  CHANGED
@@ -21,10 +21,11 @@ Examples:
    # All Latin scripts
    uv run long-context-pdfs.py --lang "*_Latn" --output user/finepdfs-long-context

-    # HF Jobs
-    hf jobs
-
-
+    # HF Jobs (memory-efficient with small chunk size)
+    hf jobs uv run \\
+        -s HF_TOKEN \\
+        https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/long-context-pdfs.py \\
+        -- --lang eng_Latn --output user/finepdfs-eng-long
"""

import argparse

@@ -32,36 +33,64 @@ import polars as pl
from datasets import Dataset


-def polars_to_generator(lf: pl.LazyFrame):
-    """Stream LazyFrame as row generator.
-
+def polars_to_generator(lf: pl.LazyFrame, chunk_size: int = 100):
+    """Stream LazyFrame as row generator with controlled memory.
+
+    Args:
+        lf: LazyFrame to stream
+        chunk_size: Rows per batch (lower = less memory). Default 100 for large text docs.
+    """
+    for batch_df in lf.collect_batches(chunk_size=chunk_size):
        yield from batch_df.iter_rows(named=True)


def main():
-    parser = argparse.ArgumentParser(
-
-
-    parser.add_argument(
-
-
-
+    parser = argparse.ArgumentParser(
+        description="Extract long-context high-quality PDFs"
+    )
+    parser.add_argument(
+        "--lang",
+        type=str,
+        default="cym_Latn",
+        help="Language code or glob pattern (default: cym_Latn, use '*_Latn' for all Latin)",
+    )
+    parser.add_argument(
+        "--min-tokens",
+        type=int,
+        default=10000,
+        help="Minimum token count (default: 10000)",
+    )
+    parser.add_argument(
+        "--min-lid-score",
+        type=float,
+        default=0.8,
+        help="Minimum language ID score (default: 0.8)",
+    )
    parser.add_argument("--limit", type=int, help="Limit rows")
    parser.add_argument("--output", type=str, help="Output dataset repo")
    parser.add_argument("--private", action="store_true")
+    parser.add_argument(
+        "--chunk-size",
+        type=int,
+        default=100,
+        help="Rows per batch (lower = less memory, default: 100)",
+    )

    args = parser.parse_args()

+    # Reduce streaming chunk size for large text documents (default can cause OOM)
+    pl.Config.set_streaming_chunk_size(1000)
+
    source = f"hf://datasets/HuggingFaceFW/finepdfs/data/{args.lang}/train/*.parquet"

    print("=" * 60)
    print("Long-Context High-Quality PDF Extraction")
    print("=" * 60)
    print(f"Source: {source}")
-    print(
+    print("Filters:")
    print(f" - token_count >= {args.min_tokens}")
    print(f" - page_average_lid_score >= {args.min_lid_score}")
-    print(
+    print(" - extractor == 'docling'")
    if args.limit:
        print(f" - limit: {args.limit}")
    print("=" * 60)

@@ -74,15 +103,17 @@ def main():
            & (pl.col("page_average_lid_score") >= args.min_lid_score)
            & (pl.col("extractor") == "docling")
        )
-        .select(
-
-
-
-
-
-
-
-
+        .select(
+            [
+                "id",
+                "url",
+                "text",
+                "language",
+                "token_count",
+                "dump",
+                "page_average_lid_score",
+            ]
+        )
    )

    if args.limit:

@@ -110,21 +141,25 @@ def main():
            & (pl.col("page_average_lid_score") >= args.min_lid_score)
            & (pl.col("extractor") == "docling")
        )
-        .select(
-
-
-
-
-
-
-
-
+        .select(
+            [
+                "id",
+                "url",
+                "text",
+                "language",
+                "token_count",
+                "dump",
+                "page_average_lid_score",
+            ]
+        )
    )
    if args.limit:
        lf = lf.limit(args.limit)

-    print(f"\nStreaming to dataset...")
-    ds = Dataset.from_generator(
+    print(f"\nStreaming to dataset (chunk_size={args.chunk_size})...")
+    ds = Dataset.from_generator(
+        lambda: polars_to_generator(lf, chunk_size=args.chunk_size)
+    )
    print(f"Created dataset with {len(ds)} rows")

    print(f"\nPushing to {args.output}...")