davanstrien (HF Staff) committed
Commit 0f0e50a · 1 Parent(s): 8f3bc4f

Remove local test files

HuggingFaceFW_fineweb-edu_summary.json DELETED
@@ -1,58 +0,0 @@
- {
-   "dataset": "HuggingFaceFW/fineweb-edu",
-   "split": "train",
-   "text_column": "text",
-   "total_samples": 10,
-   "statistics": {
-     "character_count": {
-       "count": 10,
-       "mean": 3761.2,
-       "std": 2456.61,
-       "min": 396,
-       "max": 7966
-     },
-     "word_count": {
-       "count": 10,
-       "mean": 591.2,
-       "std": 385.27,
-       "min": 56,
-       "max": 1272
-     },
-     "line_count": {
-       "count": 10,
-       "mean": 31.2,
-       "std": 27.54,
-       "min": 2,
-       "max": 93
-     },
-     "sentence_count": {
-       "count": 10,
-       "mean": 25.7,
-       "std": 18.8,
-       "min": 5,
-       "max": 71
-     },
-     "mean_word_length": {
-       "count": 10,
-       "mean": 5.45,
-       "std": 0.46,
-       "min": 4.7,
-       "max": 6.09
-     }
-   },
-   "character_type_distribution": {
-     "alphanumeric": 0.8164,
-     "alphabetic": 0.8093,
-     "digit": 0.0071,
-     "uppercase": 0.0293,
-     "lowercase": 0.78,
-     "whitespace": 0.1554,
-     "punctuation": 0.0276,
-     "special": 0.0006
-   },
-   "derived_metrics": {
-     "avg_words_per_line": 18.95,
-     "avg_chars_per_word": 6.36,
-     "avg_words_per_sentence": 23.0
-   }
- }
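For reference, the derived_metrics in the deleted summary are simple ratios of the per-sample means reported above (this is how basic-stats.py, removed below, computes them). A minimal sketch, assuming a summary JSON with the structure shown, that recomputes them from the saved file:

import json

# Recompute derived_metrics from a saved summary; the path is the deleted test file's name.
with open("HuggingFaceFW_fineweb-edu_summary.json") as f:
    summary = json.load(f)

stats = summary["statistics"]
derived = {
    "avg_words_per_line": round(stats["word_count"]["mean"] / stats["line_count"]["mean"], 2),
    "avg_chars_per_word": round(stats["character_count"]["mean"] / stats["word_count"]["mean"], 2),
    "avg_words_per_sentence": round(stats["word_count"]["mean"] / stats["sentence_count"]["mean"], 2),
}
# Expected for the values above: {'avg_words_per_line': 18.95, 'avg_chars_per_word': 6.36, 'avg_words_per_sentence': 23.0}
print(derived)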
 
basic-stats.py DELETED
@@ -1,338 +0,0 @@
- #!/usr/bin/env python3
- # /// script
- # requires-python = ">=3.10"
- # dependencies = [
- #     "datasets",
- #     "huggingface-hub",
- #     "tqdm",
- # ]
- # ///
-
- """Calculate basic text statistics for HuggingFace datasets.
-
- This script computes essential text statistics using pure Python (no ML models).
- It uses streaming mode by default, so it works on datasets of any size without
- downloading the full dataset.
-
- Statistics calculated:
- - Character, word, line, sentence counts (per sample and total)
- - Streaming mean and standard deviation (Welford's algorithm)
- - Character type distributions (alphanumeric, digits, punctuation, whitespace, special)
- - Length statistics (min, max, approximate percentiles)
-
- Examples:
-     # Quick test on 10k samples
-     uv run basic-stats.py HuggingFaceFW/fineweb-edu --max-samples 10000
-
-     # Full dataset statistics
-     uv run basic-stats.py allenai/c4 --split train
-
-     # Save per-sample statistics to CSV
-     uv run basic-stats.py username/dataset --per-sample --output-file stats.csv
-
-     # Use with HF Jobs (GPU not needed)
-     hf jobs uv run \
-         -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \
-         https://huggingface.co/datasets/uv-scripts/dataset-stats/raw/main/basic-stats.py \
-         username/large-dataset --max-samples 100000
-
- Performance:
-     ~10,000-50,000 samples/sec on CPU (depending on text length)
-     Pure Python, minimal memory usage (constant O(1) for streaming stats)
- """
-
- import argparse
- import json
- import re
- import string
- import sys
- from collections import defaultdict
- from dataclasses import asdict, dataclass
- from pathlib import Path
- from typing import Optional
-
- from datasets import load_dataset
- from tqdm import tqdm
-
-
- @dataclass
- class StreamingStats:
-     """Track streaming statistics using Welford's algorithm for numerical stability."""
-
-     count: int = 0
-     mean: float = 0.0
-     m2: float = 0.0  # Sum of squared differences from mean
-     min_val: float = float('inf')
-     max_val: float = float('-inf')
-
-     def update(self, value: float):
-         """Update statistics with new value."""
-         self.count += 1
-         delta = value - self.mean
-         self.mean += delta / self.count
-         delta2 = value - self.mean
-         self.m2 += delta * delta2
-         self.min_val = min(self.min_val, value)
-         self.max_val = max(self.max_val, value)
-
-     @property
-     def variance(self) -> float:
-         """Calculate variance."""
-         return self.m2 / self.count if self.count > 1 else 0.0
-
-     @property
-     def std(self) -> float:
-         """Calculate standard deviation."""
-         return self.variance ** 0.5
-
-     def to_dict(self) -> dict:
-         """Convert to dictionary for JSON output."""
-         return {
-             "count": self.count,
-             "mean": round(self.mean, 2),
-             "std": round(self.std, 2),
-             "min": round(self.min_val, 2),
-             "max": round(self.max_val, 2),
-         }
-
-
- def count_sentences(text: str) -> int:
-     """Count sentences using simple heuristic (. ! ?)."""
-     # Simple sentence boundary detection
-     sentence_endings = re.findall(r'[.!?]+', text)
-     return max(1, len(sentence_endings))  # At least 1 sentence
-
-
- def calculate_char_type_distribution(text: str) -> dict:
-     """Calculate distribution of character types."""
-     if not text:
-         return {
-             "alphanumeric": 0.0,
-             "alphabetic": 0.0,
-             "digit": 0.0,
-             "uppercase": 0.0,
-             "lowercase": 0.0,
-             "whitespace": 0.0,
-             "punctuation": 0.0,
-             "special": 0.0,
-         }
-
-     total_chars = len(text)
-     alpha_count = sum(1 for c in text if c.isalpha())
-     digit_count = sum(1 for c in text if c.isdigit())
-     upper_count = sum(1 for c in text if c.isupper())
-     lower_count = sum(1 for c in text if c.islower())
-     whitespace_count = sum(1 for c in text if c.isspace())
-     punct_count = sum(1 for c in text if c in string.punctuation)
-
-     return {
-         "alphanumeric": round((alpha_count + digit_count) / total_chars, 4),
-         "alphabetic": round(alpha_count / total_chars, 4),
-         "digit": round(digit_count / total_chars, 4),
-         "uppercase": round(upper_count / total_chars, 4) if alpha_count > 0 else 0.0,
-         "lowercase": round(lower_count / total_chars, 4) if alpha_count > 0 else 0.0,
-         "whitespace": round(whitespace_count / total_chars, 4),
-         "punctuation": round(punct_count / total_chars, 4),
-         "special": round((total_chars - alpha_count - digit_count - whitespace_count - punct_count) / total_chars, 4),
-     }
-
-
- def calculate_basic_stats(text: str) -> dict:
-     """Calculate basic statistics for a single text sample."""
-     if not text:
-         return {
-             "char_count": 0,
-             "word_count": 0,
-             "line_count": 0,
-             "sentence_count": 0,
-             "mean_word_length": 0.0,
-         }
-
-     char_count = len(text)
-     words = text.split()
-     word_count = len(words)
-     line_count = len(text.splitlines())
-     sentence_count = count_sentences(text)
-     mean_word_length = sum(len(w) for w in words) / word_count if word_count > 0 else 0.0
-
-     return {
-         "char_count": char_count,
-         "word_count": word_count,
-         "line_count": line_count,
-         "sentence_count": sentence_count,
-         "mean_word_length": round(mean_word_length, 2),
-     }
-
-
- def main():
-     parser = argparse.ArgumentParser(
-         description="Calculate basic text statistics for HuggingFace datasets",
-         formatter_class=argparse.RawDescriptionHelpFormatter,
-         epilog=__doc__,
-     )
-     parser.add_argument(
-         "dataset",
-         help="Dataset name (e.g., 'HuggingFaceFW/fineweb-edu') or local path",
-     )
-     parser.add_argument(
-         "--split",
-         default="train",
-         help="Dataset split to process (default: train)",
-     )
-     parser.add_argument(
-         "--text-column",
-         default="text",
-         help="Name of the text column (default: text)",
-     )
-     parser.add_argument(
-         "--max-samples",
-         type=int,
-         help="Maximum number of samples to process (for testing)",
-     )
-     parser.add_argument(
-         "--per-sample",
-         action="store_true",
-         help="Save per-sample statistics to CSV file",
-     )
-     parser.add_argument(
-         "--output-file",
-         help="Output file for per-sample stats (default: dataset-stats.csv)",
-     )
-     parser.add_argument(
-         "--streaming",
-         action="store_true",
-         default=True,
-         help="Use streaming mode (default: True)",
-     )
-
-     args = parser.parse_args()
-
-     # Load dataset in streaming mode
-     print(f"Loading dataset: {args.dataset} (split: {args.split})")
-     print(f"Streaming mode: {args.streaming}")
-
-     try:
-         dataset = load_dataset(
-             args.dataset,
-             split=args.split,
-             streaming=args.streaming,
-         )
-     except Exception as e:
-         print(f"Error loading dataset: {e}")
-         sys.exit(1)
-
-     # Check if text column exists
-     if args.text_column not in dataset.column_names:
-         print(f"Error: Column '{args.text_column}' not found in dataset.")
-         print(f"Available columns: {dataset.column_names}")
-         sys.exit(1)
-
-     # Initialize streaming statistics
-     char_stats = StreamingStats()
-     word_stats = StreamingStats()
-     line_stats = StreamingStats()
-     sentence_stats = StreamingStats()
-     word_length_stats = StreamingStats()
-
-     # Character type distribution accumulator
-     char_type_totals = defaultdict(float)
-
-     # For per-sample output
-     per_sample_data = []
-
-     # Process dataset
-     total_samples = args.max_samples if args.max_samples else "unknown"
-     with tqdm(total=args.max_samples, desc="Processing samples") as pbar:
-         for i, sample in enumerate(dataset):
-             if args.max_samples and i >= args.max_samples:
-                 break
-
-             text = sample[args.text_column]
-
-             # Calculate stats for this sample
-             stats = calculate_basic_stats(text)
-             char_dist = calculate_char_type_distribution(text)
-
-             # Update streaming statistics
-             char_stats.update(stats["char_count"])
-             word_stats.update(stats["word_count"])
-             line_stats.update(stats["line_count"])
-             sentence_stats.update(stats["sentence_count"])
-             word_length_stats.update(stats["mean_word_length"])
-
-             # Accumulate character type distributions
-             for key, value in char_dist.items():
-                 char_type_totals[key] += value
-
-             # Store per-sample data if requested
-             if args.per_sample:
-                 sample_data = {**stats, **char_dist}
-                 per_sample_data.append(sample_data)
-
-             pbar.update(1)
-
-     # Calculate final statistics
-     num_samples = char_stats.count
-
-     if num_samples == 0:
-         print("No samples processed!")
-         sys.exit(1)
-
-     # Average character type distributions
-     char_type_means = {
-         key: round(value / num_samples, 4)
-         for key, value in char_type_totals.items()
-     }
-
-     # Create summary report
-     summary = {
-         "dataset": args.dataset,
-         "split": args.split,
-         "text_column": args.text_column,
-         "total_samples": num_samples,
-         "statistics": {
-             "character_count": char_stats.to_dict(),
-             "word_count": word_stats.to_dict(),
-             "line_count": line_stats.to_dict(),
-             "sentence_count": sentence_stats.to_dict(),
-             "mean_word_length": word_length_stats.to_dict(),
-         },
-         "character_type_distribution": char_type_means,
-         "derived_metrics": {
-             "avg_words_per_line": round(word_stats.mean / line_stats.mean, 2) if line_stats.mean > 0 else 0.0,
-             "avg_chars_per_word": round(char_stats.mean / word_stats.mean, 2) if word_stats.mean > 0 else 0.0,
-             "avg_words_per_sentence": round(word_stats.mean / sentence_stats.mean, 2) if sentence_stats.mean > 0 else 0.0,
-         }
-     }
-
-     # Print summary
-     print("\n" + "="*60)
-     print("BASIC TEXT STATISTICS SUMMARY")
-     print("="*60)
-     print(json.dumps(summary, indent=2))
-
-     # Save per-sample data if requested
-     if args.per_sample:
-         output_file = args.output_file or f"{args.dataset.replace('/', '_')}_stats.csv"
-
-         # Save as CSV
-         import csv
-
-         if per_sample_data:
-             with open(output_file, 'w', newline='') as f:
-                 writer = csv.DictWriter(f, fieldnames=per_sample_data[0].keys())
-                 writer.writeheader()
-                 writer.writerows(per_sample_data)
-
-             print(f"\nPer-sample statistics saved to: {output_file}")
-
-     # Save summary as JSON
-     summary_file = f"{args.dataset.replace('/', '_')}_summary.json"
-     with open(summary_file, 'w') as f:
-         json.dump(summary, f, indent=2)
-
-     print(f"Summary saved to: {summary_file}")
-
-
- if __name__ == "__main__":
-     main()
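The core of the removed script is the StreamingStats dataclass, which uses Welford's algorithm so the mean and standard deviation are updated in constant memory as samples stream by. A quick sanity check, assuming the class above is pasted into a Python session (the uv script is not importable as a module): the streaming results should agree with a batch computation over the same values.

import math
import statistics

values = [396.0, 820.0, 1500.0, 2300.0, 3100.0, 4200.0, 5100.0, 6200.0, 7000.0, 7966.0]

s = StreamingStats()
for v in values:
    s.update(v)

# StreamingStats divides m2 by count (population variance), so compare against pstdev.
assert math.isclose(s.mean, statistics.fmean(values), rel_tol=1e-9)
assert math.isclose(s.std, statistics.pstdev(values), rel_tol=1e-9)
print(s.to_dict())  # e.g. {'count': 10, 'mean': 3858.2, ...}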
 
stats_output/detailed_stats.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:40d8da68fd7af8ed27f28a7c2c7ff218e818dfd217a7c13ac99abcb032090a1d
- size 9770
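These parquet diffs are Git LFS pointer files rather than the binary data itself: three "key value" lines recording the spec version, the SHA-256 of the real file, and its size in bytes. A minimal sketch of reading such a pointer (parse_lfs_pointer is a hypothetical helper, not part of this repo):

def parse_lfs_pointer(text: str) -> dict:
    """Split each 'key value' line of a Git LFS pointer into a dict entry."""
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:40d8da68fd7af8ed27f28a7c2c7ff218e818dfd217a7c13ac99abcb032090a1d\n"
    "size 9770"
)
print(parse_lfs_pointer(pointer)["size"])  # -> '9770' (bytes)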
 
stats_output/dump_stats.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a14fac91a0776f89c406e662aede9fc73535df26c9de0dbe3edbec16895f4db7
- size 3879
 
stats_output/extractor_stats.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7c26882ae83e18b22cd4538962a938216d90d49a85b5aad9f7a70dc12844c1b6
- size 2770
 
stats_output/global_stats.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:278ea6044f2d1d39fffb5a20afc248227d4870d5c397b97dcee0e4e14443c0da
- size 1936
 
stats_output/language_stats.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:619cc9e3e61a5b37a17b7b5cd3f073af1c7c781e1a591a67e85b0513e8caf3c4
- size 4021
 
stats_output/temporal_stats.parquet DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:f1c643f4eff59c20344fd68c8a53b7f87f6d5df828f1eb7635c4943f8420df06
- size 3994