hf-transformers-bot committed
Commit 887a47e (verified) · Parent: 0c2b7f4

Upload benchmark results for run 18412667006

2025-10-10/18412667006/benchmark_results/Llama-2-7b-hf/Llama-2-7b-hf_benchmark_20251010_165231.json ADDED
@@ -0,0 +1,1175 @@
+ {
+ "model_name": "Llama-2-7b-hf",
+ "benchmark_scenarios": [
+ {
+ "scenario_name": "eager_eager_attn",
+ "metadata": {
+ "timestamp": "2025-10-10T16:46:32.667289",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "eager",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.640752197265625,
+ 3.64964599609375,
+ 3.637513916015625,
+ 3.644445068359375,
+ 3.641325439453125
+ ],
+ "mean": 3.6427365234375,
+ "median": 3.641325439453125,
+ "std": 0.004095467611589816,
+ "min": 3.637513916015625,
+ "max": 3.64964599609375,
+ "p25": 3.640752197265625,
+ "p75": 3.644445068359375,
+ "p90": 3.6475656250000004,
+ "p95": 3.6486058105468753,
+ "p99": 3.6494379589843753,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03803145599365235,
+ 0.03747055816650391,
+ 0.03703228759765625,
+ 0.03719222259521485,
+ 0.0370159683227539
+ ],
+ "mean": 0.037348498535156247,
+ "median": 0.03719222259521485,
+ "std": 0.00037850160165571906,
+ "min": 0.0370159683227539,
+ "max": 0.03803145599365235,
+ "p25": 0.03703228759765625,
+ "p75": 0.03747055816650391,
+ "p90": 0.037807096862792974,
+ "p95": 0.03791927642822266,
+ "p99": 0.03800902008056641,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 27.466851513570372,
+ 27.399917719973644,
+ 27.491303760986202,
+ 27.439019692788822,
+ 27.462527495213
+ ],
+ "mean": 27.45192403650641,
+ "median": 27.462527495213,
+ "std": 0.03084543614290371,
+ "min": 27.399917719973644,
+ "max": 27.491303760986202,
+ "p25": 27.439019692788822,
+ "p75": 27.466851513570372,
+ "p90": 27.48152286201987,
+ "p95": 27.486413311503036,
+ "p99": 27.49032567108957,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03640752197265625,
+ 0.036496459960937505,
+ 0.03637513916015625,
+ 0.036444450683593754,
+ 0.036413254394531254
+ ],
+ "mean": 0.036427365234375,
+ "median": 0.036413254394531254,
+ "std": 4.095467611589893e-05,
+ "min": 0.03637513916015625,
+ "max": 0.036496459960937505,
+ "p25": 0.03640752197265625,
+ "p75": 0.036444450683593754,
+ "p90": 0.03647565625,
+ "p95": 0.036486058105468754,
+ "p99": 0.03649437958984376,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 91.56521739130434,
+ "gpu_utilization_max": 94,
+ "gpu_utilization_min": 87,
+ "gpu_memory_used_mean": 13253,
+ "gpu_memory_used_max": 13253,
+ "gpu_memory_used_min": 13253,
+ "sample_count": 92,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_eager_attn",
+ "metadata": {
+ "timestamp": "2025-10-10T16:47:07.881243",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "eager",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 7.650619140625,
+ 7.65998828125,
+ 7.7081708984375,
+ 7.72244189453125,
+ 7.75087158203125
+ ],
+ "mean": 7.698418359375,
+ "median": 7.7081708984375,
+ "std": 0.03790836067254607,
+ "min": 7.650619140625,
+ "max": 7.75087158203125,
+ "p25": 7.65998828125,
+ "p75": 7.72244189453125,
+ "p90": 7.73949970703125,
+ "p95": 7.74518564453125,
+ "p99": 7.74973439453125,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03917836761474609,
+ 0.03819475173950195,
+ 0.038026912689208985,
+ 0.03802223968505859,
+ 0.03802880096435547
+ ],
+ "mean": 0.038290214538574216,
+ "median": 0.03802880096435547,
+ "std": 0.00044886623296968306,
+ "min": 0.03802223968505859,
+ "max": 0.03917836761474609,
+ "p25": 0.038026912689208985,
+ "p75": 0.03819475173950195,
+ "p90": 0.038784921264648435,
+ "p95": 0.03898164443969727,
+ "p99": 0.03913902297973633,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 13.07083755731575,
+ 13.054850259337659,
+ 12.973246353459897,
+ 12.949271922760122,
+ 12.90177484449991
+ ],
+ "mean": 12.989996187474668,
+ "median": 12.973246353459897,
+ "std": 0.0639740662258158,
+ "min": 12.90177484449991,
+ "max": 13.07083755731575,
+ "p25": 12.949271922760122,
+ "p75": 13.054850259337659,
+ "p90": 13.064442638124513,
+ "p95": 13.067640097720131,
+ "p99": 13.070198065396626,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.07650619140625001,
+ 0.0765998828125,
+ 0.077081708984375,
+ 0.0772244189453125,
+ 0.0775087158203125
+ ],
+ "mean": 0.07698418359375,
+ "median": 0.077081708984375,
+ "std": 0.00037908360672546046,
+ "min": 0.07650619140625001,
+ "max": 0.0775087158203125,
+ "p25": 0.0765998828125,
+ "p75": 0.0772244189453125,
+ "p90": 0.0773949970703125,
+ "p95": 0.07745185644531251,
+ "p99": 0.0774973439453125,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 45.21556886227545,
+ "gpu_utilization_max": 92,
+ "gpu_utilization_min": 0,
+ "gpu_memory_used_mean": 13629.790419161676,
+ "gpu_memory_used_max": 13777,
+ "gpu_memory_used_min": 13415,
+ "sample_count": 167,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_default",
+ "metadata": {
+ "timestamp": "2025-10-10T16:48:33.979994",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.405870361328125,
+ 3.408725830078125,
+ 3.404103271484375,
+ 3.40480615234375,
+ 3.406157958984375
+ ],
+ "mean": 3.4059327148437504,
+ "median": 3.405870361328125,
+ "std": 0.001579374665357239,
+ "min": 3.404103271484375,
+ "max": 3.408725830078125,
+ "p25": 3.40480615234375,
+ "p75": 3.406157958984375,
+ "p90": 3.4076986816406247,
+ "p95": 3.408212255859375,
+ "p99": 3.408623115234375,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03502486419677735,
+ 0.03468195343017578,
+ 0.03458627319335938,
+ 0.03452726364135742,
+ 0.03470694351196289
+ ],
+ "mean": 0.03470545959472656,
+ "median": 0.03468195343017578,
+ "std": 0.0001723561647644028,
+ "min": 0.03452726364135742,
+ "max": 0.03502486419677735,
+ "p25": 0.03458627319335938,
+ "p75": 0.03470694351196289,
+ "p90": 0.034897695922851565,
+ "p95": 0.034961280059814456,
+ "p99": 0.035012147369384766,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 29.36107056083157,
+ 29.336474971854244,
+ 29.37631206364504,
+ 29.370247680962244,
+ 29.35859146996733
+ ],
+ "mean": 29.360539349452086,
+ "median": 29.36107056083157,
+ "std": 0.01361029685871791,
+ "min": 29.336474971854244,
+ "max": 29.37631206364504,
+ "p25": 29.35859146996733,
+ "p75": 29.370247680962244,
+ "p90": 29.373886310571923,
+ "p95": 29.375099187108482,
+ "p99": 29.37606948833773,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03405870361328125,
+ 0.03408725830078125,
+ 0.03404103271484375,
+ 0.0340480615234375,
+ 0.03406157958984375
+ ],
+ "mean": 0.0340593271484375,
+ "median": 0.03405870361328125,
+ "std": 1.5793746653572076e-05,
+ "min": 0.03404103271484375,
+ "max": 0.03408725830078125,
+ "p25": 0.0340480615234375,
+ "p75": 0.03406157958984375,
+ "p90": 0.03407698681640625,
+ "p95": 0.03408212255859375,
+ "p99": 0.03408623115234375,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 96.96511627906976,
+ "gpu_utilization_max": 98,
+ "gpu_utilization_min": 95,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 86,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_math",
+ "metadata": {
+ "timestamp": "2025-10-10T16:49:06.331622",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "math",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.531345703125,
+ 3.526765380859375,
+ 3.530473388671875,
+ 3.5378603515625,
+ 3.525578125
+ ],
+ "mean": 3.53040458984375,
+ "median": 3.530473388671875,
+ "std": 0.004313175517262949,
+ "min": 3.525578125,
+ "max": 3.5378603515625,
+ "p25": 3.526765380859375,
+ "p75": 3.531345703125,
+ "p90": 3.5352544921874998,
+ "p95": 3.536557421875,
+ "p99": 3.537599765625,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03957625579833984,
+ 0.03693795013427734,
+ 0.03893779373168945,
+ 0.037542625427246096,
+ 0.03836092758178711
+ ],
+ "mean": 0.03827111053466797,
+ "median": 0.03836092758178711,
+ "std": 0.0009448751188738397,
+ "min": 0.03693795013427734,
+ "max": 0.03957625579833984,
+ "p25": 0.037542625427246096,
+ "p75": 0.03893779373168945,
+ "p90": 0.03932087097167969,
+ "p95": 0.039448563385009766,
+ "p99": 0.039550717315673826,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 28.317816607846357,
+ 28.35459385609393,
+ 28.324813414786536,
+ 28.2656719211189,
+ 28.364142405722465
+ ],
+ "mean": 28.32540764111364,
+ "median": 28.324813414786536,
+ "std": 0.034579040670497135,
+ "min": 28.2656719211189,
+ "max": 28.364142405722465,
+ "p25": 28.317816607846357,
+ "p75": 28.35459385609393,
+ "p90": 28.36032298587105,
+ "p95": 28.36223269579676,
+ "p99": 28.363760463737325,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03531345703125,
+ 0.03526765380859375,
+ 0.03530473388671875,
+ 0.035378603515624996,
+ 0.03525578125
+ ],
+ "mean": 0.035304045898437504,
+ "median": 0.03530473388671875,
+ "std": 4.313175517262858e-05,
+ "min": 0.03525578125,
+ "max": 0.035378603515624996,
+ "p25": 0.03526765380859375,
+ "p75": 0.03531345703125,
+ "p90": 0.035352544921875,
+ "p95": 0.035365574218749994,
+ "p99": 0.03537599765625,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 96.28089887640449,
+ "gpu_utilization_max": 98,
+ "gpu_utilization_min": 90,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 89,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_flash_attention",
+ "metadata": {
+ "timestamp": "2025-10-10T16:49:39.714945",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "flash_attention",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.40709716796875,
+ 3.404139892578125,
+ 3.408701171875,
+ 3.40428125,
+ 3.40566357421875
+ ],
+ "mean": 3.405976611328125,
+ "median": 3.40566357421875,
+ "std": 0.0017334737027014202,
+ "min": 3.404139892578125,
+ "max": 3.408701171875,
+ "p25": 3.40428125,
+ "p75": 3.40709716796875,
+ "p90": 3.4080595703125,
+ "p95": 3.40838037109375,
+ "p99": 3.40863701171875,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03554316711425781,
+ 0.03459260940551758,
+ 0.03462351989746094,
+ 0.03459699249267578,
+ 0.03454345703125
+ ],
+ "mean": 0.03477994918823242,
+ "median": 0.03459699249267578,
+ "std": 0.0003824838852993976,
+ "min": 0.03454345703125,
+ "max": 0.03554316711425781,
+ "p25": 0.03459260940551758,
+ "p75": 0.03462351989746094,
+ "p90": 0.03517530822753906,
+ "p95": 0.035359237670898436,
+ "p99": 0.035506381225585935,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 29.35049840671794,
+ 29.375996038830536,
+ 29.336687188978114,
+ 29.374776246821558,
+ 29.362853323801875
+ ],
+ "mean": 29.360162241030004,
+ "median": 29.362853323801875,
+ "std": 0.014939799242283799,
+ "min": 29.336687188978114,
+ "max": 29.375996038830536,
+ "p25": 29.35049840671794,
+ "p75": 29.374776246821558,
+ "p90": 29.375508122026943,
+ "p95": 29.37575208042874,
+ "p99": 29.375947247150176,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.0340709716796875,
+ 0.03404139892578125,
+ 0.034087011718750004,
+ 0.0340428125,
+ 0.0340566357421875
+ ],
+ "mean": 0.03405976611328125,
+ "median": 0.0340566357421875,
+ "std": 1.733473702701496e-05,
+ "min": 0.03404139892578125,
+ "max": 0.034087011718750004,
+ "p25": 0.0340428125,
+ "p75": 0.0340709716796875,
+ "p90": 0.034080595703125,
+ "p95": 0.034083803710937506,
+ "p99": 0.03408637011718751,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 96.98837209302326,
+ "gpu_utilization_max": 98,
+ "gpu_utilization_min": 95,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 86,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "eager_sdpa_efficient_attention",
+ "metadata": {
+ "timestamp": "2025-10-10T16:50:12.090955",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "eager",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "eager",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": null,
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "efficient_attention",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.403771240234375,
+ 3.40585107421875,
+ 3.4067802734375,
+ 3.403892578125,
+ 3.4060400390625
+ ],
+ "mean": 3.4052670410156254,
+ "median": 3.40585107421875,
+ "std": 0.0012128529457714912,
+ "min": 3.403771240234375,
+ "max": 3.4067802734375,
+ "p25": 3.403892578125,
+ "p75": 3.4060400390625,
+ "p90": 3.4064841796875,
+ "p95": 3.4066322265625,
+ "p99": 3.4067506640625003,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.035146785736083985,
+ 0.034685470581054687,
+ 0.03467977523803711,
+ 0.03471414566040039,
+ 0.03468172836303711
+ ],
+ "mean": 0.03478158111572266,
+ "median": 0.034685470581054687,
+ "std": 0.0001830268828299018,
+ "min": 0.03467977523803711,
+ "max": 0.035146785736083985,
+ "p25": 0.03468172836303711,
+ "p75": 0.03471414566040039,
+ "p90": 0.034973729705810544,
+ "p95": 0.035060257720947265,
+ "p99": 0.03512948013305664,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 29.379177665627804,
+ 29.361236830632258,
+ 29.353228554155702,
+ 29.37813039184803,
+ 29.359607888674333
+ ],
+ "mean": 29.36627626618763,
+ "median": 29.361236830632258,
+ "std": 0.010460137296729316,
+ "min": 29.353228554155702,
+ "max": 29.379177665627804,
+ "p25": 29.359607888674333,
+ "p75": 29.37813039184803,
+ "p90": 29.378758756115893,
+ "p95": 29.37896821087185,
+ "p99": 29.379135774676612,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.034037712402343746,
+ 0.0340585107421875,
+ 0.034067802734375,
+ 0.03403892578125,
+ 0.034060400390624995
+ ],
+ "mean": 0.03405267041015625,
+ "median": 0.0340585107421875,
+ "std": 1.2128529457714306e-05,
+ "min": 0.034037712402343746,
+ "max": 0.034067802734375,
+ "p25": 0.03403892578125,
+ "p75": 0.034060400390624995,
+ "p90": 0.034064841796874995,
+ "p95": 0.034066322265625,
+ "p99": 0.034067506640625,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 96.8953488372093,
+ "gpu_utilization_max": 98,
+ "gpu_utilization_min": 94,
+ "gpu_memory_used_mean": 13849,
+ "gpu_memory_used_max": 13849,
+ "gpu_memory_used_min": 13849,
+ "sample_count": 86,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_sdpa_default",
+ "metadata": {
+ "timestamp": "2025-10-10T16:50:44.476639",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": null,
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.69090087890625,
+ 3.694206298828125,
+ 3.68985009765625,
+ 3.68645751953125,
+ 3.676801025390625
+ ],
+ "mean": 3.687643164062499,
+ "median": 3.68985009765625,
+ "std": 0.005958451540070074,
+ "min": 3.676801025390625,
+ "max": 3.694206298828125,
+ "p25": 3.68645751953125,
+ "p75": 3.69090087890625,
+ "p90": 3.6928841308593747,
+ "p95": 3.6935452148437498,
+ "p99": 3.69407408203125,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.03815641784667969,
+ 0.037461631774902346,
+ 0.03761782455444336,
+ 0.03744464111328125,
+ 0.037463966369628905
+ ],
+ "mean": 0.03762889633178711,
+ "median": 0.037463966369628905,
+ "std": 0.0002711202105642868,
+ "min": 0.03744464111328125,
+ "max": 0.03815641784667969,
+ "p25": 0.037461631774902346,
+ "p75": 0.03761782455444336,
+ "p90": 0.03794098052978516,
+ "p95": 0.03804869918823242,
+ "p99": 0.03813487411499024,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 27.093656340517516,
+ 27.069414080020916,
+ 27.101371967256565,
+ 27.126312854600712,
+ 27.19755551345778
+ ],
+ "mean": 27.1176621511707,
+ "median": 27.101371967256565,
+ "std": 0.04388008526453874,
+ "min": 27.069414080020916,
+ "max": 27.19755551345778,
+ "p25": 27.093656340517516,
+ "p75": 27.126312854600712,
+ "p90": 27.169058449914953,
+ "p95": 27.183306981686364,
+ "p99": 27.194705807103496,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.0369090087890625,
+ 0.036942062988281246,
+ 0.0368985009765625,
+ 0.0368645751953125,
+ 0.03676801025390625
+ ],
+ "mean": 0.036876431640625,
+ "median": 0.0368985009765625,
+ "std": 5.9584515400701254e-05,
+ "min": 0.03676801025390625,
+ "max": 0.036942062988281246,
+ "p25": 0.0368645751953125,
+ "p75": 0.0369090087890625,
+ "p90": 0.03692884130859375,
+ "p95": 0.0369354521484375,
+ "p99": 0.0369407408203125,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 90.56989247311827,
+ "gpu_utilization_max": 94,
+ "gpu_utilization_min": 87,
+ "gpu_memory_used_mean": 13845,
+ "gpu_memory_used_max": 13845,
+ "gpu_memory_used_min": 13845,
+ "sample_count": 93,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_sdpa_math",
+ "metadata": {
+ "timestamp": "2025-10-10T16:51:19.135716",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "math",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.903764404296875,
+ 3.872013427734375,
+ 3.873169189453125,
+ 3.853333984375,
+ 3.90305322265625
+ ],
+ "mean": 3.881066845703125,
+ "median": 3.873169189453125,
+ "std": 0.019555198215547152,
+ "min": 3.853333984375,
+ "max": 3.903764404296875,
+ "p25": 3.872013427734375,
+ "p75": 3.90305322265625,
+ "p90": 3.903479931640625,
+ "p95": 3.90362216796875,
+ "p99": 3.90373595703125,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.042030494689941404,
+ 0.039236705780029295,
+ 0.04177679824829102,
+ 0.039336097717285155,
+ 0.041143840789794925
+ ],
+ "mean": 0.040704787445068356,
+ "median": 0.041143840789794925,
+ "std": 0.0011939891161156942,
+ "min": 0.039236705780029295,
+ "max": 0.042030494689941404,
+ "p25": 0.039336097717285155,
+ "p75": 0.04177679824829102,
+ "p90": 0.04192901611328125,
+ "p95": 0.04197975540161133,
+ "p99": 0.04202034683227539,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 25.616299971875854,
+ 25.82635671759869,
+ 25.81865007919253,
+ 25.951552708769345,
+ 25.620967559326363
+ ],
+ "mean": 25.76676540735256,
+ "median": 25.81865007919253,
+ "std": 0.1298355497973911,
+ "min": 25.616299971875854,
+ "max": 25.951552708769345,
+ "p25": 25.620967559326363,
+ "p75": 25.82635671759869,
+ "p90": 25.901474312301083,
+ "p95": 25.926513510535212,
+ "p99": 25.94654486912252,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.03903764404296875,
+ 0.03872013427734375,
+ 0.03873169189453125,
+ 0.03853333984375,
+ 0.039030532226562505
+ ],
+ "mean": 0.03881066845703125,
+ "median": 0.03873169189453125,
+ "std": 0.00019555198215547178,
+ "min": 0.03853333984375,
+ "max": 0.03903764404296875,
+ "p25": 0.03872013427734375,
+ "p75": 0.039030532226562505,
+ "p90": 0.03903479931640625,
+ "p95": 0.039036221679687504,
+ "p99": 0.0390373595703125,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 89.18367346938776,
+ "gpu_utilization_max": 93,
+ "gpu_utilization_min": 84,
+ "gpu_memory_used_mean": 13845,
+ "gpu_memory_used_max": 13845,
+ "gpu_memory_used_min": 13845,
+ "sample_count": 98,
+ "gpu_monitoring_status": "success"
+ }
+ },
+ {
+ "scenario_name": "compiled_compile_max-autotune_sdpa_efficient_attention",
+ "metadata": {
+ "timestamp": "2025-10-10T16:51:59.996588",
+ "commit_id": "7164924a7e83f223a2bf2e104bef98eabe545091",
+ "hardware_info": {
+ "gpu_name": "NVIDIA A10G",
+ "gpu_memory_total_mb": 23028,
+ "cpu_count": 16,
+ "memory_total_mb": 63607,
+ "python_version": "3.10.12",
+ "torch_version": "2.8.0+cu126",
+ "cuda_version": "12.6"
+ },
+ "config": {
+ "name": "compiled",
+ "model_id": "meta-llama/Llama-2-7b-hf",
+ "variant": "compiled",
+ "warmup_iterations": 3,
+ "measurement_iterations": 5,
+ "num_tokens_to_generate": 100,
+ "device": "cuda",
+ "torch_dtype": "float16",
+ "compile_mode": "max-autotune",
+ "compile_options": {},
+ "use_cache": true,
+ "batch_size": 1,
+ "sequence_length": null,
+ "attn_implementation": "sdpa",
+ "sdpa_backend": "efficient_attention",
+ "custom_params": {}
+ }
+ },
+ "measurements": {
+ "latency_seconds": {
+ "name": "latency_seconds",
+ "measurements": [
+ 3.68957861328125,
+ 3.68287841796875,
+ 3.68667919921875,
+ 3.6754833984375,
+ 3.67550244140625
+ ],
+ "mean": 3.6820244140625,
+ "median": 3.68287841796875,
+ "std": 0.005740788902139874,
+ "min": 3.6754833984375,
+ "max": 3.68957861328125,
+ "p25": 3.67550244140625,
+ "p75": 3.68667919921875,
+ "p90": 3.68841884765625,
+ "p95": 3.6889987304687497,
+ "p99": 3.68946263671875,
+ "unit": "seconds"
+ },
+ "time_to_first_token_seconds": {
+ "name": "time_to_first_token_seconds",
+ "measurements": [
+ 0.038233985900878904,
+ 0.03761558532714844,
+ 0.03831206512451172,
+ 0.03737094497680664,
+ 0.03739225769042969
+ ],
+ "mean": 0.037784967803955086,
+ "median": 0.03761558532714844,
+ "std": 0.0004083565586144897,
+ "min": 0.03737094497680664,
+ "max": 0.03831206512451172,
+ "p25": 0.03739225769042969,
+ "p75": 0.038233985900878904,
+ "p90": 0.038280833435058594,
+ "p95": 0.03829644927978516,
+ "p99": 0.03830894195556641,
+ "unit": "seconds"
+ },
+ "tokens_per_second": {
+ "name": "tokens_per_second",
+ "measurements": [
+ 27.103366124259672,
+ 27.152674797001275,
+ 27.124681751857107,
+ 27.20730558666417,
+ 27.20716462420303
+ ],
+ "mean": 27.159038576797048,
+ "median": 27.152674797001275,
+ "std": 0.04234650343347122,
+ "min": 27.103366124259672,
+ "max": 27.20730558666417,
+ "p25": 27.124681751857107,
+ "p75": 27.20716462420303,
+ "p90": 27.207249201679716,
+ "p95": 27.207277394171943,
+ "p99": 27.207299948165723,
+ "unit": "tokens/sec"
+ },
+ "time_per_output_token_seconds": {
+ "name": "time_per_output_token_seconds",
+ "measurements": [
+ 0.0368957861328125,
+ 0.0368287841796875,
+ 0.0368667919921875,
+ 0.036754833984375,
+ 0.036755024414062504
+ ],
+ "mean": 0.036820244140625,
+ "median": 0.0368287841796875,
+ "std": 5.740788902139823e-05,
+ "min": 0.036754833984375,
+ "max": 0.0368957861328125,
+ "p25": 0.036755024414062504,
+ "p75": 0.0368667919921875,
+ "p90": 0.036884188476562496,
+ "p95": 0.0368899873046875,
+ "p99": 0.036894626367187495,
+ "unit": "seconds/token"
+ }
+ },
+ "gpu_metrics": {
+ "gpu_utilization_mean": 90.73118279569893,
+ "gpu_utilization_max": 94,
+ "gpu_utilization_min": 86,
+ "gpu_memory_used_mean": 13845,
+ "gpu_memory_used_max": 13845,
+ "gpu_memory_used_min": 13845,
+ "sample_count": 93,
+ "gpu_monitoring_status": "success"
+ }
+ }
+ ]
+ }
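
For readers who want to compare the scenarios above without scanning the raw JSON, the short Python sketch below loads the uploaded file and prints the mean tokens/sec, mean time-to-first-token, and mean end-to-end latency per scenario. This is a minimal sketch, not part of the benchmark tooling in this repository; the local file path is illustrative, and only the field names visible in this commit are assumed.

    # Minimal sketch: summarize the benchmark scenarios in the uploaded JSON file.
    # Assumes the file has been downloaded locally; the path below is illustrative.
    import json

    path = "Llama-2-7b-hf_benchmark_20251010_165231.json"

    with open(path) as f:
        results = json.load(f)

    print(f"model: {results['model_name']}")
    for scenario in results["benchmark_scenarios"]:
        name = scenario["scenario_name"]
        m = scenario["measurements"]
        tps = m["tokens_per_second"]["mean"]
        ttft = m["time_to_first_token_seconds"]["mean"]
        latency = m["latency_seconds"]["mean"]
        # One line per scenario: throughput, first-token latency (ms), end-to-end latency (s).
        print(f"{name:55s}  {tps:7.2f} tok/s  ttft {ttft * 1e3:6.2f} ms  e2e {latency:6.2f} s")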