Meta
MLX Community

Llama 4 Scout benchmark on an Apple M4 Max · 128 GB

← Runs

Prompt tokens

40,960

Generation tokens

10,240

Trials passed

10/10

Verified

33.0 tok/s

291.1 tok/s

Peak memory

59.00/128 GB

Runs poorly

Trials

Decode / Prefill Speeds

Metadata

metadata.json
{
"runId": "run_adc23832-592d-49ee-a747-b7b1b8e44044",
"bundleId": "mlx-llama-4-scout-17b-16e-instruct-4bit-81d091",
"status": "verified",
"promptTokens": 40960,
"completionTokens": 10240,
"contextLength": 5120,
"harness": {
"version": "0.1.18",
"gitSha": "c71d7d8"
},
"runtime": {
"name": "mlx_lm",
"version": "0.31.2",
"buildFlags": null
},
"model": {
"displayName": "Llama 4 Scout",
"format": "mlx",
"quant": "4bit",
"architecture": "llama4",
"source": "mlx-community/Llama-4-Scout-17B-16E-Instruct-4bit",
"fileSizeBytes": 61115340220,
"lab": {
"name": "Meta",
"slug": "meta"
},
"quantizedBy": {
"name": "MLX Community",
"slug": "mlx-community"
}
},
"device": {
"cpu": "Apple M4 Max",
"cpuCores": 16,
"gpu": "Apple M4 Max",
"gpuCores": 40,
"gpuCount": 1,
"ramGb": 128,
"osName": "macOS",
"osVersion": "26.4"
},
"decodeTpsMean": 33,
"prefillTpsMean": 291.1,
"ttftP50Ms": 14256.38,
"idleTpsMean": 59392,
"peakRssMb": 60416,
"trialsPassed": 10,
"trialsTotal": 10,
"runnabilityScore": 0.4961386904761905,
"bundleSha256": "8931988bffa2f0d8dff707e20c82db57357993c489e7eac45534365ef3087cec",
"createdAt": "2026-03-31T16:52:16.702Z"
}