Meta
MLX Community

Llama 3.2 3B Instruct benchmark on an Apple M5 Pro · 48 GB

← Runs

Prompt tokens

40,960

Generation tokens

10,240

Trials passed

10/10

Verified

106.3 tok/s

3,225.0 tok/s

Peak memory

3.75/48 GB

Runs great

Trials

Decode / Prefill Speeds

Metadata

metadata.json
{
"runId": "run_4cd9d1b1-8bd0-4ab4-b6cc-eb0d03e0bd14",
"bundleId": "mlx-llama-3.2-3b-instruct-4bit-278877",
"status": "verified",
"promptTokens": 40960,
"completionTokens": 10240,
"contextLength": 5120,
"harness": {
"version": "0.1.20",
"gitSha": "unknown"
},
"runtime": {
"name": "mlx_lm",
"version": "0.31.2",
"buildFlags": null
},
"model": {
"displayName": "Llama 3.2 3B Instruct",
"format": "mlx",
"quant": "4bit",
"architecture": "llama",
"source": "mlx-community/Llama-3.2-3B-Instruct-4bit",
"fileSizeBytes": 1807496278,
"lab": {
"name": "Meta",
"slug": "meta"
},
"quantizedBy": {
"name": "MLX Community",
"slug": "mlx-community"
}
},
"device": {
"cpu": "Apple M5 Pro",
"cpuCores": 15,
"gpu": "Apple M5 Pro",
"gpuCores": 16,
"gpuCount": 1,
"ramGb": 48,
"osName": "macOS",
"osVersion": "26.4.1"
},
"decodeTpsMean": 106.3,
"prefillTpsMean": 3225,
"ttftP50Ms": 1313.94,
"idleTpsMean": 1566,
"peakRssMb": 3838,
"trialsPassed": 10,
"trialsTotal": 10,
"runnabilityScore": 0.9471602957589286,
"bundleSha256": "f79fb464c2b90ad9dd50a02662da147bead2d34b2ba6b506831a667efb14b160",
"createdAt": "2026-04-13T18:25:57.493Z"
}