Meta
MLX Community

Llama 3.2 3B Instruct benchmark on an Apple M4 Max · 128 GB

← Runs

Prompt tokens

40,960

Generation tokens

10,240

Trials passed

10/10

Verified

160.1 tok/s

1,548.1 tok/s

Peak memory

3.64/128 GB

Runs great

Trials

Decode / Prefill Speeds

Metadata

metadata.json
{
"runId": "run_61a6ee75-44af-4e34-a865-df357854f10f",
"bundleId": "mlx-llama-3.2-3b-instruct-4bit-9243bc",
"status": "verified",
"promptTokens": 40960,
"completionTokens": 10240,
"contextLength": 5120,
"harness": {
"version": "0.1.18",
"gitSha": "c71d7d8"
},
"runtime": {
"name": "mlx_lm",
"version": "0.31.2",
"buildFlags": null
},
"model": {
"displayName": "Llama 3.2 3B Instruct",
"format": "mlx",
"quant": "4bit",
"architecture": "llama",
"source": "mlx-community/Llama-3.2-3B-Instruct-4bit",
"fileSizeBytes": 1807496278,
"lab": {
"name": "Meta",
"slug": "meta"
},
"quantizedBy": {
"name": "MLX Community",
"slug": "mlx-community"
}
},
"device": {
"cpu": "Apple M4 Max",
"cpuCores": 16,
"gpu": "Apple M4 Max",
"gpuCores": 40,
"gpuCount": 1,
"ramGb": 128,
"osName": "macOS",
"osVersion": "26.4"
},
"decodeTpsMean": 160.1,
"prefillTpsMean": 1548.1,
"ttftP50Ms": 2669.74,
"idleTpsMean": 2661,
"peakRssMb": 3725,
"trialsPassed": 10,
"trialsTotal": 10,
"runnabilityScore": 0.9152252165876116,
"bundleSha256": "146e7961e75e0e56fc7d556fbc174eda5617a72856d9e69a0602ef9c27fa778e",
"createdAt": "2026-03-31T16:41:08.157Z"
}