Meta
MLX Community

Llama 3.1 8B Instruct benchmark on an Apple M4 Max · 128 GB

← Runs

Prompt tokens

40,960

Generation tokens

10,240

Trials passed

10/10

Verified

79.5 tok/s

727.0 tok/s

Peak memory

6.92/128 GB

Runs well

Trials

Decode / Prefill Speeds

Metadata

metadata.json
{
"runId": "run_0eb21a7f-a5db-43cf-b897-2d5f78507ae4",
"bundleId": "mlx-meta-llama-3.1-8b-instruct-4bit-108168",
"status": "verified",
"promptTokens": 40960,
"completionTokens": 10240,
"contextLength": 5120,
"harness": {
"version": "0.1.18",
"gitSha": "c71d7d8"
},
"runtime": {
"name": "mlx_lm",
"version": "0.31.2",
"buildFlags": null
},
"model": {
"displayName": "Llama 3.1 8B Instruct",
"format": "mlx",
"quant": "4bit",
"architecture": "llama",
"source": "mlx-community/Meta-Llama-3.1-8B-Instruct-4bit",
"fileSizeBytes": 4517488999,
"lab": {
"name": "Meta",
"slug": "meta"
},
"quantizedBy": {
"name": "MLX Community",
"slug": "mlx-community"
}
},
"device": {
"cpu": "Apple M4 Max",
"cpuCores": 16,
"gpu": "Apple M4 Max",
"gpuCores": 40,
"gpuCount": 1,
"ramGb": 128,
"osName": "macOS",
"osVersion": "26.4"
},
"decodeTpsMean": 79.5,
"prefillTpsMean": 727,
"ttftP50Ms": 5656,
"idleTpsMean": 2624,
"peakRssMb": 7089,
"trialsPassed": 10,
"trialsTotal": 10,
"runnabilityScore": 0.8187708095005581,
"bundleSha256": "379074290fe5c1486e2ef2120a9e86d29439bf5816a6cbe481164a49ec382825",
"createdAt": "2026-03-31T16:31:19.577Z"
}