Meta
MLX Community

Llama 3.1 8B Instruct benchmark on an Apple M5 Pro · 48 GB

← Runs

Prompt tokens

40,960

Generation tokens

10,240

Trials passed

10/10

Verified

54.5 tok/s

1,351.2 tok/s

Peak memory

6.24/48 GB

Runs well

Trials

Decode / Prefill Speeds

Metadata

metadata.json
{
"runId": "run_f8ec959c-5597-4e6b-a9b3-2b1026e760bf",
"bundleId": "mlx-meta-llama-3.1-8b-instruct-4bit-8c043e",
"status": "verified",
"promptTokens": 40960,
"completionTokens": 10240,
"contextLength": 5120,
"harness": {
"version": "0.1.20",
"gitSha": "unknown"
},
"runtime": {
"name": "mlx_lm",
"version": "0.31.2",
"buildFlags": null
},
"model": {
"displayName": "Llama 3.1 8B Instruct",
"format": "mlx",
"quant": "4bit",
"architecture": "llama",
"source": "mlx-community/Meta-Llama-3.1-8B-Instruct-4bit",
"fileSizeBytes": 4517488999,
"lab": {
"name": "Meta",
"slug": "meta"
},
"quantizedBy": {
"name": "MLX Community",
"slug": "mlx-community"
}
},
"device": {
"cpu": "Apple M5 Pro",
"cpuCores": 15,
"gpu": "Apple M5 Pro",
"gpuCores": 16,
"gpuCount": 1,
"ramGb": 48,
"osName": "macOS",
"osVersion": "26.4.1"
},
"decodeTpsMean": 54.5,
"prefillTpsMean": 1351.2,
"ttftP50Ms": 3104.07,
"idleTpsMean": 338,
"peakRssMb": 6394,
"trialsPassed": 10,
"trialsTotal": 10,
"runnabilityScore": 0.7935587444196428,
"bundleSha256": "4d6cbda3ad17b6abaa1315606b7c103ca95e345f2e750252836cfb01d9e1f729",
"createdAt": "2026-04-13T20:18:46.382Z"
}