|
| 1 | +#!/usr/bin/env bash |
| 2 | +# Needle-in-a-Haystack benchmark for quant.cpp KV cache compression. |
| 3 | +# |
| 4 | +# Compares FP32 KV (baseline) vs turbo_kv_4b -v q4 --k-window 128 (6.4× compression). |
| 5 | +# Uses common-English-word needles that survive Q4 weight visual jitter. |
| 6 | +# Scoring: case-insensitive grep for distinctive keywords from the needle. |
| 7 | +# |
| 8 | +# Usage: |
| 9 | +# bash bench/niah_test.sh # default grid |
| 10 | +# GRID=quick bash bench/niah_test.sh # smaller grid for fast iteration |
| 11 | +# GRID=full bash bench/niah_test.sh # full grid (slow) |
| 12 | + |
# Strict mode: abort on errors (-e), on unset variables (-u), and on a
# failure in any pipeline stage (pipefail) so a broken run can't silently
# produce a half-filled CSV.
set -euo pipefail

# All settings are overridable via environment variables.
TQ=${TQ:-./build_metal/quant}                           # inference CLI binary
MODEL=${MODEL:-models/Llama-3.2-3B-Instruct-Q8_0.gguf}  # GGUF model file
THREADS=${THREADS:-8}                                   # CPU threads (-j)
GRID=${GRID:-default}                                   # grid preset: quick|default|full
OUT_DIR=${OUT_DIR:-bench/results/niah}                  # output directory
RUN_ID=$(date -u +%Y%m%dT%H%M%S)                        # UTC stamp keying this run's files
RAW_LOG="$OUT_DIR/raw_${RUN_ID}.log"                    # full model output per run
RESULT_CSV="$OUT_DIR/results_${RUN_ID}.csv"             # one scored row per run

mkdir -p "$OUT_DIR"

# Fail early, with actionable messages, on every prerequisite. Previously a
# missing haystack file or python3 binary only surfaced as empty prompts
# partway through the sweep.
if [ ! -x "$TQ" ]; then
  echo "ERROR: $TQ not built. Run: cmake --build build_metal -j8" >&2
  exit 1
fi
if [ ! -f "$MODEL" ]; then
  echo "ERROR: $MODEL missing." >&2
  exit 1
fi
if [ ! -f bench/data/wikitext2_test.txt ]; then
  echo "ERROR: bench/data/wikitext2_test.txt missing (needed by build_prompt)." >&2
  exit 1
fi
if ! command -v python3 >/dev/null; then
  echo "ERROR: python3 not found (needed by build_prompt)." >&2
  exit 1
fi
| 34 | + |
| 35 | +# ---------------------------------------------------------------------------- |
| 36 | +# Grid configuration |
| 37 | +# |
# IMPORTANT: contexts here are TOKEN counts, not chars. This CLI loads the
# Q8_0 model and by default converts weights to Q4 on the fly. Empirically the
| 40 | +# effective working memory of that build is ~1500 tokens — beyond that the |
| 41 | +# chat template gets overpowered by the document continuation prior and the |
| 42 | +# model fails to answer the question (just continues the haystack text). |
| 43 | +# Grid sizes therefore stay within the regime where the model can actually |
| 44 | +# retrieve, so we measure compression-vs-baseline cleanly. |
| 45 | +# ---------------------------------------------------------------------------- |
| 46 | +case "$GRID" in |
| 47 | + quick) |
| 48 | + CONTEXTS=(512 1024) |
| 49 | + DEPTHS=(0.1 0.5 0.9) |
| 50 | + ;; |
| 51 | + default) |
| 52 | + CONTEXTS=(512 1024 1536) |
| 53 | + DEPTHS=(0.1 0.5 0.9) |
| 54 | + ;; |
| 55 | + full) |
| 56 | + CONTEXTS=(512 1024 1536) |
| 57 | + DEPTHS=(0.1 0.25 0.5 0.75 0.9) |
| 58 | + ;; |
| 59 | + *) |
| 60 | + echo "Unknown GRID: $GRID" >&2; exit 1 ;; |
| 61 | +esac |
| 62 | + |
# Retrieval probes. Every fact is phrased in common English words so the
# answer survives Q4 weight jitter; each KEYWORD_* lists accepted alternates
# separated by a literal "\|" and is matched case-insensitively when scoring.
NEEDLE_0="The chief financial officer of Northwind Logistics is Sarah Chen, hired in 2023."
QUESTION_0="Who is the chief financial officer of Northwind Logistics? Answer with the full name."
KEYWORD_0="Sarah\|Chen"

NEEDLE_1="The launch date for Project Aurora is November 14th in San Francisco."
QUESTION_1="When and where will Project Aurora launch? Answer in one sentence."
KEYWORD_1="November\|San Francisco"

NEEDLE_2="The reactor cooling tank at the Helios facility holds exactly eight thousand liters of distilled water."
QUESTION_2="How much distilled water does the reactor cooling tank at Helios hold?"
KEYWORD_2="eight thousand\|8000\|8,000"

# Parallel arrays, all indexed by needle id 0..2.
NEEDLES=()
QUESTIONS=()
KEYWORDS=()
NEEDLES+=("$NEEDLE_0"); QUESTIONS+=("$QUESTION_0"); KEYWORDS+=("$KEYWORD_0")
NEEDLES+=("$NEEDLE_1"); QUESTIONS+=("$QUESTION_1"); KEYWORDS+=("$KEYWORD_1")
NEEDLES+=("$NEEDLE_2"); QUESTIONS+=("$QUESTION_2"); KEYWORDS+=("$KEYWORD_2")

# KV-cache configurations under test: FP32 baseline vs 4-bit turbo KV.
# Each METHOD_FLAGS entry is a flag *string* that the runner word-splits.
METHOD_NAMES=("fp32" "turbo_q4_w128")
METHOD_FLAGS=("-k fp32" "-k turbo_kv_4b -v q4 --k-window 128")
| 83 | + |
| 84 | +# ---------------------------------------------------------------------------- |
| 85 | +# Helpers |
| 86 | +# ---------------------------------------------------------------------------- |
# build_prompt CTX_TOKENS DEPTH NEEDLE QUESTION → echoes the prompt
#
# Delegates to an inline python3 script (heredoc below) that reads
# bench/data/wikitext2_test.txt, splices NEEDLE into the haystack at the
# sentence boundary nearest fractional DEPTH (0.0 = start, 1.0 = end), and
# appends QUESTION. Output goes to stdout with no trailing newline.
#
# Uses real wikitext-2 text as varied haystack (synthetic repetitive filler
# triggers a "stuck in repetition loop" failure mode in 3B Q4: the model
# generates meta-text like "I'm trapped in an infinite loop of repetition"
# instead of answering the question — see bench/results/niah/findings.md).
build_prompt() {
  # Quoted 'PYEOF' delimiter: the python source is passed verbatim (no shell
  # expansion); the four shell arguments arrive as sys.argv[1..4].
  python3 - "$1" "$2" "$3" "$4" <<'PYEOF'
import sys
ctx_tokens = int(sys.argv[1])
depth = float(sys.argv[2])
needle = sys.argv[3]
question = sys.argv[4]

with open("bench/data/wikitext2_test.txt") as f:
    raw = f.read()

# ~4 chars per token for English wikitext, sized below ctx to leave room
# for the question + chat template + answer headroom.
target_chars = int(ctx_tokens * 3.6)
hay = raw[:target_chars]
# Trim to last full sentence so the model isn't fed a partial word.
end = hay.rfind(". ")
if end > 0:
    hay = hay[:end + 1]

# Insert needle at sentence boundary nearest the requested depth.
desired = int(len(hay) * depth)
sb = hay.rfind(". ", 0, max(desired, 2))
if sb < 0:
    sb = 0
else:
    sb += 2
hay2 = hay[:sb] + needle + " " + hay[sb:]

# Simple format that works with --chat at sub-1500-token contexts.
# The structured "Based on this document..." prefix overpowers the
# chat template at this scale and causes the model to continue the
# haystack — keep it minimal.
prompt = hay2 + "\n\nQuestion: " + question
sys.stdout.write(prompt)
PYEOF
}
| 130 | + |
# score_response RESPONSE KEYWORD → echoes 1 (pass) or 0 (fail)
#   KEYWORD holds accepted alternates separated by a literal "\|"; it is
#   rewritten to an ERE alternation and matched case-insensitively.
score_response() {
  local resp="$1"
  local kw="$2"
  # ${kw//\\|/|} turns every literal "\|" into "|" for grep -E — same result
  # as the previous `echo | sed` pipeline without two extra processes.
  # printf (not echo) so a response that starts with "-" or contains
  # backslashes cannot be mangled before matching.
  if printf '%s\n' "$resp" | grep -qiE "${kw//\\|/|}"; then
    echo 1
  else
    echo 0
  fi
}
| 141 | + |
| 142 | +# ---------------------------------------------------------------------------- |
| 143 | +# Header |
| 144 | +# ---------------------------------------------------------------------------- |
| 145 | +echo "method,context,depth,needle_idx,pass,response" > "$RESULT_CSV" |
| 146 | +echo "==> NIAH Benchmark" |
| 147 | +echo " binary: $TQ" |
| 148 | +echo " model: $MODEL" |
| 149 | +echo " grid: $GRID contexts=${CONTEXTS[*]} depths=${DEPTHS[*]}" |
| 150 | +echo " needles: ${#NEEDLES[@]}" |
| 151 | +echo " methods: ${METHOD_NAMES[*]}" |
| 152 | +echo " raw: $RAW_LOG" |
| 153 | +echo " results: $RESULT_CSV" |
| 154 | +echo "" |
| 155 | + |
# Total cell count of the sweep; used only for progress reporting.
total_runs=$(( ${#METHOD_NAMES[@]} * ${#CONTEXTS[@]} * ${#DEPTHS[@]} * ${#NEEDLES[@]} ))
run_idx=0

# Sweep every (method × context × depth × needle) cell.
for mi in "${!METHOD_NAMES[@]}"; do
  mname="${METHOD_NAMES[$mi]}"
  mflags="${METHOD_FLAGS[$mi]}"   # flag *string*; word-split on purpose below
  for ctx in "${CONTEXTS[@]}"; do
    # Need ctx + question + answer headroom; round up to power of 2 + slack
    cli_ctx=$(( ctx + 256 ))
    for depth in "${DEPTHS[@]}"; do
      for ni in "${!NEEDLES[@]}"; do
        run_idx=$(( run_idx + 1 ))
        needle="${NEEDLES[$ni]}"
        question="${QUESTIONS[$ni]}"
        keyword="${KEYWORDS[$ni]}"

        prompt=$(build_prompt "$ctx" "$depth" "$needle" "$question")

        # Progress prefix (no newline — PASS/FAIL is appended further down).
        printf "[%3d/%d] %-14s ctx=%-5d depth=%.2f needle=%d " \
          "$run_idx" "$total_runs" "$mname" "$ctx" "$depth" "$ni"

        # Run inference. $mflags is intentionally unquoted so it splits into
        # individual CLI flags; '|| true' keeps a crashed run from aborting
        # the whole sweep (the failure output still lands in the raw log).
        out=$( "$TQ" "$MODEL" -p "$prompt" -n 32 -T 0.0 -j "$THREADS" \
          --chat --ctx "$cli_ctx" $mflags 2>&1 || true )

        # Extract response — between 1st and 2nd '---' delimiters,
        # skipping the [tokenizer] line that the CLI prints first.
        resp=$(echo "$out" | awk '
          /^---$/ { n++; next }
          n==1 && /^\[tokenizer\]/ { next }
          n==1 { print }
        ')
        # Fallback when no delimited response was found (e.g. CLI error):
        # take the 3rd-from-last output line so the CSV still shows a clue.
        if [ -z "$resp" ]; then
          resp=$(echo "$out" | tail -3 | head -1)
        fi
        # Flatten newlines and double any '"' so the field is CSV-safe.
        resp_csv=$(echo "$resp" | tr '\n' ' ' | sed 's/"/""/g')

        pass=$(score_response "$resp" "$keyword")
        if [ "$pass" = "1" ]; then echo "PASS"; else echo "FAIL: ${resp:0:60}"; fi

        # Persist: one scored CSV row plus the full raw output for debugging.
        echo "$mname,$ctx,$depth,$ni,$pass,\"$resp_csv\"" >> "$RESULT_CSV"
        echo "===== $mname ctx=$ctx depth=$depth needle=$ni =====" >> "$RAW_LOG"
        echo "$out" >> "$RAW_LOG"
        echo "" >> "$RAW_LOG"
      done
    done
  done
done
| 205 | + |
| 206 | +# ---------------------------------------------------------------------------- |
| 207 | +# Summary |
| 208 | +# ---------------------------------------------------------------------------- |
| 209 | +echo "" |
| 210 | +echo "==> Results CSV: $RESULT_CSV" |
| 211 | +echo "" |
| 212 | +echo "==> Summary by method:" |
| 213 | +for mname in "${METHOD_NAMES[@]}"; do |
| 214 | + pass=$(awk -F, -v m="$mname" 'NR>1 && $1==m {p+=$5; t++} END{printf "%d/%d", p, t}' "$RESULT_CSV") |
| 215 | + pct=$(awk -F, -v m="$mname" 'NR>1 && $1==m {p+=$5; t++} END{if(t>0)printf "%.1f%%", 100*p/t; else print "n/a"}' "$RESULT_CSV") |
| 216 | + printf " %-16s %s (%s)\n" "$mname" "$pass" "$pct" |
| 217 | +done |
| 218 | + |
| 219 | +echo "" |
| 220 | +echo "==> Summary by (method × context):" |
| 221 | +printf " %-16s" "method" |
| 222 | +for ctx in "${CONTEXTS[@]}"; do printf " %7d" "$ctx"; done |
| 223 | +echo "" |
| 224 | +for mname in "${METHOD_NAMES[@]}"; do |
| 225 | + printf " %-16s" "$mname" |
| 226 | + for ctx in "${CONTEXTS[@]}"; do |
| 227 | + pct=$(awk -F, -v m="$mname" -v c="$ctx" 'NR>1 && $1==m && $2==c {p+=$5; t++} END{if(t>0)printf "%5.0f%%", 100*p/t; else print " n/a"}' "$RESULT_CSV") |
| 228 | + printf " %7s" "$pct" |
| 229 | + done |
| 230 | + echo "" |
| 231 | +done |