#!/bin/bash
# Shared stop hook logic — sourced by all vendor hooks.
# See ADR-0008 for the governance protocol; ADR-0013 for the structured tracker.
#
# Uses the structured tracker CLI (scripts/tracker) for TODO counting,
# circuit-breaker hashing, and guidance generation. Fail-closed: if the
# tracker CLI is present but fails, the hook blocks (exit 1).
#
# Expects REPO_ROOT to be set by the caller.
# Exports: TOTAL_HARD, TOTAL_SOFT, TOTAL_TODOS, CIRCUIT_BREAKER_TRIPPED,
#   GUIDANCE_FILE, GUIDANCE_FILE_COOLDOWN, GUIDANCE_FILE_REFLECTION,
#   STATE_FILE, ELAPSED_MIN
#
# Optional env vars for dry-run (set by scripts/stop-hook-preview):
#   STOP_HOOK_DRY_RUN        - When non-empty, writes go to a temp dir instead
#                              of permanent locations. No side effects on disk.
#   STOP_HOOK_BAKEOFF_FILTER - Glob pattern for bakeoff session dirs (e.g.
#                              "broad-*"). Default: "*" (all sessions).

# --- Setup ---
# Guidance files are written into the lab notebook so later sessions (and
# humans) can review what the hook told the agent.
GUIDANCE_LOG_DIR="$HOME/hypergumbo_lab_notebook/guidance_log"
mkdir -p "$GUIDANCE_LOG_DIR"
# Rolling history of TODO-set hashes used by the circuit breaker below.
# NOTE(review): fixed, predictable name under /tmp — shared across all repos
# and all users on the host; confirm a single-user host is the assumption.
HASH_FILE="/tmp/hypergumbo_stop_hashes"
# Consecutive identical hashes required before the breaker trips.
HASH_THRESHOLD=5
| 25 | + |
# --- Dry-run support ---
# When STOP_HOOK_DRY_RUN is non-empty, all writes are redirected into a
# throwaway temp dir so the preview tool can exercise the hook without
# touching permanent locations.
if [[ -n "${STOP_HOOK_DRY_RUN:-}" ]]; then
  # Fail closed: an unchecked mktemp failure would leave _DRY_RUN_TMPDIR
  # empty and later writes would target paths directly under "/".
  _DRY_RUN_TMPDIR=$(mktemp -d) || { echo "ERROR: mktemp -d failed for dry run" >&2; exit 1; }
fi
| 30 | + |
# --- Structured tracker (fail-closed) ---
# Counts open TODOs via the tracker CLI. If the CLI is present but errors,
# or emits something that is not a number, the hook blocks (exit 1) per the
# fail-closed contract in the header. If the tracker is absent, counts stay 0.
TOTAL_HARD=0
TOTAL_SOFT=0
if [[ -x "$REPO_ROOT/scripts/tracker" ]] && [[ -d "$REPO_ROOT/.agent/tracker" ]]; then
  # Fix: do not 2>/dev/null these calls — the hook blocks on failure, so
  # suppressing the tracker's own diagnostics hides the reason for the block.
  TOTAL_HARD=$("$REPO_ROOT/scripts/tracker" count-todos --hard) || \
    { echo "ERROR: tracker count-todos --hard failed" >&2; exit 1; }
  TOTAL_SOFT=$("$REPO_ROOT/scripts/tracker" count-todos --soft) || \
    { echo "ERROR: tracker count-todos --soft failed" >&2; exit 1; }
  # Fail closed on non-numeric output too: the arithmetic below would
  # otherwise abort with an opaque bash error (or evaluate stray input
  # as an expression).
  if ! [[ "$TOTAL_HARD" =~ ^[0-9]+$ && "$TOTAL_SOFT" =~ ^[0-9]+$ ]]; then
    echo "ERROR: tracker count-todos returned non-numeric output" >&2
    exit 1
  fi
fi
TOTAL_TODOS=$((TOTAL_HARD + TOTAL_SOFT))
| 41 | + |
# --- Circuit breaker (hash-based no-progress detection) ---
# Each stop appends a hash of the current TODO set; if the last
# HASH_THRESHOLD entries are all identical, the session is stopping
# repeatedly without changing its TODOs and the breaker trips.
CIRCUIT_BREAKER_TRIPPED=false
if [[ "$TOTAL_TODOS" -gt 0 ]]; then
  # Fallback hash includes $$ so a hashing failure can never look like
  # "no progress" across runs.
  CURRENT_HASH=$("$REPO_ROOT/scripts/tracker" hash-todos 2>/dev/null) || \
    { echo "WARNING: hash-todos failed, using fallback hash" >&2; CURRENT_HASH="fallback-$$"; }
  if [[ -z "${STOP_HOOK_DRY_RUN:-}" ]]; then
    echo "$CURRENT_HASH" >> "$HASH_FILE"
  fi
  # Fix: guard the reads — on the first real run (or a dry run before any
  # real run) the hash file does not exist yet and 'tail' printed an error
  # to stderr on every stop. With no history the breaker simply stays off.
  if [[ -f "$HASH_FILE" ]]; then
    TAIL_COUNT=$(tail -n "$HASH_THRESHOLD" "$HASH_FILE" | wc -l)
    UNIQUE_COUNT=$(tail -n "$HASH_THRESHOLD" "$HASH_FILE" | sort -u | wc -l)
    if [[ "$TAIL_COUNT" -ge "$HASH_THRESHOLD" && "$UNIQUE_COUNT" -eq 1 ]]; then
      CIRCUIT_BREAKER_TRIPPED=true
    fi
  fi
fi
| 56 | + |
# --- Bakeoff convergence summary (shared across all vendors) ---
# Computed early so it can be appended to guidance files. Finds the most
# recently modified state.json among matching bakeoff sessions and distills
# it to a one-line CONVERGED / NEEDS_WORK summary.
BAKEOFF_SUFFIX=""
BAKEOFF_CONVERGENCE_LINE=""
BAKEOFF_DIR="$HOME/hypergumbo_lab_notebook/bakeoff_artifacts"
if [[ -d "$BAKEOFF_DIR" ]]; then
  BAKEOFF_GLOB="${STOP_HOOK_BAKEOFF_FILTER:-*}"
  # NOTE(review): 'find -printf' is GNU-only; on BSD/macOS this pipeline
  # silently yields nothing (the error is discarded) and the summary is
  # skipped — confirm Linux-only deployment.
  LATEST_STATE=$(find "$BAKEOFF_DIR" -maxdepth 3 -path "*/${BAKEOFF_GLOB}/state.json" -printf '%T@ %p\n' 2>/dev/null | sort -rn | head -1 | cut -d' ' -f2-)
  if [[ -n "$LATEST_STATE" ]]; then
    # Fix: pass the path as argv (sys.argv[1]) instead of interpolating it
    # into the Python source — a quote or backslash in the path would break
    # the program, or worse, inject code into it.
    BAKEOFF_SUMMARY=$(python3 -c "
import json, sys
try:
    with open(sys.argv[1]) as f:
        state = json.load(f)
    cohort_num = state.get('cohort_number', '?')
    iteration = state.get('iteration', '?')
    # BROAD schema: convergence_history with critical/high/new_issues
    ch = state.get('convergence_history') or []
    if ch:
        latest = ch[-1]
        crit = latest.get('critical', 0)
        high = latest.get('high', 0)
        new = latest.get('new_issues', 0)
        cohort_num = latest.get('cohort', cohort_num)
        iteration = latest.get('iteration', iteration)
        if crit == 0 and high == 0 and new == 0:
            print(f'CONVERGED cohort={cohort_num} iter={iteration}')
        else:
            print(f'NEEDS_WORK cohort={cohort_num} iter={iteration} critical={crit} high={high} new={new}')
        sys.exit(0)
    # DEEP schema: verdicts with per-repo verdict (GOOD/WARN/FAIL)
    verdicts = state.get('verdicts') or []
    if verdicts:
        good = sum(1 for v in verdicts if v.get('verdict') == 'GOOD')
        warn = sum(1 for v in verdicts if v.get('verdict') == 'WARN')
        fail = sum(1 for v in verdicts if v.get('verdict') == 'FAIL')
        # Collect worst repos (FAIL first, then WARN) with their top concern
        worst = []
        for v in verdicts:
            if v.get('verdict') in ('FAIL', 'WARN') and v.get('concerns'):
            worst.append(f\"{v['repo_name']}: {v['concerns'][0]}\")
        worst_str = ''
        if worst:
            worst_str = '\\n Worst: ' + '; '.join(worst[:3])
        if fail == 0 and warn == 0:
            print(f'CONVERGED cohort={cohort_num} iter={iteration}')
        else:
            print(f'NEEDS_WORK cohort={cohort_num} iter={iteration} good={good} warn={warn} fail={fail}{worst_str}')
        sys.exit(0)
except Exception:
    pass
" "$LATEST_STATE" 2>/dev/null || true)

    if [[ "$BAKEOFF_SUMMARY" == CONVERGED* ]]; then
      BAKEOFF_CONVERGENCE_LINE="$BAKEOFF_SUMMARY"
      BAKEOFF_SUFFIX=$'\n\n---\nBakeoff convergence: '"$BAKEOFF_SUMMARY"$'\nLatest bakeoff session is CONVERGED — no critical/high issues. Running another bakeoff on the same cohort would be redundant. Consider: selecting a new cohort, mining existing artifacts, or moving to other work items.'
    elif [[ "$BAKEOFF_SUMMARY" == NEEDS_WORK* ]]; then
      BAKEOFF_CONVERGENCE_LINE="$BAKEOFF_SUMMARY"
      BAKEOFF_SUFFIX=$'\n\n---\nBakeoff convergence: '"$BAKEOFF_SUMMARY"$'\nLatest bakeoff session has outstanding issues. Consider investigating these before starting new work.'
    fi
  fi
fi
| 119 | + |
# --- Write guidance file (if any TODOs exist) ---
# Asks the tracker to render a guidance file, appends the bakeoff summary to
# it, and records its path in last_stop_check.json. Guidance generation is
# best-effort: a failure warns and continues (unlike the counting above).
GUIDANCE_FILE=""
if [[ "$TOTAL_TODOS" -gt 0 ]]; then
  # Pick the destination once: temp dir under dry-run, permanent log otherwise.
  if [[ -n "${STOP_HOOK_DRY_RUN:-}" ]]; then
    _GUIDANCE_DEST="$_DRY_RUN_TMPDIR"
  else
    _GUIDANCE_DEST="$GUIDANCE_LOG_DIR"
  fi
  GUIDANCE_FILE=$("$REPO_ROOT/scripts/tracker" guidance --guidance-dir "$_GUIDANCE_DEST" 2>/dev/null) || true
  if [[ -z "$GUIDANCE_FILE" || ! -f "$GUIDANCE_FILE" ]]; then
    echo "WARNING: tracker guidance generation failed, continuing without guidance file" >&2
    GUIDANCE_FILE=""
  fi

  # Phase 1a: append bakeoff convergence to guidance file
  if [[ -n "$GUIDANCE_FILE" && -n "$BAKEOFF_SUFFIX" ]]; then
    printf '%s' "$BAKEOFF_SUFFIX" >> "$GUIDANCE_FILE"
  fi

  # Update last_stop_check.json with guidance_file pointer + bakeoff convergence.
  # Best-effort: silently skipped when jq or the state file is unavailable,
  # and the state file is only replaced if jq succeeded (write-then-rename).
  if [[ -n "$GUIDANCE_FILE" && -z "${STOP_HOOK_DRY_RUN:-}" ]]; then
    STATE_FILE_FOR_GF="$REPO_ROOT/.agent/last_stop_check.json"
    if command -v jq &>/dev/null && [[ -f "$STATE_FILE_FOR_GF" ]]; then
      TMP=$(mktemp)
      if jq --arg gf "$GUIDANCE_FILE" \
            --arg bc "${BAKEOFF_CONVERGENCE_LINE:-}" \
            '. + {guidance_file: $gf} + (if $bc != "" then {bakeoff_convergence: $bc} else {} end)' \
            "$STATE_FILE_FOR_GF" > "$TMP" 2>/dev/null; then
        mv "$TMP" "$STATE_FILE_FOR_GF"
      else
        rm -f "$TMP"
      fi
    fi
  fi
fi
| 154 | + |
| 155 | +# (Bakeoff convergence computed above, before guidance file write) |
| 156 | + |
# --- Cooldown & reflection: compute elapsed time, write guidance files ---
# Minutes since the last completed stop-check, read from the state file.
STATE_FILE="$REPO_ROOT/.agent/last_stop_check.json"
# Backward compat: fall back to old filename if new one doesn't exist
if [[ ! -f "$STATE_FILE" && -f "$REPO_ROOT/.agent/stop_hook_state.json" ]]; then
  STATE_FILE="$REPO_ROOT/.agent/stop_hook_state.json"
fi

# Default is "very stale" so a missing or unreadable state file routes to
# Path 3 (full reflection).
ELAPSED_MIN=9999
if [[ -f "$STATE_FILE" ]]; then
  LAST_COMPLETED=$(jq -r '.last_completed_utc // "1970-01-01T00:00:00Z"' "$STATE_FILE" 2>/dev/null || echo "1970-01-01T00:00:00Z")
  # NOTE(review): 'date -d' is GNU-style; on BSD/macOS it fails and the
  # epoch falls back to 0, which also reads as maximally stale.
  PREV_EPOCH=$(date -d "$LAST_COMPLETED" +%s 2>/dev/null || echo 0)
  CUR_EPOCH=$(date +%s)
  ELAPSED_MIN=$(( (CUR_EPOCH - PREV_EPOCH) / 60 ))
fi
| 171 | + |
# --- Write guidance file for Path 2: Cooldown ---
# Combines cooldown_prompt.md + last reflection notes + bakeoff suffix.
GUIDANCE_FILE_COOLDOWN=""
if [[ "$ELAPSED_MIN" -lt 30 ]]; then
  TIMESTAMP=$(date +%m%d%Y_%H%M)
  if [[ -n "${STOP_HOOK_DRY_RUN:-}" ]]; then
    GUIDANCE_FILE_COOLDOWN="${_DRY_RUN_TMPDIR}/stop_guidance_cooldown_${TIMESTAMP}.md"
  else
    GUIDANCE_FILE_COOLDOWN="$GUIDANCE_LOG_DIR/stop_guidance_cooldown_${TIMESTAMP}.md"
  fi
  {
    # Fix: guard the template — a bare 'cat' on a missing cooldown_prompt.md
    # printed an error to stderr on every cooldown stop. Warn once instead.
    if [[ -f "$REPO_ROOT/.agent/cooldown_prompt.md" ]]; then
      cat "$REPO_ROOT/.agent/cooldown_prompt.md"
    else
      echo "WARNING: cooldown_prompt.md missing; cooldown guidance is empty" >&2
    fi
    # Append last reflection notes if present
    if [[ -f "$STATE_FILE" ]]; then
      NOTES=$(jq -r '.notes // ""' "$STATE_FILE" 2>/dev/null || true)
      if [[ -n "$NOTES" ]]; then
        printf '\n\n---\n## LAST REFLECTION NOTES\n%s\n---' "$NOTES"
      fi
    fi
    # Append bakeoff convergence if present
    if [[ -n "$BAKEOFF_SUFFIX" ]]; then
      printf '%s' "$BAKEOFF_SUFFIX"
    fi
  } > "$GUIDANCE_FILE_COOLDOWN"
fi
| 197 | + |
# --- Write guidance file for Path 3: Full reflection ---
# Combines stop_reflect.md + bakeoff suffix.
GUIDANCE_FILE_REFLECTION=""
if [[ "$ELAPSED_MIN" -ge 30 ]]; then
  TIMESTAMP=$(date +%m%d%Y_%H%M)
  if [[ -n "${STOP_HOOK_DRY_RUN:-}" ]]; then
    GUIDANCE_FILE_REFLECTION="${_DRY_RUN_TMPDIR}/stop_guidance_reflect_${TIMESTAMP}.md"
  else
    GUIDANCE_FILE_REFLECTION="$GUIDANCE_LOG_DIR/stop_guidance_reflect_${TIMESTAMP}.md"
  fi
  {
    # Fix: guard the template — a bare 'cat' on a missing stop_reflect.md
    # printed an error to stderr on every reflection stop. Warn once instead.
    if [[ -f "$REPO_ROOT/.agent/stop_reflect.md" ]]; then
      cat "$REPO_ROOT/.agent/stop_reflect.md"
    else
      echo "WARNING: stop_reflect.md missing; reflection guidance is empty" >&2
    fi
    if [[ -n "$BAKEOFF_SUFFIX" ]]; then
      printf '%s' "$BAKEOFF_SUFFIX"
    fi
  } > "$GUIDANCE_FILE_REFLECTION"
fi