-
Notifications
You must be signed in to change notification settings - Fork 114
411 lines (356 loc) · 16.7 KB
/
docs-publish.yml
File metadata and controls
411 lines (356 loc) · 16.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
name: Docs
# Builds, validates, and deploys documentation to orphan deployment branches.
# Mintlify reads from these branches — main stays clean of generated artifacts.
#
# See docs/PUBLISHING.md for the full architecture and strategy.

on:
  push:
    branches: [main]
    paths:
      - "docs/**"
      - "mellea/**"
      - "cli/**"
      - "tooling/docs-autogen/**"
      - ".github/workflows/docs-publish.yml"
  release:
    types: [published]
  pull_request:
    # "labeled" is included so adding the docs-preview label re-triggers the run.
    types: [opened, synchronize, reopened, labeled]
    paths:
      - "docs/**"
      - "mellea/**"
      - "cli/**"
      - "tooling/docs-autogen/**"
      - ".github/workflows/docs-publish.yml"
  workflow_dispatch:
    inputs:
      force_publish:
        description: "Deploy even from a non-main context (for testing)"
        type: boolean
        default: false
      target_branch:
        description: "Override deploy target branch (default: docs/preview)"
        type: string
        default: "docs/preview"
      strict_validation:
        description: "Fail the build if validation checks fail"
        type: boolean
        default: false

# No default token permissions; each job requests exactly what it needs.
permissions: {}

# One run per ref at a time; a newer trigger cancels an in-flight run.
concurrency:
  group: docs-publish-${{ github.ref }}
  cancel-in-progress: true

env:
  UV_FROZEN: "1"
  FORCE_JAVASCRIPT_ACTIONS_TO_NODE24: "true"
jobs:
  # ---------------------------------------------------------------------------
  # Build & Validate
  # ---------------------------------------------------------------------------
  build-and-validate:
    runs-on: ubuntu-latest
    permissions:
      contents: read
    timeout-minutes: 30
    steps:
      - name: Checkout
        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
        with:
          fetch-depth: 0
          persist-credentials: false

      - name: Set up uv
        uses: astral-sh/setup-uv@37802adc94f370d6bfd71619e3f0bf239e1f3b78 # v7
        with:
          enable-cache: true
          cache-dependency-glob: "uv.lock"

      - name: Install dependencies
        run: uv sync --frozen --all-extras --group dev

      # -- Generate API documentation ------------------------------------------
      - name: Generate API documentation
        run: uv run python tooling/docs-autogen/build.py

      # -- Run docs-autogen unit tests ------------------------------------------
      - name: Run CLI reference tests
        run: uv run pytest tooling/docs-autogen/test_cli_reference.py -v --tb=short

      # -- Validate static docs ------------------------------------------------
      # In non-strict runs (inputs.strict_validation is unset or false — note
      # the inputs context is empty for push/PR events) these checks are
      # advisory: continue-on-error evaluates to true.
      - name: Lint static docs (markdownlint)
        id: markdownlint
        run: |
          set -o pipefail
          npx --yes markdownlint-cli "docs/docs/**/*.md" --config docs/docs/.markdownlint.json 2>&1 \
            | tee /tmp/markdownlint.log
        continue-on-error: ${{ inputs.strict_validation != true }}

      # -- Validate generated API docs -----------------------------------------
      - name: Validate MDX syntax and links
        id: validate_mdx
        run: |
          set -o pipefail
          uv run python tooling/docs-autogen/validate.py docs/docs/api --skip-coverage --docs-root docs 2>&1 \
            | tee /tmp/validate_mdx.log
        continue-on-error: ${{ inputs.strict_validation != true }}

      - name: Audit API coverage
        id: audit_coverage
        run: |
          set -o pipefail
          uv run python tooling/docs-autogen/audit_coverage.py --docs-dir docs/docs/api --threshold 80 2>&1 \
            | tee /tmp/audit_coverage.log
        continue-on-error: ${{ inputs.strict_validation != true }}

      # Hard gate: no continue-on-error, so a quality failure fails the job
      # (and therefore blocks the deploy job, which needs this one).
      - name: Docstring quality gate
        id: quality_gate
        run: |
          set -o pipefail
          uv run python tooling/docs-autogen/audit_coverage.py \
            --docs-dir docs/docs/api \
            --quality --fail-on-quality --threshold 100 \
            --orphans \
            --output /tmp/quality_report.json 2>&1 \
            | tee /tmp/quality_gate.log

      # -- Upload artifact for deploy job --------------------------------------
      - name: Upload quality report
        if: always()
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7
        with:
          name: docstring-quality-report
          path: /tmp/quality_report.json
          retention-days: 30

      - name: Upload docs artifact
        if: success() || (inputs.strict_validation != true)
        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7
        with:
          name: docs-site
          path: docs/docs/
          retention-days: 7
# -- Write job summary ---------------------------------------------------
- name: Write job summary
if: always()
run: |
python3 - <<'PYEOF'
import os, re
def icon(outcome):
return "✅" if outcome == "success" else ("❌" if outcome == "failure" else "⏭️")
def read_log(path):
try:
raw = open(path).read().strip()
# Strip ANSI escape codes (colour output from uv/pytest etc.)
return re.sub(r'\x1b\[[0-9;]*[mK]', '', raw)
except FileNotFoundError:
return ""
markdownlint_outcome = os.environ.get("STEPS_MARKDOWNLINT_OUTCOME", "")
validate_outcome = os.environ.get("STEPS_VALIDATE_MDX_OUTCOME", "")
coverage_outcome = os.environ.get("STEPS_AUDIT_COVERAGE_OUTCOME", "")
quality_gate_outcome = os.environ.get("STEPS_QUALITY_GATE_OUTCOME", "")
lint_log = read_log("/tmp/markdownlint.log")
validate_log = read_log("/tmp/validate_mdx.log")
coverage_log = read_log("/tmp/audit_coverage.log")
quality_gate_log = read_log("/tmp/quality_gate.log")
# Count markdownlint issues (lines matching file:line:col format)
lint_issues = len([l for l in lint_log.splitlines() if re.match(r'.+:\d+:\d+ ', l)])
lint_detail = f"{lint_issues} issue(s)" if lint_issues else "no issues"
# Extract coverage stats from audit_coverage output
cov_pct = re.search(r"Coverage:\s+(\S+%)", coverage_log)
cov_sym = re.search(r"Documented:\s+(\d+)", coverage_log)
cov_tot = re.search(r"Total classes \+ functions:\s+(\d+)", coverage_log)
cov_detail = (
f"{cov_pct.group(1)} ({cov_sym.group(1)}/{cov_tot.group(1)} symbols)"
if cov_pct and cov_sym and cov_tot else ""
)
# Parse per-check error counts from validate output.
# Each check prints "N errors found" on the next line when it fails.
def parse_validate_detail(log):
counts = {}
for label, key in [
("Source links", "source"), ("MDX syntax", "syntax"),
("Internal links", "links"), ("Anchor collisions", "anchors"),
("Stale files", "stale"),
]:
m = re.search(rf"{label}: (?:PASS|FAIL)(?:\s+(\d+) errors found)?", log, re.DOTALL)
if m:
counts[key] = int(m.group(1)) if m.group(1) else 0
total = sum(counts.values())
if not total:
return "no issues"
parts = []
if counts.get("syntax"): parts.append(f"{counts['syntax']} syntax error(s)")
if counts.get("links"): parts.append(f"{counts['links']} broken link(s)")
if counts.get("anchors"): parts.append(f"{counts['anchors']} anchor collision(s)")
if counts.get("source"): parts.append(f"{counts['source']} source link error(s)")
if counts.get("stale"): parts.append(f"{counts['stale']} stale file(s)")
return ", ".join(parts)
mdx_detail = parse_validate_detail(validate_log)
# Parse per-kind counts from the quality gate log.
# _print_quality_report emits section headers like:
# " Missing docstrings (12)"
# " Missing Args section (5)"
# Capture label -> count from those lines, then build a compact
# per-kind breakdown for the summary table cell.
kind_short = {
"Missing docstrings": "missing",
"Short docstrings": "short",
"Missing Args section": "no_args",
"Missing Returns section": "no_returns",
"Missing Yields section (generator)": "no_yields",
"Missing Raises section": "no_raises",
"Missing class Args section": "no_class_args",
"Duplicate Args: in class + __init__ (Option C violation)": "dup_init_args",
"Param name mismatches (documented but not in signature)": "param_mismatch",
"TypedDict phantom fields (documented but not declared)": "td_phantom",
"TypedDict undocumented fields (declared but missing from Attributes:)": "td_undoc",
"Missing parameter type annotations (type absent from API docs)": "missing_param_type",
"Missing return type annotations (type absent from API docs)": "missing_return_type",
"Param type mismatch (docstring vs annotation)": "param_type_mismatch",
"Return type mismatch (docstring vs annotation)": "return_type_mismatch",
}
section_re = re.compile(r"^\s{2}(.+?)\s+\((\d+)\)\s*$", re.MULTILINE)
kind_counts = {}
for m in section_re.finditer(quality_gate_log):
label, count = m.group(1), int(m.group(2))
short = kind_short.get(label)
if short:
kind_counts[short] = count
if kind_counts:
parts = [f"{v} {k}" for k, v in kind_counts.items()]
quality_gate_detail = ", ".join(parts)
else:
# Fall back to the summary annotation message
qm = re.search(r"::(notice|warning|error) title=Docstring quality::(.+)", quality_gate_log)
quality_gate_detail = re.sub(r"\s*—\s*see job summary.*$", "", qm.group(2)) if qm else ""
CONTRIB_URL = (
"https://github.com/generative-computing/mellea/blob/main"
"/docs/docs/guide/CONTRIBUTING.md"
)
REPO = os.environ.get("GITHUB_REPOSITORY", "")
RUN_ID = os.environ.get("GITHUB_RUN_ID", "")
ARTIFACT_URL = f"https://github.com/{REPO}/actions/runs/{RUN_ID}#artifacts"
lines = [
"## Docs Build — Validation Summary\n",
"| Check | Result | Details |",
"|-------|--------|---------|",
f"| Markdownlint | {icon(markdownlint_outcome)} {markdownlint_outcome} | {lint_detail} |",
f"| MDX Validation | {icon(validate_outcome)} {validate_outcome} | {mdx_detail} |",
f"| API Coverage | {icon(coverage_outcome)} {coverage_outcome} | {cov_detail} |",
f"| Docstring Quality | {icon(quality_gate_outcome)} {quality_gate_outcome} | {quality_gate_detail} |",
]
lines.append("")
# When the quality gate fails, surface a direct link to the fix reference.
# Per-kind Ref: URLs in the log output are inside a ```text``` block and
# don't render as links there.
if quality_gate_outcome == "failure":
lines += [
"> ❌ **Docstring quality gate failed.** "
f"See the [CI docstring checks reference]({CONTRIB_URL}#ci-docstring-checks-reference) "
"for per-kind fix instructions, or expand **Docstring quality details** below for the full list. \n"
f"> The full machine-readable report is available as the [`docstring-quality-report` artifact]({ARTIFACT_URL}).",
"",
]
for title, log, limit in [
("Markdownlint output", lint_log, 5_000),
("MDX validation output", validate_log, 5_000),
("API coverage output", coverage_log, 5_000),
("Docstring quality details", quality_gate_log, 1_000_000),
]:
if log:
lines += [
f"<details><summary>{title}</summary>\n",
"```text",
log[:limit] + (" [truncated]" if len(log) > limit else ""),
"```",
"</details>\n",
]
with open(os.environ["GITHUB_STEP_SUMMARY"], "a") as f:
f.write("\n".join(lines))
PYEOF
env:
STEPS_MARKDOWNLINT_OUTCOME: ${{ steps.markdownlint.outcome }}
STEPS_VALIDATE_MDX_OUTCOME: ${{ steps.validate_mdx.outcome }}
STEPS_AUDIT_COVERAGE_OUTCOME: ${{ steps.audit_coverage.outcome }}
STEPS_QUALITY_GATE_OUTCOME: ${{ steps.quality_gate.outcome }}
# ---------------------------------------------------------------------------
# Deploy to orphan branch
# ---------------------------------------------------------------------------
deploy:
needs: build-and-validate
runs-on: ubuntu-latest
permissions:
contents: write
timeout-minutes: 10
# Deploy on: push to main, release, force_publish via dispatch,
# or PRs labelled "docs-preview" (→ docs/preview branch).
if: >-
github.event_name == 'push' ||
github.event_name == 'release' ||
(github.event_name == 'pull_request' && contains(github.event.pull_request.labels.*.name, 'docs-preview')) ||
(github.event_name == 'workflow_dispatch' && inputs.force_publish)
steps:
- name: Download docs artifact
uses: actions/download-artifact@3e5f45b2cfb9172054b4087a40e8e0b5a5461e7c # v8
with:
name: docs-site
path: docs-site/
- name: Determine target branch
id: target
run: |
if [ "$EVENT_NAME" = "release" ]; then
echo "branch=docs/production" >> "$GITHUB_OUTPUT"
elif [ "$EVENT_NAME" = "pull_request" ]; then
echo "branch=docs/preview" >> "$GITHUB_OUTPUT"
elif [ "$EVENT_NAME" = "workflow_dispatch" ] && [ -n "${INPUTS_TARGET_BRANCH}" ]; then
echo "branch=${INPUTS_TARGET_BRANCH}" >> "$GITHUB_OUTPUT"
else
echo "branch=docs/staging" >> "$GITHUB_OUTPUT"
fi
env:
EVENT_NAME: ${{ github.event_name }}
INPUTS_TARGET_BRANCH: ${{ inputs.target_branch }}
- name: Add DO NOT EDIT warning
run: |
cat > docs-site/_DO_NOT_EDIT.md << 'EOF'
# DO NOT EDIT THIS BRANCH
This branch is **fully automated**. Every file here is generated by
the `docs-publish` GitHub Actions workflow and force-pushed on each run.
**Any manual edits will be overwritten without warning.**
To change documentation:
- Static guides: edit files under `docs/docs/` on `main`
- API reference: improve docstrings in Python source (`mellea/`, `cli/`)
- Pipeline config: see `tooling/docs-autogen/` on `main`
For details, see `docs/PUBLISHING.md` on `main`.
EOF
- name: Deploy to ${{ steps.target.outputs.branch }}
uses: peaceiris/actions-gh-pages@4f9cc6602d3f66b9c108549d475ec49e8ef4d45e # v4
with:
github_token: ${{ secrets.GITHUB_TOKEN }}
publish_branch: ${{ steps.target.outputs.branch }}
publish_dir: docs-site/
force_orphan: true
user_name: "github-actions[bot]"
user_email: "github-actions[bot]@users.noreply.github.com"
commit_message: |
docs: publish from ${{ github.sha }}
Branch: ${{ github.ref_name }}
Trigger: ${{ github.event_name }}${{ github.event.pull_request.number && format(' (PR #{0})', github.event.pull_request.number) }}
Run: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
- name: Write deploy summary
if: always()
run: |
TARGET="${STEPS_TARGET_OUTPUTS_BRANCH}"
REPO="${GITHUB_REPOSITORY}"
SHA="${GITHUB_SHA}"
if [ "${JOB_STATUS}" = "success" ]; then
STATUS="✅ Deployed"
DETAIL="Published to [\`${TARGET}\`](https://github.com/${REPO}/tree/${TARGET})"
else
STATUS="❌ Failed"
DETAIL="Attempted deploy to \`${TARGET}\`"
fi
cat >> "$GITHUB_STEP_SUMMARY" << EOF
## Docs Deploy — ${STATUS}
| | |
|-|-|
| Branch | \`${TARGET}\` |
| Source | \`${SHA:0:7}\` |
| Trigger | ${EVENT_NAME}${PR_SUFFIX} |
${DETAIL}
EOF
env:
STEPS_TARGET_OUTPUTS_BRANCH: ${{ steps.target.outputs.branch }}
JOB_STATUS: ${{ job.status }}
EVENT_NAME: ${{ github.event_name }}
PR_SUFFIX: "${{ github.event.pull_request.number && format(' (PR #{0})', github.event.pull_request.number) }}"