import { describe, it, expect } from "vitest";
import fs from "node:fs";
import path from "node:path";

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

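// Note: import.meta.dirname requires Node >= 20.11 (or >= 21.2); on older
// runtimes, derive the directory from import.meta.url via node:url's
// fileURLToPath instead.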
const DOCS_DIR = path.resolve(import.meta.dirname, "../../docs");

/** Recursively collect all HTML files under a directory. */
function collectHtmlFiles(dir: string): string[] {
  const results: string[] = [];
  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const full = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      results.push(...collectHtmlFiles(full));
    } else if (entry.name.endsWith(".html")) {
      results.push(full);
    }
  }
  return results;
}

/** Extract all href values from an HTML string, excluding external links, in-page anchors, and protocol-relative URLs. */
function extractInternalHrefs(html: string): string[] {
  const hrefRegex = /href="([^"]+)"/g;
  const hrefs: string[] = [];
  let match;
  while ((match = hrefRegex.exec(html)) !== null) {
    const href = match[1];
    // Skip external links, in-page anchors, and protocol-relative URLs
    if (href.startsWith("http") || href.startsWith("#") || href.startsWith("//")) continue;
    hrefs.push(href);
  }
  return hrefs;
}
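// e.g. extractInternalHrefs('<a href="/guide">g</a> <a href="https://a.example">x</a>')
// returns ["/guide"].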

const allHtmlFiles = collectHtmlFiles(DOCS_DIR);
const SKIP_FILES = ["index.html", "og-image.html"];

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

describe("docs clean URLs", () => {
  it("docs directory exists and contains HTML files", () => {
    expect(fs.existsSync(DOCS_DIR)).toBe(true);
    expect(allHtmlFiles.length).toBeGreaterThan(0);
  });

  describe("file structure", () => {
    it("only index.html and og-image.html exist at root level", () => {
      const rootHtmlFiles = fs
        .readdirSync(DOCS_DIR, { withFileTypes: true })
        .filter((e) => e.isFile() && e.name.endsWith(".html"))
        .map((e) => e.name);

      for (const file of rootHtmlFiles) {
        expect(SKIP_FILES).toContain(file);
      }
    });

    it("each doc page lives in its own directory as index.html", () => {
      // Non-page directories (specs, assets, etc.) are excluded
      const NON_PAGE_DIRS = new Set(["superpowers"]);
      const subdirs = fs
        .readdirSync(DOCS_DIR, { withFileTypes: true })
        .filter((e) => e.isDirectory() && !e.name.startsWith(".") && !NON_PAGE_DIRS.has(e.name));

      expect(subdirs.length).toBeGreaterThan(0);

      for (const dir of subdirs) {
        const indexPath = path.join(DOCS_DIR, dir.name, "index.html");
        expect(fs.existsSync(indexPath), `${dir.name}/index.html should exist`).toBe(true);
      }
    });

    it("no stale .html files remain at the docs root (besides allowed ones)", () => {
      const rootHtmlFiles = fs
        .readdirSync(DOCS_DIR, { withFileTypes: true })
        .filter((e) => e.isFile() && e.name.endsWith(".html"))
        .map((e) => e.name);

      const unexpected = rootHtmlFiles.filter((f) => !SKIP_FILES.includes(f));
      expect(unexpected, `Unexpected root-level HTML files: ${unexpected.join(", ")}`).toEqual([]);
    });
  });

  describe("internal links have no .html extension", () => {
    for (const filePath of allHtmlFiles) {
      const relative = path.relative(DOCS_DIR, filePath);

      it(`${relative} — no .html in internal hrefs`, () => {
        const html = fs.readFileSync(filePath, "utf-8");
        const hrefs = extractInternalHrefs(html);

        const badHrefs = hrefs.filter((h) => /\.html/.test(h));
        expect(badHrefs, `Found .html hrefs in ${relative}: ${badHrefs.join(", ")}`).toEqual([]);
      });
    }
  });

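  // sidebar.js is assumed to define nav entries shaped like
  // { label: "Guide", href: "/guide" }; only the href: "..." pattern is
  // relied on below, so the rest of its shape doesn't matter here.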
  describe("sidebar.js links have no .html extension", () => {
    it("no .html in sidebar href values", () => {
      const sidebarPath = path.join(DOCS_DIR, "sidebar.js");
      const content = fs.readFileSync(sidebarPath, "utf-8");

      // Extract all href: "..." values
      const hrefRegex = /href:\s*"([^"]+)"/g;
      const hrefs: string[] = [];
      let match;
      while ((match = hrefRegex.exec(content)) !== null) {
        hrefs.push(match[1]);
      }

      const badHrefs = hrefs.filter((h) => /\.html/.test(h));
      expect(badHrefs, `Found .html hrefs in sidebar.js: ${badHrefs.join(", ")}`).toEqual([]);
    });
  });

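  // The redirect this asserts on might look something like the following in
  // docs/index.html (a sketch of one plausible script; the actual one may differ):
  //
  //   <script>
  //     if (location.pathname.endsWith("/index.html")) {
  //       history.replaceState(null, "", location.pathname.slice(0, -"index.html".length));
  //     }
  //   </script>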
  describe("homepage replaceState redirect exists", () => {
    it("index.html contains history.replaceState for /index.html cleanup", () => {
      const indexPath = path.join(DOCS_DIR, "index.html");
      const html = fs.readFileSync(indexPath, "utf-8");

      expect(html).toContain("replaceState");
      expect(html).toContain("index.html");
    });
  });

  describe("all internal link targets resolve to existing pages", () => {
    const knownPages = new Set<string>();

    // Build set of known page slugs from directory names
    const subdirs = fs
      .readdirSync(DOCS_DIR, { withFileTypes: true })
      .filter((e) => e.isDirectory() && !e.name.startsWith("."))
      .map((e) => e.name);

    for (const dir of subdirs) {
      knownPages.add(`/${dir}`);
    }

    for (const filePath of allHtmlFiles) {
      const relative = path.relative(DOCS_DIR, filePath);

      it(`${relative} — all internal links point to existing pages`, () => {
        const html = fs.readFileSync(filePath, "utf-8");
        const hrefs = extractInternalHrefs(html);

        for (const href of hrefs) {
          // Strip anchor
          const base = href.split("#")[0];
          if (base === "" || base === "/") continue; // root or anchor-only
          if (base.startsWith("/")) {
            expect(
              knownPages.has(base),
              `${relative} links to ${href} but no page directory exists for "${base}"`,
            ).toBe(true);
          }
        }
      });
    }
  });
});