"""mdbook preprocessor to auto-link glossary terms.

Parses glossary.md for terms defined using definition list syntax:

    term A
    : This is a definition of term A.

    term B
    : This is a definition of term B.

For each chapter (except the glossary itself), the first occurrence of each
term is replaced with a link to the glossary entry.

An HTML comment block at the bottom of glossary.md maps additional
terms to arbitrary URLs (e.g. upstream Python/Rust glossary entries),
one "term: url" (or "term | url") line per entry:

    <!-- external-glossary-links
    term C: https://example.com/glossary#term-c
    term D | https://example.com/glossary#term-d
    -->

Tested against mdbook 0.5.0.
"""

import json
import re
import sys


def slugify(term):
    """Convert a term to a URL-friendly anchor slug."""
    return re.sub(r"[^\w\s-]", "", term.lower()).strip().replace(" ", "-")


def parse_glossary(content):
    """Parse glossary.md and return (local_terms, external_terms).

    local_terms: dict of {term: url} for definition-list entries (links to
    glossary anchors).
    external_terms: dict of {term: url} from the <!-- external-glossary-links -->
    comment block.
    """
    local_terms = {}
    external_terms = {}

    # Parse definition-list terms: a non-indented line followed by a line
    # starting with " : " (the definition).
    for m in re.finditer(r"(?m)^([^\s#<>!`\[].+)\n : ", content):
        term = m.group(1).strip()
        local_terms[term] = f"glossary.md#{slugify(term)}"

    # Parse <!-- external-glossary-links ... --> block for external URLs.
    link_block = re.search(
        r"<!--\s*external-glossary-links\s*\n(.*?)-->", content, re.DOTALL
    )
    if not link_block:
        raise ValueError("Glossary is missing <!-- external-glossary-links --> block")

    for line in link_block.group(1).strip().splitlines():
        line = line.strip()
        if not line:
            continue
        # "term: url" or "term | url"
        parts = re.split(r"\s*[:|]\s*", line, maxsplit=1)
        if len(parts) == 2:
            term, url = parts[0].strip(), parts[1].strip()
            if term and url:
                external_terms[term] = url

    return local_terms, external_terms


# ---------------------------------------------------------------------------
# Replacement logic
# ---------------------------------------------------------------------------

# Matches fenced code blocks, inline code, and markdown links so we can skip
# them. Everything else is "plain text" we can scan for terms.
_SKIP_PATTERN = re.compile(
    r"^```[^\n]*\n[\s\S]*?^```\s*$"  # fenced code blocks (``` to ```, multiline)
    r"|`[^`\n]+`"  # inline code (single line only)
    r"|\[[^\]]*\]\([^\)]*\)"  # markdown links (entire [...](...))
    r"|<[^>\n]+>"  # HTML tags (single line only)
    r"|^\s*>.*$"  # block quotes (single line)
    r"|^.+(?=\n : )"  # definition-list term lines
    r"|^#{1,6}\s+.*$",  # headings
    re.MULTILINE,
)
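# e.g. in 'see `code` and [a link](x.md)', the backticked span and the whole
# link are both matched and therefore protected from term replacement.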


def link_terms_in_content(content, terms, first_only=True, url_prefix=""):
    """Replace occurrences of glossary terms with markdown links.

    If first_only is True (default), only the first occurrence of each term is
    linked. If False, every occurrence is linked. url_prefix is prepended to
    relative (non-http) URLs so links resolve from nested chapters.

    Skips code blocks, inline code, existing links, and HTML tags.
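
    A minimal example (terms maps each term to its target URL):

        >>> link_terms_in_content("A wheel is built.", {"wheel": "glossary.md#wheel"})
        'A [wheel](glossary.md#wheel) is built.'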
| 91 | + """ |
    linked = set()

    # Build a combined pattern matching any term, longest first so that
    # multi-word terms match before their sub-terms.
    sorted_terms = sorted(terms.keys(), key=len, reverse=True)
    if not sorted_terms:
        return content

    # Allow an optional trailing "s" so plurals like "wheels" match "wheel".
    escaped = [re.escape(t) + r"s?" for t in sorted_terms]
    # Case-insensitive, so capitalized occurrences (e.g. at sentence start)
    # still match; the lookup below recovers the canonical casing.
    term_pattern = re.compile(r"\b(" + "|".join(escaped) + r")\b", re.IGNORECASE)

    # Find all protected spans.
    protected = []
    for m in _SKIP_PATTERN.finditer(content):
        protected.append((m.start(), m.end()))

    # Parse <!-- no-glossary-link:term --> comments, each of which suppresses
    # linking of the named term on the line that follows the comment.
    # Maps (start, end) spans of those lines to sets of suppressed term names
    # (lowercased).
    _suppressed_re = re.compile(r"<!--\s*no-glossary-link:([\w\s-]+?)\s*-->")
    suppressed_at_line = {}
    for m in _suppressed_re.finditer(content):
        term_name = m.group(1).strip().lower()
        # Find the span of the next line after the comment.
        next_line_start = content.find("\n", m.end())
        if next_line_start == -1:
            continue
        next_line_start += 1
        next_line_end = content.find("\n", next_line_start)
        if next_line_end == -1:
            next_line_end = len(content)
        suppressed_at_line.setdefault((next_line_start, next_line_end), set()).add(
            term_name
        )

    def in_protected(start, end):
        for ps, pe in protected:
            if start >= ps and end <= pe:
                return True
        return False

    def is_suppressed(canonical, start):
        """Check whether this term is suppressed by a <!-- no-glossary-link:term -->
        comment on the preceding line."""
        for (ls, le), suppressed_terms in suppressed_at_line.items():
            if ls <= start < le and canonical.lower() in suppressed_terms:
                return True
        return False

    if suppressed_at_line:
        print(
            f"Found {sum(len(s) for s in suppressed_at_line.values())} suppressed "
            f"terms on {len(suppressed_at_line)} lines",
            file=sys.stderr,
        )

    result = []
    last = 0

    for m in term_pattern.finditer(content):
        matched_term = m.group(1)

        # Find the canonical term: for the matched form and then its singular
        # form (one trailing "s" stripped), try an exact lookup, then a
        # case-insensitive one.
        singular = (
            matched_term[:-1] if matched_term.lower().endswith("s") else matched_term
        )
        canonical = None
        for candidate in (matched_term, singular):
            if candidate in terms:
                canonical = candidate
                break
            for t in terms:
                if t.lower() == candidate.lower():
                    canonical = t
                    break
            if canonical is not None:
                break
        if canonical is None:
            continue

        if first_only and canonical in linked:
            continue

        if in_protected(m.start(), m.end()):
            continue

        if is_suppressed(canonical, m.start()):
            continue

        linked.add(canonical)
        result.append(content[last : m.start()])
        url = terms[canonical]
        if url_prefix and not url.startswith("http"):
            url = url_prefix + url
        result.append(f"[{matched_term}]({url})")
        last = m.end()

    result.append(content[last:])
    return "".join(result)


# ---------------------------------------------------------------------------
# mdbook preprocessor interface
# ---------------------------------------------------------------------------
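
# To enable the preprocessor, register it in book.toml. A sketch, assuming
# this script is saved as glossary_links.py next to book.toml:
#
#   [preprocessor.glossary-links]
#   command = "python3 glossary_links.py"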


def find_glossary_content(items):
    """Walk the book items to find and return the glossary chapter content."""
    for item in items:
        if not isinstance(item, dict) or "Chapter" not in item:
            continue
        ch = item["Chapter"]
        if ch.get("path") and ch["path"].endswith("glossary.md"):
            return ch["content"]
        result = find_glossary_content(ch.get("sub_items", []))
        if result is not None:
            return result
    return None


def process_item(item, local_terms, external_terms):
    """Recursively process a book item, linking glossary terms."""
    if not isinstance(item, dict) or "Chapter" not in item:
        return

    ch = item["Chapter"]
    path = ch.get("path", "")

    if path and path.endswith("glossary.md"):
        # On the glossary page itself, link external terms (all occurrences).
        ch["content"] = link_terms_in_content(
            ch["content"], external_terms, first_only=False
        )
    elif path:
        # Compute relative prefix for local glossary links based on depth.
        prefix = "../" * path.count("/")
        all_terms = {**local_terms, **external_terms}
        ch["content"] = link_terms_in_content(
            ch["content"], all_terms, url_prefix=prefix
        )

    for sub in ch.get("sub_items", []):
        process_item(sub, local_terms, external_terms)


def main():
    # mdbook first invokes the preprocessor as "<command> supports <renderer>"
    # to probe renderer support; exiting 0 accepts all renderers.
    if len(sys.argv) > 1 and sys.argv[1] == "supports":
        sys.exit(0)

    # mdbook then passes [context, book] as a single JSON value on stdin and
    # expects the modified book as JSON on stdout.
    _context, book = json.load(sys.stdin)

    # Parse glossary terms from the glossary chapter.
    glossary_content = find_glossary_content(book["items"])
    if glossary_content is None:
        # No glossary found; pass the book through unchanged.
        json.dump(book, sys.stdout)
        return

    local_terms, external_terms = parse_glossary(glossary_content)

    # Process all chapters.
    for item in book["items"]:
        process_item(item, local_terms, external_terms)

    json.dump(book, sys.stdout)


if __name__ == "__main__":
    main()