-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathserver.py
More file actions
2240 lines (1984 loc) · 86.4 KB
/
server.py
File metadata and controls
2240 lines (1984 loc) · 86.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# /// script
# requires-python = ">=3.11"
# dependencies = ["fastapi", "uvicorn[standard]", "anthropic", "python-dotenv"]
# ///
"""Periscope — live tmux dashboard. Run with: uv run server.py"""
import asyncio
import json
import os
import re
import shutil
import subprocess
import threading
import time
import uuid
from contextlib import asynccontextmanager
from pathlib import Path
from dotenv import load_dotenv
from fastapi import FastAPI, Query, Request, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
# Load .env from the script's directory (existing env vars take precedence).
load_dotenv(Path(__file__).parent / ".env")
@asynccontextmanager
async def lifespan(_app: FastAPI):
    """Startup hook: warm both slow caches in background daemon threads.

    `prewarm_pr_cache` and `cached_scraped_usage` are defined later in this
    module; Python resolves the names at call time, so the forward
    references are fine. Warming eagerly means the first /api/state poll
    already has PR badges and the usage bars populated.
    """
    for warmer in (prewarm_pr_cache, cached_scraped_usage):
        threading.Thread(target=warmer, daemon=True).start()
    yield
app = FastAPI(lifespan=lifespan)
# Static assets directory — sibling of this script; also reused as an anchor
# for scratch files (see scrape_usage_via_tmux's .empty-mcp.json).
STATIC = Path(__file__).parent / "static"
# --- Persistent state (state.json) ----------------------------------------
#
# Single JSON file mutated only by the server, under a threading.Lock, with
# atomic tempfile+rename writes. See
# docs/superpowers/specs/2026-05-13-persistent-config-layer-design.md.
#
# Lock primitive choice: threading.Lock (not asyncio.Lock). FastAPI runs
# sync `def` endpoints on anyio's threadpool, so two concurrent /api/state
# polls execute in parallel threads. asyncio.Lock only blocks coroutines,
# not threads — it would let sync handlers race past each other into the
# critical section. threading.Lock works correctly from both sync handlers
# and async ones (acquired synchronously; the file write is fast enough
# that briefly blocking the event loop is fine).
def _state_path() -> Path:
base = os.environ.get("XDG_CONFIG_HOME") or os.path.expanduser("~/.config")
return Path(base) / "periscope" / "state.json"
# Guards every read-modify-write of _STATE and every _write_state call.
_STATE_LOCK = threading.Lock()
# Template for a fresh state.json. Deep-copied (via a JSON round-trip in
# _load_state) before use so callers never mutate the template itself.
# `ui` / `windows` / `commands` are populated by endpoints elsewhere in the
# file — their schemas aren't visible in this chunk.
_STATE_DEFAULTS: dict = {
    "version": 1,
    "ui": {},
    "windows": {},
    "commands": [],
}
def _load_state() -> dict:
    """Read state.json, tolerating a missing or unreadable file.

    On parse failure the bad file is renamed to `.corrupt-<ts>` and the
    defaults are returned — the next save writes a fresh valid file, and
    the user can recover from the renamed copy if they care.
    """
    def fresh_defaults() -> dict:
        # JSON round-trip == cheap deep copy of the defaults template.
        return json.loads(json.dumps(_STATE_DEFAULTS))

    path = _state_path()
    if not path.exists():
        return fresh_defaults()
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except (json.JSONDecodeError, OSError) as err:
        quarantined = path.with_name(f"{path.name}.corrupt-{int(time.time())}")
        try:
            path.rename(quarantined)
            print(f"periscope: state.json unreadable ({err}); renamed to {quarantined}")
        except OSError:
            pass
        return fresh_defaults()
    # Files written by earlier phases may lack `windows` or `commands`;
    # backfill every missing key with a copy of its default.
    for key, default in _STATE_DEFAULTS.items():
        data.setdefault(key, json.loads(json.dumps(default)))
    return data
def _write_state(data: dict) -> None:
    """Atomic write: tempfile + os.replace. Caller must hold _STATE_LOCK."""
    path = _state_path()
    path.parent.mkdir(parents=True, exist_ok=True)
    scratch = path.with_suffix(path.suffix + ".tmp")
    serialized = json.dumps(data, indent=2)
    scratch.write_text(serialized, encoding="utf-8")
    os.replace(scratch, path)
# In-memory cache — every endpoint reads from this, writes go through
# _write_state under the lock. Loaded once at startup.
_STATE: dict = _load_state()
# Legacy default command palette for the new-window tile; seeded whenever
# `commands` is empty (see _seed_commands_if_empty). The empty exec on
# "shell" presumably launches a bare shell — confirm in the window-create
# handler (outside this chunk).
_DEFAULT_COMMANDS = [
    {"label": "claude", "exec": "claude"},
    {"label": "shell", "exec": ""},
    {"label": "vim", "exec": "vim"},
]
def _seed_commands_if_empty() -> None:
    """Seed the three legacy default commands when `commands` is empty.

    Covers fresh installs and pre-phase-4 state.json files so the
    new-window tile keeps working while phase 4 is in flight.

    Side effect: if a user deliberately drains commands to zero, the next
    server restart re-seeds the defaults. To keep zero commands, leave at
    least one no-op entry around. Deliberate tradeoff: making "empty by
    accident" recoverable matters more than supporting a zero-commands
    configuration nobody asks for.
    """
    with _STATE_LOCK:
        if _STATE["commands"]:
            return
        _STATE["commands"] = [dict(entry) for entry in _DEFAULT_COMMANDS]
        _write_state(_STATE)
_seed_commands_if_empty()
# Server-tracked "last user-focused" per target.
# Tmux's window_activity bumps on any output (Claude streaming, build logs, dev
# servers, etc), which surprises users expecting "last accessed" semantics.
# We instead record when each window most recently became the active window in
# its session, plus any time the user acts on it via the dashboard.
# Keyed by "session:index" targets; values are unix seconds.
_focused_at: dict[str, int] = {}
# `_acted_at` is a *user-action-only* recency stamp. Unlike `_focused_at` it
# does NOT bump on tmux active-window changes (which fire when Tom switches
# between sessions in his terminal, not when he engages a window via the
# periscope UI). Stream view sorts by this; grid view continues to sort by
# `_focused_at`. Bumped from the periscope-side handlers only:
# - /ws/pane WS-connect (modal-open is the canonical "opened in periscope")
# - /api/focus, /api/send, /api/paste-image, /api/rename
# - /api/session/new, /api/window/new (creation through periscope)
# Reset on process restart; not persisted.
_acted_at: dict[str, int] = {}
# Last-seen active window target per session — lets update_focus_from_windows
# detect when a session's active window actually changed.
_active_per_session: dict[str, str] = {}
# Active resume operations, keyed by session_id. Each entry tracks where a
# `claude --resume <id>` is currently running so we can refuse concurrent
# resume requests (they'd interleave appends into the same JSONL).
_resuming: dict[str, dict] = {}
RESUME_EXPIRY_S = 30 * 60  # forget about a resume after 30 min idle
# Per-target spinner hysteresis. Tmux capture-pane occasionally catches Claude's
# TUI mid-redraw, dropping the spinner line for one cycle even when Claude is
# still working. We remember the last positive detection per target and treat
# it as sticky for SPINNER_GRACE_S so cards + modal subtitles don't flicker.
_spinner_last_seen: dict[str, tuple[str, float]] = {}
SPINNER_GRACE_S = 4.0
# Per-target "is this a Claude pane" stickiness. Detection is via STATUS_RE
# matching CC's bottom status line, but CC's interactive dialogs (e.g.
# AskUserQuestion) take over the screen and temporarily hide that line — we
# don't want the card to flip back to "shell" while the user is mid-prompt.
_claude_last_seen: dict[str, float] = {}
CLAUDE_STICKY_S = 120.0
def smooth_spinner(target: str, current: str | None) -> str | None:
    """Sticky spinner detection: a positive sighting stays valid for
    SPINNER_GRACE_S seconds, smoothing over mid-redraw capture glitches.

    Returns the phrase to display (current, or the remembered one while
    still within the grace window), else None.
    """
    now = time.time()
    if current:
        _spinner_last_seen[target] = (current, now)
        return current
    remembered = _spinner_last_seen.get(target)
    if remembered is not None:
        phrase, seen_at = remembered
        if now - seen_at < SPINNER_GRACE_S:
            return phrase
    # Grace expired (or never seen): clear any stale entry.
    _spinner_last_seen.pop(target, None)
    return None
def smooth_is_claude(target: str, current: bool) -> bool:
    """Sticky "is this a Claude pane" flag: once detected, the answer stays
    True for CLAUDE_STICKY_S seconds even if the status line disappears
    (e.g. while an interactive dialog covers it)."""
    now = time.time()
    if current:
        _claude_last_seen[target] = now
        return True
    if now - _claude_last_seen.get(target, 0) < CLAUDE_STICKY_S:
        return True
    _claude_last_seen.pop(target, None)
    return False
def note_focus(target: str) -> None:
    # Stamp tmux-derived focus recency (unix seconds); grid view sorts by this.
    _focused_at[target] = int(time.time())
def note_action(target: str) -> None:
    """Stamp a periscope-side user action. Separate from `note_focus`: the
    stream view orders by *only* actions the user took through periscope,
    not tmux activity. Callers that bump focus due to a user action should
    bump both; tmux-derived bumps go through `note_focus` alone."""
    # Unix seconds; in-memory only — reset on process restart.
    _acted_at[target] = int(time.time())
def update_focus_from_windows(windows: list[dict]) -> None:
    """Walk the freshly-listed windows and stamp a focus time whenever a
    session's active window changed since the previous poll (or has never
    been stamped at all)."""
    current_active: dict[str, str] = {
        w["session"]: f"{w['session']}:{w['index']}"
        for w in windows
        if w.get("active")
    }
    for session, target in current_active.items():
        if _active_per_session.get(session) != target or target not in _focused_at:
            note_focus(target)
        _active_per_session[session] = target
# Status line at the bottom of every Claude pane:
#   " 24% | ↑235k ↓479 | $17.04 | Opus 4.7 (1M context)"
STATUS_RE = re.compile(
    r"^\s*(?P<context>\d+)%\s*\|\s*↑\S+\s+↓\S+\s*\|\s*\$[\d.,]+\s*\|\s*(?P<model>.+?)\s*$"
)
# Branch / PR / CI used to come from a custom statusline rendered in the line
# above STATUS_RE. We now pull those from the pane's cwd directly (git +
# `gh pr list`), independent of any statusline customization.
# Active-op detection — two patterns covering the variations Claude Code's
# TUI shows for a running operation. Both are used with `.match()` so the
# spinner glyph must be at line start (after optional indent); this rejects
# prose embeds where a previous response or user message quotes the marker
# mid-sentence.
#
# An active marker is always `<non-ASCII glyph> <verb-phrase>` followed by
# either a trailing `…`, a `(timing/tokens)` parenthetical, or both.
# Glyph enumeration is intentionally avoided (Claude rotates through
# ✻ ✶ ✷ ✳ ✦ ⏺ … and adds new ones over time) — `[^\x00-\x7f]` matches any.
#
# SPINNER_RE handles the ellipsis form, single- OR multi-word phrase:
#   "✻ Envisioning…"
#   "✳ Wiring resolve_pids into endpoints…(910m 2 · ↓ 14.78 tokens · ...)"
# The phrase character class excludes `(` so it can't grow into parens —
# without that, tool-call headers like `⏺ Bash(cd /Users/tom/… --skip-glo…)`
# would match (the `…` inside the bash invocation isn't an active marker).
SPINNER_RE = re.compile(r"^\s*[^\x00-\x7f]\s+(?P<phrase>[^(\n…]+?)…")
# ACTIVE_OP_RE handles the parens form (no trailing `…`):
#   "● Bootstrapping packages (7m 29s · ↑ 22.1k tokens · thought for 2s)"
# The `↑/↓ Nk tokens` is the live uplink/downlink meter — present only while
# the op is running. Completion drops the arrow (`Done (5 tool uses · 25.5k
# tokens · 21s)`), so completed lines don't match. Distinguishable from
# STATUS_RE because the status line has both arrows on the same line, no
# `tokens` word, and no parens around the metering.
ACTIVE_OP_RE = re.compile(
    r"^\s*[^\x00-\x7f]\s+\S+.*\([^)]*[↑↓]\s*[\d.]+\w*\s+tokens[^)]*\)"
)
# Pull out a verb-shaped word for the card label (`envisioning…`,
# `planning…`). Falls back to the first word if there's no clean verb.
SPINNER_VERB_RE = re.compile(r"\b([A-Z]\w+(?:ing|ed))\b")
# Needs-input: the numbered-choice permission dialog. `❯ 1.` plus the
# "Esc to cancel" footer is Claude-Code-specific; either alone false-positives
# (shells use ❯ as a prompt; "Esc to cancel" appears in transient toasts).
# Claude's choice dialogs always render a single footer line that combines
# navigation hints with the cancel marker — e.g. one of:
#   "Enter to select · Esc to cancel"
#   "Enter to select · ↑/↓ to navigate · Esc to cancel"
#   "Submit · Esc to cancel"
# Matching the whole footer pattern on a single line is much more specific
# than scanning for the marker and a numbered option anywhere in the tail:
# prose responses (or shell output) that happen to mention both in different
# places will no longer false-positive. The dialog's options can sit far
# above the footer, so we don't need to find them — the footer is sufficient.
NEEDS_INPUT_FOOTER_RE = re.compile(
    r"(?:Enter\s+to\s+\w+|↑/↓|Submit\b).*Esc\s+to\s+cancel",
)
# "※ recap: ..." block — captured lazily up to the next divider/prompt line
# (─ or ❯) or end of capture.
RECAP_RE = re.compile(
    r"※ recap:\s*(?P<text>.+?)(?=\n\s*[─❯]|\Z)", re.DOTALL
)
# The prompt chevron line plus whatever input (possibly empty) follows it.
PROMPT_LINE_RE = re.compile(r"^❯\s*(?P<input>.*)$")
def tmux(*args: str) -> str:
    """Run a tmux subcommand and return its stdout; stderr is captured but
    discarded. A hung tmux raises subprocess.TimeoutExpired after 5s."""
    completed = subprocess.run(
        ["tmux", *args],
        capture_output=True,
        text=True,
        timeout=5,
    )
    return completed.stdout
# --- Git + PR state derived from each pane's current working directory ----
#
# Independent of any custom Claude statusline. We ask tmux for the pane's
# current path, run git from there, and (if gh is installed) ask for the
# PR + CI rollup attached to that branch. Results are cached because both
# git status and gh queries cost real wall-clock time and the data changes
# slowly compared to our polling cadence.
_GIT_TTL = 15.0  # seconds a git_state_for result stays fresh
_PR_TTL = 60.0   # seconds a gh pr lookup stays fresh
# path -> (fetched_at, git_state_for result)
_git_cache: dict[str, tuple[float, dict | None]] = {}
# (path, branch) -> (fetched_at, pr_state_for result)
_pr_cache: dict[tuple[str, str], tuple[float, dict | None]] = {}
# (path, branch) keys with a background fetch currently in flight.
_pr_fetching: set[tuple[str, str]] = set()
_pr_lock = threading.Lock()
# Checked once at import; gh-dependent features degrade gracefully without it.
_GH_AVAILABLE = shutil.which("gh") is not None
def _run(cmd: list[str], cwd: str | None = None, timeout: float = 3.0) -> tuple[int, str]:
try:
r = subprocess.run(
cmd, cwd=cwd, capture_output=True, text=True, timeout=timeout
)
return r.returncode, r.stdout.strip()
except Exception:
return -1, ""
def git_state_for(path: str) -> dict | None:
    """Return {branch, git} for the git repo at `path`, or None.

    `branch` is the current branch name; when detached it falls back to
    "@<short-sha>" (or "?" if even that fails). `git` is a compact summary:
    "clean" or "+A -D" from `diff HEAD --shortstat`, with a trailing " *"
    when commits are unpushed relative to upstream.
    """
    if not path or not os.path.isdir(path):
        return None
    rc, _ = _run(["git", "-C", path, "rev-parse", "--git-dir"])
    if rc != 0:
        return None
    _, branch = _run(["git", "-C", path, "rev-parse", "--abbrev-ref", "HEAD"])
    if not branch or branch == "HEAD":
        # Detached HEAD — show the short commit sha instead.
        _, sha = _run(["git", "-C", path, "rev-parse", "--short", "HEAD"])
        branch = f"@{sha}" if sha else "?"

    # Compact diff stats vs HEAD (covers staged + unstaged together).
    _, shortstat = _run(["git", "-C", path, "diff", "HEAD", "--shortstat"])

    def stat_count(word: str) -> int:
        found = re.search(rf"(\d+) {word}", shortstat)
        return int(found.group(1)) if found else 0

    adds = stat_count("insertion")
    dels = stat_count("deletion")

    # Unpushed commits ahead of upstream (0 when no upstream is configured).
    rc, ahead_raw = _run(["git", "-C", path, "rev-list", "--count", "@{u}..HEAD"])
    ahead = int(ahead_raw) if rc == 0 and ahead_raw.isdigit() else 0

    summary = "clean" if adds == 0 and dels == 0 else f"+{adds} -{dels}"
    if ahead > 0:
        summary += " *"
    return {"branch": branch, "git": summary}
def cached_git_state(path: str) -> dict | None:
    """TTL-cached wrapper around git_state_for (see _GIT_TTL).

    Unlike the PR cache, a miss here refreshes synchronously — the git
    commands run inline rather than in a background thread.
    """
    if not path:
        return None
    now = time.time()
    entry = _git_cache.get(path)
    if entry is not None and now - entry[0] < _GIT_TTL:
        return entry[1]
    fresh = git_state_for(path)
    _git_cache[path] = (now, fresh)
    return fresh
def pr_state_for(path: str, branch: str) -> dict | None:
    """Return PR metadata + CI rollup for the open PR against `branch` in
    the repo at `path`, or None (gh missing, no PR, or query failure).

    Modal sidebar surfaces title/draft/+/−/reviewers; the grid card uses
    {pr, ci} as before.
    """
    if not (_GH_AVAILABLE and path and branch):
        return None
    fields = "number,title,isDraft,additions,deletions,reviewRequests,statusCheckRollup"
    rc, out = _run(
        [
            "gh", "pr", "list",
            "--head", branch,
            "--state", "open",
            "--json", fields,
            "--limit", "1",
        ],
        cwd=path,
        timeout=8.0,
    )
    if rc != 0 or not out:
        return None
    try:
        listing = json.loads(out)
    except Exception:
        return None
    if not listing:
        return None
    pr = listing[0]

    # Collapse the per-check rollup into one glyph: any failure-ish state
    # wins, then any in-flight state, then all-green.
    rollup_states = {
        (check.get("conclusion") or check.get("status") or "").upper()
        for check in pr.get("statusCheckRollup") or []
    }
    rollup_states.discard("")
    ci = None
    if rollup_states & {"FAILURE", "CANCELLED", "TIMED_OUT", "ACTION_REQUIRED"}:
        ci = "✗"
    elif rollup_states & {"PENDING", "QUEUED", "IN_PROGRESS", "WAITING"}:
        ci = "⟳"
    elif rollup_states and rollup_states <= {"SUCCESS", "NEUTRAL", "SKIPPED"}:
        ci = "✓"

    # gh exposes requested reviewers as either users (with `login`) or
    # teams (with `name`); keep whichever handle is present.
    reviewers = [
        handle
        for req in pr.get("reviewRequests") or []
        if (handle := req.get("login") or req.get("name") or "")
    ]
    return {
        "pr": pr.get("number"),
        "ci": ci,
        "pr_title": pr.get("title") or "",
        "pr_draft": bool(pr.get("isDraft")),
        "pr_additions": int(pr.get("additions") or 0),
        "pr_deletions": int(pr.get("deletions") or 0),
        "pr_reviewers": reviewers,
    }
def _fetch_pr_into_cache(path: str, branch: str) -> None:
    """Background worker: fetch PR state and publish it into _pr_cache.

    Never raises; a failed fetch caches None so callers stop retrying
    until the TTL lapses. Always clears the in-flight marker.
    """
    key = (path, branch)
    try:
        result = pr_state_for(path, branch)
    except Exception:
        result = None
    with _pr_lock:
        _pr_cache[key] = (time.time(), result)
        _pr_fetching.discard(key)
# --- Activity timeline (for modal sidebar) -------------------------------
#
# Per pane, surface a short timeline of recent events: commits on the repo
# in the last 24h, CI runs on the branch, and a single "opened in periscope"
# anchor sourced from _acted_at. Repo+branch events are cached by
# (cwd, branch) since they're the same for every window on the same branch;
# the per-target open event is layered in fresh on each call.
_ACTIVITY_TTL = 60.0  # seconds shared (path, branch) events stay fresh
# (path, branch) -> (fetched_at, events list)
_activity_cache: dict[tuple[str, str], tuple[float, list[dict]]] = {}
# (path, branch) keys with a background refresh currently in flight.
_activity_fetching: set[tuple[str, str]] = set()
_activity_lock = threading.Lock()
def _gh_run_state(run: dict) -> str | None:
"""Map a gh run record to one of 'passed' / 'failed' / 'running', or
None for runs we don't surface (skipped, neutral)."""
s = (run.get("status") or "").upper()
c = (run.get("conclusion") or "").upper()
if c == "SUCCESS":
return "passed"
if c in ("FAILURE", "TIMED_OUT", "CANCELLED", "ACTION_REQUIRED"):
return "failed"
if c in ("NEUTRAL", "SKIPPED"):
return None
if s in ("QUEUED", "IN_PROGRESS", "WAITING"):
return "running"
return None
def shared_activity_for(path: str, branch: str) -> list[dict]:
    """Repo/branch-scoped events: commits in last 24h + CI runs on branch.

    Each event is {kind, at, text[, state]} with `at` in unix seconds.
    Returns [] when `path` is missing or not a git repo; individual git/gh
    failures degrade to fewer events rather than raising.
    """
    events: list[dict] = []
    if not path or not os.path.isdir(path):
        return events
    code, _ = _run(["git", "-C", path, "rev-parse", "--git-dir"])
    if code != 0:
        return events
    # %ct = committer date as unix seconds; %s = subject. Tab-separated so
    # subjects with spaces don't confuse the split.
    code, out = _run(
        ["git", "-C", path, "log", "-10", "--since=24h", "--pretty=format:%ct%x09%s"],
        timeout=3.0,
    )
    if code == 0 and out:
        for line in out.split("\n"):
            tab = line.find("\t")
            if tab < 0:
                continue
            try:
                at = int(line[:tab])
            except ValueError:
                continue
            subj = line[tab + 1 :].strip()
            if subj:
                events.append({"kind": "commit", "at": at, "text": subj})
    if _GH_AVAILABLE and branch:
        code, out = _run(
            [
                "gh", "run", "list",
                "--branch", branch,
                "--limit", "5",
                "--json", "conclusion,status,createdAt,displayTitle,name",
            ],
            cwd=path,
            timeout=5.0,
        )
        if code == 0 and out:
            try:
                runs = json.loads(out)
            except Exception:
                runs = []
            from datetime import datetime
            for run in runs:
                state = _gh_run_state(run)
                if state is None:
                    # Skipped/neutral runs aren't surfaced on the timeline.
                    continue
                created = run.get("createdAt") or ""
                try:
                    # GitHub timestamps are RFC3339 with a trailing Z.
                    at = int(
                        datetime.fromisoformat(created.replace("Z", "+00:00")).timestamp()
                    )
                except Exception:
                    continue
                name = run.get("displayTitle") or run.get("name") or "workflow"
                events.append(
                    {"kind": "ci", "at": at, "text": name, "state": state}
                )
    return events
def _fetch_activity_into_cache(path: str, branch: str) -> None:
    """Background worker: compute shared activity and publish it.

    Never raises; failures cache an empty list until the TTL lapses.
    Always clears the in-flight marker.
    """
    key = (path, branch)
    try:
        fetched = shared_activity_for(path, branch)
    except Exception:
        fetched = []
    with _activity_lock:
        _activity_cache[key] = (time.time(), fetched)
        _activity_fetching.discard(key)
def cached_pane_activity(target: str, path: str, branch: str | None) -> list[dict]:
    """Return up to 8 timeline events for this pane, newest-first. Shared
    (repo+branch) events come from a stale-while-revalidate cache; the
    per-target 'open' event is layered in fresh from _acted_at."""
    events: list[dict] = []
    if path and branch:
        key = (path, branch)
        now = time.time()
        with _activity_lock:
            cached = _activity_cache.get(key)
            stale = cached is None or (now - cached[0] >= _ACTIVITY_TTL)
            if stale and key not in _activity_fetching:
                # Marked in-flight under the lock, so at most one refresh
                # thread runs per (path, branch) at a time.
                _activity_fetching.add(key)
                threading.Thread(
                    target=_fetch_activity_into_cache,
                    args=(path, branch),
                    daemon=True,
                ).start()
        # Serve stale data while the refresh runs; [] on a cold cache.
        shared = cached[1] if cached else []
        events.extend(shared)
    opened_at = _acted_at.get(target, 0)
    if opened_at:
        events.append(
            {"kind": "open", "at": opened_at, "text": "opened in periscope"}
        )
    events.sort(key=lambda e: e.get("at", 0), reverse=True)
    return events[:8]
# --- Claude Code plan usage (parsed from session JSONL files) -------------
#
# Claude Code logs every assistant message to ~/.claude/projects/<encoded-cwd>/
# <session-id>.jsonl. Each line is a JSON record; assistant lines carry a
# `message.usage` block with input_tokens, cache_creation_input_tokens,
# cache_read_input_tokens, and output_tokens. Summing across files in a
# rolling 5h window gives a real measurement of plan token usage, no API
# subscription / billing endpoint required.
_USAGE_TTL = 30.0  # seconds a computed usage summary stays fresh
# (computed_at, compute_claude_usage result); None until first compute.
_usage_cache: tuple[float, dict] | None = None
_usage_lock = threading.Lock()
_CLAUDE_PROJECTS = Path.home() / ".claude" / "projects"
def compute_claude_usage(window_hours: float = 5.0) -> dict:
    """Walk every recent session JSONL and sum token usage in the window.

    Returns {"available": False} when ~/.claude/projects doesn't exist;
    otherwise a dict of per-category token sums, the message count, and
    `reset_at` — the projected end (unix seconds) of the rolling window.
    Unparseable lines and unreadable files are skipped silently.
    """
    if not _CLAUDE_PROJECTS.exists():
        return {"available": False}
    from datetime import datetime
    cutoff = time.time() - window_hours * 3600
    fresh = cache_w = cache_r = out = msgs = 0
    earliest_msg_ts: float | None = None
    for jsonl in _CLAUDE_PROJECTS.glob("*/*.jsonl"):
        try:
            # mtime pre-filter: a file untouched since the cutoff cannot
            # contain in-window records, so skip parsing it entirely.
            if jsonl.stat().st_mtime < cutoff:
                continue
        except OSError:
            continue
        try:
            with jsonl.open(encoding="utf-8", errors="replace") as f:
                for line in f:
                    try:
                        rec = json.loads(line)
                    except json.JSONDecodeError:
                        continue
                    ts_str = rec.get("timestamp")
                    if not isinstance(ts_str, str):
                        continue
                    try:
                        # Timestamps are ISO-8601 with a trailing Z.
                        ts = datetime.fromisoformat(ts_str.replace("Z", "+00:00")).timestamp()
                    except Exception:
                        continue
                    if ts < cutoff:
                        continue
                    usage = ((rec.get("message") or {}).get("usage")) or {}
                    if not usage:
                        continue
                    fresh += int(usage.get("input_tokens") or 0)
                    cache_w += int(usage.get("cache_creation_input_tokens") or 0)
                    cache_r += int(usage.get("cache_read_input_tokens") or 0)
                    out += int(usage.get("output_tokens") or 0)
                    msgs += 1
                    if earliest_msg_ts is None or ts < earliest_msg_ts:
                        earliest_msg_ts = ts
        except OSError:
            continue
    # The plan's 5h rolling reset is anchored at the *first* message of the
    # window, so the next reset is window_hours after the earliest in-window
    # message (not "now + 5h"). If we found nothing, the window is wide open.
    reset_at = int(earliest_msg_ts + window_hours * 3600) if earliest_msg_ts else None
    return {
        "available": True,
        "window_hours": window_hours,
        "messages": msgs,
        "input_tokens": fresh,
        "cache_creation_tokens": cache_w,
        "cache_read_tokens": cache_r,
        "output_tokens": out,
        "total_tokens": fresh + cache_w + cache_r + out,
        "reset_at": reset_at,
    }
def cached_claude_usage() -> dict:
    """TTL-cached compute_claude_usage (see _USAGE_TTL).

    The lock is NOT held during the recompute, so concurrent callers past
    an expired cache may each rescan the JSONLs; last writer wins.
    Presumably deliberate — holding the lock through the filesystem walk
    would serialize callers behind it.
    """
    global _usage_cache
    now = time.time()
    with _usage_lock:
        snapshot = _usage_cache
        if snapshot is not None and now - snapshot[0] < _USAGE_TTL:
            return snapshot[1]
    result = compute_claude_usage()
    with _usage_lock:
        _usage_cache = (now, result)
    return result
# --- Authoritative plan usage scraped from `claude` TUI's /usage screen ---
#
# The JSONL aggregation above is a free local approximation. The real numbers
# (session %, week-all-models %, week-Sonnet %) only live server-side at
# Anthropic and only render inside `claude`'s interactive TUI. We spawn a
# headless tmux session, run claude, send /usage, capture the rendered screen,
# and parse out the three progress bars. Refreshed every 5 minutes in a
# background thread; that interval bounds the cost (a tiny haiku call per
# scrape) without making the bars feel stale.
USAGE_SCRAPE_REFRESH_S = 300.0        # background re-scrape interval
USAGE_SCRAPE_BOOT_TIMEOUT_S = 30.0    # max wait for claude's prompt to appear
USAGE_SCRAPE_RENDER_TIMEOUT_S = 12.0  # max wait for the /usage bars to render
# (scraped_at, parse result); (0.0, None) until the first successful scrape.
_scrape_cache: tuple[float, dict | None] = (0.0, None)
_scrape_in_flight = False
_scrape_lock = threading.Lock()
# Meter labels exactly as the TUI renders them → short keys for the payload.
_USAGE_LABELS = {
    "Current session": "session",
    "Current week (all models)": "week_all",
    "Current week (Sonnet only)": "week_sonnet",
}
def parse_usage_screen(text: str) -> dict:
    """Pick each usage meter's percentage and reset string out of a captured
    /usage screen.

    The TUI lays each meter out as three consecutive lines — label,
    bar+percent, "Resets ..." — for the three labels in _USAGE_LABELS.
    Returns {"available": bool, "meters": {key: {label, percent, resets}}}.
    """
    lines = text.split("\n")
    meters: dict[str, dict] = {}
    for idx, raw in enumerate(lines):
        label = raw.strip()
        key = _USAGE_LABELS.get(label)
        # Need two lines of lookahead (percent line + resets line).
        if not key or idx + 2 >= len(lines):
            continue
        pct = re.search(r"(\d+)%\s+used", lines[idx + 1])
        if pct is None:
            continue
        reset_match = re.search(r"Resets\s+(.+?)\s*$", lines[idx + 2])
        meters[key] = {
            "label": label,
            "percent": int(pct.group(1)),
            "resets": reset_match.group(1).strip() if reset_match else "",
        }
    return {"available": bool(meters), "meters": meters}
def scrape_usage_via_tmux() -> dict | None:
    """Drive `claude` in a hidden tmux session to capture its /usage output.

    Returns parse_usage_screen's dict on success, or None when claude never
    boots, the bars never render, or anything else fails. The throwaway
    session is always killed in the finally block.
    """
    # Unique session name so concurrent/leftover scrapes can't collide.
    sess = f"periscope-usage-{uuid.uuid4().hex[:8]}"
    # Empty MCP config (created next to this script on first use) keeps the
    # scrape session from starting any MCP servers.
    empty_mcp = STATIC.parent / ".empty-mcp.json"
    if not empty_mcp.exists():
        empty_mcp.write_text('{"mcpServers":{}}')
    def cap() -> str:
        # Snapshot the hidden session's currently visible pane content.
        return subprocess.run(
            ["tmux", "capture-pane", "-t", sess, "-p"],
            capture_output=True, text=True, timeout=5,
        ).stdout
    try:
        subprocess.run(
            [
                "tmux", "new-session", "-d", "-s", sess, "-x", "200", "-y", "60",
                f"claude --strict-mcp-config {empty_mcp}",
            ],
            check=True, capture_output=True, timeout=5,
        )
        # Wait for the prompt chevron to indicate claude is ready for input.
        deadline = time.time() + USAGE_SCRAPE_BOOT_TIMEOUT_S
        booted = False
        while time.time() < deadline:
            time.sleep(0.5)
            if "❯" in cap():
                booted = True
                break
        if not booted:
            return None
        # Send /usage and wait for the bars to render.
        subprocess.run(
            ["tmux", "send-keys", "-t", sess, "/usage", "Enter"],
            check=False, capture_output=True, timeout=5,
        )
        deadline = time.time() + USAGE_SCRAPE_RENDER_TIMEOUT_S
        usage_text = ""
        while time.time() < deadline:
            time.sleep(0.5)
            content = cap()
            if "% used" in content and "Resets" in content:
                usage_text = content
                break
        if not usage_text:
            return None
        return parse_usage_screen(usage_text)
    except Exception:
        return None
    finally:
        # Best-effort cleanup of the throwaway session.
        subprocess.run(
            ["tmux", "kill-session", "-t", sess],
            capture_output=True, check=False,
        )
def _refresh_scrape_into_cache() -> None:
    """Background worker: run one /usage scrape and publish the result.

    A failed scrape leaves the previous cache entry in place (stale data
    beats no data); the in-flight flag is always cleared.
    """
    global _scrape_cache, _scrape_in_flight
    try:
        scraped = scrape_usage_via_tmux()
    except Exception:
        scraped = None
    with _scrape_lock:
        if scraped:
            _scrape_cache = (time.time(), scraped)
        _scrape_in_flight = False
def cached_scraped_usage() -> dict | None:
    """Stale-while-revalidate accessor for the scraped /usage meters.

    Serves the last successful scrape immediately and kicks off a
    background refresh whenever the cache is older than
    USAGE_SCRAPE_REFRESH_S. First-ever call returns None; the dashboard's
    next poll will see the freshly-cached result.
    """
    global _scrape_in_flight
    now = time.time()
    with _scrape_lock:
        scraped_at, meters = _scrape_cache
        expired = now - scraped_at >= USAGE_SCRAPE_REFRESH_S
        if expired and not _scrape_in_flight:
            _scrape_in_flight = True
            threading.Thread(target=_refresh_scrape_into_cache, daemon=True).start()
        return meters
def cached_pr_state(path: str, branch: str | None) -> dict | None:
    """Stale-while-revalidate PR lookup.

    Returns whatever is cached instantly (None on a cold cache); when the
    entry is missing or older than _PR_TTL, a background refresh is
    scheduled so the next poll picks up the fresh value.
    """
    if not branch:
        return None
    key = (path, branch)
    now = time.time()
    with _pr_lock:
        entry = _pr_cache.get(key)
        if entry is not None and now - entry[0] < _PR_TTL:
            return entry[1]
        if key not in _pr_fetching:
            _pr_fetching.add(key)
            threading.Thread(
                target=_fetch_pr_into_cache, args=(path, branch), daemon=True
            ).start()
        # Serve the stale value (or None) while the refresh runs.
        return entry[1] if entry else None
def list_windows() -> list[dict]:
    """List every tmux window across all sessions.

    Returns one dict per window with: session, index, name, active flag,
    the active pane's cwd (`pane_current_path`), and the raw @periscope_id
    window option (empty string for unmanaged windows — `resolve_pids`
    mints and stamps an id on first sighting).

    Fix: the original unpacked `parts[:4]` unconditionally, so a truncated
    or malformed line raised ValueError (and a non-numeric index raised in
    int()) and killed the entire listing. Such lines are now skipped.
    """
    out = tmux(
        "list-windows",
        "-a",
        "-F",
        "#{session_name}\t#{window_index}\t#{window_name}\t#{window_active}\t#{pane_current_path}\t#{@periscope_id}",
    )
    rows: list[dict] = []
    for line in out.strip().split("\n"):
        if not line:
            continue
        parts = line.split("\t")
        # Guard against malformed lines: need at least the four required
        # fields and a numeric window index.
        if len(parts) < 4 or not parts[1].isdigit():
            continue
        session, idx, name, active = parts[:4]
        # pane_current_path is the active pane's cwd; safe even when missing.
        cwd = parts[4] if len(parts) > 4 else ""
        # @periscope_id is empty for unmanaged windows.
        pid_raw = parts[5] if len(parts) > 5 else ""
        rows.append(
            {
                "session": session,
                "index": int(idx),
                "name": name,
                "active": active == "1",
                "cwd": cwd,
                "pid_raw": pid_raw,
            }
        )
    return rows
# --- Periscope window-ids (@periscope_id) ---------------------------------
#
# Every window we see acquires a periscope-assigned 8-char hex id, stamped
# onto the window as a tmux user option `@periscope_id`. The id survives
# rename / move / reorder. When the tmux server restarts (reboot,
# kill-server, OOM) and the option is gone, `_rebind_pid` recovers it from
# the (session, name) hint in `last_seen` within a 30-day window — see the
# rebind heuristic in the design spec.
_PID_TTL_S = 30 * 86400  # 30 days, in seconds
def _mint_pid() -> str:
return uuid.uuid4().hex[:8]
def _stamp_pid(target: str, pid: str) -> None:
    """Best-effort write of `@periscope_id` onto the window *target*.

    A failed set-option (window vanished, tmux racing us) is not fatal:
    the next poll simply repeats the attempt. Uses the project's
    read-style `tmux()` helper since stderr surfacing isn't needed here.
    """
    args = ("set-option", "-w", "-t", target, "@periscope_id", pid)
    tmux(*args)
def _rebind_pid(
windows_block: dict,
session: str,
name: str,
branch: str | None,
cwd: str | None,
taken_pids: set[str],
) -> str | None:
"""Look for an orphan id in state's `windows` block that matches the
sighted window on (session, name) — or as a softer fallback,
(branch, cwd). Returns the matched pid, or None if no candidate
matches."""
now = time.time()
# Pass 1: strong match on (session, name).
# Pass 2: secondary match on (branch, cwd) when both are set.
for pass_n in (1, 2):
for pid, entry in windows_block.items():
if pid in taken_pids:
continue
ls = entry.get("last_seen") or {}
ts = ls.get("ts")
if not ts or now - ts > _PID_TTL_S:
continue
if pass_n == 1:
if ls.get("session") == session and ls.get("name") == name:
return pid
else:
if not branch or not cwd:
continue
if ls.get("branch") == branch and ls.get("cwd") == cwd:
return pid
return None
def resolve_pids(windows: list[dict]) -> None:
    """Mutates `windows` in place, adding a `pid` field to every entry.
    For each window:
    1. If @periscope_id is non-empty, use it.
    2. Else attempt rebind from state.json's `windows` block.
    3. Else mint a fresh id.
    In cases 2 and 3, stamp the chosen id onto the tmux window (`set-option
    -w @periscope_id`) so subsequent polls take the fast path.
    Always updates the pid's `last_seen` block with (session, name, branch,
    cwd, now) — but only flags `dirty` when something other than the `ts`
    field changed, to avoid thrashing state.json on every 3s poll.
    Callers MUST have populated each window's `branch` (from
    cached_git_state) before calling, or rebind falls back to the
    session/name-only path.
    """
    if not windows:
        return
    now_ts = int(time.time())
    # Everything that reads/writes _STATE goes through _STATE_LOCK. We hold
    # the lock for the full resolve pass — it's cheap (kilobyte-scale JSON
    # write at the end) and gives us a single consistent snapshot of the
    # windows block to score rebinds against.
    with _STATE_LOCK:
        wblock = _STATE.setdefault("windows", {})
        # Ids claimed during this pass; prevents one orphaned state entry
        # from being rebound to two different windows in the same poll.
        taken: set[str] = set()
        dirty = False
        for w in windows:
            target = f"{w['session']}:{w['index']}"
            pid_raw = (w.get("pid_raw") or "").strip()
            pid: str | None = None
            # Fast path: tmux already carries a well-formed id
            # (exactly 8 lowercase hex chars — the shape _mint_pid emits).
            if pid_raw and len(pid_raw) == 8 and all(c in "0123456789abcdef" for c in pid_raw):
                pid = pid_raw
            if pid is None:
                pid = _rebind_pid(
                    wblock,
                    session=w["session"],
                    name=w["name"],
                    branch=w.get("branch"),
                    cwd=w.get("cwd"),
                    taken_pids=taken,
                )
            if pid is None:
                pid = _mint_pid()
            # Stamp tmux only when we synthesized the id (mint or rebind).
            if pid != pid_raw:
                _stamp_pid(target, pid)
                dirty = True
            taken.add(pid)
            w["pid"] = pid
            # `pid_raw` was internal — strip it before emit.
            w.pop("pid_raw", None)
            # Refresh last_seen. Only flag dirty if something *other than*
            # `ts` changed — a pure ts bump every 3s would thrash state.json
            # to disk thousands of times an hour for no semantic gain.
            entry = wblock.setdefault(pid, {})
            prev = entry.get("last_seen") or {}
            new_seen = {
                "session": w["session"],
                "name": w["name"],
                "branch": w.get("branch"),
                "cwd": w.get("cwd"),
                "ts": now_ts,
            }
            identity_changed = (
                "last_seen" not in entry
                or any(prev.get(k) != new_seen[k] for k in ("session", "name", "branch", "cwd"))
            )
            entry["last_seen"] = new_seen
            if identity_changed:
                dirty = True
        # GC: drop windows entries that (a) carry no notes and no tags, AND
        # (b) weren't refreshed this pass, AND (c) have a last_seen older
        # than 30 days. Annotated entries are immune — losing one would
        # lose notes.
        cutoff = now_ts - _PID_TTL_S
        for pid in list(wblock.keys()):
            if pid in taken:
                continue
            entry = wblock[pid]
            if entry.get("notes") or entry.get("tags"):
                continue
            ts = (entry.get("last_seen") or {}).get("ts") or 0
            if ts < cutoff:
                del wblock[pid]
                dirty = True
        if dirty:
            _write_state(_STATE)
def _attach_git_then_resolve_pids(windows: list[dict]) -> None:
    """Enrich each window with its git branch, then resolve periscope ids.

    resolve_pids' secondary rebind match keys on `branch`, so it must be
    populated (via cached_git_state) before the resolve pass runs.
    """
    for win in windows:
        git_state = cached_git_state(win.get("cwd", "")) or {}
        try:
            win["branch"] = git_state["branch"]
        except KeyError:
            # No cached git info for this cwd — leave `branch` unset so
            # rebind falls back to the session/name-only match.
            pass
    resolve_pids(windows)
def capture(target: str, lines: int = 100) -> str:
    """Return the last *lines* lines of scrollback+screen for the pane at
    *target*, via `tmux capture-pane -p`."""
    start = f"-{lines}"  # negative start = that many lines into history
    return tmux("capture-pane", "-t", target, "-p", "-S", start)
def deliver_input(target: str, text: str) -> None: