Skip to content

Commit 32205c3

Browse files
feat: add architectural impact analyzer utility and spec
1 parent cdbea09 commit 32205c3

2 files changed

Lines changed: 151 additions & 0 deletions

File tree

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
---
2+
description: Preview the architectural impact and risks of a proposed change across all specifications.
3+
---
4+
5+
## User Input
6+
7+
```text
8+
$ARGUMENTS (The description of the proposed change)

scripts/impact_analyzer.py

Lines changed: 143 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,143 @@
1+
# Standard library
import os
import json
import argparse

# Third-party: OpenAI-compatible API client and .env file support
from openai import OpenAI
from dotenv import load_dotenv

# Load AI_API_KEY / AI_BASE_URL / AI_MODEL_ID from a local .env file, if present.
load_dotenv()
7+
8+
def setup_client():
    """
    Build the OpenAI-compatible client from environment configuration.

    AI_API_KEY is mandatory. AI_BASE_URL optionally redirects the client to a
    non-OpenAI endpoint; it defaults to the official OpenAI API.

    Raises:
        ValueError: if AI_API_KEY is not set in the environment.
    """
    key = os.getenv("AI_API_KEY")
    if not key:
        raise ValueError("CRITICAL ERROR: AI_API_KEY environment variable is not set.")

    endpoint = os.getenv("AI_BASE_URL", "https://api.openai.com/v1")
    return OpenAI(api_key=key, base_url=endpoint)
20+
21+
def load_project_context(target_dir):
    """
    Aggregate every markdown (.md) file in *target_dir* into one string so the
    LLM receives the full architectural context.

    Returns:
        None if *target_dir* is not an existing directory; otherwise the
        concatenated file sections (empty string when no .md files exist).
        Each section is labelled with its filename so the model can cite it.
    """
    # isdir (not exists): a plain file at this path would crash os.listdir.
    if not os.path.isdir(target_dir):
        return None

    sections = []
    # sorted() keeps the assembled prompt deterministic across filesystems.
    for filename in sorted(os.listdir(target_dir)):
        if not filename.endswith(".md"):
            continue
        file_path = os.path.join(target_dir, filename)
        try:
            with open(file_path, "r", encoding="utf-8") as f:
                content = f.read()
            # Fix: label each section with the actual filename (the previous
            # version emitted a useless literal placeholder here).
            sections.append(f"--- FILE: {filename} ---\n{content}")
        except (OSError, UnicodeDecodeError) as e:
            # Best-effort: skip unreadable files but say which one failed.
            print(f"[!] Warning: Could not read {file_path}: {e}")

    return "\n\n".join(sections)
41+
42+
def perform_impact_analysis(client, context, change_request, model_id):
    """
    Ask the model to assess a proposed change against the project context.

    Returns:
        The raw model response text (expected to be a JSON document). On any
        API failure, a JSON string of the form {"error": "<message>"}.
    """
    role_instructions = (
        "You are a Senior Systems Architect. Analyze the impact of a proposed change "
        "on the provided technical specifications and output a structured JSON report."
    )

    request_body = f"""
### PROJECT CONTEXT:
{context}

### PROPOSED CHANGE:
"{change_request}"

### OUTPUT REQUIREMENTS:
Return a JSON object with EXACTLY these keys:
- complexity_score_diff: (int 1-10)
- estimated_hours_delta: (str)
- affected_files: (list of filenames)
- technical_tasks: (list of strings)
- architecture_risks: (list of strings)
- executive_summary: (str)
"""

    conversation = [
        {"role": "system", "content": role_instructions},
        {"role": "user", "content": request_body},
    ]

    try:
        reply = client.chat.completions.create(model=model_id, messages=conversation)
        return reply.choices[0].message.content
    except Exception as e:
        # Surface API errors as a JSON error payload so the caller's report
        # renderer can display them instead of crashing.
        return json.dumps({"error": str(e)})
79+
80+
def render_architect_report(raw_json):
    """
    Parse the model's JSON report and print it as a formatted console summary.

    Accepts either a bare JSON object or one wrapped in a markdown code fence.
    Prints a failure message (and returns) if the payload is an {"error": ...}
    object or cannot be parsed as a JSON object.
    """
    parse_failure = "[!] Error: Could not parse AI response as valid JSON."
    try:
        text = raw_json.strip()
        # Strip only a *leading/trailing* markdown fence; the old global
        # replace() mangled any backticks that appeared inside the payload.
        if text.startswith("```"):
            text = text.split("\n", 1)[1] if "\n" in text else ""
        if text.endswith("```"):
            text = text[:-3]
        data = json.loads(text.strip())
    except (AttributeError, json.JSONDecodeError):
        # AttributeError covers a None/non-string argument.
        print(parse_failure)
        return

    # The model may legally return a JSON array/scalar; only objects are valid.
    if not isinstance(data, dict):
        print(parse_failure)
        return

    if "error" in data:
        print(f"[!] Analysis Failed: {data['error']}")
        return

    score = data.get("complexity_score_diff", 0)
    # Bucket the 1-10 score into a coarse severity label.
    status = "CRITICAL/HIGH" if score >= 7 else "MODERATE" if score >= 4 else "LOW"

    print("\n" + "=" * 60)
    print(" SYSTEM ARCHITECT IMPACT ANALYSIS")
    print("=" * 60)
    print(f" IMPACT LEVEL : {status} (Score: {score}/10)")
    print(f" EST. EFFORT : {data.get('estimated_hours_delta')}")

    print("\n [ ] TARGETED FILES:")
    for f in data.get('affected_files', []):
        print(f" * {f}")

    print("\n [ ] REQUIRED TASKS:")
    for task in data.get('technical_tasks', []):
        print(f" - {task}")

    print("\n [!] ARCHITECTURAL RISKS:")
    for risk in data.get('architecture_risks', []):
        print(f" ! {risk}")

    print(f"\n [*] SUMMARY: {data.get('executive_summary')}")
    print("=" * 60 + "\n")
115+
116+
if __name__ == "__main__":
    # CLI entry point: collect the change description and model id, then run
    # the impact analysis against the bundled specification presets.
    arg_parser = argparse.ArgumentParser(description="Spec-Kit Architectural Impact Previewer")
    arg_parser.add_argument("--change", required=True, help="Description of the change request")
    arg_parser.add_argument("--model", default=os.getenv("AI_MODEL_ID"), help="Model ID to invoke")
    cli_args = arg_parser.parse_args()

    # Resolve the presets directory relative to this script so invocation
    # works from any working directory; SPECKIT_PRESETS_DIR overrides it.
    here = os.path.dirname(os.path.abspath(__file__))
    fallback_presets = os.path.abspath(os.path.join(here, "..", "presets", "lean", "commands"))
    spec_dir = os.getenv("SPECKIT_PRESETS_DIR", fallback_presets)

    try:
        ai_client = setup_client()
        spec_context = load_project_context(spec_dir)

        if not spec_context:
            print(f"[!] Path Error: No specification files found at {spec_dir}")
        elif not cli_args.model:
            print("[!] Configuration Error: No Model ID provided via --model or AI_MODEL_ID.")
        else:
            print(f"[*] Analyzing global impact for: '{cli_args.change[:50]}...'")
            raw_report = perform_impact_analysis(ai_client, spec_context, cli_args.change, cli_args.model)
            render_architect_report(raw_report)
    except Exception as error:
        # Top-level boundary: report any unexpected failure instead of a traceback.
        print(f"[!] Runtime Exception: {error}")

0 commit comments

Comments
 (0)