Skip to content

Commit 87219a6

Browse files
committed
Implement Why Engine for decision intelligence
- Added WhyEvaluateRequest and WhyEvaluateResponse models for API interaction.
- Created /why/evaluate route to process evaluation requests.
- Integrated WhyEngine into CortexEngine for evaluating source items.
- Developed EvaluationContext and SourceItem classes for structured input.
- Implemented decision-making logic in WhyEngine, including relevance scoring and action recommendations.
- Enhanced FocusEngine to utilize insights and signals for enriched brief generation.
- Added comprehensive unit tests for Why Engine functionality and integration with existing components.
1 parent 69040c7 commit 87219a6

8 files changed

Lines changed: 1055 additions & 31 deletions

File tree

cortex_core/api/models.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,3 +178,30 @@ class DigestEvalResponse(BaseModel):
178178
context_keyword_coverage: float = 0.0
179179
project_fit_score: float = 0.0
180180
top_articles: list[ArticleScoreResponse] = Field(default_factory=list)
181+
182+
183+
# ── Why Engine ────────────────────────────────────────────────────
184+
185+
186+
class WhyEvaluateRequest(BaseModel):
    """Input for the Why Engine. Swift clients send this.

    Only ``title`` is required; every other field carries a safe
    default so clients can POST a minimal payload.
    """

    title: str
    content: str = ""
    # Kind of item being evaluated. Values the clients use:
    # article, note, link, digest_item, project_update — not validated here,
    # any string is accepted.
    source_type: str = "article"
    url: str = ""
    tags: list[str] = Field(default_factory=list)
194+
195+
196+
class WhyEvaluateResponse(BaseModel):
    """Structured decision output. Swift clients decode this.

    All fields default to empty/zero so a partially populated engine
    result still serializes without errors.
    """

    summary: str = ""
    why_it_matters: str = ""
    impact_on_active_project: str = ""
    # Relation of the item to current beliefs: supports, contradicts, unclear.
    contradiction_or_confirmation: str = ""
    recommended_action: str = ""
    # Triage verdict: act_now, watch, ignore.
    ignore_or_watch: str = ""
    # Engine confidence in the decision; presumably in [0, 1] — TODO confirm.
    confidence: float = 0.0
    tags: list[str] = Field(default_factory=list)
    # Timestamp of the evaluation, as a string produced by the engine
    # (format not visible here — verify against WhyEngine output).
    evaluated_at: str = ""

cortex_core/api/routes/why.py

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,24 @@
1+
"""
2+
Why Engine API route
3+
---------------------
4+
POST /why/evaluate — evaluate a single source item through the Why Engine.
5+
6+
Thin route. All intelligence lives in cortex_core/why_engine.py.
7+
"""
8+
9+
from __future__ import annotations
10+
11+
from fastapi import APIRouter
12+
13+
from cortex_core.api.models import WhyEvaluateRequest, WhyEvaluateResponse
14+
from cortex_core.api.server import get_engine
15+
16+
router = APIRouter(prefix="/why", tags=["why-engine"])
17+
18+
19+
@router.post("/evaluate", response_model=WhyEvaluateResponse)
async def evaluate_item(request: WhyEvaluateRequest) -> WhyEvaluateResponse:
    """Run one source item through the Why Engine and return its decision."""
    # Thin route: the engine does all the work; we just translate models.
    decision = get_engine().evaluate_why(request.model_dump())
    return WhyEvaluateResponse(**decision)

cortex_core/api/server.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,10 +56,11 @@ def create_app() -> FastAPI:
5656
)
5757

5858
# Register routers
59-
from cortex_core.api.routes import context, digest, focus, health, knowledge, pipeline, posts, profile
59+
from cortex_core.api.routes import context, digest, focus, health, knowledge, pipeline, posts, profile, why
6060

6161
app.include_router(health.router)
6262
app.include_router(focus.router) # primary feature
63+
app.include_router(why.router) # why engine — per-item intelligence
6364
app.include_router(context.router) # agent context API
6465
app.include_router(profile.router)
6566
app.include_router(knowledge.router)

cortex_core/engine.py

Lines changed: 72 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29,6 +29,7 @@
2929
from cortex_core.retrieve import HybridRetriever
3030
from cortex_core.scoring import evaluate_digest
3131
from cortex_core.signals import SignalStore, detect_signals
32+
from cortex_core.why_engine import EvaluationContext, SourceItem, WhyEngine
3233

3334

3435
class CortexEngine:
@@ -54,6 +55,7 @@ def __init__(self, config: CortexConfig | None = None):
5455
self.signal_store = SignalStore(self.config.data_dir / "signals.json")
5556
self.decision_engine = DecisionEngine(self.config.data_dir)
5657
self.retriever = HybridRetriever()
58+
self.why_engine = WhyEngine()
5759

5860
# ------------------------------------------------------ knowledge CRUD
5961

@@ -160,11 +162,28 @@ def generate_focus_brief(
160162
*,
161163
use_llm: bool = False,
162164
) -> dict:
163-
"""Generate today's focus recommendations."""
165+
"""Generate today's focus recommendations.
166+
167+
Passes pre-computed scored articles, insights, and signals into
168+
the FocusEngine so the brief reflects the full intelligence chain
169+
instead of re-deriving everything from scratch.
170+
"""
171+
# Gather pre-computed enrichment data
172+
scored = self._latest_scored_articles() or []
173+
insights_data = [i.to_dict() for i in self.insights.recent(20)]
174+
signals_data = [s.to_dict() for s in self.signal_store.active_signals()]
175+
176+
kwargs = dict(
177+
use_llm=use_llm,
178+
scored_articles=scored if scored else None,
179+
insights=insights_data if insights_data else None,
180+
signals=signals_data if signals_data else None,
181+
)
182+
164183
if digest_path:
165-
brief = self.focus.generate_from_file(Path(digest_path), use_llm=use_llm)
184+
brief = self.focus.generate_from_file(Path(digest_path), **kwargs)
166185
else:
167-
brief = self.focus.generate_from_latest(self.config.data_dir, use_llm=use_llm)
186+
brief = self.focus.generate_from_latest(self.config.data_dir, **kwargs)
168187
self.focus.save_brief(brief, self.config.data_dir)
169188
return brief.to_dict()
170189

@@ -465,6 +484,56 @@ def get_full_context(self) -> dict:
465484
"""Return complete memory state for agent consumption."""
466485
return self.memory.full_context()
467486

487+
# ── Why Engine ──────────────────────────────────────────────
488+
489+
def evaluate_why(self, item_data: dict) -> dict:
    """Run a single source item through the Why Engine.

    Builds the full evaluation context from every memory layer,
    evaluates the item, and returns the decision as a plain dict.
    """
    source = SourceItem.from_dict(item_data)
    decision = self.why_engine.evaluate(source, self._build_evaluation_context())
    return decision.to_dict()
499+
500+
def _build_evaluation_context(self) -> EvaluationContext:
    """Assemble evaluation context from all four memory layers."""
    profile = self.memory.profile

    # Project layer: milestone and blockers of the first active project only.
    milestones: list[str] = []
    blockers: list[str] = []
    if profile.current_projects:
        project = self.memory.get_project(profile.current_projects[0])
        if project.current_milestone:
            milestones.append(project.current_milestone)
        blockers = list(project.active_blockers)

    # Assumptions are harvested from the five most recent decisions.
    assumptions: list[str] = [
        assumption
        for decision in self.decision_engine.recent_decisions(5)
        for assumption in decision.assumptions
    ]

    return EvaluationContext(
        goals=profile.goals,
        interests=profile.interests,
        current_projects=profile.current_projects,
        ignored_topics=profile.ignored_topics,
        project_milestones=milestones,
        project_blockers=blockers,
        # Decision history: descriptions of the last ten decisions.
        recent_decisions=[
            d.decision for d in self.decision_engine.recent_decisions(10)
        ],
        # Research layer: recurring themes.
        recent_themes=list(self.memory.research.recurring_themes),
        assumptions=assumptions,
    )
536+
468537
# ── Internal helpers ────────────────────────────────────────
469538

470539
def _latest_scored_articles(self) -> list:

0 commit comments

Comments
 (0)