#!/usr/bin/env python3
"""
Main entry point for the Agentic Research Framework.
This provides both CLI and web interfaces for running experiments
and testing the system design use case.
"""
import asyncio
import json
from pathlib import Path
from typing import Optional

import structlog
import typer
from rich.console import Console
from rich.table import Table

from config.settings import settings
from research.experiment_orchestrator import GenericExperimentRunner, ExperimentConfig
from use_cases.system_design.orchestrator import SystemDesignOrchestrator
from use_cases.system_design.config import USE_CASE_CONFIG, TEST_SCENARIOS
# Configure structured logging
structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        structlog.processors.UnicodeDecoder(),
        structlog.processors.JSONRenderer(),
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

logger = structlog.get_logger(__name__)
console = Console()

app = typer.Typer(
    name="research-framework",
    help="Agentic Research Framework for testing reflection vs model capability",
    add_completion=False,
)
@app.command()
def serve_api(
    host: str = typer.Option("0.0.0.0", help="Host to bind the API server"),
    port: int = typer.Option(8000, help="Port to bind the API server"),
    reload: bool = typer.Option(True, help="Enable auto-reload for development"),
):
    """
    Start the FastAPI server for the research framework.

    This provides REST API endpoints for frontend integration.
    """
    import uvicorn

    console.print("[bold blue]Starting Research Framework API Server[/bold blue]")
    console.print(f"Host: {host}")
    console.print(f"Port: {port}")
    console.print(f"Docs: http://{host}:{port}/docs")
    console.print()

    try:
        uvicorn.run(
            "api.main:app",
            host=host,
            port=port,
            reload=reload,
            log_level="info",
        )
    except Exception as e:
        console.print(f"[red]Failed to start API server: {str(e)}[/red]")
        raise typer.Exit(1)
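
# Example invocation:
#   python main.py serve-api --host 127.0.0.1 --port 8080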
@app.command()
def test_system_design(
    scenario: str = typer.Option(
        "simple",
        help="Test scenario: simple, medium, or complex"
    ),
    model: str = typer.Option(
        "gemini-2.5-flash-lite",
        help="Model to use for testing"
    ),
    reflection_iterations: int = typer.Option(
        0,
        help="Number of reflection iterations (0 = baseline)"
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose logging"
    ),
):
    """
    Test the system design use case with a single scenario.

    This is useful for development and debugging before running full experiments.
    """
    if verbose:
        # 20 == logging.INFO: show INFO-level events when verbose is requested
        structlog.configure(wrapper_class=structlog.make_filtering_bound_logger(20))

    console.print("[bold blue]Testing System Design Use Case[/bold blue]")
    console.print(f"Scenario: {scenario}")
    console.print(f"Model: {model}")
    console.print(f"Reflection iterations: {reflection_iterations}")
    console.print()

    # Get test scenario
    if scenario in TEST_SCENARIOS:
        test_input = TEST_SCENARIOS[scenario][0]["input"]
    else:
        console.print(f"[red]Unknown scenario: {scenario}[/red]")
        console.print(f"Available scenarios: {list(TEST_SCENARIOS.keys())}")
        raise typer.Exit(1)

    # Run test
    asyncio.run(_run_system_design_test(test_input, model, reflection_iterations))
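
# Example invocation:
#   python main.py test-system-design --scenario medium --reflection-iterations 2 -v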
async def _run_system_design_test(test_input: str, model: str, reflection_iterations: int):
    """Run a single system design test."""
    try:
        # Initialize orchestrator
        orchestrator = SystemDesignOrchestrator(USE_CASE_CONFIG)

        # Create appropriate pipeline
        if reflection_iterations == 0:
            pipeline = orchestrator.create_baseline_pipeline(model)
            console.print("[yellow]Running baseline pipeline (no reflection)[/yellow]")
        else:
            pipeline = orchestrator.create_reflection_pipeline(
                producer_model=model,
                max_iterations=reflection_iterations
            )
            console.print(f"[green]Running reflection pipeline ({reflection_iterations} max iterations)[/green]")

        console.print("\n[bold]Input:[/bold]")
        console.print(test_input)
        console.print("\n[bold]Processing...[/bold]")

        # Execute pipeline
        result = await pipeline.run({"input": test_input})

        # Display results
        console.print("\n[bold green]Results:[/bold green]")
        if isinstance(result, dict):
            for key, value in result.items():
                console.print(f"[blue]{key}:[/blue] {str(value)[:200]}...")
        else:
            console.print(str(result))

        console.print("\n[bold]Test completed successfully![/bold]")
    except Exception as e:
        console.print(f"[red]Test failed: {str(e)}[/red]")
        logger.error("System design test failed", error=str(e))
        raise typer.Exit(1)
@app.command()
def run_experiment(
    config_file: Path = typer.Argument(
        ...,
        help="Path to experiment configuration JSON file",
        exists=True,
        file_okay=True,
        dir_okay=False
    ),
    output_dir: Optional[Path] = typer.Option(
        None,
        help="Override output directory for results"
    ),
    verbose: bool = typer.Option(
        False,
        "--verbose",
        "-v",
        help="Enable verbose logging"
    ),
):
    """
    Run a complete research experiment from a configuration file.

    This executes the full experimental protocol for comparing
    reflection vs model capability.
    """
    if verbose:
        # 20 == logging.INFO
        structlog.configure(wrapper_class=structlog.make_filtering_bound_logger(20))

    console.print("[bold blue]Running Research Experiment[/bold blue]")
    console.print(f"Configuration: {config_file}")
    if output_dir:
        console.print(f"Output directory: {output_dir}")
    console.print()

    # Run experiment
    asyncio.run(_run_research_experiment(config_file, output_dir))
async def _run_research_experiment(config_file: Path, output_dir: Optional[Path]):
    """Run a complete research experiment."""
    try:
        # Load configuration
        with open(config_file, "r") as f:
            config_data = json.load(f)
        config = ExperimentConfig(**config_data)
        if output_dir:
            config.output_dir = str(output_dir)

        # Initialize experiment runner
        runner = GenericExperimentRunner()

        console.print(f"[yellow]Starting experiment: {config.experiment_id}[/yellow]")
        console.print(f"Use case: {config.use_case}")
        console.print(f"Models: {config.models_to_test}")
        console.print(f"Reflection configs: {config.reflection_configs}")
        console.print(f"Scenarios: {len(config.test_scenarios)}")
        console.print(f"Repetitions: {config.repetitions}")
        console.print()

        # Run experiment
        results = await runner.run_experiment(config)

        # Display summary
        console.print("\n[bold green]Experiment Completed![/bold green]")
        console.print(f"Total experiments: {len(results['results'])}")

        # Show summary table
        summary = results["summary"]
        if "by_model" in summary:
            table = Table(title="Results by Model")
            table.add_column("Model", style="cyan")
            table.add_column("Mean Quality", style="green")
            table.add_column("Count", style="blue")
            for model, stats in summary["by_model"].items():
                table.add_row(
                    model,
                    f"{stats['mean']:.3f}",
                    str(stats["count"])
                )
            console.print(table)

        console.print(f"\nResults saved to: {results.get('output_dir', 'default location')}")
    except Exception as e:
        console.print(f"[red]Experiment failed: {str(e)}[/red]")
        logger.error("Research experiment failed", error=str(e))
        raise typer.Exit(1)
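
# A minimal example configuration for `run-experiment`. The field names below
# are inferred from how ExperimentConfig is used in this file; the actual schema
# is defined in research.experiment_orchestrator and may include more fields.
#
#   {
#     "experiment_id": "reflection-vs-capability-001",
#     "use_case": "system_design",
#     "models_to_test": ["gemini-2.5-flash-lite"],
#     "reflection_configs": [0, 1, 3],
#     "test_scenarios": ["simple", "medium", "complex"],
#     "repetitions": 3,
#     "output_dir": "experiments/output"
#   }
#
# Example invocation:
#   python main.py run-experiment experiment.json --output-dir results/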
@app.command()
def list_scenarios():
    """List available test scenarios for system design."""
    console.print("[bold blue]Available Test Scenarios[/bold blue]\n")
    for complexity, scenarios in TEST_SCENARIOS.items():
        console.print(f"[bold]{complexity.upper()}[/bold]")
        for scenario in scenarios:
            console.print(f"  ID: {scenario['id']}")
            console.print(f"  Input: {scenario['input'][:100]}...")
        console.print()
@app.command()
def check_config():
    """Check framework configuration and dependencies."""
    console.print("[bold blue]Framework Configuration Check[/bold blue]\n")

    # Check API key
    if settings.google_api_key:
        console.print("[green]✓[/green] Google API key configured")
    else:
        console.print("[red]✗[/red] Google API key not found")

    # Check models
    console.print(f"[blue]Available models:[/blue] {', '.join(settings.available_models)}")
    console.print(f"[blue]Default model:[/blue] {settings.default_model}")
    console.print(f"[blue]Pro model:[/blue] {settings.pro_model}")

    # Check directories
    settings.experiment_output_dir.mkdir(parents=True, exist_ok=True)
    console.print(f"[blue]Output directory:[/blue] {settings.experiment_output_dir}")

    # Check research mode
    if settings.enable_research_mode:
        console.print("[green]✓[/green] Research mode enabled")
    else:
        console.print("[yellow]![/yellow] Research mode disabled")

    console.print("\n[bold green]Configuration check completed![/bold green]")
def cli_app():
    """Entry point for the CLI application."""
    app()


if __name__ == "__main__":
    cli_app()