-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
88 lines (67 loc) · 2.54 KB
/
app.py
File metadata and controls
88 lines (67 loc) · 2.54 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
"""AgentWrit Live — Support Ticket Zero-Trust Demo.
Flask app with HTMX + SSE. Three LLM-driven agents process support
tickets under broker-issued scoped credentials.
"""
from __future__ import annotations
from pathlib import Path
from dotenv import load_dotenv
from flask import Flask, Response, render_template, request, stream_with_context
from openai import OpenAI
from agentwrit import AgentWritApp
from demo2.config import APP_SCOPE_CEILING, DemoConfig
from demo2.data import QUICK_FILLS
from demo2.pipeline import run_pipeline
# Pull demo secrets/config from the .env file that sits next to this script.
load_dotenv(Path(__file__).parent / ".env")

# Flask application; templates and static assets are served from the
# directories alongside this file rather than Flask's defaults.
_HERE = Path(__file__).parent
app = Flask(
    __name__,
    static_folder=str(_HERE / "static"),
    template_folder=str(_HERE / "templates"),
)
def _get_app_and_llm() -> tuple[AgentWritApp, OpenAI, str, str]:
    """Build the AgentWrit SDK app and the OpenAI-compatible LLM client.

    All settings come from the environment-backed ``DemoConfig``.

    Returns:
        A 4-tuple of (SDK app, LLM client, model name, broker URL).
    """
    config = DemoConfig.from_env()
    sdk_app = AgentWritApp(
        broker_url=config.broker_url,
        client_id=config.client_id,
        client_secret=config.client_secret,
    )
    llm = OpenAI(base_url=config.llm_base_url, api_key=config.llm_api_key)
    return sdk_app, llm, config.llm_model, config.broker_url
@app.route("/")
def index():
    """Render the landing page with quick-fill examples and the scope ceiling."""
    context = {
        "quick_fills": QUICK_FILLS,
        "scope_ceiling": APP_SCOPE_CEILING,
    }
    return render_template("index.html", **context)
@app.route("/api/run", methods=["POST"])
def run_ticket():
    """SSE endpoint — runs the pipeline and streams events."""
    ticket_text = request.form.get("ticket", "").strip()
    if not ticket_text:
        # Report the validation failure over the same SSE channel the
        # client is already listening on, rather than an HTTP error.
        return Response("data: {\"error\": \"Empty ticket\"}\n\n",
                        content_type="text/event-stream")

    aa_app, llm_client, llm_model, broker_url = _get_app_and_llm()

    # The natural-expiry scenario is triggered by an explicit form flag,
    # or inferred when the ticket text matches the quick-fill phrasing.
    wants_natural_expiry = request.form.get("natural_expiry", "false") == "true"
    wants_natural_expiry = wants_natural_expiry or "no rush" in ticket_text.lower()

    def generate():
        events = run_pipeline(ticket_text, aa_app, llm_client, llm_model, broker_url,
                              natural_expiry=wants_natural_expiry)
        for event in events:
            yield event.to_sse()

    return Response(
        stream_with_context(generate()),
        content_type="text/event-stream",
        headers={
            # Disable caching and nginx proxy buffering so events flush live.
            "Cache-Control": "no-cache",
            "X-Accel-Buffering": "no",
        },
    )
@app.route("/api/quick-fills")
def quick_fills():
    """Serve the canned quick-fill tickets; Flask serializes the value to JSON."""
    examples = QUICK_FILLS
    return examples
if __name__ == "__main__":
    # Development server only; port 5001 avoids clashing with other local demos.
    app.run(port=5001, debug=True)