
Commit 2c3a7c8

feat: add examples/ demos + fix record_turn param
1 parent 9b795b4 commit 2c3a7c8

4 files changed

Lines changed: 461 additions & 0 deletions


examples/01_basic_workflow.py

Lines changed: 138 additions & 0 deletions
@@ -0,0 +1,138 @@
"""
SoloFlow usage example — 5-step research-report workflow

Demonstrates:
1. Creating a DAG workflow (with parallel steps)
2. Advancing it step by step manually
3. Querying status and progress
4. Cancelling a workflow

Run: cd hermes-plugin && python ../examples/01_basic_workflow.py
"""

import asyncio
import sys
import tempfile
from pathlib import Path

# Put hermes-plugin on the import path
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "hermes-plugin"))

from store.sqlite_store import SQLiteStore
from services.workflow_service import WorkflowService
from services.scheduler import Scheduler


async def main():
    # ── Setup ───────────────────────────────────────────────
    db_path = Path(tempfile.mkdtemp()) / "demo.db"
    store = SQLiteStore(db_path)
    store.initialize()

    ws = WorkflowService(store)
    ws.set_scheduler(Scheduler(store, ws))

    print("=" * 60)
    print("SoloFlow Demo — industry research report")
    print("=" * 60)

    # ── 1. Create the workflow ─────────────────────────────
    print("\n📌 Creating workflow...")

    wf = await ws.create_workflow(
        name="ai-industry-report",
        description="AI industry research report generation",
        steps=[
            {"id": "topic", "name": "Pick topic", "discipline": "deep",
             "prompt": "Analyze current AI industry trends and pick a research direction"},
            {"id": "search_a", "name": "Academic search", "discipline": "quick",
             "prompt": "Search recent AI papers and frontier research"},
            {"id": "search_b", "name": "Industry search", "discipline": "quick",
             "prompt": "Search AI industry reports and funding data"},
            {"id": "outline", "name": "Build outline", "discipline": "deep",
             "prompt": "Organize the search results into a report outline"},
            {"id": "write", "name": "Write report", "discipline": "deep",
             "prompt": "Write a 5000-word research report following the outline"},
            {"id": "review", "name": "Review & publish", "discipline": "quick",
             "prompt": "Proofread the report; check data and citation accuracy"},
        ],
        edges=[
            ("topic", "search_a"),    # searching starts only after the topic is set
            ("topic", "search_b"),    # the two searches can run in parallel
            ("search_a", "outline"),  # the outline waits for both searches
            ("search_b", "outline"),
            ("outline", "write"),     # writing follows the outline
            ("write", "review"),      # review follows writing
        ],
    )

    print(f"  Workflow ID: {wf['id'][:8]}...")
    print(f"  State: {wf['state']}")
    print(f"  Steps: {len(wf['steps'])}")
    print(f"  DAG layers: {len(wf.get('layers', {}))}")
    print("    Layer 0: pick topic (entry)")
    print("    Layer 1: academic search ∥ industry search (parallel)")
    print("    Layer 2: build outline")
    print("    Layer 3: write report")
    print("    Layer 4: review & publish")

    # ── 2. Start the workflow ──────────────────────────────
    print("\n🚀 Starting workflow...")
    started = await ws.start_workflow(wf["id"])
    print(f"  State: {started['state']}")

    # ── 3. Inspect the ready steps ─────────────────────────
    ready = await ws.get_ready_steps(wf["id"])
    print(f"  Currently runnable: {ready}")

    # ── 4. Advance step by step ────────────────────────────
    print("\n📝 Advancing step by step...")

    # Step 1: pick the topic
    step_id = ready[0]
    print(f"\n  [{step_id}] picking topic...")
    # In real use this is where the LLM would be called
    fake_result = "Topic: development trends of the LLM agent ecosystem in 2026"
    await ws.advance_step(wf["id"], step_id, result=fake_result)
    print(f"    → {fake_result}")

    # Parallel steps: the two searches
    ready = await ws.get_ready_steps(wf["id"])
    print(f"\n  Parallel steps ready: {ready}")

    for step_id in ready:
        print(f"  [{step_id}] searching...")
        fake_result = f"{step_id} done: found 15 relevant sources"
        await ws.advance_step(wf["id"], step_id, result=fake_result)
        print(f"    → {fake_result}")

    # Check progress
    status = await ws.get_workflow_status(wf["id"])
    print(f"\n  📊 Progress: {status['progress']['completed']}/{status['progress']['total']} "
          f"({status['progress']['progress_pct']}%)")

    # Remaining steps
    for step_name in ["outline", "write", "review"]:
        ready = await ws.get_ready_steps(wf["id"])
        if not ready:
            break
        step_id = ready[0]
        print(f"\n  [{step_id}] running...")
        fake_result = f"{step_name} done"
        await ws.advance_step(wf["id"], step_id, result=fake_result)
        print(f"    → {fake_result}")

    # ── 5. Final state ─────────────────────────────────────
    status = await ws.get_workflow_status(wf["id"])
    print("\n" + "=" * 60)
    print("🏁 Workflow finished!")
    print(f"  State: {status['state']}")
    print(f"  Progress: {status['progress']['completed']}/{status['progress']['total']}")
    print("=" * 60)

    store.close()
    print(f"\nDatabase saved at: {db_path}")


if __name__ == "__main__":
    asyncio.run(main())
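
The demo above drives each layer by hand with hard-coded step names. The same three calls it relies on — get_ready_steps, advance_step, and get_workflow_status — also compose into a generic drain loop that runs any workflow shape to completion; a minimal sketch, where produce_result is a hypothetical stand-in for whatever generates a step's result:

async def drain(ws, workflow_id, produce_result):
    """Advance a started workflow until no steps remain ready (sketch only)."""
    while True:
        ready = await ws.get_ready_steps(workflow_id)
        if not ready:
            break  # nothing runnable: the workflow is finished (or blocked)
        for step_id in ready:
            result = await produce_result(step_id)  # hypothetical result producer
            await ws.advance_step(workflow_id, step_id, result=result)
    return await ws.get_workflow_status(workflow_id)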
examples/02_scheduler_and_memory.py

Lines changed: 172 additions & 0 deletions

@@ -0,0 +1,172 @@
"""
SoloFlow usage example — automatic scheduling + timeout/retry + memory system

Demonstrates:
1. Automatic parallel scheduling by the Scheduler (no manual advance_step)
2. A custom executor (simulated LLM call)
3. Timeout and retry behaviour
4. The three-layer memory system

Run: cd hermes-plugin && python ../examples/02_scheduler_and_memory.py
"""

import asyncio
import json
import sys
import tempfile
import time
from pathlib import Path

sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "hermes-plugin"))

from store.sqlite_store import SQLiteStore
from services.workflow_service import WorkflowService
from services.scheduler import Scheduler
from memory.working_memory import WorkingMemory
from memory.episodic_memory import EpisodicMemory
from memory.semantic_memory import SemanticMemory


async def main():
    db_path = Path(tempfile.mkdtemp()) / "demo.db"
    store = SQLiteStore(db_path)
    store.initialize()

    ws = WorkflowService(store)
    scheduler = Scheduler(store, ws, config={
        "max_parallelism": 3,
        "default_timeout": 10,
        "base_backoff": 0.1,
    })

    print("=" * 60)
    print("SoloFlow Demo — automatic scheduling + memory system")
    print("=" * 60)

    # ── 1. Custom executor ──────────────────────────────────
    print("\n📌 Custom executor (simulated LLM call)")

    execution_log = []  # records execution order

    async def mock_llm_executor(step: dict) -> str:
        """Simulated LLM call — swap in a real API call for production use."""
        name = step.get("name", step["id"])
        discipline = step.get("discipline", "quick")
        duration = 0.05 if discipline == "quick" else 0.15  # deep steps are slower

        execution_log.append({
            "id": step["id"],
            "name": name,
            "start": time.time(),
        })
        print(f"  ⚙️ Running: {name} (discipline={discipline}, {duration}s)")
        await asyncio.sleep(duration)

        result = f"[{name}] analysis finished, conclusions generated"
        print(f"  ✅ Done: {name}")
        return result

    # ── 2. Create a workflow with fan-out and fan-in ────────
    print("\n📌 Creating workflow (parallel branches that converge)")

    wf = await ws.create_workflow(
        name="data-pipeline",
        description="Data processing pipeline",
        steps=[
            {"id": "fetch_api", "name": "Fetch API data", "discipline": "quick",
             "prompt": "Pull the latest data from 3 sources"},
            {"id": "fetch_db", "name": "Query database", "discipline": "quick",
             "prompt": "Query the internal database"},
            {"id": "clean", "name": "Clean data", "discipline": "quick",
             "prompt": "Deduplicate, fill missing values, normalize formats"},
            {"id": "analyze", "name": "Deep analysis", "discipline": "deep",
             "prompt": "Trend analysis + anomaly detection"},
            {"id": "visualize", "name": "Build charts", "discipline": "quick",
             "prompt": "Generate visualizations"},
            {"id": "report", "name": "Write report", "discipline": "deep",
             "prompt": "Summarize the analysis into a weekly report"},
        ],
        edges=[
            ("fetch_api", "clean"), ("fetch_db", "clean"),    # two sources fetched in parallel → converge into cleaning
            ("clean", "analyze"),                             # analysis follows cleaning
            ("analyze", "visualize"), ("analyze", "report"),  # after analysis, charts and report run in parallel
        ],
    )
    print(f"  Steps: {len(wf['steps'])}")
    print("  Shape: [fetch_api ∥ fetch_db] → clean → analyze → [visualize ∥ report]")

    # ── 3. Automatic scheduling ─────────────────────────────
    print("\n🚀 Running under the Scheduler...\n")
    t0 = time.time()
    await ws.start_workflow(wf["id"])
    result = await scheduler.execute_workflow(wf["id"], executor=mock_llm_executor)
    elapsed = time.time() - t0

    print(f"\n⏱️ Total time: {elapsed:.2f}s")
    print(f"  State: {result.get('state')}")

    # Verify the first two steps ran in parallel
    if len(execution_log) >= 2:
        t1 = execution_log[0]["start"]
        t2 = execution_log[1]["start"]
        parallel = abs(t2 - t1) < 0.1
        print(f"  Parallelism check: {'✅ first two steps ran in parallel' if parallel else '⚠️ ran serially'}")

    # ── 4. Three-layer memory system ────────────────────────
    print("\n" + "=" * 60)
    print("🧠 Memory system")
    print("=" * 60)

    # Working Memory — immediate context
    print("\n📝 Working Memory (LRU immediate context)")
    wm = WorkingMemory(max_size=5)
    for i in range(5):
        wm.put(f"var_{i}", {"value": f"result_{i}", "step": f"step_{i}"})
    print(f"  Size: {len(wm)}/5")
    print(f"  Lookup var_2: {wm.get('var_2')}")
    wm.put("var_5", {"value": "overflow"})  # overflow evicts var_0
    print(f"  After adding var_5, var_0 was evicted: {wm.get('var_0') is None}")

    # Episodic Memory — event stream
    print("\n📚 Episodic Memory (FTS5 event memory)")
    em = EpisodicMemory(store)

    await em.record(event_type="step_completed", data={"step": "fetch_api", "result": "200 OK"})
    await em.record(event_type="step_completed", data={"step": "analyze", "result": "found 3 anomalies"})
    await em.record(event_type="error", data={"step": "fetch_db", "msg": "connection timeout after 5s"})

    search_result = await em.search("timeout")
    print(f"  Search 'timeout': {len(search_result)} hits")
    for r in search_result:
        print(f"    - {r['event_type']}: {r['data']}")

    search_result2 = await em.search("completed analyze")
    print(f"  Search 'completed analyze': {len(search_result2)} hits")

    # Semantic Memory — pattern extraction
    print("\n🔮 Semantic Memory (pattern extraction)")
    sm = SemanticMemory(store)

    completed_wf = store.get_workflow(wf["id"], full=True)
    template = await sm.extract_and_store(completed_wf)
    print(f"  Extracted template: {template['name']}")
    print(f"  Step count: {template['step_count']}")
    print(f"  Pattern: {template['pattern']}")

    templates = await sm.get_templates()
    print(f"  Stored templates: {len(templates)}")

    # ── 5. Summary ──────────────────────────────────────────
    print("\n" + "=" * 60)
    print("✅ Demo finished!")
    print("  - DAG auto-scheduling: 6 steps completed in 4 waves (2 ∥ 1 ∥ 1 ∥ 2)")
    print("  - Working Memory: LRU immediate context, evicts automatically past max_size")
    print("  - Episodic Memory: FTS5 full-text search over the event stream")
    print("  - Semantic Memory: workflow structure templates extracted automatically")
    print("=" * 60)

    store.close()


if __name__ == "__main__":
    asyncio.run(main())
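
Since the executor contract used above is just an async callable that takes a step dict and returns a result string, swapping mock_llm_executor for a real model is mostly a matter of replacing its body. A minimal sketch assuming an OpenAI-compatible chat endpoint reachable over HTTP — the URL, environment variable, and model names are illustrative assumptions, not SoloFlow APIs:

import os
import httpx  # assumed HTTP client; any async client would do

# Hypothetical endpoint; point LLM_URL at a real OpenAI-compatible server.
LLM_URL = os.environ.get("LLM_URL", "http://localhost:8000/v1/chat/completions")

async def llm_executor(step: dict) -> str:
    # Route deep steps to a larger model; model names are placeholders.
    model = "large-model" if step.get("discipline") == "deep" else "small-model"
    async with httpx.AsyncClient(timeout=60) as client:
        resp = await client.post(LLM_URL, json={
            "model": model,
            "messages": [{"role": "user", "content": step["prompt"]}],
        })
        resp.raise_for_status()
        return resp.json()["choices"][0]["message"]["content"]

# Usage: result = await scheduler.execute_workflow(wf["id"], executor=llm_executor)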
