-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathchat_agent.py
More file actions
96 lines (81 loc) · 3.02 KB
/
chat_agent.py
File metadata and controls
96 lines (81 loc) · 3.02 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
"""Simple chat agent example using the ECS-based LLM Agent framework.
This example demonstrates dual-mode LLM model selection:
- Without LLM_API_KEY: Uses FakeModel for deterministic testing
- With LLM_API_KEY: Uses OpenAIModel with DashScope/Qwen
Also demonstrates:
- Creating a World with ReasoningSystem, MemorySystem, and ErrorHandlingSystem
- Creating an Agent Entity with LLMComponent and ConversationComponent
- Running the agent with a user message
- Printing the conversation history
"""
import asyncio
import os
from ecs_agent.components import ConversationComponent, LLMComponent
from ecs_agent.core import Runner, World
from ecs_agent.logging import configure_logging
from ecs_agent.providers import FakeModel, Model
from ecs_agent.providers.config import ApiFormat
from ecs_agent.providers.protocol import LLMModel
from ecs_agent.systems.error_handling import ErrorHandlingSystem
from ecs_agent.systems.memory import MemorySystem
from ecs_agent.systems.reasoning import ReasoningSystem
from ecs_agent.types import CompletionResult, Message
async def main() -> None:
    """Run a simple chat agent example.

    Builds a World, selects an LLM model (real or fake depending on
    ``LLM_API_KEY``), wires up one agent entity with an LLM and a seeded
    conversation, registers the systems, runs the world for a few ticks,
    and prints the resulting conversation history.

    Environment variables:
        LLM_API_KEY:  If set, a real ``Model`` is used; otherwise ``FakeModel``.
        LLM_BASE_URL: OpenAI-compatible endpoint (defaults to DashScope).
        LLM_MODEL:    Model identifier (defaults to ``qwen3.5-flash``).
    """
    configure_logging(json_output=False)

    # The World owns all entities, components, and registered systems.
    world = World()

    # --- Select and create the LLM model ---
    api_key: str = os.environ.get("LLM_API_KEY", "")
    base_url: str = os.environ.get(
        "LLM_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1"
    )
    # Keep the model *identifier* under a distinct name so it is not
    # clobbered below. (The original bound `model: str` and then
    # re-annotated/reassigned the same name as `model: LLMModel`, which is
    # a type error and made the code depend on statement order.)
    model_name: str = os.environ.get("LLM_MODEL", "qwen3.5-flash")

    model: LLMModel
    if api_key:
        print(f"Using model: {model_name}")
        model = Model(
            model_name,
            base_url=base_url,
            api_key=api_key,
            api_format=ApiFormat.OPENAI_CHAT_COMPLETIONS,
        )
    else:
        # No credentials: fall back to a deterministic canned response so the
        # example still runs end-to-end.
        print("No LLM_API_KEY set. Using FakeModel for demonstration.")
        model = FakeModel(
            responses=[
                CompletionResult(
                    message=Message(
                        role="assistant",
                        content="Hello! I'm doing great, thank you for asking! How can I help you today?",
                    )
                )
            ]
        )

    # --- Create the agent entity ---
    agent_id = world.create_entity()
    world.add_component(
        agent_id,
        LLMComponent(
            model=model,
            system_prompt="You are a helpful assistant.",
        ),
    )
    # Seed the conversation with a single user turn.
    world.add_component(
        agent_id,
        ConversationComponent(
            messages=[Message(role="user", content="Hello, how are you?")]
        ),
    )

    # --- Register systems (lower priority runs first) ---
    world.register_system(ReasoningSystem(priority=0), priority=0)
    world.register_system(MemorySystem(), priority=10)
    world.register_system(ErrorHandlingSystem(priority=99), priority=99)

    # --- Run the world for a bounded number of ticks ---
    runner = Runner()
    await runner.run(world, max_ticks=3)

    # --- Print the resulting conversation history ---
    conv = world.get_component(agent_id, ConversationComponent)
    if conv is not None:
        print("Conversation:")
        for msg in conv.messages:
            print(f"  {msg.role}: {msg.content}")
    else:
        print("No conversation found")
# Script entry point: run the example coroutine on a fresh asyncio event loop.
if __name__ == "__main__":
    asyncio.run(main())