-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathlitellm_agent.py
More file actions
169 lines (146 loc) · 5.29 KB
/
litellm_agent.py
File metadata and controls
169 lines (146 loc) · 5.29 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
"""Model(...) + LiteLLM agent example supporting 100+ LLM providers.
This example demonstrates using the unified Model(...) constructor with
``model_type="litellm"`` to support a wide range of LLM providers through the
LiteLLM integration layer. LiteLLM normalizes API calls to OpenAI format.
Usage:
1. Install litellm: pip install litellm
2. Set environment variables:
- LLM_API_KEY: Your API key
- LLM_MODEL: Model in format "provider/model" (e.g., "openai/gpt-4" or "anthropic/claude-3-opus-20240229")
3. Run: uv run python examples/litellm_agent.py
If litellm is not installed or API key is missing, the example gracefully falls back to FakeModel.
"""
from __future__ import annotations
import asyncio
import os
import sys
from typing import Any
from ecs_agent.components import (
ConversationComponent,
LLMComponent,
ToolRegistryComponent,
)
from ecs_agent.core import Runner, World
from ecs_agent.providers import FakeModel, Model, ModelType
from ecs_agent.providers.protocol import LLMModel
from ecs_agent.types import CompletionResult, Message, ToolSchema
from ecs_agent.systems.error_handling import ErrorHandlingSystem
from ecs_agent.systems.memory import MemorySystem
from ecs_agent.systems.reasoning import ReasoningSystem
from ecs_agent.systems.tool_execution import ToolExecutionSystem
# Probe whether the LiteLLM integration is available.
# AttributeError is caught as well as ImportError because some litellm
# versions raise it at import time on incompatible dependency versions —
# either way we fall back to FakeModel in main().
try:
    import litellm  # noqa: F401

    # Import succeeded: the real LiteLLM backend can be used.
    HAS_LITELLM = True
except (ImportError, AttributeError):
    # litellm missing or broken; main() prints install instructions and exits.
    HAS_LITELLM = False
async def search_database(query: str) -> str:
    """Simulate searching a database.

    Args:
        query: Entity kind to look up; matched case-insensitively against
            the known categories (users, products, orders).

    Returns:
        A canned human-readable result line, or a "no results" message
        when the query is not one of the known categories.
    """
    canned_results = {
        "users": "Found 42 users matching the search",
        "products": "Found 156 products in inventory",
        "orders": "Found 89 recent orders",
    }
    key = query.lower()
    if key in canned_results:
        return canned_results[key]
    return f"No results for '{query}'"
async def main() -> None:
    """Run LiteLLM-backed Model(...) agent example.

    Builds a one-entity World with LLM, conversation, and tool-registry
    components, registers the standard system pipeline, and runs it for a
    bounded number of ticks, printing the resulting conversation.

    Falls back to FakeModel when LLM_API_KEY / LLM_MODEL are not set, and
    exits cleanly when litellm is not installed.
    """
    # Bail out early (exit code 0 — this is a demo, not an error) when the
    # optional litellm dependency is unavailable.
    if not HAS_LITELLM:
        print("litellm is not installed.")
        print("Install with: pip install litellm")
        sys.exit(0)

    # Load config from environment.
    api_key = os.environ.get("LLM_API_KEY", "")
    # FIX: keep the provider/model *string* in its own variable instead of
    # reusing ``model`` for both the name and the LLMModel instance, which
    # contradicted the ``model: LLMModel`` annotation below.
    model_name = os.environ.get("LLM_MODEL", "")

    # Decide which model to use.
    model: LLMModel
    if api_key and model_name:
        print(f"Using Model(..., model_type=\"litellm\"): {model_name}")
        model = Model(
            model_name,
            base_url=os.environ.get("LLM_BASE_URL", "https://api.openai.com/v1"),
            api_key=api_key,
            model_type=ModelType.LITELLM,
        )
    else:
        # No credentials: use a deterministic fake so the example still runs.
        print("No API key or model specified. Using FakeModel instead.")
        model = FakeModel(
            responses=[
                CompletionResult(
                    message=Message(
                        role="assistant",
                        content="I searched the database and found relevant results for you.",
                    )
                )
            ]
        )

    # Create the World and a single agent entity.
    world = World()
    agent_id = world.create_entity()

    # Attach the model to the agent.
    world.add_component(
        agent_id,
        LLMComponent(model=model),
    )
    # Seed the conversation with the user's request.
    world.add_component(
        agent_id,
        ConversationComponent(
            messages=[
                Message(
                    role="user",
                    content="Search for users in the database and tell me what you find.",
                )
            ]
        ),
    )

    # Register the search tool: schema (for the LLM) plus handler (for execution).
    world.add_component(
        agent_id,
        ToolRegistryComponent(
            tools={
                "search_database": ToolSchema(
                    name="search_database",
                    description="Search the database for entities",
                    parameters={
                        "type": "object",
                        "properties": {
                            "query": {
                                "type": "string",
                                "description": "What to search for (users, products, or orders)",
                            }
                        },
                        "required": ["query"],
                    },
                ),
            },
            handlers={"search_database": search_database},
        ),
    )

    # Register systems in pipeline order: reason, execute tools, persist
    # memory, then handle errors last.
    world.register_system(ReasoningSystem(priority=0), priority=0)
    world.register_system(ToolExecutionSystem(priority=5), priority=5)
    world.register_system(MemorySystem(), priority=10)
    world.register_system(ErrorHandlingSystem(priority=99), priority=99)

    # Run the agent for a bounded number of ticks.
    print("Running agent...\n")
    runner = Runner()
    await runner.run(world, max_ticks=5)

    # Pretty-print the final conversation transcript, one role per line style.
    conv = world.get_component(agent_id, ConversationComponent)
    if conv:
        print("\n" + "=" * 60)
        print("CONVERSATION")
        print("=" * 60)
        for msg in conv.messages:
            if msg.role == "user":
                print(f"\n[User] {msg.content}")
            elif msg.role == "assistant":
                if msg.tool_calls:
                    for tc in msg.tool_calls:
                        print(f"\n[Tool Call] {tc.name}({tc.arguments})")
                else:
                    print(f"\n[Assistant] {msg.content}")
            elif msg.role == "tool":
                print(f"[Tool Result] {msg.content}")
# Script entry point: drive the async example to completion on a fresh
# event loop.
if __name__ == "__main__":
    asyncio.run(main())