Skip to content

Commit 16bee7c

Browse files
authored
Agno enhancement (#1112)
* refactor agent attributes * Update Agno instrumentation * ruff checks :)
1 parent 033d0cc commit 16bee7c

File tree

11 files changed

+766
-497
lines changed

11 files changed

+766
-497
lines changed

agentops/instrumentation/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -110,7 +110,7 @@ class InstrumentorConfig(TypedDict):
110110
"agno": {
111111
"module_name": "agentops.instrumentation.agentic.agno",
112112
"class_name": "AgnoInstrumentor",
113-
"min_version": "0.1.0",
113+
"min_version": "1.5.8",
114114
},
115115
"smolagents": {
116116
"module_name": "agentops.instrumentation.agentic.smolagents",

agentops/instrumentation/agentic/agno/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
logger = logging.getLogger(__name__)
99

1010
# Library information
11-
_library_info = LibraryInfo(name="agno")
11+
_library_info = LibraryInfo(name="agno", default_version="1.5.8")
1212
LIBRARY_NAME = _library_info.name
1313
LIBRARY_VERSION = _library_info.version
1414

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,20 @@
1-
"""Agno Agent attributes package for span instrumentation."""
1+
"""Agno instrumentation attribute handlers."""
22

33
from .agent import get_agent_run_attributes
4+
from .metrics import get_metrics_attributes
45
from .team import get_team_run_attributes
56
from .tool import get_tool_execution_attributes
6-
from .workflow import get_workflow_run_attributes, get_workflow_session_attributes
7+
from .workflow import get_workflow_run_attributes, get_workflow_session_attributes, get_workflow_cache_attributes
8+
from .storage import get_storage_read_attributes, get_storage_write_attributes
79

810
__all__ = [
911
"get_agent_run_attributes",
12+
"get_metrics_attributes",
1013
"get_team_run_attributes",
1114
"get_tool_execution_attributes",
1215
"get_workflow_run_attributes",
1316
"get_workflow_session_attributes",
17+
"get_workflow_cache_attributes",
18+
"get_storage_read_attributes",
19+
"get_storage_write_attributes",
1420
]

agentops/instrumentation/agentic/agno/attributes/agent.py

Lines changed: 93 additions & 133 deletions
Large diffs are not rendered by default.

agentops/instrumentation/agentic/agno/attributes/metrics.py

Lines changed: 5 additions & 22 deletions
Original file line numberDiff line numberDiff line change
@@ -28,7 +28,7 @@ def get_metrics_attributes(
2828
attributes[SpanAttributes.LLM_SYSTEM] = "agno"
2929
attributes[SpanAttributes.AGENTOPS_ENTITY_NAME] = "LLM"
3030

31-
# Initialize usage tracking variables (but don't set attributes yet)
31+
# Initialize usage tracking variables
3232
usage_data = {}
3333

3434
# Initialize counters for indexed messages
@@ -66,7 +66,6 @@ def get_metrics_attributes(
6666
model_class = model.__class__.__name__
6767
attributes["agno.model.class"] = model_class
6868

69-
# === EXTRACT CONVERSATION STRUCTURE ===
7069
if hasattr(run_messages, "messages") and run_messages.messages:
7170
messages = run_messages.messages
7271

@@ -82,12 +81,10 @@ def get_metrics_attributes(
8281
for i, msg in enumerate(messages):
8382
# Extract message content for prompts/completions
8483
if hasattr(msg, "role") and hasattr(msg, "content"):
85-
# Only set content if it's not None/empty
84+
# Only process messages with actual content
8685
if msg.content is not None and str(msg.content).strip() != "" and str(msg.content) != "None":
8786
content = str(msg.content)
88-
# Truncate very long content to avoid oversized attributes
89-
if len(content) > 1000:
90-
content = content[:997] + "..."
87+
# No truncation - keep full content for observability
9188

9289
if msg.role == "user":
9390
attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "user"
@@ -101,17 +98,6 @@ def get_metrics_attributes(
10198
attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "system"
10299
attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.content"] = content
103100
prompt_count += 1
104-
else:
105-
# For messages with None content, still set the role but skip content
106-
if msg.role == "user":
107-
attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "user"
108-
prompt_count += 1
109-
elif msg.role == "assistant":
110-
attributes[f"{SpanAttributes.LLM_COMPLETIONS}.{completion_count}.role"] = "assistant"
111-
completion_count += 1
112-
elif msg.role == "system":
113-
attributes[f"{SpanAttributes.LLM_PROMPTS}.{prompt_count}.role"] = "system"
114-
prompt_count += 1
115101

116102
# Extract token metrics from message
117103
if hasattr(msg, "metrics") and msg.metrics:
@@ -124,15 +110,15 @@ def get_metrics_attributes(
124110
total_completion_tokens += metrics.completion_tokens
125111
if hasattr(metrics, "total_tokens") and metrics.total_tokens > 0:
126112
total_tokens += metrics.total_tokens
127-
# For messages that only have output_tokens (like Anthropic)
113+
# For messages that only have output_tokens
128114
if hasattr(metrics, "output_tokens") and metrics.output_tokens > 0:
129115
total_output_tokens += metrics.output_tokens
130116
if hasattr(metrics, "input_tokens") and metrics.input_tokens > 0:
131117
total_input_tokens += metrics.input_tokens
132118
if hasattr(metrics, "time") and metrics.time:
133119
total_time += metrics.time
134120

135-
# === TOKEN METRICS FROM AGENT SESSION METRICS ===
121+
# Token metrics from agent session metrics
136122
if hasattr(agent, "session_metrics") and agent.session_metrics:
137123
session_metrics = agent.session_metrics
138124

@@ -191,7 +177,6 @@ def get_metrics_attributes(
191177
if hasattr(session_metrics, "reasoning_tokens") and session_metrics.reasoning_tokens > 0:
192178
usage_data["reasoning_tokens"] = session_metrics.reasoning_tokens
193179

194-
# === FALLBACK TO MESSAGE AGGREGATION IF SESSION METRICS ARE EMPTY ===
195180
# If we don't have token data from session metrics, try message aggregation
196181
if "total_tokens" not in usage_data:
197182
# Set aggregated token usage from messages
@@ -207,8 +192,6 @@ def get_metrics_attributes(
207192
user_msg = run_messages.user_message
208193
if hasattr(user_msg, "content"):
209194
content = str(user_msg.content)
210-
if len(content) > 1000:
211-
content = content[:997] + "..."
212195
attributes["agno.metrics.user_input"] = content
213196

214197
# Set individual LLM usage attributes only for values we actually have
Lines changed: 158 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,158 @@
1+
"""Storage operation attribute handlers for Agno workflow instrumentation."""
2+
3+
import json
4+
from typing import Any, Dict, Optional, Tuple
5+
from opentelemetry.util.types import AttributeValue
6+
7+
from agentops.semconv.span_attributes import SpanAttributes
8+
from agentops.semconv.span_kinds import SpanKind as AgentOpsSpanKind
9+
from agentops.instrumentation.common.attributes import get_common_attributes
10+
11+
12+
def get_storage_read_attributes(
    args: Tuple[Any, ...] = (),
    kwargs: Optional[Dict[str, Any]] = None,
    return_value: Optional[Any] = None,
) -> Dict[str, AttributeValue]:
    """Extract attributes from storage read operations.

    Args:
        args: Positional arguments passed to read_from_storage
        kwargs: Keyword arguments passed to read_from_storage
        return_value: Return value from read_from_storage (the cached data or None)

    Returns:
        Dictionary of OpenTelemetry attributes for storage read operations
    """
    attributes = get_common_attributes()
    kwargs = kwargs or {}

    # Mark this as a storage operation within workflow context
    attributes["storage.operation"] = "read"
    attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.WORKFLOW

    if args:  # the bound workflow instance is the first positional argument
        workflow = args[0]

        # Identify the workflow/session this read belongs to
        if hasattr(workflow, "workflow_id") and workflow.workflow_id:
            attributes["storage.workflow_id"] = str(workflow.workflow_id)
        if hasattr(workflow, "session_id") and workflow.session_id:
            attributes["storage.session_id"] = str(workflow.session_id)

        # Record which storage backend class is in use
        if hasattr(workflow, "storage") and workflow.storage:
            attributes["storage.backend"] = type(workflow.storage).__name__

        # Snapshot the in-memory session-state cache for context
        if hasattr(workflow, "session_state") and isinstance(workflow.session_state, dict):
            cache_keys = list(workflow.session_state.keys())
            attributes["storage.cache_size"] = len(cache_keys)
            if cache_keys:
                # Stringify keys defensively: dict keys need not be JSON-serializable
                attributes["storage.cache_keys"] = json.dumps([str(k) for k in cache_keys])

    # A non-None return value means the read found cached data (cache hit)
    if return_value is not None:
        attributes["storage.cache_hit"] = True
        attributes["storage.result"] = "hit"
        attributes["storage.data_type"] = type(return_value).__name__

        # Describe the shape/size of the cached payload
        if isinstance(return_value, dict):
            attributes["storage.data_keys"] = json.dumps([str(k) for k in return_value.keys()])
            attributes["storage.data_size"] = len(return_value)
        elif isinstance(return_value, (list, tuple)):
            attributes["storage.data_size"] = len(return_value)
        elif isinstance(return_value, str):
            attributes["storage.data_size"] = len(return_value)
            # Full string content is kept intentionally (no truncation) for observability
            attributes["storage.data_preview"] = return_value
    else:
        # Cache miss
        attributes["storage.cache_hit"] = False
        attributes["storage.result"] = "miss"

    return attributes
82+
83+
84+
def get_storage_write_attributes(
    args: Tuple[Any, ...] = (),
    kwargs: Optional[Dict[str, Any]] = None,
    return_value: Optional[Any] = None,
) -> Dict[str, AttributeValue]:
    """Extract attributes from storage write operations.

    Args:
        args: Positional arguments passed to write_to_storage
        kwargs: Keyword arguments passed to write_to_storage
        return_value: Return value from write_to_storage (usually None or success indicator)

    Returns:
        Dictionary of OpenTelemetry attributes for storage write operations
    """
    attributes = get_common_attributes()
    kwargs = kwargs or {}

    # Mark this as a storage operation within workflow context
    attributes["storage.operation"] = "write"
    attributes[SpanAttributes.AGENTOPS_SPAN_KIND] = AgentOpsSpanKind.WORKFLOW

    if args:  # the bound workflow instance is the first positional argument
        workflow = args[0]

        # Identify the workflow/session this write belongs to
        if hasattr(workflow, "workflow_id") and workflow.workflow_id:
            attributes["storage.workflow_id"] = str(workflow.workflow_id)
        if hasattr(workflow, "session_id") and workflow.session_id:
            attributes["storage.session_id"] = str(workflow.session_id)

        # Record which storage backend class is in use
        if hasattr(workflow, "storage") and workflow.storage:
            attributes["storage.backend"] = type(workflow.storage).__name__

        # Snapshot the session-state cache after the write
        if hasattr(workflow, "session_state") and isinstance(workflow.session_state, dict):
            cache_keys = list(workflow.session_state.keys())
            attributes["storage.cache_size"] = len(cache_keys)
            if cache_keys:
                # Stringify keys defensively: dict keys need not be JSON-serializable
                attributes["storage.cache_keys"] = json.dumps([str(k) for k in cache_keys])

                # Heuristic: dicts preserve insertion order, so treat the last key
                # as the one this write most likely touched. A precise answer would
                # require diffing the state before/after the call.
                last_key = cache_keys[-1]
                attributes["storage.written_key"] = str(last_key)

                value = workflow.session_state.get(last_key)
                if value is not None:
                    value_type = type(value).__name__
                    attributes["storage.written_value_type"] = value_type

                    if isinstance(value, str):
                        # Truncate long strings for the preview, but record the true size
                        preview = value if len(value) <= 100 else value[:100] + "..."
                        attributes["storage.written_value_preview"] = preview
                        attributes["storage.written_value_size"] = len(value)
                    elif isinstance(value, (dict, list)):
                        attributes["storage.written_value_size"] = len(value)
                        attributes["storage.written_value_preview"] = f"{value_type} with {len(value)} items"

    # Most storage backends return None on success, so every completed call is
    # reported as a success; a real failure would have raised before this point.
    # (The original code set True on both branches of a return_value check.)
    attributes["storage.write_success"] = True

    return attributes

0 commit comments

Comments
 (0)