Skip to content

Commit 3bf788d

Browse files
Dwij1704 and bboynton97
authored
Add IBM watsonx.ai instrumentation (#941)
* Add IBM watsonx.ai instrumentation * Add IBMMachineLearningInstrumentor and extend watsonx.ai instrumentation with new methods * Remove deprecated IBM Machine Learning instrumentation and update IBMWatsonXInstrumentor to reflect changes in library dependencies. * Refactor IBM WatsonX AI instrumentation. * Fix * Added Examples --------- Co-authored-by: Braelyn Boynton <bboynton97@gmail.com>
1 parent 6398844 commit 3bf788d

11 files changed

Lines changed: 1607 additions & 1 deletion

File tree

agentops/instrumentation/__init__.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -77,11 +77,16 @@ def get_instance(self) -> BaseInstrumentor:
7777
class_name="GoogleGenerativeAIInstrumentor",
7878
provider_import_name="google.genai",
7979
),
80+
InstrumentorLoader(
81+
module_name="agentops.instrumentation.ibm_watsonx_ai",
82+
class_name="IBMWatsonXInstrumentor",
83+
provider_import_name="ibm_watsonx_ai",
84+
),
8085
InstrumentorLoader(
8186
module_name="agentops.instrumentation.ag2",
8287
class_name="AG2Instrumentor",
8388
provider_import_name="autogen",
84-
),
89+
)
8590
]
8691

8792

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
"""IBM WatsonX AI instrumentation for AgentOps.
2+
3+
This package provides instrumentation for IBM's WatsonX AI foundation models,
4+
capturing telemetry for model interactions including completions, chat, and streaming responses.
5+
"""
6+
7+
import logging
8+
from typing import Collection
9+
10+
logger = logging.getLogger(__name__)
11+
12+
def get_version() -> str:
    """Return the installed version of the ``ibm-watsonx-ai`` package.

    Falls back to ``"1.3.11"`` — a known supported version — when the package
    metadata cannot be located, so importing this module never raises.
    """
    try:
        from importlib.metadata import version

        sdk_version = version("ibm-watsonx-ai")
    except ImportError:
        # PackageNotFoundError subclasses ImportError, so both a missing
        # importlib.metadata and a missing SDK distribution land here.
        logger.debug("Could not find IBM WatsonX AI SDK version")
        sdk_version = "1.3.11"  # Default to known supported version if not found
    return sdk_version
20+
21+
# Library identification for instrumentation.
LIBRARY_NAME = "ibm_watsonx_ai"
# Resolved once at import time; falls back to a pinned default when the SDK
# metadata cannot be found (see get_version above).
LIBRARY_VERSION = get_version()

# Import after defining constants to avoid circular imports
from agentops.instrumentation.ibm_watsonx_ai.instrumentor import IBMWatsonXInstrumentor  # noqa: E402

# Public API of this package.
__all__ = [
    "LIBRARY_NAME",
    "LIBRARY_VERSION",
    "IBMWatsonXInstrumentor",
]
Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,27 @@
1+
"""Attribute extraction utilities for IBM watsonx.ai instrumentation."""
2+
3+
from agentops.instrumentation.ibm_watsonx_ai.attributes.attributes import (
4+
get_generate_attributes,
5+
get_chat_attributes,
6+
get_tokenize_attributes,
7+
get_model_details_attributes
8+
)
9+
from agentops.instrumentation.ibm_watsonx_ai.attributes.common import (
10+
extract_params_attributes,
11+
convert_params_to_dict,
12+
extract_prompt_from_args,
13+
extract_messages_from_args,
14+
extract_params_from_args
15+
)
16+
17+
__all__ = [
18+
"get_generate_attributes",
19+
"get_chat_attributes",
20+
"get_tokenize_attributes",
21+
"get_model_details_attributes",
22+
"extract_params_attributes",
23+
"convert_params_to_dict",
24+
"extract_prompt_from_args",
25+
"extract_messages_from_args",
26+
"extract_params_from_args"
27+
]
Lines changed: 244 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,244 @@
1+
"""Attributes for IBM watsonx.ai model instrumentation.
2+
3+
This module provides attribute extraction functions for IBM watsonx.ai model operations.
4+
"""
5+
from typing import Any, Dict, Optional, Tuple
6+
from agentops.instrumentation.common.attributes import AttributeMap
7+
from agentops.semconv import SpanAttributes, MessageAttributes
8+
from agentops.instrumentation.ibm_watsonx_ai.attributes.common import (
9+
extract_params_attributes,
10+
convert_params_to_dict,
11+
extract_prompt_from_args,
12+
extract_messages_from_args,
13+
extract_params_from_args
14+
)
15+
from ibm_watsonx_ai.foundation_models.schema import TextGenParameters, TextChatParameters
16+
17+
def get_generate_attributes(args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None) -> AttributeMap:
    """Extract prompt, parameter, completion, and token-usage attributes
    from ``generate`` method calls.
    """
    attrs = {}

    # Request side: a single user prompt at message index 0.
    prompt_text = extract_prompt_from_args(args, kwargs)
    if prompt_text:
        attrs.update({
            MessageAttributes.PROMPT_ROLE.format(i=0): "user",
            MessageAttributes.PROMPT_CONTENT.format(i=0): prompt_text,
            MessageAttributes.PROMPT_TYPE.format(i=0): "text",
        })

    # Generation parameters, normalized to a plain dict before extraction.
    raw_params = extract_params_from_args(args, kwargs)
    if raw_params:
        normalized = convert_params_to_dict(raw_params)
        if normalized:
            attrs.update(extract_params_attributes(normalized))

    # Response side: only dict-shaped payloads carry extractable data.
    if return_value and isinstance(return_value, dict):
        if 'model_id' in return_value:
            attrs[SpanAttributes.LLM_REQUEST_MODEL] = return_value['model_id']

        if 'results' in return_value:
            for idx, result in enumerate(return_value['results']):
                # Completion text, one entry per result.
                if 'generated_text' in result:
                    attrs[MessageAttributes.COMPLETION_CONTENT.format(i=idx)] = result['generated_text']
                    attrs[MessageAttributes.COMPLETION_ROLE.format(i=idx)] = "assistant"
                    attrs[MessageAttributes.COMPLETION_TYPE.format(i=idx)] = "text"

                # Token usage; total is derived only when both sides are present.
                if 'input_token_count' in result:
                    attrs[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = result['input_token_count']
                if 'generated_token_count' in result:
                    attrs[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = result['generated_token_count']
                if 'input_token_count' in result and 'generated_token_count' in result:
                    attrs[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = result['input_token_count'] + result['generated_token_count']

                if 'stop_reason' in result:
                    attrs[SpanAttributes.LLM_RESPONSE_STOP_REASON] = result['stop_reason']

    return attrs
63+
64+
def get_tokenize_attributes(args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None) -> AttributeMap:
    """Extract prompt and token-count attributes from ``tokenize`` method calls."""
    attrs = {}

    # Record the text being tokenized as the (single) user prompt.
    text = extract_prompt_from_args(args, kwargs)
    if text:
        attrs.update({
            MessageAttributes.PROMPT_ROLE.format(i=0): "user",
            MessageAttributes.PROMPT_CONTENT.format(i=0): text,
            MessageAttributes.PROMPT_TYPE.format(i=0): "text",
        })

    # Response side: only dict-shaped payloads carry extractable data.
    if return_value and isinstance(return_value, dict):
        if "model_id" in return_value:
            attrs[SpanAttributes.LLM_REQUEST_MODEL] = return_value["model_id"]
        if "result" in return_value:
            tokenize_result = return_value["result"]
            attrs["ibm.watsonx.tokenize.result"] = str(tokenize_result)
            # token_count of the input prompt, when the service reports it.
            if "token_count" in tokenize_result:
                attrs[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = tokenize_result["token_count"]

    return attrs
85+
86+
def get_model_details_attributes(args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None) -> AttributeMap:
    """Extract model-metadata attributes from ``get_details`` method calls.

    Only ``return_value`` is inspected; ``args``/``kwargs`` are accepted for
    signature compatibility with the other extractors.
    """
    if not isinstance(return_value, dict):
        # Nothing extractable from a non-dict payload.
        return {}

    attrs = {}

    # Flat descriptive fields copied through under the ibm.watsonx.model.* prefix.
    basic_fields = frozenset((
        "model_id", "label", "provider", "source", "short_description",
        "long_description", "number_params", "input_tier", "output_tier",
    ))
    for field, value in return_value.items():
        if field in basic_fields:
            attrs[f"ibm.watsonx.model.{field}"] = value

    # Supported model functions, flattened to a string of their ids.
    if "functions" in return_value:
        function_ids = [entry["id"] for entry in return_value["functions"]]
        attrs["ibm.watsonx.model.functions"] = str(function_ids)

    # Supported tasks, keeping only id/ratings/tags from each entry.
    if "tasks" in return_value:
        task_summaries = []
        for task in return_value["tasks"]:
            task_summaries.append({name: val for name, val in task.items() if name in ("id", "ratings", "tags")})
        attrs["ibm.watsonx.model.tasks"] = str(task_summaries)

    # Hard model limits (sequence length, output tokens, training records).
    if "model_limits" in return_value:
        for name, val in return_value["model_limits"].items():
            if name in ("max_sequence_length", "max_output_tokens", "training_data_max_records"):
                attrs[f"ibm.watsonx.model.{name}"] = val

    # Per-service-tier limits, namespaced by tier name.
    if "limits" in return_value:
        for tier_name, tier_caps in return_value["limits"].items():
            for name, val in tier_caps.items():
                if name in ("call_time", "max_output_tokens"):
                    attrs[f"ibm.watsonx.model.limits.{tier_name}.{name}"] = val

    # Lifecycle stages: one attribute per stage id, valued by its start date.
    if "lifecycle" in return_value:
        for stage in return_value["lifecycle"]:
            if "id" in stage and "start_date" in stage:
                attrs[f"ibm.watsonx.model.lifecycle.{stage['id']}"] = stage["start_date"]

    # Training parameters; nested dicts are stringified for flat attribute storage.
    if "training_parameters" in return_value:
        for name, val in return_value["training_parameters"].items():
            attrs[f"ibm.watsonx.model.training.{name}"] = str(val) if isinstance(val, dict) else val

    return attrs
145+
146+
def get_chat_attributes(args: Optional[Tuple] = None, kwargs: Optional[Dict] = None, return_value: Optional[Any] = None) -> AttributeMap:
    """Extract attributes from chat method calls.

    Captures the request messages (text and image parts), generation
    parameters, and — when ``return_value`` is a dict — the completion
    choices, token usage, and response metadata.
    """
    attributes = {}

    # Extract messages using helper function
    messages = extract_messages_from_args(args, kwargs)
    if messages:
        # Process each message in the conversation
        for i, message in enumerate(messages):
            # Skip anything that is not a dict-shaped message.
            if not isinstance(message, dict):
                continue

            # Extract role and content
            role = message.get('role', '')
            content = message.get('content', [])

            # Handle content which can be a list of different types (text, image_url)
            if isinstance(content, list):
                # Combine all text content
                text_content = []
                image_urls = []

                for content_item in content:
                    if isinstance(content_item, dict):
                        if content_item.get('type') == 'text':
                            text_content.append(content_item.get('text', ''))
                        elif content_item.get('type') == 'image_url':
                            image_url = content_item.get('image_url', {})
                            if isinstance(image_url, dict) and 'url' in image_url:
                                url = image_url['url']
                                # Only store URLs that start with http, otherwise use placeholder
                                # (avoids recording inline base64 image data as an attribute).
                                if url and isinstance(url, str) and url.startswith(('http://', 'https://')):
                                    image_urls.append(url)
                                else:
                                    image_urls.append("[IMAGE_PLACEHOLDER]")

                # Set text content if any; parts are joined into one prompt string.
                if text_content:
                    attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = ' '.join(text_content)
                    attributes[MessageAttributes.PROMPT_TYPE.format(i=i)] = "text"
                    attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role

                # Set image URLs if any
                if image_urls:
                    attributes[f"ibm.watsonx.chat.message.{i}.images"] = str(image_urls)
            else:
                # Handle string content
                attributes[MessageAttributes.PROMPT_CONTENT.format(i=i)] = str(content)
                attributes[MessageAttributes.PROMPT_TYPE.format(i=i)] = "text"
                attributes[MessageAttributes.PROMPT_ROLE.format(i=i)] = role

    # Extract parameters using helper functions
    params = extract_params_from_args(args, kwargs)
    if params:
        params_dict = convert_params_to_dict(params)
        if params_dict:
            attributes.update(extract_params_attributes(params_dict))

    # Extract response information
    if return_value and isinstance(return_value, dict):
        # Extract model information ('model_id' takes precedence over 'model')
        if 'model_id' in return_value:
            attributes[SpanAttributes.LLM_REQUEST_MODEL] = return_value['model_id']
        elif 'model' in return_value:
            attributes[SpanAttributes.LLM_REQUEST_MODEL] = return_value['model']

        # Extract completion from choices (OpenAI-style response shape)
        if 'choices' in return_value:
            for idx, choice in enumerate(return_value['choices']):
                if isinstance(choice, dict) and 'message' in choice:
                    message = choice['message']
                    if isinstance(message, dict):
                        if 'content' in message:
                            attributes[MessageAttributes.COMPLETION_CONTENT.format(i=idx)] = message['content']
                            attributes[MessageAttributes.COMPLETION_ROLE.format(i=idx)] = message.get('role', 'assistant')
                            attributes[MessageAttributes.COMPLETION_TYPE.format(i=idx)] = "text"
                    # NOTE(review): last choice wins for the stop reason when
                    # multiple choices are present — confirm this is intended.
                    if 'finish_reason' in choice:
                        attributes[SpanAttributes.LLM_RESPONSE_STOP_REASON] = choice['finish_reason']

        # Extract token usage
        if 'usage' in return_value:
            usage = return_value['usage']
            if isinstance(usage, dict):
                if 'prompt_tokens' in usage:
                    attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS] = usage['prompt_tokens']
                if 'completion_tokens' in usage:
                    attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS] = usage['completion_tokens']
                if 'total_tokens' in usage:
                    attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS] = usage['total_tokens']

        # Extract additional metadata
        if 'id' in return_value:
            attributes['ibm.watsonx.chat.id'] = return_value['id']
        if 'model_version' in return_value:
            attributes['ibm.watsonx.model.version'] = return_value['model_version']
        if 'created_at' in return_value:
            attributes['ibm.watsonx.chat.created_at'] = return_value['created_at']

    return attributes

0 commit comments

Comments
 (0)