Skip to content

Commit 000720c

Browse files
Dwij1704 and dot-agi authored
Add LLM_CONTENT_COMPLETION_CHUNK attribute to SpanAttributes class (#885)
* Add LLM_CONTENT_COMPLETION_CHUNK attribute to the SpanAttributes class.

* Refactor SpanAttributes usage in the chat wrappers to use BaseSpanAttributes. Update attribute references for LLM_COMPLETIONS, LLM_CONTENT_COMPLETION_CHUNK, and LLM_RESPONSE_MODEL to ensure consistency across the codebase.

* Add LLM_NAME and LLM_MODEL attributes to the LangChainAttributes class for enhanced semantic conventions in the LangChain integration.

---------

Co-authored-by: Pratyush Shukla <ps4534@nyu.edu>
1 parent 2c7b3a7 commit 000720c

File tree

3 files changed

+18
-20
lines changed

3 files changed

+18
-20
lines changed

agentops/semconv/langchain.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@ class LangChainAttributes:
2525
# Session attributes
2626
SESSION_TAGS = "langchain.session.tags"
2727

28+
LLM_NAME = "langchain.llm.name"
29+
LLM_MODEL = "langchain.llm.model"
30+
2831
# Chain attributes - specific to LangChain
2932
CHAIN_NAME = "langchain.chain.name"
3033
CHAIN_TYPE = "langchain.chain.type"

agentops/semconv/span_attributes.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -40,8 +40,8 @@ class SpanAttributes:
4040

4141
# Content
4242
LLM_PROMPTS = "gen_ai.prompt"
43-
#LLM_COMPLETIONS = "gen_ai.completion" # DO NOT SET THIS DIRECTLY
44-
#LLM_CONTENT_COMPLETION_CHUNK = "gen_ai.completion.chunk"
43+
LLM_COMPLETIONS = "gen_ai.completion" # DO NOT SET THIS DIRECTLY
44+
LLM_CONTENT_COMPLETION_CHUNK = "gen_ai.completion.chunk"
4545

4646
# Response attributes
4747
LLM_RESPONSE_MODEL = "gen_ai.response.model"

third_party/opentelemetry/instrumentation/openai/shared/chat_wrappers.py

Lines changed: 13 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -53,11 +53,6 @@
5353
logger = logging.getLogger(__name__)
5454

5555

56-
# TODO get rid of this and also why are we patching this file like this?...
57-
class SpanAttributes(BaseSpanAttributes):
58-
LLM_COMPLETIONS = "gen_ai.completion"
59-
60-
6156
@_with_chat_telemetry_wrapper
6257
def chat_wrapper(
6358
tracer: Tracer,
@@ -81,7 +76,7 @@ def chat_wrapper(
8176
span = tracer.start_span(
8277
SPAN_NAME,
8378
kind=SpanKind.CLIENT,
84-
attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
79+
attributes={BaseSpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
8580
)
8681

8782
run_async(_handle_request(span, kwargs, instance))
@@ -175,7 +170,7 @@ async def achat_wrapper(
175170
span = tracer.start_span(
176171
SPAN_NAME,
177172
kind=SpanKind.CLIENT,
178-
attributes={SpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
173+
attributes={BaseSpanAttributes.LLM_REQUEST_TYPE: LLM_REQUEST_TYPE.value},
179174
)
180175
await _handle_request(span, kwargs, instance)
181176

@@ -324,7 +319,7 @@ def _set_choice_counter_metrics(choice_counter, choices, shared_attributes):
324319
for choice in choices:
325320
attributes_with_reason = {**shared_attributes}
326321
if choice.get("finish_reason"):
327-
attributes_with_reason[SpanAttributes.LLM_RESPONSE_FINISH_REASON] = choice.get("finish_reason")
322+
attributes_with_reason[BaseSpanAttributes.LLM_RESPONSE_FINISH_REASON] = choice.get("finish_reason")
328323
choice_counter.add(1, attributes=attributes_with_reason)
329324

330325

@@ -333,7 +328,7 @@ def _set_token_counter_metrics(token_counter, usage, shared_attributes):
333328
if name in OPENAI_LLM_USAGE_TOKEN_TYPES:
334329
attributes_with_token_type = {
335330
**shared_attributes,
336-
SpanAttributes.LLM_TOKEN_TYPE: _token_type(name),
331+
BaseSpanAttributes.LLM_TOKEN_TYPE: _token_type(name),
337332
}
338333
token_counter.record(val, attributes=attributes_with_token_type)
339334

@@ -369,7 +364,7 @@ async def _set_prompts(span, messages):
369364
return
370365

371366
for i, msg in enumerate(messages):
372-
prefix = f"{SpanAttributes.LLM_PROMPTS}.{i}"
367+
prefix = f"{BaseSpanAttributes.LLM_PROMPTS}.{i}"
373368

374369
_set_span_attribute(span, f"{prefix}.role", msg.get("role"))
375370
if msg.get("content"):
@@ -418,7 +413,7 @@ def _set_completions(span, choices):
418413

419414
for choice in choices:
420415
index = choice.get("index")
421-
prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
416+
prefix = f"{BaseSpanAttributes.LLM_COMPLETIONS}.{index}"
422417
_set_span_attribute(span, f"{prefix}.finish_reason", choice.get("finish_reason"))
423418

424419
if choice.get("content_filter_results"):
@@ -519,14 +514,14 @@ def _set_streaming_token_metrics(request_kwargs, complete_response, span, token_
519514
if isinstance(prompt_usage, int) and prompt_usage >= 0:
520515
attributes_with_token_type = {
521516
**shared_attributes,
522-
SpanAttributes.LLM_TOKEN_TYPE: "input",
517+
BaseSpanAttributes.LLM_TOKEN_TYPE: "input",
523518
}
524519
token_counter.record(prompt_usage, attributes=attributes_with_token_type)
525520

526521
if isinstance(completion_usage, int) and completion_usage >= 0:
527522
attributes_with_token_type = {
528523
**shared_attributes,
529-
SpanAttributes.LLM_TOKEN_TYPE: "output",
524+
BaseSpanAttributes.LLM_TOKEN_TYPE: "output",
530525
}
531526
token_counter.record(completion_usage, attributes=attributes_with_token_type)
532527

@@ -613,7 +608,7 @@ async def __anext__(self):
613608
return chunk
614609

615610
def _process_item(self, item):
616-
self._span.add_event(name=f"{SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK}")
611+
self._span.add_event(name=f"{BaseSpanAttributes.LLM_CONTENT_COMPLETION_CHUNK}")
617612

618613
if self._first_token and self._streaming_time_to_first_token:
619614
self._time_of_first_token = time.time()
@@ -695,7 +690,7 @@ def _build_from_streaming_response(
695690
time_of_first_token = start_time # will be updated when first token is received
696691

697692
for item in response:
698-
span.add_event(name=f"{SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK}")
693+
span.add_event(name=f"{BaseSpanAttributes.LLM_CONTENT_COMPLETION_CHUNK}")
699694

700695
item_to_yield = item
701696

@@ -709,7 +704,7 @@ def _build_from_streaming_response(
709704
yield item_to_yield
710705

711706
shared_attributes = {
712-
SpanAttributes.LLM_RESPONSE_MODEL: complete_response.get("model") or None,
707+
BaseSpanAttributes.LLM_RESPONSE_MODEL: complete_response.get("model") or None,
713708
"server.address": _get_openai_base_url(instance),
714709
"stream": True,
715710
}
@@ -758,7 +753,7 @@ async def _abuild_from_streaming_response(
758753
time_of_first_token = start_time # will be updated when first token is received
759754

760755
async for item in response:
761-
span.add_event(name=f"{SpanAttributes.LLM_CONTENT_COMPLETION_CHUNK}")
756+
span.add_event(name=f"{BaseSpanAttributes.LLM_CONTENT_COMPLETION_CHUNK}")
762757

763758
item_to_yield = item
764759

@@ -772,7 +767,7 @@ async def _abuild_from_streaming_response(
772767
yield item_to_yield
773768

774769
shared_attributes = {
775-
SpanAttributes.LLM_RESPONSE_MODEL: complete_response.get("model") or None,
770+
BaseSpanAttributes.LLM_RESPONSE_MODEL: complete_response.get("model") or None,
776771
"server.address": _get_openai_base_url(instance),
777772
"stream": True,
778773
}

0 commit comments

Comments (0)