Skip to content

Commit e1299a4

Browse files
committed
feat: add server_address on llm call traces and metrics
1 parent e907755 commit e1299a4

File tree

2 files changed

+32
-7
lines changed

2 files changed

+32
-7
lines changed

veadk/tracing/telemetry/attributes/extractors/llm_attributes_extractors.py

Lines changed: 23 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414

1515
import json
1616

17+
from google.adk.agents.run_config import StreamingMode
1718
from veadk.tracing.telemetry.attributes.extractors.types import (
1819
ExtractorResponse,
1920
LLMAttributesParams,
@@ -507,13 +508,15 @@ def llm_gen_ai_is_streaming(params: LLMAttributesParams) -> ExtractorResponse:
507508
for performance analysis and debugging purposes.
508509
509510
Args:
510-
params: LLM execution parameters (currently not implemented)
511+
params: LLM execution parameters
511512
512513
Returns:
513-
ExtractorResponse: Response containing None (not implemented)
514+
ExtractorResponse: Response containing True if streaming mode is enabled, else False
514515
"""
515-
# return params.llm_request.stream
516-
return ExtractorResponse(content=None)
516+
is_streaming = bool(
517+
params.invocation_context.run_config and params.invocation_context.run_config.streaming_mode != StreamingMode.NONE)
518+
519+
return ExtractorResponse(content=is_streaming)
517520

518521

519522
def llm_gen_ai_operation_name(params: LLMAttributesParams) -> ExtractorResponse:
@@ -804,6 +807,21 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorRespon
804807

805808
return ExtractorResponse(content=functions)
806809

810+
def llm_server_address(params: LLMAttributesParams) -> ExtractorResponse:
    """Extract the LLM server address (model API base URL).

    Reads the ``model_api_base`` attribute from the Agent on the current
    invocation context. When the attribute is missing or empty, falls back
    to the literal string ``'unknown'`` so the span attribute is always
    populated with a consistent value.

    Args:
        params: LLM execution parameters containing invocation context

    Returns:
        ExtractorResponse: Response containing the server address or 'unknown'
    """
    agent = params.invocation_context.agent
    address = getattr(agent, "model_api_base", None)
    if not address:
        # Empty string or missing attribute -> normalized fallback value.
        address = "unknown"
    return ExtractorResponse(content=address)
824+
807825

808826
LLM_ATTRIBUTES = {
809827
# -> 1. attributes
@@ -813,6 +831,7 @@ def llm_gen_ai_request_functions(params: LLMAttributesParams) -> ExtractorRespon
813831
"gen_ai.request.max_tokens": llm_gen_ai_request_max_tokens,
814832
"gen_ai.request.temperature": llm_gen_ai_request_temperature,
815833
"gen_ai.request.top_p": llm_gen_ai_request_top_p,
834+
"server.address": llm_server_address,
816835
# CozeLoop required
817836
"gen_ai.request.functions": llm_gen_ai_request_functions,
818837
# -> 1.2. response

veadk/tracing/telemetry/exporters/apmplus_exporter.py

Lines changed: 9 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,11 +12,13 @@
1212
# See the License for the specific language governing permissions and
1313
# limitations under the License.
1414

15+
import os
1516
import time
1617
from dataclasses import dataclass
1718
from typing import Any
1819

1920
from google.adk.agents.invocation_context import InvocationContext
21+
from google.adk.agents.run_config import StreamingMode
2022
from google.adk.events import Event
2123
from google.adk.models.llm_request import LlmRequest
2224
from google.adk.models.llm_response import LlmResponse
@@ -34,6 +36,7 @@
3436
from typing_extensions import override
3537

3638
from veadk.config import settings
39+
from veadk.consts import DEFAULT_MODEL_AGENT_API_BASE
3740
from veadk.tracing.telemetry.exporters.base_exporter import BaseExporter
3841
from veadk.utils.logger import get_logger
3942

@@ -296,13 +299,16 @@ def record_call_llm(
296299
llm_request: Request object with model and parameter details
297300
llm_response: Response object with content and usage metadata
298301
"""
302+
is_streaming = bool(
303+
invocation_context.run_config and invocation_context.run_config.streaming_mode != StreamingMode.NONE)
304+
server_address = getattr(invocation_context.agent, "model_api_base", None) or "unknown"
299305
attributes = {
300306
"gen_ai_system": "volcengine",
301307
"gen_ai_response_model": llm_request.model,
302308
"gen_ai_operation_name": "chat",
303309
"gen_ai_operation_type": "llm",
304-
"stream": "false",
305-
"server_address": "api.volcengine.com",
310+
"stream": is_streaming,
311+
"server_address": server_address,
306312
} # required by Volcengine APMPlus
307313

308314
if llm_response.usage_metadata:
@@ -337,7 +343,7 @@ def record_call_llm(
337343
if llm_response.error_code and self.chat_exception_counter:
338344
exception_attributes = {
339345
**attributes,
340-
"error_type": llm_response.error_message,
346+
"error_type": llm_response.error_code,
341347
}
342348
self.chat_exception_counter.add(1, exception_attributes)
343349

0 commit comments

Comments
 (0)