Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@

from opentelemetry.instrumentation.anthropic.package import _instruments
from opentelemetry.instrumentation.anthropic.patch import (
async_messages_create,
messages_create,
)
from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
Expand Down Expand Up @@ -99,13 +100,20 @@ def _instrument(self, **kwargs: Any) -> None:
logger_provider=logger_provider,
)

# Patch Messages.create
# Patch Messages.create (sync)
wrap_function_wrapper(
"anthropic.resources.messages",
"Messages.create",
messages_create(handler),
)

# Patch AsyncMessages.create (async)
wrap_function_wrapper(
"anthropic.resources.messages",
"AsyncMessages.create",
async_messages_create(handler),
)

def _uninstrument(self, **kwargs: Any) -> None:
"""Disable Anthropic instrumentation.

Expand All @@ -117,3 +125,7 @@ def _uninstrument(self, **kwargs: Any) -> None:
anthropic.resources.messages.Messages, # pyright: ignore[reportAttributeAccessIssue,reportUnknownMemberType,reportUnknownArgumentType]
"create",
)
unwrap(
anthropic.resources.messages.AsyncMessages, # pyright: ignore[reportAttributeAccessIssue,reportUnknownMemberType,reportUnknownArgumentType]
"create",
)
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,9 @@
from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any, Callable, Union, cast
from typing import TYPE_CHECKING, Any, Callable, Coroutine, Union, cast

from anthropic._streaming import AsyncStream as AnthropicAsyncStream
from anthropic._streaming import Stream as AnthropicStream
from anthropic.types import Message as AnthropicMessage

Expand All @@ -41,12 +42,13 @@
get_system_instruction,
)
from .wrappers import (
AsyncMessagesStreamWrapper,
MessagesStreamWrapper,
MessageWrapper,
)

if TYPE_CHECKING:
from anthropic.resources.messages import Messages
from anthropic.resources.messages import AsyncMessages, Messages
from anthropic.types import RawMessageStreamEvent


Expand Down Expand Up @@ -129,3 +131,72 @@ def traced_method(
'Callable[..., Union["AnthropicMessage", "AnthropicStream[RawMessageStreamEvent]", MessagesStreamWrapper[None]]]',
traced_method,
)


def async_messages_create(
    handler: TelemetryHandler,
) -> Callable[
    ...,
    "Coroutine[Any, Any, Union[AnthropicMessage, AnthropicAsyncStream[RawMessageStreamEvent], AsyncMessagesStreamWrapper[None]]]",
]:
    """Wrap ``AsyncMessages.create`` so each call is traced as an LLM invocation.

    Returns a wrapt-style wrapper coroutine suitable for
    ``wrap_function_wrapper``: it receives the original ``create`` coroutine,
    the ``AsyncMessages`` instance, and the call's positional/keyword args.

    NOTE(review): this mirrors ``messages_create`` almost line-for-line; the
    only behavioral differences are the ``AnthropicAsyncStream`` isinstance
    check and the async stream wrapper. Consider factoring the shared logic
    into a common helper used by both the sync and async variants.
    """
    # Whether message content may be recorded on spans (experimental opt-in);
    # resolved once at wrap time, not per call.
    capture_content = should_capture_content_on_spans_in_experimental_mode()

    async def traced_method(
        wrapped: Callable[
            ...,
            "Coroutine[Any, Any, Union[AnthropicMessage, AnthropicAsyncStream[RawMessageStreamEvent]]]",
        ],
        instance: "AsyncMessages",
        args: tuple[Any, ...],
        kwargs: dict[str, Any],
    ) -> Union[
        "AnthropicMessage",
        "AnthropicAsyncStream[RawMessageStreamEvent]",
        AsyncMessagesStreamWrapper[None],
    ]:
        params = extract_params(*args, **kwargs)
        attributes = get_llm_request_attributes(params, instance)
        # Prefer the model name resolved into the span attributes; fall back
        # to the raw request parameter when the attribute is missing or not
        # a string.
        request_model_attribute = attributes.get(
            GenAIAttributes.GEN_AI_REQUEST_MODEL
        )
        request_model = (
            request_model_attribute
            if isinstance(request_model_attribute, str)
            else params.model
        )

        invocation = LLMInvocation(
            request_model=request_model,
            provider=ANTHROPIC,
            input_messages=get_input_messages(params.messages)
            if capture_content
            else [],
            system_instruction=get_system_instruction(params.system)
            if capture_content
            else [],
            attributes=attributes,
        )

        handler.start_llm(invocation)
        try:
            result = await wrapped(*args, **kwargs)
            if isinstance(result, AnthropicAsyncStream):
                # Streaming response: telemetry is finalized by the stream
                # wrapper as the caller consumes events, so the invocation
                # stays open here.
                return AsyncMessagesStreamWrapper(
                    result, handler, invocation, capture_content
                )

            # Non-streaming response: extract response data now and close
            # the invocation before returning the underlying message.
            wrapper = MessageWrapper(result, capture_content)
            wrapper.extract_into(invocation)
            handler.stop_llm(invocation)
            return wrapper.message
        except Exception as exc:
            # Record the failure on the invocation, then let the original
            # exception propagate to the caller unchanged.
            handler.fail_llm(
                invocation, Error(message=str(exc), type=type(exc))
            )
            raise

    return cast(
        'Callable[..., Coroutine[Any, Any, Union["AnthropicMessage", "AnthropicAsyncStream[RawMessageStreamEvent]", AsyncMessagesStreamWrapper[None]]]]',
        traced_method,
    )
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
interactions:
- request:
body: |-
{
"max_tokens": 100,
"messages": [
{
"role": "user",
"content": "Hello"
}
],
"model": "invalid-model-name"
}
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '94'
content-type:
- application/json
host:
- api.anthropic.com
user-agent:
- Anthropic/Python 0.75.0
x-api-key:
- test_anthropic_api_key
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 0.75.0
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.9.6
x-stainless-timeout:
- '600'
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: |-
{
"type": "error",
"error": {
"type": "not_found_error",
"message": "model: invalid-model-name"
},
"request_id": "req_011CYfQMhgSid28ainNjq126"
}
headers:
CF-RAY:
- 9d6567ebcfafc4fb-EWR
Connection:
- keep-alive
Content-Security-Policy:
- default-src 'none'; frame-ancestors 'none'
Content-Type:
- application/json
Date:
- Tue, 03 Mar 2026 03:03:04 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
cf-cache-status:
- DYNAMIC
content-length:
- '133'
request-id:
- req_011CYfQMhgSid28ainNjq126
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
vary:
- Accept-Encoding
x-envoy-upstream-service-time:
- '28'
x-should-retry:
- 'false'
status:
code: 404
message: Not Found
version: 1
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
interactions:
- request:
body: |-
{
"max_tokens": 100,
"messages": [
{
"role": "user",
"content": "Say hello in one word."
}
],
"model": "claude-sonnet-4-20250514"
}
headers:
accept:
- application/json
accept-encoding:
- gzip, deflate
anthropic-version:
- '2023-06-01'
connection:
- keep-alive
content-length:
- '117'
content-type:
- application/json
host:
- api.anthropic.com
user-agent:
- Anthropic/Python 0.75.0
x-api-key:
- test_anthropic_api_key
x-stainless-arch:
- arm64
x-stainless-async:
- 'false'
x-stainless-lang:
- python
x-stainless-os:
- MacOS
x-stainless-package-version:
- 0.75.0
x-stainless-read-timeout:
- '600'
x-stainless-retry-count:
- '0'
x-stainless-runtime:
- CPython
x-stainless-runtime-version:
- 3.9.6
x-stainless-timeout:
- '600'
method: POST
uri: https://api.anthropic.com/v1/messages
response:
body:
string: |-
{
"model": "claude-sonnet-4-20250514",
"id": "msg_0176GK1qFwwpVM59jDYiKPjN",
"type": "message",
"role": "assistant",
"content": [
{
"type": "text",
"text": "Hello!"
}
],
"stop_reason": "end_turn",
"stop_sequence": null,
"usage": {
"input_tokens": 13,
"cache_creation_input_tokens": 0,
"cache_read_input_tokens": 0,
"cache_creation": {
"ephemeral_5m_input_tokens": 0,
"ephemeral_1h_input_tokens": 0
},
"output_tokens": 5,
"service_tier": "standard",
"inference_geo": "not_available"
}
}
headers:
CF-RAY:
- 9d6567b84d8509ae-EWR
Connection:
- keep-alive
Content-Security-Policy:
- default-src 'none'; frame-ancestors 'none'
Content-Type:
- application/json
Date:
- Tue, 03 Mar 2026 03:02:57 GMT
Server:
- cloudflare
Transfer-Encoding:
- chunked
X-Robots-Tag:
- none
anthropic-ratelimit-input-tokens-limit:
- '450000'
anthropic-ratelimit-input-tokens-remaining:
- '450000'
anthropic-ratelimit-input-tokens-reset:
- '2026-03-03T03:02:57Z'
anthropic-ratelimit-output-tokens-limit:
- '90000'
anthropic-ratelimit-output-tokens-remaining:
- '90000'
anthropic-ratelimit-output-tokens-reset:
- '2026-03-03T03:02:57Z'
anthropic-ratelimit-requests-limit:
- '1000'
anthropic-ratelimit-requests-remaining:
- '999'
anthropic-ratelimit-requests-reset:
- '2026-03-03T03:02:56Z'
anthropic-ratelimit-tokens-limit:
- '540000'
anthropic-ratelimit-tokens-remaining:
- '540000'
anthropic-ratelimit-tokens-reset:
- '2026-03-03T03:02:57Z'
cf-cache-status:
- DYNAMIC
content-length:
- '441'
request-id:
- req_011CYfQM6TP1iFTVWNrJQJcn
strict-transport-security:
- max-age=31536000; includeSubDomains; preload
vary:
- Accept-Encoding
x-envoy-upstream-service-time:
- '1056'
status:
code: 200
message: OK
version: 1
Loading