Skip to content

Commit 794b5ba

Browse files
committed
WIP reproducer
1 parent e2ba6d4 commit 794b5ba

1 file changed

Lines changed: 195 additions & 0 deletions

File tree

instrumentation/opentelemetry-instrumentation-botocore/tests/test_botocore_bedrock.py

Lines changed: 195 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -878,6 +878,201 @@ def test_converse_stream_with_content_tool_call(
878878
)
879879

880880

881+
@pytest.mark.skipif(
    BOTO3_VERSION < (1, 35, 56), reason="ConverseStream API not available"
)
@pytest.mark.vcr()
def test_converse_stream_tool_call_parsing_errors(
    span_exporter, log_exporter, bedrock_runtime_client
):
    """WIP reproducer for tool-call parsing errors in ConverseStream tracing.

    Drives a two-turn ConverseStream conversation (tool request, then tool
    result) against a VCR-recorded Bedrock endpoint and asserts on the spans
    and log events the botocore instrumentation emits.

    NOTE(review): this test appears to be copied from the two-tool-call
    weather test above it and only partially adapted to the single
    ``get_cities_list`` tool — several internal inconsistencies remain
    (flagged inline below). Since the commit message says "WIP reproducer",
    some of these may be the very failure being demonstrated; confirm with
    the author before "fixing" them.
    """
    # pylint:disable=too-many-locals,too-many-statements
    messages = [
        {
            "role": "user",
            "content": [
                {
                    "text": "Use the get_cities_list tool to provide exactly 10 popular tourist cities in Japan. Call the tool with a cities array containing: Tokyo, Osaka, Kyoto, Hiroshima, Nara, Yokohama, Sapporo, Fukuoka, Sendai, and Nagoya"
                }
            ],
        }
    ]

    # Single tool taking an array-of-strings "cities" argument; the prompt
    # above forces the model to emit one tool call with a 10-element array,
    # which the stream delivers as many small "input" JSON fragments.
    tool_config = {
        "tools": [
            {
                "toolSpec": {
                    "name": "get_cities_list",
                    "description": "Get a list of cities",
                    "inputSchema": {
                        "json": {
                            "type": "object",
                            "properties": {
                                "cities": {
                                    "type": "array",
                                    "items": {"type": "string"},
                                }
                            },
                        }
                    },
                }
            }
        ]
    }

    llm_model_value = "anthropic.claude-3-sonnet-20240229-v1:0"
    response_0 = bedrock_runtime_client.converse_stream(
        messages=messages, modelId=llm_model_value, toolConfig=tool_config
    )

    res = ""
    # Process the streaming response - error occurs here
    # NOTE(review): `.get("input")` returns None when the delta carries no
    # "input" key, making `res += ...` raise TypeError — presumably the
    # parsing error this reproducer targets; confirm intent.
    for chunk in response_0["stream"]:
        if "contentBlockDelta" in chunk:
            delta = chunk["contentBlockDelta"]["delta"]
            if "toolUse" in delta:
                res += delta["toolUse"].get("input")

    # Parse the output and print it
    print(json.loads(res))

    # consume the stream and assemble it as the non-streaming version
    # NOTE(review): the loop above already consumed response_0["stream"];
    # whether _rebuild_stream_message can re-consume it depends on the
    # instrumentation's stream wrapper — verify this is intentional.
    response_0_message = _rebuild_stream_message(response_0)

    tool_requests_ids = [
        request["toolUse"]["toolUseId"]
        for request in response_0_message["content"]
        if "toolUse" in request
    ]
    assert len(tool_requests_ids) == 1

    # NOTE(review): tool_requests_ids[1] below is out of range given the
    # assertion just above that exactly one tool call exists, and the
    # "weather" payloads look left over from the copied weather test —
    # a second toolResult cannot be valid here. TODO confirm with author.
    tool_call_result = {
        "role": "user",
        "content": [
            {
                "toolResult": {
                    "content": [
                        {"json": {"weather": "50 degrees and raining"}}
                    ],
                    "toolUseId": tool_requests_ids[0],
                },
            },
            {
                "toolResult": {
                    "content": [{"json": {"weather": "70 degrees and sunny"}}],
                    "toolUseId": tool_requests_ids[1],
                },
            },
        ],
    }

    # Feed the assistant turn (minus stopReason, which Converse rejects on
    # input) and the tool results back for the second model turn.
    response_0_message.pop("stopReason")
    messages.append(response_0_message)
    messages.append(tool_call_result)

    response_1 = bedrock_runtime_client.converse_stream(
        messages=messages,
        modelId=llm_model_value,
        toolConfig=tool_config,
    )

    # consume the stream to have it traced
    _ = _rebuild_stream_message(response_1)

    # Two chat spans: first turn finishes with tool_use, second with end_turn.
    (span_0, span_1) = span_exporter.get_finished_spans()
    assert_stream_completion_attributes(
        span_0,
        llm_model_value,
        input_tokens=mock.ANY,
        output_tokens=mock.ANY,
        finish_reason=("tool_use",),
        operation_name="chat",
    )
    assert_stream_completion_attributes(
        span_1,
        llm_model_value,
        input_tokens=mock.ANY,
        output_tokens=mock.ANY,
        finish_reason=("end_turn",),
        operation_name="chat",
    )

    # 8 log events: user + choice on span_0; user, assistant, two tool
    # messages, user, choice on span_1.
    logs = log_exporter.get_finished_logs()
    assert len(logs) == 8

    # first span
    user_content = {}
    assert_message_in_logs(
        logs[0], "gen_ai.user.message", user_content, span_0
    )

    # NOTE(review): "get_current_weather" does not match the configured
    # "get_cities_list" tool, and two tool calls contradict the len == 1
    # assertion above — presumably unedited copy from the weather test.
    function_call_0 = {"name": "get_current_weather"}
    function_call_1 = {"name": "get_current_weather"}
    choice_body = {
        "index": 0,
        "finish_reason": "tool_use",
        "message": {
            "role": "assistant",
            "tool_calls": [
                {
                    "id": tool_requests_ids[0],
                    "type": "function",
                    "function": function_call_0,
                },
                {
                    "id": tool_requests_ids[1],
                    "type": "function",
                    "function": function_call_1,
                },
            ],
        },
    }
    assert_message_in_logs(logs[1], "gen_ai.choice", choice_body, span_0)

    # second span
    assert_message_in_logs(
        logs[2], "gen_ai.user.message", user_content, span_1
    )
    # Build the expected assistant event by mutating the rebuilt message:
    # role/content are stripped because the emitted event carries only
    # tool_calls (content capture is off by default).
    assistant_body = response_0_message
    assistant_body["tool_calls"] = choice_body["message"]["tool_calls"]
    assistant_body.pop("role")
    assistant_body.pop("content")
    assert_message_in_logs(
        logs[3],
        "gen_ai.assistant.message",
        assistant_body,
        span_1,
    )
    # Tool result events carry the toolUseId but no content (not captured).
    tool_message_0 = {
        "id": tool_requests_ids[0],
        "content": None,
    }
    assert_message_in_logs(
        logs[4], "gen_ai.tool.message", tool_message_0, span_1
    )
    tool_message_1 = {
        "id": tool_requests_ids[1],
        "content": None,
    }
    assert_message_in_logs(
        logs[5], "gen_ai.tool.message", tool_message_1, span_1
    )

    # The user turn that delivered the tool results, again stripped of
    # role/content to match the emitted event body.
    user_message_body = tool_call_result
    user_message_body.pop("role")
    user_message_body.pop("content")
    assert_message_in_logs(
        logs[6], "gen_ai.user.message", user_message_body, span_1
    )
    choice_body = {
        "index": 0,
        "finish_reason": "end_turn",
        "message": {
            "role": "assistant",
        },
    }
    assert_message_in_logs(logs[7], "gen_ai.choice", choice_body, span_1)
1074+
1075+
8811076
@pytest.mark.skipif(
8821077
BOTO3_VERSION < (1, 35, 56), reason="ConverseStream API not available"
8831078
)

0 commit comments

Comments
 (0)