|
1 | 1 |
|
| 2 | +import time |
2 | 3 | from openai.types.chat.chat_completion import ChatCompletion, Choice |
3 | 4 | from openai.types.chat.chat_completion_message import ChatCompletionMessage |
| 5 | +from agentscope.model import ChatResponse as AgentScopeChatResponse |
4 | 6 | from openai.types.completion_usage import CompletionUsage |
5 | | -import time |
| 7 | +from typing import Any, Callable, Dict, List, Literal, Type, Union |
| 8 | +from agentscope.message import TextBlock, ToolUseBlock |
| 9 | +from agentscope._utils._common import _json_loads_with_repair |
| 10 | +from pydantic import BaseModel |
| 11 | +from agentscope.model import ChatResponse |
6 | 12 |
|
7 | 13 |
|
8 | 14 | def convert_llm_proxy_response_to_oai_response(llm_proxy_response): |
@@ -40,6 +46,66 @@ def convert_llm_proxy_response_to_oai_response(llm_proxy_response): |
40 | 46 | usage=usage, |
41 | 47 | ) |
42 | 48 |
|
# copied from AgentScope's DashScopeChatModule
def convert_llm_proxy_response_to_agentscope_response(
    message,
    structured_model: Type[BaseModel] | None = None,
) -> AgentScopeChatResponse:  # type: ignore
    """Convert an LLM-proxy chat message dict into an AgentScope ``ChatResponse``.

    Args:
        message: Mapping with an optional ``"content"`` entry (a string, or a
            list of dicts carrying a ``"text"`` key) and an optional
            ``"tool_calls"`` entry (OpenAI-style tool-call dicts with
            ``"id"`` and ``"function": {"name", "arguments"}``).
        structured_model: When provided, the parsed arguments of the last
            tool call are exposed as the response ``metadata`` (structured
            output convention; mirrors AgentScope's DashScope module).

    Returns:
        AgentScopeChatResponse whose ``content`` is a list of
        ``TextBlock`` / ``ToolUseBlock`` entries.
    """
    content_blocks: List[TextBlock | ToolUseBlock] = []
    content = message.get("content")
    metadata: dict | None = None
    # Parsed arguments of the most recent tool call. Initialized to None so
    # that the structured_model branch below cannot raise NameError when the
    # message carries no tool calls (the original only bound input_ inside
    # the tool_calls loop).
    input_: dict | None = None

    if content not in [
        None,
        "",
        [],
    ]:
        if isinstance(content, list):
            for item in content:
                if isinstance(item, dict) and "text" in item:
                    content_blocks.append(
                        TextBlock(
                            type="text",
                            text=item["text"],
                        ),
                    )
        else:
            content_blocks.append(
                TextBlock(
                    type="text",
                    text=content,
                ),
            )

    for tool_call in message.get("tool_calls") or []:
        # The model may emit truncated/malformed JSON arguments; repair
        # while parsing. Falls back to "{}" for missing/empty arguments.
        input_ = _json_loads_with_repair(
            tool_call["function"].get(
                "arguments",
                "{}",
            )
            or "{}",
        )
        content_blocks.append(
            ToolUseBlock(
                type="tool_use",
                name=tool_call["function"]["name"],
                input=input_,  # type: ignore
                id=tool_call["id"],
            ),
        )

    # Structured output: surface the (last) tool call's parsed arguments as
    # metadata, but only when a tool call actually produced them.
    if structured_model and input_ is not None:
        metadata = input_  # type: ignore

    parsed_response = AgentScopeChatResponse(
        content=content_blocks,
        metadata=metadata,
    )

    return parsed_response


| 107 | + |
| 108 | + |
43 | 109 |
|
44 | 110 | def test_convert_llm_proxy_response_to_oai_response(): |
45 | 111 | """Test the conversion from llm_proxy_response to OpenAI ChatCompletion format.""" |
|
0 commit comments