@@ -46,7 +46,9 @@ def convert_llm_proxy_response_to_oai_response(llm_proxy_response):
4646 usage = usage ,
4747 )
4848
49- # copied from AgentScope's DashScopeChatModule
49+
50+
51+ # modified from AgentScope's DashScopeChatModule
5052def convert_llm_proxy_response_to_agentscope_response (
5153 message ,
5254 structured_model : Type [BaseModel ] | None = None ,
@@ -105,91 +107,3 @@ def convert_llm_proxy_response_to_agentscope_response(
105107
106108 return parsed_response
107109
108-
109-
def test_convert_llm_proxy_response_to_oai_response():
    """Test the conversion from llm_proxy_response to OpenAI ChatCompletion format.

    Exercises three scenarios: a content-only response with token logprobs,
    a tool-call response with no tokens, and a minimal response that relies
    on the converter's default values.
    """
    from ajet.schema.logprob import TokenAndProb

    # --- Case 1: basic response with content only -------------------------
    # Two tokens are supplied, so the converter should report a usage of
    # 2 completion tokens / 2 total tokens.
    basic_response = {
        "role": "assistant",
        "request_id": "req-123456",
        "content": "Hello, how can I help you today?",
        "tool_calls": None,
        "tokens": [
            TokenAndProb(token_id=123, logprob=-0.5, decoded_string="Hello"),
            TokenAndProb(token_id=456, logprob=-0.3, decoded_string=","),
        ],
    }

    converted = convert_llm_proxy_response_to_oai_response(basic_response)

    assert converted.id == "req-123456"
    assert converted.object == "chat.completion"
    assert len(converted.choices) == 1
    choice = converted.choices[0]
    assert choice.message.role == "assistant"
    assert choice.message.content == "Hello, how can I help you today?"
    assert choice.message.tool_calls is None
    assert choice.finish_reason == "stop"
    assert converted.usage is not None
    assert converted.usage.completion_tokens == 2
    assert converted.usage.total_tokens == 2

    print("✓ Test case 1 passed: Basic response with content")

    # --- Case 2: response carrying tool calls -----------------------------
    # No tokens are supplied, so usage should be absent.
    tool_call_response = {
        "role": "assistant",
        "request_id": "req-789012",
        "content": "",
        "tool_calls": [
            {
                "id": "call_abc123",
                "type": "function",
                "function": {
                    "name": "get_weather",
                    "arguments": '{"location": "San Francisco"}'
                }
            }
        ],
        "tokens": [],
    }

    converted_tools = convert_llm_proxy_response_to_oai_response(tool_call_response)

    assert converted_tools.id == "req-789012"
    tool_choice = converted_tools.choices[0]
    assert tool_choice.message.content == ""
    assert tool_choice.message.tool_calls is not None
    assert len(tool_choice.message.tool_calls) == 1
    assert converted_tools.usage is None  # No tokens provided

    print("✓ Test case 2 passed: Response with tool calls")

    # --- Case 3: minimal response, converter defaults kick in -------------
    minimal_response = {"content": "Test response"}

    converted_minimal = convert_llm_proxy_response_to_oai_response(minimal_response)

    assert converted_minimal.id == "chatcmpl-default"
    assert converted_minimal.choices[0].message.role == "assistant"
    assert converted_minimal.choices[0].message.content == "Test response"
    assert converted_minimal.model == "unknown"

    print("✓ Test case 3 passed: Minimal response with defaults")

    print("\n ✅ All tests passed!")


if __name__ == "__main__":
    test_convert_llm_proxy_response_to_oai_response()
0 commit comments