@@ -114,11 +114,13 @@ def _build_orchestration_template(
114114 if text_parts :
115115 template .append ({"role" : role , "content" : "\n " .join (text_parts )})
116116 for tool_call_id , resp_content in function_responses :
117- template .append ({
118- "role" : "tool" ,
119- "tool_call_id" : tool_call_id ,
120- "content" : resp_content ,
121- })
117+ template .append (
118+ {
119+ "role" : "tool" ,
120+ "tool_call_id" : tool_call_id ,
121+ "content" : resp_content ,
122+ }
123+ )
122124 elif text_parts :
123125 template .append ({"role" : role , "content" : "\n " .join (text_parts )})
124126
@@ -131,14 +133,16 @@ def _build_orchestration_tools(
131133 openai_tools = _convert_tools_to_openai (tools )
132134 result = []
133135 for t in openai_tools :
134- result .append ({
135- "type" : "function" ,
136- "function" : {
137- "name" : t ["function" ]["name" ],
138- "description" : t ["function" ].get ("description" , "" ),
139- "parameters" : t ["function" ].get ("parameters" , {"type" : "object" , "properties" : {}}),
140- },
141- })
136+ result .append (
137+ {
138+ "type" : "function" ,
139+ "function" : {
140+ "name" : t ["function" ]["name" ],
141+ "description" : t ["function" ].get ("description" , "" ),
142+ "parameters" : t ["function" ].get ("parameters" , {"type" : "object" , "properties" : {}}),
143+ },
144+ }
145+ )
142146 return result
143147
144148
@@ -316,8 +320,7 @@ async def generate_content_async(
316320
317321 if llm_request .config and llm_request .config .tools :
318322 genai_tools : list [types .Tool ] = [
319- t for t in llm_request .config .tools
320- if isinstance (t , types .Tool ) and hasattr (t , "function_declarations" )
323+ t for t in llm_request .config .tools if isinstance (t , types .Tool ) and hasattr (t , "function_declarations" )
321324 ]
322325 if genai_tools :
323326 orch_tools = _build_orchestration_tools (genai_tools )
@@ -388,7 +391,7 @@ async def _stream_request(
388391 if not line :
389392 continue
390393
391- payload = line [len ("data: " ):] if line .startswith ("data: " ) else line
394+ payload = line [len ("data: " ) :] if line .startswith ("data: " ) else line
392395 if payload == "[DONE]" :
393396 break
394397
@@ -463,9 +466,7 @@ async def _stream_request(
463466 usage_metadata = usage_metadata ,
464467 )
465468
466- async def _non_stream_request (
467- self , url : str , headers : dict [str , str ], body : dict [str , Any ]
468- ) -> LlmResponse :
469+ async def _non_stream_request (self , url : str , headers : dict [str , str ], body : dict [str , Any ]) -> LlmResponse :
469470 client = self ._get_http_client ()
470471 resp = await client .post (url , headers = headers , json = body )
471472 resp .raise_for_status ()
@@ -490,11 +491,15 @@ async def _non_stream_request(
490491 parts .append (part )
491492
492493 usage = result .get ("usage" , {})
493- usage_metadata = types .GenerateContentResponseUsageMetadata (
494- prompt_token_count = usage .get ("prompt_tokens" ),
495- candidates_token_count = usage .get ("completion_tokens" ),
496- total_token_count = usage .get ("total_tokens" ),
497- ) if usage else None
494+ usage_metadata = (
495+ types .GenerateContentResponseUsageMetadata (
496+ prompt_token_count = usage .get ("prompt_tokens" ),
497+ candidates_token_count = usage .get ("completion_tokens" ),
498+ total_token_count = usage .get ("total_tokens" ),
499+ )
500+ if usage
501+ else None
502+ )
498503
499504 stop_reason = result .get ("choices" , [{}])[0 ].get ("finish_reason" , "stop" )
500505 fr = self ._map_finish_reason (stop_reason )