@@ -139,7 +139,7 @@ def _get_langfuse_data_from_kwargs(resource: OpenAiDefinition, langfuse: Langfus
139139 return InitialGeneration (name = name , metadata = metadata , trace_id = trace_id , start_time = start_time , prompt = prompt , modelParameters = modelParameters , model = model )
140140
141141
142- def _get_lagnfuse_data_from_sync_streaming_response (resource : OpenAiDefinition , response , generation : StatefulGenerationClient , langfuse : Langfuse ):
142+ def _get_langfuse_data_from_sync_streaming_response (resource : OpenAiDefinition , response , generation : StatefulGenerationClient , langfuse : Langfuse ):
143143 responses = []
144144 for i in response :
145145 responses .append (i )
@@ -150,7 +150,7 @@ def _get_lagnfuse_data_from_sync_streaming_response(resource: OpenAiDefinition,
150150 _create_langfuse_update (completion , generation , completion_start_time , model = model )
151151
152152
153- async def _get_lagnfuse_data_from_async_streaming_response (resource : OpenAiDefinition , response , generation : StatefulGenerationClient , langfuse : Langfuse ):
153+ async def _get_langfuse_data_from_async_streaming_response (resource : OpenAiDefinition , response , generation : StatefulGenerationClient , langfuse : Langfuse ):
154154 responses = []
155155 async for i in response :
156156 responses .append (i )
@@ -263,7 +263,7 @@ def _wrap(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs)
263263 openai_response = wrapped (** arg_extractor .get_openai_args ())
264264
265265 if _is_streaming_response (openai_response ):
266- return _get_lagnfuse_data_from_sync_streaming_response (open_ai_resource , openai_response , generation , new_langfuse )
266+ return _get_langfuse_data_from_sync_streaming_response (open_ai_resource , openai_response , generation , new_langfuse )
267267
268268 else :
269269 model , completion , usage = _get_langfuse_data_from_default_response (open_ai_resource , openai_response .__dict__ if _is_openai_v1 () else openai_response )
@@ -287,7 +287,7 @@ async def _wrap_async(open_ai_resource: OpenAiDefinition, initialize, wrapped, a
287287 openai_response = await wrapped (** arg_extractor .get_openai_args ())
288288
289289 if _is_streaming_response (openai_response ):
290- return _get_lagnfuse_data_from_async_streaming_response (open_ai_resource , openai_response , generation , new_langfuse )
290+ return _get_langfuse_data_from_async_streaming_response (open_ai_resource , openai_response , generation , new_langfuse )
291291
292292 else :
293293 model , completion , usage = _get_langfuse_data_from_default_response (open_ai_resource , openai_response .__dict__ if _is_openai_v1 () else openai_response )