Skip to content

Commit b9bfda5

Browse files
viniciusdsmelloclaude
authored and committed
feat: Fix provider detection for LiteLLM proxy models and improve step naming
- Add LITELLM_PREFIX_TO_PROVIDER_MAP to resolve the correct provider from the model prefix (e.g. gemini/gemini-2.5-flash -> Google instead of OpenAI)
- Always use the provider-based name for chat completion steps instead of accepting a generic run_name from the caller

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
1 parent 5d702ef commit b9bfda5

File tree

1 file changed

+35
-2
lines changed

1 file changed

+35
-2
lines changed

src/openlayer/lib/integrations/langchain_callback.py

Lines changed: 35 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,6 +32,29 @@
3232
"amazon_bedrock_converse_chat": "Bedrock",
3333
}
3434

# Maps LiteLLM model-name prefixes (the part before "/") to provider names.
# Models routed through a LiteLLM proxy (e.g. "gemini/gemini-2.5-flash")
# surface in LangChain with _type "openai-chat", which would otherwise be
# reported as "OpenAI"; the model-string prefix identifies the real provider.
# Entries are kept in alphabetical order by prefix.
LITELLM_PREFIX_TO_PROVIDER_MAP = {
    "anthropic": "Anthropic",
    "azure": "Azure",
    "bedrock": "Bedrock",
    "cohere": "Cohere",
    "deepseek": "DeepSeek",
    "fireworks_ai": "Fireworks AI",
    "gemini": "Google",
    "groq": "Groq",
    "huggingface": "Hugging Face",
    "mistral": "Mistral",
    "ollama": "Ollama",
    "openai": "OpenAI",
    "perplexity": "Perplexity",
    "replicate": "Replicate",
    "together_ai": "Together AI",
    "vertex_ai": "Google",
}
3558

3659
if HAVE_LANGCHAIN:
3760
BaseCallbackHandlerClass = BaseCallbackHandler
@@ -380,6 +403,14 @@ def _extract_model_info(
380403
or serialized.get("name")
381404
)
382405

406+
# Override provider from LiteLLM model prefix (e.g. "gemini/gemini-2.5-flash")
407+
# when the model is accessed through a proxy that reports as "openai-chat".
408+
if model and "/" in model:
409+
prefix = model.split("/", 1)[0]
410+
litellm_provider = LITELLM_PREFIX_TO_PROVIDER_MAP.get(prefix)
411+
if litellm_provider:
412+
provider = litellm_provider
413+
383414
# Clean invocation params (remove internal LangChain params)
384415
clean_params = {
385416
k: v for k, v in invocation_params.items() if not k.startswith("_")
@@ -477,7 +508,7 @@ def _handle_llm_start(
477508
serialized, invocation_params, metadata or {}
478509
)
479510

480-
step_name = name or f"{model_info['provider'] or 'LLM'} Chat Completion"
511+
step_name = f"{model_info['provider'] or 'LLM'} Chat Completion"
481512
prompt = [{"role": "user", "content": text} for text in prompts]
482513

483514
self._start_step(
@@ -508,7 +539,9 @@ def _handle_chat_model_start(
508539
serialized, invocation_params, metadata or {}
509540
)
510541

511-
step_name = name or f"{model_info['provider'] or 'Chat Model'} Chat Completion"
542+
# Always use provider-based name for chat completions (e.g. "Google Chat Completion")
543+
# rather than the run_name from the caller (e.g. "Language Model") which is generic.
544+
step_name = f"{model_info['provider'] or 'Chat Model'} Chat Completion"
512545
prompt = self._messages_to_prompt_format(messages)
513546

514547
self._start_step(

0 commit comments

Comments
 (0)