|
32 | 32 | "amazon_bedrock_converse_chat": "Bedrock", |
33 | 33 | } |
34 | 34 |
|
| 35 | +# Maps LiteLLM model prefixes to provider names.
| 36 | +# When models are accessed via a LiteLLM proxy (e.g. "gemini/gemini-2.5-flash"), |
| 37 | +# the LangChain _type is "openai-chat", which incorrectly maps to "OpenAI".
| 38 | +# This map resolves the actual provider from the model prefix. |
| 39 | +LITELLM_PREFIX_TO_PROVIDER_MAP = { |
| 40 | + "gemini": "Google", |
| 41 | + "anthropic": "Anthropic", |
| 42 | + "cohere": "Cohere", |
| 43 | + "mistral": "Mistral", |
| 44 | + "bedrock": "Bedrock", |
| 45 | + "vertex_ai": "Google", |
| 46 | + "azure": "Azure", |
| 47 | + "huggingface": "Hugging Face", |
| 48 | + "replicate": "Replicate", |
| 49 | + "together_ai": "Together AI", |
| 50 | + "groq": "Groq", |
| 51 | + "deepseek": "DeepSeek", |
| 52 | + "fireworks_ai": "Fireworks AI", |
| 53 | + "perplexity": "Perplexity", |
| 54 | + "ollama": "Ollama", |
| 55 | + "openai": "OpenAI", |
| 56 | +} |
| 57 | + |
35 | 58 |
|
36 | 59 | if HAVE_LANGCHAIN: |
37 | 60 | BaseCallbackHandlerClass = BaseCallbackHandler |
@@ -380,6 +403,14 @@ def _extract_model_info( |
380 | 403 | or serialized.get("name") |
381 | 404 | ) |
382 | 405 |
|
| 406 | + # Override provider from LiteLLM model prefix (e.g. "gemini/gemini-2.5-flash") |
| 407 | + # when the model is accessed through a proxy that reports as "openai-chat". |
| 408 | + if model and "/" in model: |
| 409 | + prefix = model.split("/", 1)[0] |
| 410 | + litellm_provider = LITELLM_PREFIX_TO_PROVIDER_MAP.get(prefix) |
| 411 | + if litellm_provider: |
| 412 | + provider = litellm_provider |
| 413 | + |
383 | 414 | # Clean invocation params (remove internal LangChain params) |
384 | 415 | clean_params = { |
385 | 416 | k: v for k, v in invocation_params.items() if not k.startswith("_") |
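
A minimal, self-contained sketch of the resolution logic this hunk adds to _extract_model_info, assuming only the map shown above. The helper name _resolve_litellm_provider is hypothetical; the handler inlines this logic rather than calling a function.

from typing import Optional

LITELLM_PREFIX_TO_PROVIDER_MAP = {
    "gemini": "Google",
    "anthropic": "Anthropic",
    "openai": "OpenAI",
    # ... remaining prefixes as in the diff above
}

def _resolve_litellm_provider(
    model: Optional[str], provider: Optional[str]
) -> Optional[str]:
    """Return the provider implied by a LiteLLM-style model prefix,
    keeping the _type-derived provider when no prefix matches."""
    if model and "/" in model:
        # "gemini/gemini-2.5-flash" -> "gemini"
        prefix = model.split("/", 1)[0]
        return LITELLM_PREFIX_TO_PROVIDER_MAP.get(prefix, provider)
    return provider

assert _resolve_litellm_provider("gemini/gemini-2.5-flash", "OpenAI") == "Google"
assert _resolve_litellm_provider("gpt-4o", "OpenAI") == "OpenAI"        # no prefix
assert _resolve_litellm_provider("custom/model", "OpenAI") == "OpenAI"  # unknown prefix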
@@ -477,7 +508,7 @@ def _handle_llm_start( |
477 | 508 | serialized, invocation_params, metadata or {} |
478 | 509 | ) |
479 | 510 |
|
480 | | - step_name = name or f"{model_info['provider'] or 'LLM'} Chat Completion" |
| 511 | + step_name = f"{model_info['provider'] or 'LLM'} Chat Completion" |
481 | 512 | prompt = [{"role": "user", "content": text} for text in prompts] |
482 | 513 |
|
483 | 514 | self._start_step( |
@@ -508,7 +539,9 @@ def _handle_chat_model_start( |
508 | 539 | serialized, invocation_params, metadata or {} |
509 | 540 | ) |
510 | 541 |
|
511 | | - step_name = name or f"{model_info['provider'] or 'Chat Model'} Chat Completion" |
| 542 | + # Always use provider-based name for chat completions (e.g. "Google Chat Completion") |
| 543 | +# rather than the caller-supplied run_name (e.g. "Language Model"), which is generic.
| 544 | + step_name = f"{model_info['provider'] or 'Chat Model'} Chat Completion" |
512 | 545 | prompt = self._messages_to_prompt_format(messages) |
513 | 546 |
|
514 | 547 | self._start_step( |
|
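
The net effect of the two naming hunks, as a small hedged sketch (the resolved provider value is illustrative):

# Before: step_name = name or f"{provider or 'Chat Model'} Chat Completion",
# so a generic caller-supplied run_name like "Language Model" took precedence.
# After: the provider-based name is always used.
provider = "Google"  # e.g. resolved from the "gemini/" prefix above
step_name = f"{provider or 'Chat Model'} Chat Completion"
assert step_name == "Google Chat Completion"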