From e8d97ee43c34a94a8ba51462101ecce369e7385a Mon Sep 17 00:00:00 2001
From: A Vertex SDK engineer
Date: Mon, 27 Apr 2026 06:19:52 -0700
Subject: [PATCH] chore: migrate legacy langchain and langgraph imports to
 maintain compatibility

PiperOrigin-RevId: 906317678
---
 .../test_agent_engine_templates_langgraph.py  |  12 ++-
 ...st_reasoning_engine_templates_langgraph.py |  12 ++-
 vertexai/agent_engines/templates/langchain.py |  99 +++++++++++------
 vertexai/agent_engines/templates/langgraph.py |  59 ++++++----
 .../reasoning_engines/templates/langchain.py  | 101 ++++++++++++------
 .../reasoning_engines/templates/langgraph.py  |  61 +++++++----
 6 files changed, 238 insertions(+), 106 deletions(-)

diff --git a/tests/unit/vertex_langchain/test_agent_engine_templates_langgraph.py b/tests/unit/vertex_langchain/test_agent_engine_templates_langgraph.py
index 21cff50bdf..1048d8f842 100644
--- a/tests/unit/vertex_langchain/test_agent_engine_templates_langgraph.py
+++ b/tests/unit/vertex_langchain/test_agent_engine_templates_langgraph.py
@@ -208,7 +208,12 @@ def test_query(self, langchain_dump_mock):
         mocks.attach_mock(mock=agent._tmpl_attrs.get("runnable"), attribute="invoke")
         agent.query(input="test query")
         mocks.assert_has_calls(
-            [mock.call.invoke.invoke(input={"input": "test query"}, config=None)]
+            [
+                mock.call.invoke.invoke(
+                    input={"input": "test query", "messages": [("user", "test query")]},
+                    config=None,
+                )
+            ]
         )
 
     def test_stream_query(self, langchain_dump_mock):
@@ -217,7 +222,10 @@ def test_stream_query(self, langchain_dump_mock):
         agent = agent_engines.LanggraphAgent(model=_TEST_MODEL)
         agent._tmpl_attrs["runnable"].stream.return_value = []
         list(agent.stream_query(input="test stream query"))
         agent._tmpl_attrs["runnable"].stream.assert_called_once_with(
-            input={"input": "test stream query"},
+            input={
+                "input": "test stream query",
+                "messages": [("user", "test stream query")],
+            },
             config=None,
         )
diff --git a/tests/unit/vertex_langchain/test_reasoning_engine_templates_langgraph.py b/tests/unit/vertex_langchain/test_reasoning_engine_templates_langgraph.py
index 8215ebfa2b..d49d206a15 100644
--- a/tests/unit/vertex_langchain/test_reasoning_engine_templates_langgraph.py
+++ b/tests/unit/vertex_langchain/test_reasoning_engine_templates_langgraph.py
@@ -208,7 +208,12 @@ def test_query(self, langchain_dump_mock):
         mocks.attach_mock(mock=agent._runnable, attribute="invoke")
         agent.query(input="test query")
         mocks.assert_has_calls(
-            [mock.call.invoke.invoke(input={"input": "test query"}, config=None)]
+            [
+                mock.call.invoke.invoke(
+                    input={"input": "test query", "messages": [("user", "test query")]},
+                    config=None,
+                )
+            ]
         )
 
     def test_stream_query(self, langchain_dump_mock):
@@ -217,7 +222,10 @@ def test_stream_query(self, langchain_dump_mock):
         agent = reasoning_engines.LanggraphAgent(model=_TEST_MODEL)
         agent._runnable.stream.return_value = []
         list(agent.stream_query(input="test stream query"))
         agent._runnable.stream.assert_called_once_with(
-            input={"input": "test stream query"},
+            input={
+                "input": "test stream query",
+                "messages": [("user", "test stream query")],
+            },
             config=None,
         )
diff --git a/vertexai/agent_engines/templates/langchain.py b/vertexai/agent_engines/templates/langchain.py
index 31dabcbdd3..4bcf40f55b 100644
--- a/vertexai/agent_engines/templates/langchain.py
+++ b/vertexai/agent_engines/templates/langchain.py
@@ -43,11 +43,12 @@
 RunnableSerializable = Any
 
 try:
-    from langchain_google_vertexai.functions_utils import _ToolsType
-
-    _ToolsType = _ToolsType
+    from langchain_google_genai.functions_utils import _ToolsType
 except ImportError:
-    _ToolsType = Any
+    try:
+        from langchain_google_vertexai.functions_utils import _ToolsType
+    except ImportError:
+        _ToolsType = Any
 
 try:
     from opentelemetry.sdk import trace
@@ -81,13 +82,15 @@ def _default_runnable_kwargs(has_history: bool) -> Mapping[str, Any]:
 
 def _default_output_parser():
     try:
-        from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
+        from langchain_classic.agents.output_parsers.tools import ToolsAgentOutputParser
     except (ModuleNotFoundError, ImportError):
-        # Fallback to an older version if needed.
-        from langchain.agents.output_parsers.openai_tools import (
-            OpenAIToolsAgentOutputParser as ToolsAgentOutputParser,
-        )
-
+        try:
+            from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
+        except (ModuleNotFoundError, ImportError):
+            # Fallback to an older version if needed.
+            from langchain.agents.output_parsers.openai_tools import (
+                OpenAIToolsAgentOutputParser as ToolsAgentOutputParser,
+            )
     return ToolsAgentOutputParser()
 
 
@@ -98,17 +101,29 @@ def _default_model_builder(
     location: str,
     model_kwargs: Optional[Mapping[str, Any]] = None,
 ) -> "BaseLanguageModel":
-    import vertexai
-    from google.cloud.aiplatform import initializer
-    from langchain_google_vertexai import ChatVertexAI
-
     model_kwargs = model_kwargs or {}
-    current_project = initializer.global_config.project
-    current_location = initializer.global_config.location
-    vertexai.init(project=project, location=location)
-    model = ChatVertexAI(model_name=model_name, **model_kwargs)
-    vertexai.init(project=current_project, location=current_location)
-    return model
+    try:
+        from langchain_google_genai import ChatGoogleGenerativeAI
+
+        model = ChatGoogleGenerativeAI(
+            model=model_name,
+            project=project,
+            location=location,
+            vertexai=True,
+            **model_kwargs,
+        )
+        return model
+    except ImportError:
+        import vertexai
+        from google.cloud.aiplatform import initializer
+        from langchain_google_vertexai import ChatVertexAI
+
+        current_project = initializer.global_config.project
+        current_location = initializer.global_config.location
+        vertexai.init(project=project, location=location)
+        model = ChatVertexAI(model_name=model_name, **model_kwargs)
+        vertexai.init(project=current_project, location=current_location)
+        return model
 
 
 def _default_runnable_builder(
@@ -124,8 +139,16 @@ def _default_runnable_builder(
     runnable_kwargs: Optional[Mapping[str, Any]] = None,
 ) -> "RunnableSerializable":
     from langchain_core import tools as lc_tools
-    from langchain.agents import AgentExecutor
-    from langchain.tools.base import StructuredTool
+
+    try:
+        from langchain_classic.agents import AgentExecutor
+    except ImportError:
+        from langchain.agents import AgentExecutor
+
+    try:
+        from langchain_core.tools import StructuredTool
+    except ImportError:
+        from langchain.tools.base import StructuredTool
 
     # The prompt template and runnable_kwargs needs to be customized depending
     # on whether the user intends for the agent to have history. The way the
@@ -261,12 +284,16 @@ def _default_prompt(
     from langchain_core import prompts
 
     try:
-        from langchain.agents.format_scratchpad.tools import format_to_tool_messages
-    except (ModuleNotFoundError, ImportError):
-        # Fallback to an older version if needed.
-        from langchain.agents.format_scratchpad.openai_tools import (
-            format_to_openai_tool_messages as format_to_tool_messages,
+        from langchain_classic.agents.format_scratchpad.tools import (
+            format_to_tool_messages,
         )
+    except (ModuleNotFoundError, ImportError):
+        try:
+            from langchain.agents.format_scratchpad.tools import format_to_tool_messages
+        except (ModuleNotFoundError, ImportError):
+            from langchain.agents.format_scratchpad.openai_tools import (
+                format_to_openai_tool_messages as format_to_tool_messages,
+            )
 
     system_instructions = []
     if system_instruction:
@@ -629,13 +656,18 @@ def query(
         Returns:
             The output of querying the Agent with the given input and config.
         """
-        from langchain.load import dump as langchain_load_dump
+        try:
+            from langchain_core.load import dumpd
+        except ImportError:
+            from langchain.load import dump as langchain_load_dump
+
+            dumpd = langchain_load_dump.dumpd
 
         if isinstance(input, str):
             input = {"input": input}
         if not self._tmpl_attrs.get("runnable"):
             self.set_up()
-        return langchain_load_dump.dumpd(
+        return dumpd(
             self._tmpl_attrs.get("runnable").invoke(
                 input=input, config=config, **kwargs
             )
@@ -662,7 +694,12 @@ def stream_query(
         Yields:
             The output of querying the Agent with the given input and config.
         """
-        from langchain.load import dump as langchain_load_dump
+        try:
+            from langchain_core.load import dumpd
+        except ImportError:
+            from langchain.load import dump as langchain_load_dump
+
+            dumpd = langchain_load_dump.dumpd
 
         if isinstance(input, str):
             input = {"input": input}
@@ -673,4 +710,4 @@ def stream_query(
             config=config,
             **kwargs,
         ):
-            yield langchain_load_dump.dumpd(chunk)
+            yield dumpd(chunk)
diff --git a/vertexai/agent_engines/templates/langgraph.py b/vertexai/agent_engines/templates/langgraph.py
index 9f5295d81a..63c63c83aa 100644
--- a/vertexai/agent_engines/templates/langgraph.py
+++ b/vertexai/agent_engines/templates/langgraph.py
@@ -34,11 +34,16 @@
 BaseLanguageModel = Any
 
 try:
-    from langchain_google_vertexai.functions_utils import _ToolsType
+    from langchain_google_genai.functions_utils import _ToolsType
 
     _ToolLike = _ToolsType
 except ImportError:
-    _ToolLike = Any
+    try:
+        from langchain_google_vertexai.functions_utils import _ToolsType
+
+        _ToolLike = _ToolsType
+    except ImportError:
+        _ToolLike = Any
 
 try:
     from opentelemetry.sdk import trace
@@ -87,17 +92,29 @@ def _default_model_builder(
     Returns:
         BaseLanguageModel: The language model.
""" - import vertexai - from google.cloud.aiplatform import initializer - from langchain_google_vertexai import ChatVertexAI - model_kwargs = model_kwargs or {} - current_project = initializer.global_config.project - current_location = initializer.global_config.location - vertexai.init(project=project, location=location) - model = ChatVertexAI(model_name=model_name, **model_kwargs) - vertexai.init(project=current_project, location=current_location) - return model + try: + from langchain_google_genai import ChatGoogleGenerativeAI + + model = ChatGoogleGenerativeAI( + model=model_name, + project=project, + location=location, + vertexai=True, + **model_kwargs, + ) + return model + except ImportError: + import vertexai + from google.cloud.aiplatform import initializer + from langchain_google_vertexai import ChatVertexAI + + current_project = initializer.global_config.project + current_location = initializer.global_config.location + vertexai.init(project=project, location=location) + model = ChatVertexAI(model_name=model_name, **model_kwargs) + vertexai.init(project=current_project, location=current_location) + return model def _default_runnable_builder( @@ -554,13 +571,16 @@ def query( Returns: The output of querying the Agent with the given input and config. """ - from langchain.load import dump as langchain_load_dump + try: + from langchain_core.load import dumpd + except ImportError: + from langchain.load.dump import dumpd if isinstance(input, str): - input = {"input": input} + input = {"input": input, "messages": [("user", input)]} if not self._tmpl_attrs.get("runnable"): self.set_up() - return langchain_load_dump.dumpd( + return dumpd( self._tmpl_attrs.get("runnable").invoke( input=input, config=config, **kwargs ) @@ -587,10 +607,13 @@ def stream_query( Yields: The output of querying the Agent with the given input and config. """ - from langchain.load import dump as langchain_load_dump + try: + from langchain_core.load import dumpd + except ImportError: + from langchain.load.dump import dumpd if isinstance(input, str): - input = {"input": input} + input = {"input": input, "messages": [("user", input)]} if not self._tmpl_attrs.get("runnable"): self.set_up() for chunk in self._tmpl_attrs.get("runnable").stream( @@ -598,7 +621,7 @@ def stream_query( config=config, **kwargs, ): - yield langchain_load_dump.dumpd(chunk) + yield dumpd(chunk) def get_state_history( self, diff --git a/vertexai/preview/reasoning_engines/templates/langchain.py b/vertexai/preview/reasoning_engines/templates/langchain.py index eddb105d5b..cb39fb172e 100644 --- a/vertexai/preview/reasoning_engines/templates/langchain.py +++ b/vertexai/preview/reasoning_engines/templates/langchain.py @@ -43,11 +43,12 @@ RunnableSerializable = Any try: - from langchain_google_vertexai.functions_utils import _ToolsType - - _ToolsType = _ToolsType + from langchain_google_genai.functions_utils import _ToolsType except ImportError: - _ToolsType = Any + try: + from langchain_google_vertexai.functions_utils import _ToolsType + except ImportError: + _ToolsType = Any try: from opentelemetry.sdk import trace @@ -81,13 +82,15 @@ def _default_runnable_kwargs(has_history: bool) -> Mapping[str, Any]: def _default_output_parser(): try: - from langchain.agents.output_parsers.tools import ToolsAgentOutputParser + from langchain_classic.agents.output_parsers.tools import ToolsAgentOutputParser except (ModuleNotFoundError, ImportError): - # Fallback to an older version if needed. 
-        from langchain.agents.output_parsers.openai_tools import (
-            OpenAIToolsAgentOutputParser as ToolsAgentOutputParser,
-        )
-
+        try:
+            from langchain.agents.output_parsers.tools import ToolsAgentOutputParser
+        except (ModuleNotFoundError, ImportError):
+            # Fallback to an older version if needed.
+            from langchain.agents.output_parsers.openai_tools import (
+                OpenAIToolsAgentOutputParser as ToolsAgentOutputParser,
+            )
     return ToolsAgentOutputParser()
 
 
@@ -98,17 +101,29 @@ def _default_model_builder(
     location: str,
     model_kwargs: Optional[Mapping[str, Any]] = None,
 ) -> "BaseLanguageModel":
-    import vertexai
-    from google.cloud.aiplatform import initializer
-    from langchain_google_vertexai import ChatVertexAI
-
     model_kwargs = model_kwargs or {}
-    current_project = initializer.global_config.project
-    current_location = initializer.global_config.location
-    vertexai.init(project=project, location=location)
-    model = ChatVertexAI(model_name=model_name, **model_kwargs)
-    vertexai.init(project=current_project, location=current_location)
-    return model
+    try:
+        from langchain_google_genai import ChatGoogleGenerativeAI
+
+        model = ChatGoogleGenerativeAI(
+            model=model_name,
+            project=project,
+            location=location,
+            vertexai=True,
+            **model_kwargs,
+        )
+        return model
+    except ImportError:
+        import vertexai
+        from google.cloud.aiplatform import initializer
+        from langchain_google_vertexai import ChatVertexAI
+
+        current_project = initializer.global_config.project
+        current_location = initializer.global_config.location
+        vertexai.init(project=project, location=location)
+        model = ChatVertexAI(model_name=model_name, **model_kwargs)
+        vertexai.init(project=current_project, location=current_location)
+        return model
 
 
 def _default_runnable_builder(
@@ -124,8 +139,16 @@ def _default_runnable_builder(
     runnable_kwargs: Optional[Mapping[str, Any]] = None,
 ) -> "RunnableSerializable":
     from langchain_core import tools as lc_tools
-    from langchain.agents import AgentExecutor
-    from langchain.tools.base import StructuredTool
+
+    try:
+        from langchain_classic.agents import AgentExecutor
+    except ImportError:
+        from langchain.agents import AgentExecutor
+
+    try:
+        from langchain_core.tools import StructuredTool
+    except ImportError:
+        from langchain.tools.base import StructuredTool
 
     # The prompt template and runnable_kwargs needs to be customized depending
     # on whether the user intends for the agent to have history. The way the
@@ -175,12 +198,16 @@ def _default_prompt(
     from langchain_core import prompts
 
     try:
-        from langchain.agents.format_scratchpad.tools import format_to_tool_messages
-    except (ModuleNotFoundError, ImportError):
-        # Fallback to an older version if needed.
-        from langchain.agents.format_scratchpad.openai_tools import (
-            format_to_openai_tool_messages as format_to_tool_messages,
+        from langchain_classic.agents.format_scratchpad.tools import (
+            format_to_tool_messages,
         )
+    except (ModuleNotFoundError, ImportError):
+        try:
+            from langchain.agents.format_scratchpad.tools import format_to_tool_messages
+        except (ModuleNotFoundError, ImportError):
+            from langchain.agents.format_scratchpad.openai_tools import (
+                format_to_openai_tool_messages as format_to_tool_messages,
+            )
 
     system_instructions = []
     if system_instruction:
@@ -605,15 +632,18 @@ def query(
         Returns:
             The output of querying the Agent with the given input and config.
""" - from langchain.load import dump as langchain_load_dump + try: + from langchain_core.load import dumpd + except ImportError: + from langchain.load import dump as langchain_load_dump + + dumpd = langchain_load_dump.dumpd if isinstance(input, str): input = {"input": input} if not self._runnable: self.set_up() - return langchain_load_dump.dumpd( - self._runnable.invoke(input=input, config=config, **kwargs) - ) + return dumpd(self._runnable.invoke(input=input, config=config, **kwargs)) def stream_query( self, @@ -636,11 +666,16 @@ def stream_query( Yields: The output of querying the Agent with the given input and config. """ - from langchain.load import dump as langchain_load_dump + try: + from langchain_core.load import dumpd + except ImportError: + from langchain.load import dump as langchain_load_dump + + dumpd = langchain_load_dump.dumpd if isinstance(input, str): input = {"input": input} if not self._runnable: self.set_up() for chunk in self._runnable.stream(input=input, config=config, **kwargs): - yield langchain_load_dump.dumpd(chunk) + yield dumpd(chunk) diff --git a/vertexai/preview/reasoning_engines/templates/langgraph.py b/vertexai/preview/reasoning_engines/templates/langgraph.py index dcc711c867..a3553fae2c 100644 --- a/vertexai/preview/reasoning_engines/templates/langgraph.py +++ b/vertexai/preview/reasoning_engines/templates/langgraph.py @@ -42,11 +42,16 @@ RunnableSerializable = Any try: - from langchain_google_vertexai.functions_utils import _ToolsType + from langchain_google_genai.functions_utils import _ToolsType _ToolLike = _ToolsType except ImportError: - _ToolLike = Any + try: + from langchain_google_vertexai.functions_utils import _ToolsType + + _ToolLike = _ToolsType + except ImportError: + _ToolLike = Any try: from opentelemetry.sdk import trace @@ -95,17 +100,29 @@ def _default_model_builder( Returns: BaseLanguageModel: The language model. """ - import vertexai - from google.cloud.aiplatform import initializer - from langchain_google_vertexai import ChatVertexAI - model_kwargs = model_kwargs or {} - current_project = initializer.global_config.project - current_location = initializer.global_config.location - vertexai.init(project=project, location=location) - model = ChatVertexAI(model_name=model_name, **model_kwargs) - vertexai.init(project=current_project, location=current_location) - return model + try: + from langchain_google_genai import ChatGoogleGenerativeAI + + model = ChatGoogleGenerativeAI( + model=model_name, + project=project, + location=location, + vertexai=True, + **model_kwargs, + ) + return model + except ImportError: + import vertexai + from google.cloud.aiplatform import initializer + from langchain_google_vertexai import ChatVertexAI + + current_project = initializer.global_config.project + current_location = initializer.global_config.location + vertexai.init(project=project, location=location) + model = ChatVertexAI(model_name=model_name, **model_kwargs) + vertexai.init(project=current_project, location=current_location) + return model def _default_runnable_builder( @@ -541,15 +558,16 @@ def query( Returns: The output of querying the Agent with the given input and config. 
""" - from langchain.load import dump as langchain_load_dump + try: + from langchain_core.load import dumpd + except ImportError: + from langchain.load.dump import dumpd if isinstance(input, str): - input = {"input": input} + input = {"input": input, "messages": [("user", input)]} if not self._runnable: self.set_up() - return langchain_load_dump.dumpd( - self._runnable.invoke(input=input, config=config, **kwargs) - ) + return dumpd(self._runnable.invoke(input=input, config=config, **kwargs)) def stream_query( self, @@ -572,14 +590,17 @@ def stream_query( Yields: The output of querying the Agent with the given input and config. """ - from langchain.load import dump as langchain_load_dump + try: + from langchain_core.load import dumpd + except ImportError: + from langchain.load.dump import dumpd if isinstance(input, str): - input = {"input": input} + input = {"input": input, "messages": [("user", input)]} if not self._runnable: self.set_up() for chunk in self._runnable.stream(input=input, config=config, **kwargs): - yield langchain_load_dump.dumpd(chunk) + yield dumpd(chunk) def get_state_history( self,