Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions examples/langgraph-python/.env.template
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# TODO: Get your E2B API key from https://e2b.dev/docs
# Get your E2B API key from https://e2b.dev/dashboard?tab=keys
E2B_API_KEY=""

# TODO: Get your OpenAI API key from https://platform.openai.com
# Get your OpenAI API key from https://platform.openai.com/settings/organization/api-keys
OPENAI_API_KEY=""
310 changes: 56 additions & 254 deletions examples/langgraph-python/langgraph_code_interpreter.ipynb

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -1,88 +1,47 @@
import os
import json
from typing import Any

from typing import Any, List
from langchain_core.tools import Tool
from pydantic.v1 import BaseModel, Field
from e2b_code_interpreter import Sandbox
from langchain_core.messages import BaseMessage, ToolMessage
from langchain.agents.output_parsers.tools import (
ToolAgentAction,
)
from langchain_core.tools import tool


class RichToolMessage(ToolMessage):
    """ToolMessage that additionally carries the raw, unserialized tool output."""

    # Full dict returned by the code interpreter (results, stdout, stderr, error).
    raw_output: dict


class LangchainCodeInterpreterToolInput(BaseModel):
    """Input schema for the code interpreter tool."""

    # Source code to run inside the sandboxed Jupyter kernel.
    code: str = Field(description="Python code to execute.")


class CodeInterpreterTool:
    """E2B code interpreter sandbox.

    Wraps a long-lived E2B sandbox that executes arbitrary Python code in a
    Jupyter-style kernel. Requires the E2B_API_KEY environment variable to be
    set before construction. Use as a context manager so the sandbox is
    always torn down, even on error.
    """

    def __init__(self):
        # Fail fast with an actionable message if the API key is missing,
        # before attempting to create any cloud resources.
        if "E2B_API_KEY" not in os.environ:
            raise Exception(
                "Code Interpreter tool called while E2B_API_KEY environment variable is not set. "
                "Please get your E2B api key here https://e2b.dev/dashboard?tab=keys and set the E2B_API_KEY environment variable."
            )
        # Long-lived sandbox object that pings E2B cloud to stay alive.
        self.sandbox = Sandbox.create()
        # Rich results (e.g. charts) from the most recent execution; callers
        # read this after the agent finishes to extract artifacts like PNGs.
        self.last_results = []

    def run_code(self, code: str) -> dict[str, Any]:
        """Execute *code* in the sandbox.

        Returns a dict with the execution's rich ``results``, captured
        ``stdout``/``stderr`` logs, and any ``error`` raised in the kernel.
        """
        print(f"***Code Interpreting...\n{code}\n====")
        execution = self.sandbox.run_code(code)
        self.last_results = execution.results
        return {
            "results": execution.results,
            "stdout": execution.logs.stdout,
            "stderr": execution.logs.stderr,
            "error": execution.error,
        }

    def close(self):
        """Kill the sandbox and release its cloud resources."""
        self.sandbox.kill()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        # Always tear the sandbox down, regardless of whether the body raised.
        self.close()
def create_code_interpreter_tool(interpreter: CodeInterpreterTool):
    """Wrap *interpreter* in a LangChain tool the agent can invoke.

    The returned tool closes over the shared sandbox instance so that all
    calls within one agent run reuse the same kernel state.
    """

    @tool
    def code_interpreter(code: str) -> dict[str, Any]:
        """Execute python code in a Jupyter notebook cell and returns any rich data (eg charts), stdout, stderr, and error."""
        return interpreter.run_code(code)

    return code_interpreter
120 changes: 34 additions & 86 deletions examples/langgraph-python/langgraph_e2b_python/main.py
Original file line number Diff line number Diff line change
@@ -1,96 +1,44 @@
import base64

from typing import List
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI
from langgraph.prebuilt import create_react_agent

from langgraph_e2b_python.code_interpreter_tool import (
CodeInterpreterFunctionTool,
RichToolMessage,
CodeInterpreterTool,
create_code_interpreter_tool,
)
from langchain_openai import ChatOpenAI
from langgraph.graph import END, MessageGraph
from e2b_code_interpreter import Result

load_dotenv()


# Define the function that determines whether to continue or not
def should_continue(messages) -> str:
last_message = messages[-1]
# If there is no function call, then we finish
if not last_message.tool_calls:
return END
else:
return "action"


# Handle tools execution
def execute_tools(messages, tool_map) -> List[RichToolMessage]:
tool_messages = []
for tool_call in messages[-1].tool_calls:
tool = tool_map[tool_call["name"]]
if tool_call["name"] == CodeInterpreterFunctionTool.tool_name:
output = tool.invoke(tool_call["args"])
message = CodeInterpreterFunctionTool.format_to_tool_message(
tool_call["id"],
output,
)
tool_messages.append(message)
else:
content = tool.invoke(tool_call["args"])
tool_messages.append(RichToolMessage(content, tool_call_id=tool_call["id"]))
return tool_messages


def main():
    """Run a LangGraph ReAct agent that executes Python in an E2B sandbox."""
    # Pick your favourite LLM
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

    # Initialize CodeInterpreterTool - defined in code_interpreter_tool.py.
    # The context manager guarantees the sandbox is killed even if the agent
    # invocation raises.
    with CodeInterpreterTool() as interpreter:
        # Create ReAct agent from LangGraph preset
        agent = create_react_agent(
            model=llm,
            tools=[create_code_interpreter_tool(interpreter)],
            prompt="You are a helpful assistant that can execute Python code to help answer questions.",
        )

        # Invoke agent to plot and show sinus
        result = agent.invoke(
            {"messages": [{"role": "user", "content": "plot and show sinus"}]}
        )
        print(result)

        # Save the first PNG chart (if any) produced inside the sandbox.
        for r in interpreter.last_results:
            if hasattr(r, "png") and r.png:
                # Results carry base64-encoded PNG data; decode before writing.
                png_data = base64.b64decode(r.png)
                with open("chart.png", "wb") as f:
                    f.write(png_data)
                print("Saved chart to chart.png")
                break


if __name__ == "__main__":
    main()
Loading