diff --git a/examples/tracing/langgraph/langgraph_tracing.ipynb b/examples/tracing/langgraph/langgraph_tracing.ipynb
new file mode 100644
index 00000000..4bc2d7e0
--- /dev/null
+++ b/examples/tracing/langgraph/langgraph_tracing.ipynb
@@ -0,0 +1,390 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "2722b419",
   "metadata": {},
   "source": [
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/openlayer-ai/openlayer-python/blob/main/examples/tracing/langgraph/langgraph_tracing.ipynb)\n",
    "\n",
    "\n",
    "# LangGraph tracing\n",
    "\n",
    "This notebook illustrates how to use Openlayer's callback handler to monitor LangGraph workflows."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "75c2a473",
   "metadata": {},
   "source": [
    "## 1. Set the environment variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f3f4fa13",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "# OpenAI env variables\n",
    "os.environ[\"OPENAI_API_KEY\"] = \"YOUR_OPENAI_API_KEY_HERE\"\n",
    "\n",
    "# Openlayer env variables\n",
    "os.environ[\"OPENLAYER_API_KEY\"] = \"YOUR_OPENLAYER_API_KEY_HERE\"\n",
    "os.environ[\"OPENLAYER_INFERENCE_PIPELINE_ID\"] = \"YOUR_OPENLAYER_INFERENCE_PIPELINE_ID_HERE\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9758533f",
   "metadata": {},
   "source": [
    "## 2. Instantiate the `OpenlayerHandler`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e60584fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "from openlayer.lib.integrations import langchain_callback\n",
    "\n",
    "openlayer_handler = langchain_callback.OpenlayerHandler()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72a6b954",
   "metadata": {},
   "source": [
    "## 3. Use LangGraph\n",
    "\n",
    "### 3.1 Simple chatbot example"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "76a350b4",
   "metadata": {},
   "source": [
    "We can start with a simple chatbot example similar to the one in the [LangGraph quickstart](https://langchain-ai.github.io/langgraph/tutorials/get-started/1-build-basic-chatbot/).\n",
    "\n",
    "The idea is to pass the `openlayer_handler` as a callback to the LangGraph graph. After running the graph,\n",
    "you'll be able to see the traces in the Openlayer platform."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cc351618",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Annotated\n",
    "from typing_extensions import TypedDict\n",
    "\n",
    "from langgraph.graph import StateGraph\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.messages import HumanMessage\n",
    "from langgraph.graph.message import add_messages"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4595c63b",
   "metadata": {},
   "outputs": [],
   "source": [
    "class State(TypedDict):\n",
    "    # Messages have the type \"list\". The `add_messages` function in the annotation defines how this\n",
    "    # state key should be updated (in this case, it appends messages to the list, rather than overwriting them)\n",
    "    messages: Annotated[list, add_messages]\n",
    "\n",
    "graph_builder = StateGraph(State)\n",
    "\n",
    "llm = ChatOpenAI(model=\"gpt-4o\", temperature=0.2)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00a6fa80",
   "metadata": {},
   "outputs": [],
   "source": [
    "# The chatbot node function takes the current State as input and returns an updated messages list.\n",
    "# This is the basic pattern for all LangGraph node functions.\n",
    "def chatbot(state: State):\n",
    "    return {\"messages\": [llm.invoke(state[\"messages\"])]}\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a36e5160",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add a \"chatbot\" node. Nodes represent units of work. They are typically regular Python functions.\n",
    "graph_builder.add_node(\"chatbot\", chatbot)\n",
    "\n",
    "# Add an entry point. This tells our graph where to start its work each time we run it.\n",
    "graph_builder.set_entry_point(\"chatbot\")\n",
    "\n",
    "# Set a finish point. This instructs the graph \"any time this node is run, you can exit.\"\n",
    "graph_builder.set_finish_point(\"chatbot\")\n",
    "\n",
    "# To be able to run our graph, call \"compile()\" on the graph builder. This creates a \"CompiledGraph\" we can invoke on our state.\n",
    "graph = graph_builder.compile()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "deef517e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pass the openlayer_handler as a callback to the LangGraph graph. After running the graph,\n",
    "# you'll be able to see the traces in the Openlayer platform.\n",
    "for s in graph.stream({\"messages\": [HumanMessage(content=\"What is the meaning of life?\")]},\n",
    "                      config={\"callbacks\": [openlayer_handler]}):\n",
    "    print(s)  # noqa: T201"
   ]
  },
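  {
   "cell_type": "markdown",
   "id": "b7d20c11",
   "metadata": {},
   "source": [
    "If you prefer not to pass the callback on every call, you can also bind it to the compiled graph once.\n",
    "The sketch below uses `with_config`, a standard method on LangChain runnables (which compiled LangGraph\n",
    "graphs are); the `traced_graph` name is just illustrative."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3de9a4f0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optionally, bind the Openlayer callback once so every invocation is traced.\n",
    "# `with_config` works here because a compiled graph is a LangChain runnable.\n",
    "traced_graph = graph.with_config({\"callbacks\": [openlayer_handler]})\n",
    "\n",
    "# `invoke` returns the final state; the last message holds the model's reply.\n",
    "final_state = traced_graph.invoke(\n",
    "    {\"messages\": [HumanMessage(content=\"What is the meaning of life?\")]}\n",
    ")\n",
    "print(final_state[\"messages\"][-1].content)  # noqa: T201"
   ]
  },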
  {
   "cell_type": "markdown",
   "id": "c049c8fa",
   "metadata": {},
   "source": [
    "### 3.2 Multi-agent example\n",
    "\n",
    "Now, we're going to use a more complex example. The principle, however, remains the same: pass the `openlayer_handler` as a callback to the LangGraph graph."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "213fc402",
   "metadata": {},
   "outputs": [],
   "source": [
    "from typing import Annotated\n",
    "from datetime import datetime\n",
    "\n",
    "from langchain.tools import Tool\n",
    "from langchain_community.tools import WikipediaQueryRun\n",
    "from langchain_community.utilities import WikipediaAPIWrapper\n",
    "\n",
    "# Define a tool that searches Wikipedia\n",
    "wikipedia_tool = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())\n",
    "\n",
    "# Define a new tool that returns the current datetime\n",
    "datetime_tool = Tool(\n",
    "    name=\"Datetime\",\n",
    "    func=lambda x: datetime.now().isoformat(),  # noqa: ARG005\n",
    "    description=\"Returns the current datetime\",\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c76c8935",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.agents import AgentExecutor, create_openai_tools_agent\n",
    "from langchain_openai import ChatOpenAI\n",
    "from langchain_core.messages import BaseMessage, HumanMessage\n",
    "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
    "\n",
    "\n",
    "def create_agent(llm: ChatOpenAI, system_prompt: str, tools: list):\n",
    "    # Each worker node will be given a name and some tools.\n",
    "    prompt = ChatPromptTemplate.from_messages(\n",
    "        [\n",
    "            (\n",
    "                \"system\",\n",
    "                system_prompt,\n",
    "            ),\n",
    "            MessagesPlaceholder(variable_name=\"messages\"),\n",
    "            MessagesPlaceholder(variable_name=\"agent_scratchpad\"),\n",
    "        ]\n",
    "    )\n",
    "    agent = create_openai_tools_agent(llm, tools, prompt)\n",
    "    executor = AgentExecutor(agent=agent, tools=tools)\n",
    "    return executor\n",
    "\n",
    "def agent_node(state, agent, name):\n",
    "    result = agent.invoke(state)\n",
    "    return {\"messages\": [HumanMessage(content=result[\"output\"], name=name)]}"
   ]
  },
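  {
   "cell_type": "markdown",
   "id": "a91c0b27",
   "metadata": {},
   "source": [
    "Before wiring the full graph, it can help to sanity-check a single worker in isolation and confirm its trace\n",
    "shows up in Openlayer. The cell below is an optional sketch: it builds a throwaway agent with the `create_agent`\n",
    "helper and the `datetime_tool` defined above."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c1f2e88",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: invoke one worker agent directly, traced by the Openlayer callback.\n",
    "# The input follows the same \"messages\" pattern used by agent_node.\n",
    "test_agent = create_agent(\n",
    "    ChatOpenAI(model=\"gpt-4o\"),\n",
    "    \"You report the current time when asked.\",\n",
    "    [datetime_tool],\n",
    ")\n",
    "test_agent.invoke(\n",
    "    {\"messages\": [HumanMessage(content=\"What time is it right now?\")]},\n",
    "    config={\"callbacks\": [openlayer_handler]},\n",
    ")"
   ]
  },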
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f626e7f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
    "from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser\n",
    "\n",
    "members = [\"Researcher\", \"CurrentTime\"]\n",
    "system_prompt = (\n",
    "    \"You are a supervisor tasked with managing a conversation between the\"\n",
    "    \" following workers: {members}. Given the following user request,\"\n",
    "    \" respond with the worker to act next. Each worker will perform a\"\n",
    "    \" task and respond with their results and status. When finished,\"\n",
    "    \" respond with FINISH.\"\n",
    ")\n",
    "# Our team supervisor is an LLM node. It just picks the next agent to process and decides when the work is completed.\n",
    "options = [\"FINISH\"] + members\n",
    "\n",
    "# Using OpenAI function calling can make output parsing easier for us\n",
    "function_def = {\n",
    "    \"name\": \"route\",\n",
    "    \"description\": \"Select the next role.\",\n",
    "    \"parameters\": {\n",
    "        \"title\": \"routeSchema\",\n",
    "        \"type\": \"object\",\n",
    "        \"properties\": {\n",
    "            \"next\": {\n",
    "                \"title\": \"Next\",\n",
    "                \"anyOf\": [\n",
    "                    {\"enum\": options},\n",
    "                ],\n",
    "            }\n",
    "        },\n",
    "        \"required\": [\"next\"],\n",
    "    },\n",
    "}\n",
    "\n",
    "# Create the prompt using ChatPromptTemplate\n",
    "prompt = ChatPromptTemplate.from_messages(\n",
    "    [\n",
    "        (\"system\", system_prompt),\n",
    "        MessagesPlaceholder(variable_name=\"messages\"),\n",
    "        (\n",
    "            \"system\",\n",
    "            \"Given the conversation above, who should act next?\"\n",
    "            \" Or should we FINISH? Select one of: {options}\",\n",
    "        ),\n",
    "    ]\n",
    ").partial(options=str(options), members=\", \".join(members))\n",
    "\n",
    "llm = ChatOpenAI(model=\"gpt-4o\")\n",
    "\n",
    "# Construct the chain for the supervisor agent\n",
    "supervisor_chain = (\n",
    "    prompt\n",
    "    | llm.bind_functions(functions=[function_def], function_call=\"route\")\n",
    "    | JsonOutputFunctionsParser()\n",
    ")"
   ]
  },
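  {
   "cell_type": "markdown",
   "id": "d4f81b63",
   "metadata": {},
   "source": [
    "You can also invoke the supervisor chain on its own to see the routing decision it produces (a dict such as\n",
    "`{\"next\": \"Researcher\"}` or `{\"next\": \"FINISH\"}`). This is an optional check; the question below is only an example."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2b93c54",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Optional: inspect the supervisor's routing decision, traced by Openlayer.\n",
    "# The parsed output should be a dict like {\"next\": \"Researcher\"}.\n",
    "supervisor_chain.invoke(\n",
    "    {\"messages\": [HumanMessage(content=\"When was the Eiffel Tower built?\")]},\n",
    "    config={\"callbacks\": [openlayer_handler]},\n",
    ")"
   ]
  },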
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec307b80",
   "metadata": {},
   "outputs": [],
   "source": [
    "import operator\n",
    "import functools\n",
    "from typing import Sequence, TypedDict\n",
    "\n",
    "from langgraph.graph import END, START, StateGraph\n",
    "from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n",
    "\n",
    "\n",
    "# The agent state is the input to each node in the graph\n",
    "class AgentState(TypedDict):\n",
    "    # The annotation tells the graph that new messages will always be added to the current state\n",
    "    messages: Annotated[Sequence[BaseMessage], operator.add]\n",
    "    # The 'next' field indicates where to route to next\n",
    "    next: str\n",
    "\n",
    "# Add the research agent using the create_agent helper function\n",
    "research_agent = create_agent(llm, \"You are a web researcher.\", [wikipedia_tool])\n",
    "research_node = functools.partial(agent_node, agent=research_agent, name=\"Researcher\")\n",
    "\n",
    "# Add the time agent using the create_agent helper function\n",
    "currenttime_agent = create_agent(llm, \"You can tell the current time.\", [datetime_tool])\n",
    "currenttime_node = functools.partial(agent_node, agent=currenttime_agent, name=\"CurrentTime\")\n",
    "\n",
    "workflow = StateGraph(AgentState)\n",
    "\n",
    "# Add the worker nodes and the supervisor node. Nodes represent units of work; they are typically regular Python functions.\n",
    "workflow.add_node(\"Researcher\", research_node)\n",
    "workflow.add_node(\"CurrentTime\", currenttime_node)\n",
    "workflow.add_node(\"supervisor\", supervisor_chain)\n",
    "\n",
    "# We want our workers to ALWAYS \"report back\" to the supervisor when done\n",
    "for member in members:\n",
    "    workflow.add_edge(member, \"supervisor\")\n",
    "\n",
    "# Conditional edges usually contain \"if\" statements to route to different nodes depending on the current graph state.\n",
    "# These functions receive the current graph state and return a string or list of strings indicating which node(s) to call next.\n",
    "conditional_map = {k: k for k in members}\n",
    "conditional_map[\"FINISH\"] = END\n",
    "workflow.add_conditional_edges(\"supervisor\", lambda x: x[\"next\"], conditional_map)\n",
    "\n",
    "# Add an entry point. This tells our graph where to start its work each time we run it.\n",
    "workflow.add_edge(START, \"supervisor\")\n",
    "\n",
    "# To be able to run our graph, call \"compile()\" on the graph builder. This creates a \"CompiledGraph\" we can invoke on our state.\n",
    "graph_2 = workflow.compile()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "08e35ae9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pass the openlayer_handler as a callback to the LangGraph graph. After running the graph,\n",
    "# you'll be able to see the traces in the Openlayer platform.\n",
    "for s in graph_2.stream({\"messages\": [HumanMessage(content=\"How does photosynthesis work?\")]},\n",
    "                        config={\"callbacks\": [openlayer_handler]}):\n",
    "    print(s)  # noqa: T201"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "16acecc2",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "callback-improvements",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}