1+ """
2+ LangGraph StateGraph example with an LLM node.
3+
4+ Similar to the manual example (../manual/main.py) but uses LangGraph's StateGraph
5+ with a node that calls ChatOpenAI. OpenTelemetry LangChain instrumentation traces
6+ the LLM calls made from within the graph node.
7+ """
8+
9+ from typing import Annotated
10+
11+
12+ from langchain_core .messages import HumanMessage , SystemMessage
13+ from langchain_openai import ChatOpenAI
14+ from langgraph .graph import END , START , StateGraph
15+ from langgraph .graph .message import add_messages
16+ from typing_extensions import TypedDict
17+
18+ from opentelemetry import _logs , metrics , trace
19+ from opentelemetry .exporter .otlp .proto .grpc ._log_exporter import (
20+ OTLPLogExporter ,
21+ )
22+ from opentelemetry .exporter .otlp .proto .grpc .metric_exporter import (
23+ OTLPMetricExporter ,
24+ )
25+ from opentelemetry .exporter .otlp .proto .grpc .trace_exporter import (
26+ OTLPSpanExporter ,
27+ )
28+ from opentelemetry .instrumentation .langchain import LangChainInstrumentor
29+ from opentelemetry .sdk ._logs import LoggerProvider
30+ from opentelemetry .sdk ._logs .export import BatchLogRecordProcessor
31+ from opentelemetry .sdk .metrics import MeterProvider
32+ from opentelemetry .sdk .metrics .export import PeriodicExportingMetricReader
33+ from opentelemetry .sdk .trace import TracerProvider
34+ from opentelemetry .sdk .trace .export import BatchSpanProcessor
35+
36+
# --- OpenTelemetry pipeline configuration (all signals via OTLP/gRPC) ---

# Tracing: batch spans and export them over OTLP.
tracer_provider = TracerProvider()
tracer_provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
trace.set_tracer_provider(tracer_provider)

# Logging: batch log records through the OTLP log exporter.
logger_provider = LoggerProvider()
logger_provider.add_log_record_processor(
    BatchLogRecordProcessor(OTLPLogExporter())
)
_logs.set_logger_provider(logger_provider)

# Metrics: export on a periodic schedule via OTLP.
metric_reader = PeriodicExportingMetricReader(OTLPMetricExporter())
metrics.set_meter_provider(MeterProvider(metric_readers=[metric_reader]))
58+
59+
class GraphState(TypedDict):
    """State for the graph; messages are accumulated with add_messages."""

    # Conversation history. The add_messages reducer makes LangGraph append
    # messages returned by nodes to this list rather than overwriting it.
    messages: Annotated[list, add_messages]
64+
65+
def build_graph(llm: ChatOpenAI):
    """Build a StateGraph with a single LLM node.

    The compiled graph runs START -> "llm" -> END, where the "llm" node
    feeds the accumulated messages to the given chat model.
    """

    def call_model(state: GraphState) -> dict:
        """Node that invokes the LLM with the current messages."""
        return {"messages": [llm.invoke(state["messages"])]}

    graph_builder = StateGraph(GraphState)
    graph_builder.add_node("llm", call_model)
    graph_builder.add_edge(START, "llm")
    graph_builder.add_edge("llm", END)
    return graph_builder.compile()
79+
80+
def main():
    """Run one graph invocation with OpenTelemetry LangChain instrumentation.

    Instruments LangChain, invokes the single-node LangGraph built by
    build_graph(), prints the resulting messages, and always removes the
    instrumentation afterwards.
    """
    # Set up instrumentation (traces LLM calls from within graph nodes).
    LangChainInstrumentor().instrument()

    # Ensure uninstrument() runs even if the network call below raises,
    # so the global LangChain callback patches are not left installed.
    try:
        # ChatOpenAI setup.
        llm = ChatOpenAI(
            model="gpt-3.5-turbo",
            temperature=0.1,
            max_tokens=100,
            top_p=0.9,
            frequency_penalty=0.5,
            presence_penalty=0.5,
            stop_sequences=["\n", "Human:", "AI:"],
            seed=100,  # best-effort reproducibility of completions
        )

        graph = build_graph(llm)

        initial_messages = [
            SystemMessage(content="You are a helpful assistant!"),
            HumanMessage(content="What is the capital of France?"),
        ]

        result = graph.invoke({"messages": initial_messages})

        print("LangGraph output (messages):")
        for msg in result.get("messages", []):
            print(f"  {type(msg).__name__}: {msg.content}")
    finally:
        # Un-instrument after use.
        LangChainInstrumentor().uninstrument()


if __name__ == "__main__":
    main()