Commit 912b784

Add workflow and refactor LLM for langchain

1 parent 498b928 commit 912b784

4 files changed
Lines changed: 448 additions & 27 deletions

Lines changed: 115 additions & 0 deletions
@@ -0,0 +1,115 @@
+"""
+LangGraph StateGraph example with an LLM node.
+
+Similar to the manual example (../manual/main.py) but uses LangGraph's StateGraph
+with a node that calls ChatOpenAI. OpenTelemetry LangChain instrumentation traces
+the LLM calls made from within the graph node.
+"""
+
+from typing import Annotated
+
+from langchain_core.messages import HumanMessage, SystemMessage
+from langchain_openai import ChatOpenAI
+from langgraph.graph import END, START, StateGraph
+from langgraph.graph.message import add_messages
+from typing_extensions import TypedDict
+
+from opentelemetry import _logs, metrics, trace
+from opentelemetry.exporter.otlp.proto.grpc._log_exporter import (
+    OTLPLogExporter,
+)
+from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import (
+    OTLPMetricExporter,
+)
+from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
+    OTLPSpanExporter,
+)
+from opentelemetry.instrumentation.langchain import LangChainInstrumentor
+from opentelemetry.sdk._logs import LoggerProvider
+from opentelemetry.sdk._logs.export import BatchLogRecordProcessor
+from opentelemetry.sdk.metrics import MeterProvider
+from opentelemetry.sdk.metrics.export import PeriodicExportingMetricReader
+from opentelemetry.sdk.trace import TracerProvider
+from opentelemetry.sdk.trace.export import BatchSpanProcessor
+
+# Configure tracing
+trace.set_tracer_provider(TracerProvider())
+span_processor = BatchSpanProcessor(OTLPSpanExporter())
+trace.get_tracer_provider().add_span_processor(span_processor)
+
+# Configure logging
+_logs.set_logger_provider(LoggerProvider())
+_logs.get_logger_provider().add_log_record_processor(
+    BatchLogRecordProcessor(OTLPLogExporter())
+)
+
+# Configure metrics
+metrics.set_meter_provider(
+    MeterProvider(
+        metric_readers=[
+            PeriodicExportingMetricReader(
+                OTLPMetricExporter(),
+            ),
+        ]
+    )
+)
+
+
+class GraphState(TypedDict):
+    """State for the graph; messages are accumulated with add_messages."""
+
+    messages: Annotated[list, add_messages]
+
+
+def build_graph(llm: ChatOpenAI):
+    """Build a StateGraph with a single LLM node."""
+
+    def llm_node(state: GraphState) -> dict:
+        """Node that invokes the LLM with the current messages."""
+        response = llm.invoke(state["messages"])
+        return {"messages": [response]}
+
+    builder = StateGraph(GraphState)
+    builder.add_node("llm", llm_node)
+    builder.add_edge(START, "llm")
+    builder.add_edge("llm", END)
+    return builder.compile()
+
+
+def main():
+    # Set up instrumentation (traces LLM calls from within graph nodes)
+    LangChainInstrumentor().instrument()
+
+    # ChatOpenAI setup
+    llm = ChatOpenAI(
+        model="gpt-3.5-turbo",
+        temperature=0.1,
+        max_tokens=100,
+        top_p=0.9,
+        frequency_penalty=0.5,
+        presence_penalty=0.5,
+        stop_sequences=["\n", "Human:", "AI:"],
+        seed=100,
+    )
+
+    graph = build_graph(llm)
+
+    initial_messages = [
+        SystemMessage(content="You are a helpful assistant!"),
+        HumanMessage(content="What is the capital of France?"),
+    ]
+
+    result = graph.invoke({"messages": initial_messages})
+
+    print("LangGraph output (messages):")
+    for msg in result.get("messages", []):
+        print(f"  {type(msg).__name__}: {msg.content}")
+
+    # Un-instrument after use
+    LangChainInstrumentor().uninstrument()
+
+
+if __name__ == "__main__":
+    main()
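The example expects an OPENAI_API_KEY in the environment and an OTLP collector reachable over gRPC (the exporters honor OTEL_EXPORTER_OTLP_ENDPOINT, defaulting to localhost:4317). For a quick run without a collector, the tracing setup can be swapped for a console exporter; a minimal sketch of that variant, not part of the commit:

# Hypothetical local-debug variant: print spans to stdout instead of
# exporting them over OTLP gRPC.
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import (
    ConsoleSpanExporter,
    SimpleSpanProcessor,
)

trace.set_tracer_provider(TracerProvider())
trace.get_tracer_provider().add_span_processor(
    SimpleSpanProcessor(ConsoleSpanExporter())
)

With that swap, each LLM call made inside the graph node prints a span to stdout.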
Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+langchain==0.3.21
+langchain_openai
+langgraph
+opentelemetry-sdk>=1.39.0
+opentelemetry-exporter-otlp-proto-grpc>=1.39.0
+
+# Uncomment after langchain instrumentation is released
+# opentelemetry-instrumentation-langchain~=2.0b0.dev
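Until that release, the instrumentation can presumably be installed in editable mode from a local checkout of this repository, e.g. pip install -e instrumentation-genai/opentelemetry-instrumentation-langchain (the path matches the source file below; the install step itself is an assumption, not part of the commit).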

instrumentation-genai/opentelemetry-instrumentation-langchain/src/opentelemetry/instrumentation/langchain/callback_handler.py

Lines changed: 18 additions & 27 deletions
@@ -25,10 +25,9 @@
     _InvocationManager,
 )
 from opentelemetry.util.genai.handler import TelemetryHandler
+from opentelemetry.util.genai.invocation import InferenceInvocation
 from opentelemetry.util.genai.types import (
-    Error,
     InputMessage,
-    LLMInvocation,  # TODO: migrate to InferenceInvocation
     MessagePart,
     OutputMessage,
     Text,
@@ -140,25 +139,22 @@ def on_chat_model_start(
             )
         )

-        llm_invocation = LLMInvocation(
+        llm_invocation = self._telemetry_handler.start_inference(
+            provider,
             request_model=request_model,
-            input_messages=input_messages,
-            provider=provider,
-            top_p=top_p,
-            frequency_penalty=frequency_penalty,
-            presence_penalty=presence_penalty,
-            stop_sequences=stop_sequences,
-            seed=seed,
-            temperature=temperature,
-            max_tokens=max_tokens,
-        )
-        llm_invocation = self._telemetry_handler.start_llm(
-            invocation=llm_invocation
         )
+        llm_invocation.input_messages = input_messages
+        llm_invocation.top_p = top_p
+        llm_invocation.frequency_penalty = frequency_penalty
+        llm_invocation.presence_penalty = presence_penalty
+        llm_invocation.stop_sequences = stop_sequences
+        llm_invocation.seed = seed
+        llm_invocation.temperature = temperature
+        llm_invocation.max_tokens = max_tokens
         self._invocation_manager.add_invocation_state(
             run_id=run_id,
             parent_run_id=parent_run_id,
-            invocation=llm_invocation,  # pyright: ignore[reportArgumentType]
+            invocation=llm_invocation,
         )

     def on_llm_end(
@@ -172,7 +168,7 @@ def on_llm_end(
         llm_invocation = self._invocation_manager.get_invocation(run_id=run_id)
         if llm_invocation is None or not isinstance(
             llm_invocation,
-            LLMInvocation,
+            InferenceInvocation,
         ):
             # If the invocation does not exist, we cannot set attributes or end it
             return
@@ -247,10 +243,8 @@
         if response_id is not None:
             llm_invocation.response_id = str(response_id)

-        llm_invocation = self._telemetry_handler.stop_llm(
-            invocation=llm_invocation
-        )
-        if llm_invocation.span and not llm_invocation.span.is_recording():
+        llm_invocation.stop()
+        if not llm_invocation.span.is_recording():
             self._invocation_manager.delete_invocation_state(run_id=run_id)

     def on_llm_error(
@@ -264,14 +258,11 @@ def on_llm_error(
         llm_invocation = self._invocation_manager.get_invocation(run_id=run_id)
         if llm_invocation is None or not isinstance(
             llm_invocation,
-            LLMInvocation,
+            InferenceInvocation,
         ):
             # If the invocation does not exist, we cannot set attributes or end it
             return

-        error_otel = Error(message=str(error), type=type(error))
-        llm_invocation = self._telemetry_handler.fail_llm(
-            invocation=llm_invocation, error=error_otel
-        )
-        if llm_invocation.span and not llm_invocation.span.is_recording():
+        llm_invocation.fail(error)
+        if not llm_invocation.span.is_recording():
             self._invocation_manager.delete_invocation_state(run_id=run_id)
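Net effect of the refactor: instead of constructing an LLMInvocation and threading it through the handler's start_llm / stop_llm / fail_llm calls, the callback handler obtains an InferenceInvocation from TelemetryHandler.start_inference and drives the lifecycle on the object itself. A rough sketch of the new pattern, using only the calls visible in this diff (the handler construction and the model call are assumptions):

# Sketch of the refactored lifecycle; method and attribute names are taken
# from the diff above, while TelemetryHandler() construction is an assumption.
from opentelemetry.util.genai.handler import TelemetryHandler

telemetry_handler = TelemetryHandler()
invocation = telemetry_handler.start_inference(
    "openai",                      # provider, passed positionally as in the diff
    request_model="gpt-3.5-turbo",
)
invocation.temperature = 0.1       # request parameters become plain attributes
invocation.max_tokens = 100
try:
    # ... invoke the model here (assumed) ...
    invocation.stop()              # ends the invocation and its span on success
except Exception as error:
    invocation.fail(error)         # records the raw exception and ends the span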
