Skip to content

Commit dcb28fb

Browse files
committed
langgraph_demo.py
1 parent 71b079f commit dcb28fb

File tree

1 file changed

+41
-0
lines changed

1 file changed

+41
-0
lines changed

llm/langgraph_demo.py

Lines changed: 41 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,41 @@
1+
#!/usr/bin/python3
2+
3+
import os
4+
from typing import TypedDict
5+
from langchain_openai import ChatOpenAI
6+
from langgraph.graph import StateGraph, END
7+
8+
class State(TypedDict):
    """Shared state dict flowing between the LangGraph nodes."""

    # Prompt text handed to the LLM node.
    input: str
    # Text content produced by the chat model.
    llm_output: str
    # Number of whitespace-separated tokens in llm_output.
    token_count: int
12+
13+
def llm_node(state):
    """Run the chat model on the state's ``input`` and return its text.

    Args:
        state: Graph state; only ``state["input"]`` (the prompt string)
            is read here.

    Returns:
        Partial state update ``{"llm_output": <model reply text>}``.

    The client is created lazily once and cached on the function object —
    the original rebuilt a ``ChatOpenAI`` client on every invocation.
    """
    model = getattr(llm_node, "_model", None)
    if model is None:
        model = ChatOpenAI(
            # Endpoint/model are overridable via env vars; defaults match
            # a local OpenAI-compatible server.  #api_key="none",
            base_url=os.getenv("OPENAI_EP", "http://localhost:8321/v1/"),
            model=os.getenv("INFERENCE_MODEL", "gemini/models/gemini-flash-latest"),
        )
        llm_node._model = model
    output = model.invoke(state["input"])
    return {"llm_output": output.content}
19+
20+
def token_counter_node(state):
    """Count whitespace-separated tokens in the LLM output.

    Args:
        state: Graph state; reads ``state["llm_output"]`` (coerced to str).

    Returns:
        Partial state update ``{"token_count": <int>}``; also prints the count.
    """
    words = str(state["llm_output"]).split()
    total = len(words)
    print(f"Token count: {total}")
    return {"token_count": total}
26+
27+
# Build the two-step pipeline: LLM call -> token counting.
workflow = StateGraph(State)
workflow.add_node('LLM_Model', llm_node)
workflow.add_node('Get_Token_Counter', token_counter_node)

# Entry point first, then the linear edges through to END.
workflow.set_entry_point('LLM_Model')
workflow.add_edge('LLM_Model', 'Get_Token_Counter')
workflow.add_edge('Get_Token_Counter', END)

# Compile into a runnable app and execute it once with a demo prompt.
app = workflow.compile()
final_result = app.invoke({"input": "Explain the concept of AI agents in one sentence."})
print(f"Final result:\n{final_result['llm_output']}")
41+

0 commit comments

Comments
 (0)