File tree Expand file tree Collapse file tree 8 files changed +107
-10
lines changed
applications/spring-ai-demo
java/com/langfuse/springai Expand file tree Collapse file tree 8 files changed +107
-10
lines changed Original file line number Diff line number Diff line change 55- Java 21+
66- Langfuse stack ([ Cloud] ( https://cloud.langfuse.com/ ) or [ Self-Hosted] ( https://langfuse.com/docs/deployment/self-host ) )
77- Langfuse API Keys
8- - An OpenAI Api Key
8+ - An OpenAI or Google AI Studio API Key
99
1010## How to run
1111
12121 . Configure environment variables to connect Spring AI demo app with Langfuse.
13- ```
13+ ``` bash
1414 export SPRING_AI_OPENAI_APIKEY=" sk-proj-xxx"
15+ # export SPRING_AI_GOOGLE_GENAI_APIKEY="AI..."
16+ # export SPRING_PROFILES_ACTIVE="google" # openai is used by default
17+
18+ export OTEL_EXPORTER_OTLP_HEADERS=" Authorization=Basic $( echo -n " pk-lf-xxx:sk-lf-xxx" | base64) "
1519 export OTEL_EXPORTER_OTLP_ENDPOINT=" https://cloud.langfuse.com/api/public/otel" # 🇪🇺 EU data region
1620 # export OTEL_EXPORTER_OTLP_ENDPOINT="https://us.cloud.langfuse.com/api/public/otel" # 🇺🇸 US data region
1721 # export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:3000/api/public/otel" # 🏠 Local deployment (>= v3.22.0)
18- export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Basic $(echo -n "pk-lf-xxx:sk-lf-xxx" | base64)"
1922 ```
20- 2 . Run the sample application with ` ./mvnw clean install spring-boot:run ` .
21- 3 . Call the chat endpoint with ` curl localhost:8080/v1/chat ` .
22- 4 . Observe the new trace in the Langfuse web UI.
23+ 2 . Run the sample application with
24+ ``` bash
25+ ./mvnw clean install spring-boot:run
2326
24- ![ sample-trace] ( ./screenshots/spring-ai-demo-trace.png )
27+ # Or use docker compose from the docker/ folder
28+ docker compose -f docker/docker-compose.yml up --build
29+ ```
30+ 3 . Call the chat endpoint with
31+ ``` bash
32+ curl localhost:8080/v1/chat
33+ ```
34+ 4 . Observe the new trace in the Langfuse web UI.
Original file line number Diff line number Diff line change 1+ # syntax=docker/dockerfile:1.6
2+ FROM eclipse-temurin:21-jdk AS builder
3+ WORKDIR /workspace
4+ COPY .mvn/ .mvn/
5+ COPY mvnw pom.xml ./
6+ RUN chmod +x mvnw && ./mvnw -B dependency:go-offline
7+ COPY src ./src
8+ RUN ./mvnw -B clean package -DskipTests
9+
10+ FROM eclipse-temurin:21-jre
11+ WORKDIR /workspace
12+ COPY --from=builder /workspace/target/spring-ai-demo-0.0.1-SNAPSHOT.jar app.jar
13+ EXPOSE 8080
14+ ENTRYPOINT ["java" ,"-jar" ,"/workspace/app.jar" ]
Original file line number Diff line number Diff line change 1+ services :
2+ spring-ai-demo :
3+ build :
4+ context : ..
5+ dockerfile : docker/Dockerfile
6+ ports :
7+ - " 8080:8080"
8+ environment :
9+ OTEL_EXPORTER_OTLP_ENDPOINT : ${OTEL_EXPORTER_OTLP_ENDPOINT:-https://cloud.langfuse.com/api/public/otel}
10+ OTEL_EXPORTER_OTLP_HEADERS : ${OTEL_EXPORTER_OTLP_HEADERS:-default_OTEL_EXPORTER_OTLP_HEADERS}
11+ SPRING_AI_GOOGLE_GENAI_APIKEY : ${SPRING_AI_GOOGLE_GENAI_APIKEY:-default_SPRING_AI_GOOGLE_GENAI_APIKEY}
12+ SPRING_AI_OPENAI_APIKEY : ${SPRING_AI_OPENAI_APIKEY:-default_SPRING_AI_OPENAI_APIKEY}
13+ SPRING_PROFILES_ACTIVE : ${SPRING_PROFILES_ACTIVE:-openai}
14+ restart : unless-stopped
Original file line number Diff line number Diff line change 1717
1818 <properties >
1919 <java .version>21</java .version>
20- <spring-ai .version>1.0 .0</spring-ai .version>
20+ <spring-ai .version>1.1 .0</spring-ai .version>
2121 </properties >
2222
2323 <dependencyManagement >
4848 <groupId >org.springframework.ai</groupId >
4949 <artifactId >spring-ai-starter-model-openai</artifactId >
5050 </dependency >
51+ <dependency >
52+ <groupId >org.springframework.ai</groupId >
53+ <artifactId >spring-ai-starter-model-google-genai</artifactId >
54+ </dependency >
5155 <!-- Spring AI needs a reactive web server to run for some reason-->
5256 <dependency >
5357 <groupId >org.springframework.boot</groupId >
Original file line number Diff line number Diff line change @@ -63,4 +63,3 @@ private List<String> processCompletion(ChatModelObservationContext context) {
6363 }
6464 }
6565}
66-
Original file line number Diff line number Diff line change 1+ package com .langfuse .springai ;
2+
3+ import org .springframework .ai .chat .model .ChatModel ;
4+ import org .springframework .beans .factory .annotation .Qualifier ;
5+ import org .springframework .context .annotation .Bean ;
6+ import org .springframework .context .annotation .Configuration ;
7+ import org .springframework .context .annotation .Primary ;
8+ import org .springframework .context .annotation .Profile ;
9+
10+ @ Configuration
11+ public class ChatModelProfileConfig {
12+
13+ @ Bean
14+ @ Primary
15+ @ Profile ("openai" )
16+ public ChatModel openAiChatModel (@ Qualifier ("openAiChatModel" ) ChatModel delegate ) {
17+ return delegate ;
18+ }
19+
20+ @ Bean
21+ @ Primary
22+ @ Profile ("google" )
23+ public ChatModel googleChatModel (@ Qualifier ("googleGenAiChatModel" ) ChatModel delegate ) {
24+ return delegate ;
25+ }
26+ }
Original file line number Diff line number Diff line change @@ -21,7 +21,7 @@ public ChatService(ChatClient.Builder builder) {
2121 @ EventListener (ApplicationReadyEvent .class )
2222 public String testAiCall () {
2323 LOGGER .info ("Invoking LLM" );
24- String answer = chatClient .prompt ("Reply with the word ' java' " ).call ().content ();
24+ String answer = chatClient .prompt ("Tell the current UTC time and tell a joke about java programmers. " ).call ().content ();
2525 LOGGER .info ("AI answered: {}" , answer );
2626 return answer ;
2727 }
Original file line number Diff line number Diff line change @@ -6,10 +6,40 @@ spring:
66 observations :
77 log-prompt : true # Include prompt content in tracing (disabled by default for privacy)
88 log-completion : true # Include completion content in tracing (disabled by default)
9+
910management :
1011 tracing :
1112 sampling :
1213 probability : 1.0 # Sample 100% of requests for full tracing (adjust in production as needed)
1314 observations :
1415 annotations :
1516 enabled : true # Enable @Observed (if you use observation annotations in code)
17+
18+ otel :
19+ logs :
20+ exporter : none # Disable OTLP log export (Langfuse expects traces only)
21+
22+ ---
23+
24+ spring :
25+ config :
26+ activate :
27+ on-profile : google
28+ ai :
29+ google :
30+ genai :
31+ chat :
32+ options :
33+ model : gemini-2.0-flash
34+
35+ ---
36+
37+ spring :
38+ config :
39+ activate :
40+ on-profile : openai
41+ ai :
42+ openai :
43+ chat :
44+ options :
45+ model : gpt-4o-mini
You can’t perform that action at this time.
0 commit comments