Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 17 additions & 7 deletions applications/spring-ai-demo/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,20 +5,30 @@
- Java 21+
- Langfuse stack ([Cloud](https://cloud.langfuse.com/) or [Self-Hosted](https://langfuse.com/docs/deployment/self-host))
- Langfuse API Keys
- An OpenAI Api Key
- An OpenAI or Google AI Studio API Key

## How to run

1. Configure environment variables to connect Spring AI demo app with Langfuse.
```
```bash
export SPRING_AI_OPENAI_APIKEY="sk-proj-xxx"
# export SPRING_AI_GOOGLE_GENAI_APIKEY="AI..."
# export SPRING_PROFILES_ACTIVE="google" # openai is used by default

export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Basic $(echo -n "pk-lf-xxx:sk-lf-xxx" | base64)"
export OTEL_EXPORTER_OTLP_ENDPOINT="https://cloud.langfuse.com/api/public/otel" # 🇪🇺 EU data region
# export OTEL_EXPORTER_OTLP_ENDPOINT="https://us.cloud.langfuse.com/api/public/otel" # 🇺🇸 US data region
# export OTEL_EXPORTER_OTLP_ENDPOINT="http://localhost:3000/api/public/otel" # 🏠 Local deployment (>= v3.22.0)
export OTEL_EXPORTER_OTLP_HEADERS="Authorization=Basic $(echo -n "pk-lf-xxx:sk-lf-xxx" | base64)"
```
2. Run the sample application with `./mvnw clean install spring-boot:run`.
3. Call the chat endpoint with `curl localhost:8080/v1/chat`.
4. Observe the new trace in the Langfuse web UI.
2. Run the sample application with
```bash
./mvnw clean install spring-boot:run

![sample-trace](./screenshots/spring-ai-demo-trace.png)
# Or use docker compose from the docker/ folder
docker compose -f docker/docker-compose.yml up --build
```
3. Call the chat endpoint with
```bash
curl localhost:8080/v1/chat
```
4. Observe the new trace in the Langfuse web UI.
14 changes: 14 additions & 0 deletions applications/spring-ai-demo/docker/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
# syntax=docker/dockerfile:1.6
# Multi-stage build: compile the Spring AI demo with a JDK, run it on a slimmer JRE image.

# --- Build stage: full JDK + Maven wrapper ---
FROM eclipse-temurin:21-jdk AS builder
WORKDIR /workspace
# Copy the Maven wrapper first so dependency resolution is cached
# independently of source changes.
COPY .mvn/ .mvn/
COPY mvnw pom.xml ./
# Pre-fetch dependencies into the layer cache before copying sources.
RUN chmod +x mvnw && ./mvnw -B dependency:go-offline
COPY src ./src
# Tests are skipped here; run them in CI, not during image builds.
RUN ./mvnw -B clean package -DskipTests

# --- Runtime stage: JRE only, no build tooling ---
FROM eclipse-temurin:21-jre
WORKDIR /workspace
# NOTE(review): jar path is hard-coded to the current project version;
# bumping the version in pom.xml requires updating this line too.
COPY --from=builder /workspace/target/spring-ai-demo-0.0.1-SNAPSHOT.jar app.jar
EXPOSE 8080
ENTRYPOINT ["java","-jar","/workspace/app.jar"]
14 changes: 14 additions & 0 deletions applications/spring-ai-demo/docker/docker-compose.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
services:
  spring-ai-demo:
    build:
      # Build context is the project root (one level up) so the Dockerfile
      # can COPY pom.xml, .mvn/ and src/.
      context: ..
      dockerfile: docker/Dockerfile
    ports:
      - "8080:8080"
    environment:
      # Values are taken from the host shell; the `default_*` fallbacks are
      # placeholders that let the container start without real credentials
      # (export the real variables before `docker compose up`).
      OTEL_EXPORTER_OTLP_ENDPOINT: ${OTEL_EXPORTER_OTLP_ENDPOINT:-https://cloud.langfuse.com/api/public/otel}
      OTEL_EXPORTER_OTLP_HEADERS: ${OTEL_EXPORTER_OTLP_HEADERS:-default_OTEL_EXPORTER_OTLP_HEADERS}
      SPRING_AI_GOOGLE_GENAI_APIKEY: ${SPRING_AI_GOOGLE_GENAI_APIKEY:-default_SPRING_AI_GOOGLE_GENAI_APIKEY}
      SPRING_AI_OPENAI_APIKEY: ${SPRING_AI_OPENAI_APIKEY:-default_SPRING_AI_OPENAI_APIKEY}
      # Selects which provider-specific profile in application.yml is active.
      SPRING_PROFILES_ACTIVE: ${SPRING_PROFILES_ACTIVE:-openai}
    restart: unless-stopped
6 changes: 5 additions & 1 deletion applications/spring-ai-demo/pom.xml
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@

<properties>
<java.version>21</java.version>
<spring-ai.version>1.0.0</spring-ai.version>
<spring-ai.version>1.1.0</spring-ai.version>
</properties>

<dependencyManagement>
Expand Down Expand Up @@ -48,6 +48,10 @@
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-starter-model-openai</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.ai</groupId>
<artifactId>spring-ai-starter-model-google-genai</artifactId>
</dependency>
<!-- Spring AI needs a reactive web server to run for some reason-->
<dependency>
<groupId>org.springframework.boot</groupId>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,4 +63,3 @@ private List<String> processCompletion(ChatModelObservationContext context) {
}
}
}

Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
package com.langfuse.springai;

import org.springframework.ai.chat.model.ChatModel;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.context.annotation.Profile;

@Configuration
public class ChatModelProfileConfig {

    /**
     * Marks the auto-configured OpenAI {@link ChatModel} as primary when the
     * {@code openai} profile is active, so by-type injection picks it over
     * any other ChatModel on the classpath.
     *
     * <p>The factory method is deliberately NOT named {@code openAiChatModel}:
     * reusing the name of the auto-configured bean referenced by the
     * {@code @Qualifier} would register a second bean definition under the
     * same name, which fails at startup with Spring Boot's default
     * {@code spring.main.allow-bean-definition-overriding=false}.
     *
     * @param delegate the provider's auto-configured ChatModel bean
     * @return the same instance, re-exposed as the primary ChatModel
     */
    @Bean
    @Primary
    @Profile("openai")
    public ChatModel primaryOpenAiChatModel(@Qualifier("openAiChatModel") ChatModel delegate) {
        return delegate;
    }

    /**
     * Marks the auto-configured Google GenAI {@link ChatModel} as primary
     * when the {@code google} profile is active. Same naming rationale as
     * {@link #primaryOpenAiChatModel(ChatModel)}.
     *
     * @param delegate the provider's auto-configured ChatModel bean
     * @return the same instance, re-exposed as the primary ChatModel
     */
    @Bean
    @Primary
    @Profile("google")
    public ChatModel primaryGoogleGenAiChatModel(@Qualifier("googleGenAiChatModel") ChatModel delegate) {
        return delegate;
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ public ChatService(ChatClient.Builder builder) {
@EventListener(ApplicationReadyEvent.class)
public String testAiCall() {
LOGGER.info("Invoking LLM");
String answer = chatClient.prompt("Reply with the word 'java'").call().content();
String answer = chatClient.prompt("Tell the current UTC time and tell a joke about java programmers.").call().content();
LOGGER.info("AI answered: {}", answer);
return answer;
}
Expand Down
30 changes: 30 additions & 0 deletions applications/spring-ai-demo/src/main/resources/application.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,40 @@ spring:
observations:
log-prompt: true # Include prompt content in tracing (disabled by default for privacy)
log-completion: true # Include completion content in tracing (disabled by default)

management:
tracing:
sampling:
probability: 1.0 # Sample 100% of requests for full tracing (adjust in production as needed)
observations:
annotations:
enabled: true # Enable @Observed (if you use observation annotations in code)

otel:
logs:
exporter: none # Disable OTLP log export (Langfuse expects traces only)

---

spring:
config:
activate:
on-profile: google
ai:
google:
genai:
chat:
options:
model: gemini-2.0-flash

---

spring:
config:
activate:
on-profile: openai
ai:
openai:
chat:
options:
model: gpt-4o-mini