Skip to content

Commit df4ba73

Browse files
committed
instrument google genai embeddings api
1 parent 1de98d2 commit df4ba73

11 files changed

Lines changed: 384 additions & 11 deletions

File tree

dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/mocks.js

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,31 @@ export class MockGoogleGenAI {
3939
},
4040
};
4141
},
42+
embedContent: async (...args) => {
43+
const params = args[0];
44+
await new Promise(resolve => setTimeout(resolve, 10));
45+
46+
if (params.model === 'error-model') {
47+
const error = new Error('Model not found');
48+
error.status = 404;
49+
throw error;
50+
}
51+
52+
return {
53+
embeddings: [
54+
{
55+
values: [0.1, 0.2, 0.3, 0.4, 0.5],
56+
statistics: {
57+
tokenCount: 8,
58+
truncated: false,
59+
},
60+
},
61+
],
62+
metadata: {
63+
billableCharacterCount: 30,
64+
},
65+
};
66+
},
4267
generateContentStream: async () => {
4368
// Return a promise that resolves to an async generator
4469
return (async function* () {

dev-packages/browser-integration-tests/suites/tracing/ai-providers/google-genai/subject.js

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -30,3 +30,11 @@ const response = await chat.sendMessage({
3030
});
3131

3232
console.log('Received response', response);
33+
34+
// Exercise the embeddings endpoint so the integration test can assert on the
// resulting gen_ai.embeddings span.
const embedRequest = {
  model: 'text-embedding-004',
  contents: 'Hello world',
};
const embedResponse = await client.models.embedContent(embedRequest);

console.log('Received embed response', embedResponse);
Lines changed: 84 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,84 @@
1+
import { GoogleGenAI } from '@google/genai';
2+
import * as Sentry from '@sentry/node';
3+
import express from 'express';
4+
5+
/**
 * Starts a local Express server that impersonates the Google GenAI REST API's
 * embedContent endpoint on an ephemeral port.
 *
 * Resolves with the http.Server instance once it is accepting connections.
 */
function startMockGoogleGenAIServer() {
  const app = express();
  app.use(express.json());

  // The real API appends ':embedContent' to the model path segment; the escaped
  // colon keeps Express from treating it as a second route parameter.
  app.post('/v1beta/models/:model\\:embedContent', (req, res) => {
    const { model } = req.params;

    if (model === 'error-model') {
      // Reproduce a not-found failure, including a request-id response header.
      res.status(404).set('x-request-id', 'mock-request-123').end('Model not found');
      return;
    }

    // Canned successful EmbedContentResponse payload.
    res.send({
      embeddings: [
        {
          values: [0.1, 0.2, 0.3, 0.4, 0.5],
          statistics: {
            tokenCount: 8,
            truncated: false,
          },
        },
      ],
      metadata: {
        billableCharacterCount: 30,
      },
    });
  });

  return new Promise(resolve => {
    const server = app.listen(0, () => {
      resolve(server);
    });
  });
}
39+
40+
/**
 * Exercises the Google GenAI embeddings instrumentation against the local mock
 * server with three scenarios: a plain string request, a request that fails
 * with a 404, and a request with an array of Content objects.
 *
 * Fix: the server was only closed on the success path; if any scenario threw
 * past the inner try/catch (or startSpan itself rejected), the listener leaked
 * and kept the process alive. close() now runs in a finally block.
 */
async function run() {
  const server = await startMockGoogleGenAIServer();

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const client = new GoogleGenAI({
        apiKey: 'mock-api-key',
        httpOptions: { baseUrl: `http://localhost:${server.address().port}` },
      });

      // Test 1: Basic embedContent with string contents
      await client.models.embedContent({
        model: 'text-embedding-004',
        contents: 'What is the capital of France?',
      });

      // Test 2: Error handling
      try {
        await client.models.embedContent({
          model: 'error-model',
          contents: 'This will fail',
        });
      } catch {
        // Expected error
      }

      // Test 3: embedContent with array contents
      await client.models.embedContent({
        model: 'text-embedding-004',
        contents: [
          {
            role: 'user',
            parts: [{ text: 'First input text' }],
          },
          {
            role: 'user',
            parts: [{ text: 'Second input text' }],
          },
        ],
      });
    });
  } finally {
    // Always release the ephemeral port, even when a scenario throws.
    server.close();
  }
}

run();

dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts

Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
22
import { afterAll, describe, expect } from 'vitest';
33
import {
4+
GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE,
45
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
56
GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
67
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -601,4 +602,131 @@ describe('Google GenAI integration', () => {
601602
});
602603
},
603604
);
605+
606+
const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS = {
607+
transaction: 'main',
608+
spans: expect.arrayContaining([
609+
// First span - embedContent with string contents
610+
expect.objectContaining({
611+
data: {
612+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
613+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
614+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
615+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
616+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
617+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
618+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 8,
619+
},
620+
description: 'embeddings text-embedding-004',
621+
op: 'gen_ai.embeddings',
622+
origin: 'auto.ai.google_genai',
623+
status: 'ok',
624+
}),
625+
// Second span - embedContent error model
626+
expect.objectContaining({
627+
data: {
628+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
629+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
630+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
631+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
632+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
633+
},
634+
description: 'embeddings error-model',
635+
op: 'gen_ai.embeddings',
636+
origin: 'auto.ai.google_genai',
637+
status: 'internal_error',
638+
}),
639+
// Third span - embedContent with array contents
640+
expect.objectContaining({
641+
data: {
642+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
643+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
644+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
645+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
646+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
647+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
648+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 8,
649+
},
650+
description: 'embeddings text-embedding-004',
651+
op: 'gen_ai.embeddings',
652+
origin: 'auto.ai.google_genai',
653+
status: 'ok',
654+
}),
655+
]),
656+
};
657+
658+
const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS = {
659+
transaction: 'main',
660+
spans: expect.arrayContaining([
661+
// First span - embedContent with PII
662+
expect.objectContaining({
663+
data: {
664+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
665+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
666+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
667+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
668+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
669+
[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'What is the capital of France?',
670+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
671+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 8,
672+
},
673+
description: 'embeddings text-embedding-004',
674+
op: 'gen_ai.embeddings',
675+
origin: 'auto.ai.google_genai',
676+
status: 'ok',
677+
}),
678+
// Second span - embedContent error model with PII
679+
expect.objectContaining({
680+
data: {
681+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
682+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
683+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
684+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
685+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
686+
[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'This will fail',
687+
},
688+
description: 'embeddings error-model',
689+
op: 'gen_ai.embeddings',
690+
origin: 'auto.ai.google_genai',
691+
status: 'internal_error',
692+
}),
693+
// Third span - embedContent with array contents and PII
694+
expect.objectContaining({
695+
data: {
696+
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
697+
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
698+
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
699+
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
700+
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
701+
[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: expect.any(String),
702+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
703+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 8,
704+
},
705+
description: 'embeddings text-embedding-004',
706+
op: 'gen_ai.embeddings',
707+
origin: 'auto.ai.google_genai',
708+
status: 'ok',
709+
}),
710+
]),
711+
};
712+
713+
createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument.mjs', (createRunner, test) => {
714+
test('creates google genai embeddings spans with sendDefaultPii: false', async () => {
715+
await createRunner()
716+
.ignore('event')
717+
.expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS })
718+
.start()
719+
.completed();
720+
});
721+
});
722+
723+
createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
724+
test('creates google genai embeddings spans with sendDefaultPii: true', async () => {
725+
await createRunner()
726+
.ignore('event')
727+
.expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS })
728+
.start()
729+
.completed();
730+
});
731+
});
604732
});

packages/core/src/tracing/ai/utils.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,10 @@ export function getFinalOperationName(methodPath: string): string {
2323
if (methodPath.includes('generateContent')) {
2424
return 'generate_content';
2525
}
26+
// Google GenAI: models.embedContent -> embeddings
27+
if (methodPath.includes('embedContent')) {
28+
return 'embeddings';
29+
}
2630
// Anthropic: models.get/retrieve -> models (metadata retrieval only)
2731
if (methodPath.includes('models')) {
2832
return 'models';

packages/core/src/tracing/google-genai/constants.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ export const GOOGLE_GENAI_INTEGRATION_NAME = 'Google_GenAI';
77
export const GOOGLE_GENAI_INSTRUMENTED_METHODS = [
88
'models.generateContent',
99
'models.generateContentStream',
10+
'models.embedContent',
1011
'chats.create',
1112
'sendMessage',
1213
'sendMessageStream',
@@ -15,4 +16,5 @@ export const GOOGLE_GENAI_INSTRUMENTED_METHODS = [
1516
// Constants for internal use
1617
export const GOOGLE_GENAI_SYSTEM_NAME = 'google_genai';
1718
export const CHATS_CREATE_METHOD = 'chats.create';
19+
// Method path for the embeddings entry point instrumented on the Models object.
export const EMBED_CONTENT_METHOD = 'models.embedContent';
1820
export const CHAT_PATH = 'chat';
Lines changed: 65 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,65 @@
1+
import {
2+
GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE,
3+
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
4+
GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
5+
} from '../ai/gen-ai-attributes';
6+
import type { Span } from '../../types-hoist/span';
7+
import type { GoogleGenAIEmbedContentResponse } from './types';
8+
9+
/**
10+
* Add private request attributes for embeddings methods.
11+
* Records the embeddings input on gen_ai.embeddings.input instead of gen_ai.input.messages.
12+
* The input is NOT truncated (matching OpenAI behavior).
13+
*/
14+
export function addEmbeddingsRequestAttributes(span: Span, params: Record<string, unknown>): void {
15+
if (!('contents' in params)) {
16+
return;
17+
}
18+
19+
const contents = params.contents;
20+
21+
if (contents == null) {
22+
return;
23+
}
24+
25+
if (typeof contents === 'string' && contents.length === 0) {
26+
return;
27+
}
28+
29+
if (Array.isArray(contents) && contents.length === 0) {
30+
return;
31+
}
32+
33+
span.setAttribute(
34+
GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE,
35+
typeof contents === 'string' ? contents : JSON.stringify(contents),
36+
);
37+
}
38+
39+
/**
40+
* Add response attributes from the Google GenAI embedContent response.
41+
* The EmbedContentResponse has no usageMetadata/candidates/modelVersion.
42+
* Token counts come from embeddings[].statistics.tokenCount.
43+
* @see https://ai.google.dev/api/embeddings#EmbedContentResponse
44+
*/
45+
export function addEmbedContentResponseAttributes(span: Span, response: unknown): void {
46+
if (!response || typeof response !== 'object') return;
47+
48+
const embedResponse = response as GoogleGenAIEmbedContentResponse;
49+
50+
if (Array.isArray(embedResponse.embeddings)) {
51+
let totalTokenCount = 0;
52+
for (const embedding of embedResponse.embeddings) {
53+
if (embedding.statistics && typeof embedding.statistics.tokenCount === 'number') {
54+
totalTokenCount += embedding.statistics.tokenCount;
55+
}
56+
}
57+
58+
if (totalTokenCount > 0) {
59+
span.setAttributes({
60+
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: totalTokenCount,
61+
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: totalTokenCount,
62+
});
63+
}
64+
}
65+
}

0 commit comments

Comments
 (0)