Skip to content

Commit 6229224

Browse files
authored
Merge pull request #17848 from getsentry/rolaabuhasna/js-945-expose-ai-integrations-in-browser-sdk
feat(browser): Expose AI instrumentation methods
1 parent 6b83234 commit 6229224

File tree

19 files changed

+439
-0
lines changed

19 files changed

+439
-0
lines changed
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
import * as Sentry from '@sentry/browser';

// Expose the SDK on the page so the test harness can reach it.
window.Sentry = Sentry;

// Full tracing with verbose logging against the fixture DSN.
const options = {
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  tracesSampleRate: 1,
  debug: true,
};

Sentry.init(options);
Lines changed: 55 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,55 @@
1+
// Mock Anthropic client for browser testing
export class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Primary surface under test: messages.create / messages.countTokens.
    this.messages = {
      create: async (...args) => {
        const [params] = args;

        // Simulate a small amount of network latency.
        await new Promise(resolve => setTimeout(resolve, 10));

        if (params.model === 'error-model') {
          const error = new Error('Model not found');
          error.status = 404;
          error.headers = { 'x-request-id': 'mock-request-123' };
          throw error;
        }

        return {
          id: 'msg_mock123',
          type: 'message',
          role: 'assistant',
          model: params.model,
          content: [{ type: 'text', text: 'Hello from Anthropic mock!' }],
          stop_reason: 'end_turn',
          stop_sequence: null,
          usage: {
            input_tokens: 10,
            output_tokens: 15,
            cache_creation_input_tokens: 0,
            cache_read_input_tokens: 0,
          },
        };
      },
      countTokens: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock', input_tokens: 0 }),
    };

    // Stubs that only satisfy the expected client interface.
    this.models = {
      list: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }),
      get: async (..._args) => ({ id: 'mock', type: 'model', model: 'mock' }),
    };

    this.completions = {
      create: async (..._args) => ({ id: 'mock', type: 'completion', model: 'mock' }),
    };
  }
}
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import { instrumentAnthropicAiClient } from '@sentry/browser';
import { MockAnthropic } from './mocks.js';

// Wrap the mock client so the SDK's manual AI instrumentation is exercised;
// the wrapper creates gen_ai spans automatically. We only check that nothing
// crashes in the browser.
const client = instrumentAnthropicAiClient(new MockAnthropic({ apiKey: 'mock-api-key' }));

const response = await client.messages.create({
  model: 'claude-3-haiku-20240307',
  messages: [{ role: 'user', content: 'What is the capital of France?' }],
  temperature: 0.7,
  max_tokens: 100,
});

console.log('Received response', response);
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
import { expect } from '@playwright/test';
import { sentryTest } from '../../../../utils/fixtures';
import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers';

// Not exhaustive: the instrumentation itself is covered by the node
// integration tests. Here we only verify that it does not crash in the
// browser and that a gen_ai transaction is emitted.

sentryTest('manual Anthropic instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
  const transactionPromise = waitForTransactionRequest(
    page,
    event => !!event.transaction?.includes('claude-3-haiku-20240307'),
  );

  await page.goto(await getLocalTestUrl({ testDir: __dirname }));

  const eventData = envelopeRequestParser(await transactionPromise);

  // Verify it's a gen_ai transaction with the expected span metadata.
  expect(eventData.transaction).toBe('messages claude-3-haiku-20240307');
  expect(eventData.contexts?.trace?.op).toBe('gen_ai.messages');
  expect(eventData.contexts?.trace?.origin).toBe('auto.ai.anthropic');
  expect(eventData.contexts?.trace?.data).toMatchObject({
    'gen_ai.operation.name': 'messages',
    'gen_ai.system': 'anthropic',
    'gen_ai.request.model': 'claude-3-haiku-20240307',
    'gen_ai.request.temperature': 0.7,
    'gen_ai.response.model': 'claude-3-haiku-20240307',
    'gen_ai.response.id': 'msg_mock123',
    'gen_ai.usage.input_tokens': 10,
    'gen_ai.usage.output_tokens': 15,
  });
});
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
import * as Sentry from '@sentry/browser';

// Expose the SDK on the page so the test harness can reach it.
window.Sentry = Sentry;

// Full tracing with verbose logging against the fixture DSN.
const options = {
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  tracesSampleRate: 1,
  debug: true,
};

Sentry.init(options);
Lines changed: 118 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,118 @@
1+
// Mock Google GenAI client for browser testing
export class MockGoogleGenAI {
  constructor(config) {
    this.apiKey = config.apiKey;

    // models.generateContent / models.generateContentStream surface.
    this.models = {
      generateContent: async (...args) => {
        const [params] = args;

        // Simulate a small amount of network latency.
        await new Promise(resolve => setTimeout(resolve, 10));

        if (params.model === 'error-model') {
          const error = new Error('Model not found');
          error.status = 404;
          error.headers = { 'x-request-id': 'mock-request-123' };
          throw error;
        }

        return {
          candidates: [
            {
              content: {
                parts: [{ text: 'Hello from Google GenAI mock!' }],
                role: 'model',
              },
              finishReason: 'stop',
              index: 0,
            },
          ],
          usageMetadata: {
            promptTokenCount: 8,
            candidatesTokenCount: 12,
            totalTokenCount: 20,
          },
        };
      },
      // Promise resolving to an async generator, matching the real SDK shape.
      generateContentStream: async () =>
        (async function* () {
          yield {
            candidates: [
              {
                content: { parts: [{ text: 'Streaming response' }], role: 'model' },
                finishReason: 'stop',
                index: 0,
              },
            ],
          };
        })(),
    };

    // chats.create returns a chat session bound to the requested model.
    this.chats = {
      create: (...args) => {
        const { model } = args[0];

        return {
          modelVersion: model,
          sendMessage: async (..._messageArgs) => {
            // Simulate a small amount of network latency.
            await new Promise(resolve => setTimeout(resolve, 10));

            return {
              candidates: [
                {
                  content: {
                    parts: [{ text: 'This is a joke from the chat!' }],
                    role: 'model',
                  },
                  finishReason: 'stop',
                  index: 0,
                },
              ],
              usageMetadata: {
                promptTokenCount: 8,
                candidatesTokenCount: 12,
                totalTokenCount: 20,
              },
              // Echo the model version back, as the real API does.
              modelVersion: model,
            };
          },
          // Promise resolving to an async generator, matching the real SDK shape.
          sendMessageStream: async () =>
            (async function* () {
              yield {
                candidates: [
                  {
                    content: { parts: [{ text: 'Streaming chat response' }], role: 'model' },
                    finishReason: 'stop',
                    index: 0,
                  },
                ],
              };
            })(),
        };
      },
    };
  }
}
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
import { instrumentGoogleGenAIClient } from '@sentry/browser';
import { MockGoogleGenAI } from './mocks.js';

// Wrap the mock client so the SDK's manual AI instrumentation is exercised;
// the wrapper creates gen_ai spans automatically. We only check that nothing
// crashes in the browser. Both the chats and models APIs are covered.
const client = instrumentGoogleGenAIClient(new MockGoogleGenAI({ apiKey: 'mock-api-key' }));

const chatOptions = {
  model: 'gemini-1.5-pro',
  config: {
    temperature: 0.8,
    topP: 0.9,
    maxOutputTokens: 150,
  },
  history: [
    {
      role: 'user',
      parts: [{ text: 'Hello, how are you?' }],
    },
  ],
};

const chat = client.chats.create(chatOptions);

const response = await chat.sendMessage({
  message: 'Tell me a joke',
});

console.log('Received response', response);
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
import { expect } from '@playwright/test';
import { sentryTest } from '../../../../utils/fixtures';
import { envelopeRequestParser, waitForTransactionRequest } from '../../../../utils/helpers';

// Not exhaustive: the instrumentation itself is covered by the node
// integration tests. Here we only verify that it does not crash in the
// browser and that a gen_ai transaction is emitted.

sentryTest('manual Google GenAI instrumentation sends gen_ai transactions', async ({ getLocalTestUrl, page }) => {
  const transactionPromise = waitForTransactionRequest(
    page,
    event => !!event.transaction?.includes('gemini-1.5-pro'),
  );

  await page.goto(await getLocalTestUrl({ testDir: __dirname }));

  const eventData = envelopeRequestParser(await transactionPromise);

  // Verify it's a gen_ai transaction with the expected span metadata.
  expect(eventData.transaction).toBe('chat gemini-1.5-pro create');
  expect(eventData.contexts?.trace?.op).toBe('gen_ai.chat');
  expect(eventData.contexts?.trace?.origin).toBe('auto.ai.google_genai');
  expect(eventData.contexts?.trace?.data).toMatchObject({
    'gen_ai.operation.name': 'chat',
    'gen_ai.system': 'google_genai',
    'gen_ai.request.model': 'gemini-1.5-pro',
  });
});
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
import * as Sentry from '@sentry/browser';

// Expose the SDK on the page so the test harness can reach it.
window.Sentry = Sentry;

// Full tracing with verbose logging against the fixture DSN.
const options = {
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  tracesSampleRate: 1,
  debug: true,
};

Sentry.init(options);
Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,47 @@
1+
// Mock OpenAI client for browser testing
export class MockOpenAi {
  constructor(config) {
    this.apiKey = config.apiKey;

    // Only the chat.completions.create surface is mocked.
    this.chat = {
      completions: {
        create: async (...args) => {
          const [params] = args;

          // Simulate a small amount of network latency.
          await new Promise(resolve => setTimeout(resolve, 10));

          if (params.model === 'error-model') {
            const error = new Error('Model not found');
            error.status = 404;
            error.headers = { 'x-request-id': 'mock-request-123' };
            throw error;
          }

          return {
            id: 'chatcmpl-mock123',
            object: 'chat.completion',
            created: 1677652288,
            model: params.model,
            system_fingerprint: 'fp_44709d6fcb',
            choices: [
              {
                index: 0,
                message: {
                  role: 'assistant',
                  content: 'Hello from OpenAI mock!',
                },
                finish_reason: 'stop',
              },
            ],
            usage: {
              prompt_tokens: 10,
              completion_tokens: 15,
              total_tokens: 25,
            },
          };
        },
      },
    };
  }
}

0 commit comments

Comments
 (0)