diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
new file mode 100644
index 000000000000..35f97fd84093
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
@@ -0,0 +1,16 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: false,
+  transport: loggingTransport,
+  integrations: [
+    Sentry.openAIIntegration({
+      recordInputs: true,
+      recordOutputs: true,
+    }),
+  ],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
new file mode 100644
index 000000000000..a53a13af7738
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
@@ -0,0 +1,11 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: true,
+  transport: loggingTransport,
+  integrations: [Sentry.openAIIntegration()],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
new file mode 100644
index 000000000000..f3fbac9d1274
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
@@ -0,0 +1,11 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: false,
+  transport: loggingTransport,
+  integrations: [Sentry.openAIIntegration()],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-chat.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-chat.mjs
new file mode 100644
index 000000000000..fde651c3c1ff
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-chat.mjs
@@ -0,0 +1,318 @@
+import { instrumentOpenAiClient } from '@sentry/core';
+import * as Sentry from '@sentry/node';
+
+class MockOpenAI {
+  constructor(config) {
+    this.apiKey = config.apiKey;
+
+    this.chat = {
+      completions: {
+        create: async params => {
+          // Simulate processing time
+          await new Promise(resolve => setTimeout(resolve, 10));
+
+          if (params.model === 'error-model') {
+            const error = new Error('Model not found');
+            error.status = 404;
+            error.headers = { 'x-request-id': 'mock-request-123' };
+            throw error;
+          }
+
+          // If stream is requested, return an async generator
+          if (params.stream) {
+            return this._createChatCompletionStream(params);
+          }
+
+          return {
+            id: 'chatcmpl-mock123',
+            object: 'chat.completion',
+            created: 1677652288,
+            model: params.model,
+            system_fingerprint: 'fp_44709d6fcb',
+            choices: [
+              {
+                index: 0,
+                message: {
+                  role: 'assistant',
+                  content: 'Hello from OpenAI mock!',
+                },
+                finish_reason: 'stop',
+              },
+            ],
+            usage: {
+              prompt_tokens: 10,
+              completion_tokens: 15,
+              total_tokens: 25,
+            },
+          };
+        },
+      },
+    };
+
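+    // Mock of the Responses API: non-streaming calls resolve to a single response
+    // object, while streaming calls return the async generator defined further below.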
+    this.responses = {
+      create: async params => {
+        await new Promise(resolve => setTimeout(resolve, 10));
+
+        // If stream is requested, return an async generator
+        if (params.stream) {
+          return this._createResponsesApiStream(params);
+        }
+
+        return {
+          id: 'resp_mock456',
+          object: 'response',
+          created_at: 1677652290,
+          model: params.model,
+          input_text: params.input,
+          output_text: `Response to: ${params.input}`,
+          status: 'completed',
+          usage: {
+            input_tokens: 5,
+            output_tokens: 8,
+            total_tokens: 13,
+          },
+        };
+      },
+    };
+  }
+
+  // Create a mock streaming response for chat completions
+  async *_createChatCompletionStream(params) {
+    // First chunk with basic info
+    yield {
+      id: 'chatcmpl-stream-123',
+      object: 'chat.completion.chunk',
+      created: 1677652300,
+      model: params.model,
+      system_fingerprint: 'fp_stream_123',
+      choices: [
+        {
+          index: 0,
+          delta: {
+            role: 'assistant',
+            content: 'Hello',
+          },
+          finish_reason: null,
+        },
+      ],
+    };
+
+    // Second chunk with more content
+    yield {
+      id: 'chatcmpl-stream-123',
+      object: 'chat.completion.chunk',
+      created: 1677652300,
+      model: params.model,
+      system_fingerprint: 'fp_stream_123',
+      choices: [
+        {
+          index: 0,
+          delta: {
+            content: ' from OpenAI streaming!',
+          },
+          finish_reason: 'stop',
+        },
+      ],
+      usage: {
+        prompt_tokens: 12,
+        completion_tokens: 18,
+        total_tokens: 30,
+        completion_tokens_details: {
+          accepted_prediction_tokens: 0,
+          audio_tokens: 0,
+          reasoning_tokens: 0,
+          rejected_prediction_tokens: 0,
+        },
+        prompt_tokens_details: {
+          audio_tokens: 0,
+          cached_tokens: 0,
+        },
+      },
+    };
+  }
+
+  // Create a mock streaming response for responses API
+  async *_createResponsesApiStream(params) {
+    // Response created event
+    yield {
+      type: 'response.created',
+      response: {
+        id: 'resp_stream_456',
+        object: 'response',
+        created_at: 1677652310,
+        model: params.model,
+        status: 'in_progress',
+        error: null,
+        incomplete_details: null,
+        instructions: params.instructions,
+        max_output_tokens: 1000,
+        parallel_tool_calls: false,
+        previous_response_id: null,
+        reasoning: {
+          effort: null,
+          summary: null,
+        },
+        store: false,
+        temperature: 0.7,
+        text: {
+          format: {
+            type: 'text',
+          },
+        },
+        tool_choice: 'auto',
+        top_p: 1.0,
+        truncation: 'disabled',
+        user: null,
+        metadata: {},
+        output: [],
+        output_text: '',
+        usage: {
+          input_tokens: 0,
+          output_tokens: 0,
+          total_tokens: 0,
+        },
+      },
+      sequence_number: 1,
+    };
+
+    // Response in progress with output text delta
+    yield {
+      type: 'response.output_text.delta',
+      delta: 'Streaming response to: ',
+      sequence_number: 2,
+    };
+
+    yield {
+      type: 'response.output_text.delta',
+      delta: params.input,
+      sequence_number: 3,
+    };
+
+    // Response completed event
+    yield {
+      type: 'response.completed',
+      response: {
+        id: 'resp_stream_456',
+        object: 'response',
+        created_at: 1677652310,
+        model: params.model,
+        status: 'completed',
+        error: null,
+        incomplete_details: null,
+        instructions: params.instructions,
+        max_output_tokens: 1000,
+        parallel_tool_calls: false,
+        previous_response_id: null,
+        reasoning: {
+          effort: null,
+          summary: null,
+        },
+        store: false,
+        temperature: 0.7,
+        text: {
+          format: {
+            type: 'text',
+          },
+        },
+        tool_choice: 'auto',
+        top_p: 1.0,
+        truncation: 'disabled',
+        user: null,
+        metadata: {},
+        output: [],
+        output_text: params.input,
+        usage: {
+          input_tokens: 6,
+          output_tokens: 10,
+          total_tokens: 16,
+        },
+      },
+      sequence_number: 4,
+    };
+  }
+}
+
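+// instrumentOpenAiClient wraps the mock client so that every call below emits
+// gen_ai.* spans, just as it would for a real OpenAI client instance.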
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    const mockClient = new MockOpenAI({
+      apiKey: 'mock-api-key',
+    });
+
+    const client = instrumentOpenAiClient(mockClient);
+
+    // First test: basic chat completion
+    await client.chat.completions.create({
+      model: 'gpt-3.5-turbo',
+      messages: [
+        { role: 'system', content: 'You are a helpful assistant.' },
+        { role: 'user', content: 'What is the capital of France?' },
+      ],
+      temperature: 0.7,
+      max_tokens: 100,
+    });
+
+    // Second test: responses API
+    await client.responses.create({
+      model: 'gpt-3.5-turbo',
+      input: 'Translate this to French: Hello',
+      instructions: 'You are a translator',
+    });
+
+    // Third test: error handling in chat completions
+    try {
+      await client.chat.completions.create({
+        model: 'error-model',
+        messages: [{ role: 'user', content: 'This will fail' }],
+      });
+    } catch {
+      // Error is expected and handled
+    }
+
+    // Fourth test: chat completions streaming
+    const stream1 = await client.chat.completions.create({
+      model: 'gpt-4',
+      messages: [
+        { role: 'system', content: 'You are a helpful assistant.' },
+        { role: 'user', content: 'Tell me about streaming' },
+      ],
+      stream: true,
+      temperature: 0.8,
+    });
+
+    // Consume the stream to trigger span instrumentation
+    for await (const chunk of stream1) {
+      // Stream chunks are processed automatically by instrumentation
+      void chunk; // Prevent unused variable warning
+    }
+
+    // Fifth test: responses API streaming
+    const stream2 = await client.responses.create({
+      model: 'gpt-4',
+      input: 'Test streaming responses API',
+      instructions: 'You are a streaming assistant',
+      stream: true,
+    });
+
+    for await (const chunk of stream2) {
+      void chunk;
+    }
+
+    // Sixth test: error handling in streaming context
+    try {
+      const errorStream = await client.chat.completions.create({
+        model: 'error-model',
+        messages: [{ role: 'user', content: 'This will fail' }],
+        stream: true,
+      });
+
+      // Try to consume the stream (this should not execute)
+      for await (const chunk of errorStream) {
+        void chunk;
+      }
+    } catch {
+      // Error is expected and handled
+    }
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
new file mode 100644
index 000000000000..9cdb24a42da9
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
@@ -0,0 +1,67 @@
+import { instrumentOpenAiClient } from '@sentry/core';
+import * as Sentry from '@sentry/node';
+
+class MockOpenAI {
+  constructor(config) {
+    this.apiKey = config.apiKey;
+
+    this.embeddings = {
+      create: async params => {
+        await new Promise(resolve => setTimeout(resolve, 10));
+
+        if (params.model === 'error-model') {
+          const error = new Error('Model not found');
+          error.status = 404;
+          error.headers = { 'x-request-id': 'mock-request-123' };
+          throw error;
+        }
+
+        return {
+          object: 'list',
+          data: [
+            {
+              object: 'embedding',
+              embedding: [0.1, 0.2, 0.3],
+              index: 0,
+            },
+          ],
+          model: params.model,
+          usage: {
+            prompt_tokens: 10,
+            total_tokens: 10,
+          },
+        };
+      },
+    };
+  }
+}
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    const mockClient = new MockOpenAI({
+      apiKey: 'mock-api-key',
+    });
+
+    const client = instrumentOpenAiClient(mockClient);
+
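+    // The two calls below exercise the success and the error path; each yields
+    // one span asserted by the embeddings expectations in test.ts.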
+    // First test: embeddings API
+    await client.embeddings.create({
+      input: 'Embedding test!',
+      model: 'text-embedding-3-small',
+      dimensions: 1536,
+      encoding_format: 'float',
+    });
+
+    // Second test: embeddings API error model
+    try {
+      await client.embeddings.create({
+        input: 'Error embedding test!',
+        model: 'error-model',
+      });
+    } catch {
+      // Error is expected and handled
+    }
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-root-span.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-root-span.mjs
new file mode 100644
index 000000000000..2aaca0700312
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-root-span.mjs
@@ -0,0 +1,63 @@
+import express from 'express';
+import OpenAI from 'openai';
+
+function startMockOpenAiServer() {
+  const app = express();
+  app.use(express.json());
+
+  app.post('/openai/chat/completions', (req, res) => {
+    res.send({
+      id: 'chatcmpl-mock123',
+      object: 'chat.completion',
+      created: 1677652288,
+      model: req.body.model,
+      system_fingerprint: 'fp_44709d6fcb',
+      choices: [
+        {
+          index: 0,
+          message: {
+            role: 'assistant',
+            content: 'Hello from OpenAI mock!',
+          },
+          finish_reason: 'stop',
+        },
+      ],
+      usage: {
+        prompt_tokens: 10,
+        completion_tokens: 15,
+        total_tokens: 25,
+      },
+    });
+  });
+  return new Promise(resolve => {
+    const server = app.listen(0, () => {
+      resolve(server);
+    });
+  });
+}
+
+async function run() {
+  const server = await startMockOpenAiServer();
+
+  const client = new OpenAI({
+    baseURL: `http://localhost:${server.address().port}/openai`,
+    apiKey: 'mock-api-key',
+  });
+
+  const response = await client.chat.completions.create({
+    model: 'gpt-3.5-turbo',
+    messages: [
+      { role: 'system', content: 'You are a helpful assistant.' },
+      { role: 'user', content: 'What is the capital of France?' },
+    ],
+    temperature: 0.7,
+    max_tokens: 100,
+  });
+
+  // eslint-disable-next-line no-console
+  console.log(JSON.stringify(response));
+
+  server.close();
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
new file mode 100644
index 000000000000..053f3066a1b0
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
@@ -0,0 +1,565 @@
+import { afterAll, describe, expect } from 'vitest';
+import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner';
+
+describe('OpenAI integration (V6)', () => {
+  afterAll(() => {
+    cleanupChildProcesses();
+  });
+
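+  // Each EXPECTED_* constant below describes, in call order, the spans one
+  // scenario run should emit; expect.arrayContaining matches them in the transaction.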
+  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_CHAT = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // First span - basic chat completion without PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.request.temperature': 0.7,
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'chatcmpl-mock123',
+          'gen_ai.response.finish_reasons': '["stop"]',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+          'openai.response.id': 'chatcmpl-mock123',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
+          'openai.usage.completion_tokens': 15,
+          'openai.usage.prompt_tokens': 10,
+        },
+        description: 'chat gpt-3.5-turbo',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Second span - responses API
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'responses',
+          'sentry.op': 'gen_ai.responses',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'resp_mock456',
+          'gen_ai.response.finish_reasons': '["completed"]',
+          'gen_ai.usage.input_tokens': 5,
+          'gen_ai.usage.output_tokens': 8,
+          'gen_ai.usage.total_tokens': 13,
+          'openai.response.id': 'resp_mock456',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.response.timestamp': '2023-03-01T06:31:30.000Z',
+          'openai.usage.completion_tokens': 8,
+          'openai.usage.prompt_tokens': 5,
+        },
+        description: 'responses gpt-3.5-turbo',
+        op: 'gen_ai.responses',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Third span - error handling
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'error-model',
+        },
+        description: 'chat error-model',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'internal_error',
+      }),
+      // Fourth span - chat completions streaming
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-4',
+          'gen_ai.request.temperature': 0.8,
+          'gen_ai.request.stream': true,
+          'gen_ai.response.model': 'gpt-4',
+          'gen_ai.response.id': 'chatcmpl-stream-123',
+          'gen_ai.response.finish_reasons': '["stop"]',
+          'gen_ai.usage.input_tokens': 12,
+          'gen_ai.usage.output_tokens': 18,
+          'gen_ai.usage.total_tokens': 30,
+          'openai.response.id': 'chatcmpl-stream-123',
+          'openai.response.model': 'gpt-4',
+          'gen_ai.response.streaming': true,
+          'openai.response.timestamp': '2023-03-01T06:31:40.000Z',
+          'openai.usage.completion_tokens': 18,
+          'openai.usage.prompt_tokens': 12,
+        },
+        description: 'chat gpt-4 stream-response',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Fifth span - responses API streaming
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'responses',
+          'sentry.op': 'gen_ai.responses',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-4',
+          'gen_ai.request.stream': true,
+          'gen_ai.response.model': 'gpt-4',
+          'gen_ai.response.id': 'resp_stream_456',
+          'gen_ai.response.finish_reasons': '["in_progress","completed"]',
+          'gen_ai.usage.input_tokens': 6,
+          'gen_ai.usage.output_tokens': 10,
+          'gen_ai.usage.total_tokens': 16,
+          'openai.response.id': 'resp_stream_456',
+          'openai.response.model': 'gpt-4',
+          'gen_ai.response.streaming': true,
+          'openai.response.timestamp': '2023-03-01T06:31:50.000Z',
+          'openai.usage.completion_tokens': 10,
+          'openai.usage.prompt_tokens': 6,
+        },
+        description: 'responses gpt-4 stream-response',
+        op: 'gen_ai.responses',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Sixth span - error handling in streaming context
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'gen_ai.request.model': 'error-model',
+          'gen_ai.request.stream': true,
+          'gen_ai.system': 'openai',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+        },
+        description: 'chat error-model stream-response',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'internal_error',
+      }),
+    ]),
+  };
+
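+  // With sendDefaultPii: true the same six spans additionally carry the
+  // 'gen_ai.request.messages' and 'gen_ai.response.text' attributes.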
+  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_CHAT = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // First span - basic chat completion with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.request.temperature': 0.7,
+          'gen_ai.request.messages':
+            '[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is the capital of France?"}]',
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'chatcmpl-mock123',
+          'gen_ai.response.finish_reasons': '["stop"]',
+          'gen_ai.response.text': '["Hello from OpenAI mock!"]',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 15,
+          'gen_ai.usage.total_tokens': 25,
+          'openai.response.id': 'chatcmpl-mock123',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
+          'openai.usage.completion_tokens': 15,
+          'openai.usage.prompt_tokens': 10,
+        },
+        description: 'chat gpt-3.5-turbo',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Second span - responses API with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'responses',
+          'sentry.op': 'gen_ai.responses',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-3.5-turbo',
+          'gen_ai.request.messages': 'Translate this to French: Hello',
+          'gen_ai.response.text': 'Response to: Translate this to French: Hello',
+          'gen_ai.response.finish_reasons': '["completed"]',
+          'gen_ai.response.model': 'gpt-3.5-turbo',
+          'gen_ai.response.id': 'resp_mock456',
+          'gen_ai.usage.input_tokens': 5,
+          'gen_ai.usage.output_tokens': 8,
+          'gen_ai.usage.total_tokens': 13,
+          'openai.response.id': 'resp_mock456',
+          'openai.response.model': 'gpt-3.5-turbo',
+          'openai.response.timestamp': '2023-03-01T06:31:30.000Z',
+          'openai.usage.completion_tokens': 8,
+          'openai.usage.prompt_tokens': 5,
+        },
+        description: 'responses gpt-3.5-turbo',
+        op: 'gen_ai.responses',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Third span - error handling with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'error-model',
+          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
+        },
+        description: 'chat error-model',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'internal_error',
+      }),
+      // Fourth span - chat completions streaming with PII
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'chat',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-4',
+          'gen_ai.request.temperature': 0.8,
+          'gen_ai.request.stream': true,
+          'gen_ai.request.messages':
+            '[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"Tell me about streaming"}]',
+          'gen_ai.response.text': 'Hello from OpenAI streaming!',
+          'gen_ai.response.finish_reasons': '["stop"]',
+          'gen_ai.response.id': 'chatcmpl-stream-123',
+          'gen_ai.response.model': 'gpt-4',
+          'gen_ai.usage.input_tokens': 12,
+          'gen_ai.usage.output_tokens': 18,
+          'gen_ai.usage.total_tokens': 30,
+          'openai.response.id': 'chatcmpl-stream-123',
+          'openai.response.model': 'gpt-4',
+          'gen_ai.response.streaming': true,
+          'openai.response.timestamp': '2023-03-01T06:31:40.000Z',
+          'openai.usage.completion_tokens': 18,
+          'openai.usage.prompt_tokens': 12,
+        }),
+        description: 'chat gpt-4 stream-response',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Fifth span - responses API streaming with PII
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.operation.name': 'responses',
+          'sentry.op': 'gen_ai.responses',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'gpt-4',
+          'gen_ai.request.stream': true,
+          'gen_ai.request.messages': 'Test streaming responses API',
+          'gen_ai.response.text': 'Streaming response to: Test streaming responses APITest streaming responses API',
+          'gen_ai.response.finish_reasons': '["in_progress","completed"]',
+          'gen_ai.response.id': 'resp_stream_456',
+          'gen_ai.response.model': 'gpt-4',
+          'gen_ai.usage.input_tokens': 6,
+          'gen_ai.usage.output_tokens': 10,
+          'gen_ai.usage.total_tokens': 16,
+          'openai.response.id': 'resp_stream_456',
+          'openai.response.model': 'gpt-4',
+          'gen_ai.response.streaming': true,
+          'openai.response.timestamp': '2023-03-01T06:31:50.000Z',
+          'openai.usage.completion_tokens': 10,
+          'openai.usage.prompt_tokens': 6,
+        }),
+        description: 'responses gpt-4 stream-response',
+        op: 'gen_ai.responses',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Sixth span - error handling in streaming context with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'chat',
+          'gen_ai.request.model': 'error-model',
+          'gen_ai.request.stream': true,
+          'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
+          'gen_ai.system': 'openai',
+          'sentry.op': 'gen_ai.chat',
+          'sentry.origin': 'auto.ai.openai',
+        },
+        description: 'chat error-model stream-response',
+        op: 'gen_ai.chat',
+        origin: 'auto.ai.openai',
+        status: 'internal_error',
+      }),
+    ]),
+  };
+
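+  // recordInputs / recordOutputs opt into capturing messages and response text
+  // even though instrument-with-options.mjs keeps sendDefaultPii: false.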
+  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // Check that custom options are respected
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
+          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+        }),
+      }),
+      // Check that custom options are respected for streaming
+      expect.objectContaining({
+        data: expect.objectContaining({
+          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
+          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
+          'gen_ai.request.stream': true, // Should be marked as stream
+        }),
+      }),
+    ]),
+  };
+
+  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // First span - embeddings API
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'embeddings',
+          'sentry.op': 'gen_ai.embeddings',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'text-embedding-3-small',
+          'gen_ai.request.encoding_format': 'float',
+          'gen_ai.request.dimensions': 1536,
+          'gen_ai.response.model': 'text-embedding-3-small',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.total_tokens': 10,
+          'openai.response.model': 'text-embedding-3-small',
+          'openai.usage.prompt_tokens': 10,
+        },
+        description: 'embeddings text-embedding-3-small',
+        op: 'gen_ai.embeddings',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Second span - embeddings API error model
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'embeddings',
+          'sentry.op': 'gen_ai.embeddings',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'error-model',
+        },
+        description: 'embeddings error-model',
+        op: 'gen_ai.embeddings',
+        origin: 'auto.ai.openai',
+        status: 'internal_error',
+      }),
+    ]),
+  };
+
+  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS = {
+    transaction: 'main',
+    spans: expect.arrayContaining([
+      // First span - embeddings API with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'embeddings',
+          'sentry.op': 'gen_ai.embeddings',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'text-embedding-3-small',
+          'gen_ai.request.encoding_format': 'float',
+          'gen_ai.request.dimensions': 1536,
+          'gen_ai.request.messages': 'Embedding test!',
+          'gen_ai.response.model': 'text-embedding-3-small',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.total_tokens': 10,
+          'openai.response.model': 'text-embedding-3-small',
+          'openai.usage.prompt_tokens': 10,
+        },
+        description: 'embeddings text-embedding-3-small',
+        op: 'gen_ai.embeddings',
+        origin: 'auto.ai.openai',
+        status: 'ok',
+      }),
+      // Second span - embeddings API error model with PII
+      expect.objectContaining({
+        data: {
+          'gen_ai.operation.name': 'embeddings',
+          'sentry.op': 'gen_ai.embeddings',
+          'sentry.origin': 'auto.ai.openai',
+          'gen_ai.system': 'openai',
+          'gen_ai.request.model': 'error-model',
+          'gen_ai.request.messages': 'Error embedding test!',
+        },
+        description: 'embeddings error-model',
+        op: 'gen_ai.embeddings',
+        origin: 'auto.ai.openai',
+        status: 'internal_error',
+      }),
+    ]),
+  };
+
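+  // Every runner below pins openai@6.0.0 so the scenarios run against the v6 major
+  // that the widened version range in instrumentation.ts now covers.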
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-chat.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('creates openai related spans with sendDefaultPii: false (v6)', async () => {
+        await createRunner()
+          .ignore('event')
+          .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_CHAT })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        openai: '6.0.0',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-chat.mjs',
+    'instrument-with-pii.mjs',
+    (createRunner, test) => {
+      test('creates openai related spans with sendDefaultPii: true (v6)', async () => {
+        await createRunner()
+          .ignore('event')
+          .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_CHAT })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        openai: '6.0.0',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-chat.mjs',
+    'instrument-with-options.mjs',
+    (createRunner, test) => {
+      test('creates openai related spans with custom options (v6)', async () => {
+        await createRunner()
+          .ignore('event')
+          .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        openai: '6.0.0',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-embeddings.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('creates openai related spans with sendDefaultPii: false (v6)', async () => {
+        await createRunner()
+          .ignore('event')
+          .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        openai: '6.0.0',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-embeddings.mjs',
+    'instrument-with-pii.mjs',
+    (createRunner, test) => {
+      test('creates openai related spans with sendDefaultPii: true (v6)', async () => {
+        await createRunner()
+          .ignore('event')
+          .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        openai: '6.0.0',
+      },
+    },
+  );
+
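+  // Unlike the mock-client scenarios above, this test uses the real `openai` package
+  // against a local Express server and asserts that spans are created without a
+  // wrapping span.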
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-root-span.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('it works without a wrapping span (v6)', async () => {
+        await createRunner()
+          // First the span that our mock express server is emitting, unrelated to this test
+          .expect({
+            transaction: {
+              transaction: 'POST /openai/chat/completions',
+            },
+          })
+          .expect({
+            transaction: {
+              transaction: 'chat gpt-3.5-turbo',
+              contexts: {
+                trace: {
+                  span_id: expect.any(String),
+                  trace_id: expect.any(String),
+                  data: {
+                    'gen_ai.operation.name': 'chat',
+                    'sentry.op': 'gen_ai.chat',
+                    'sentry.origin': 'auto.ai.openai',
+                    'gen_ai.system': 'openai',
+                    'gen_ai.request.model': 'gpt-3.5-turbo',
+                    'gen_ai.request.temperature': 0.7,
+                    'gen_ai.response.model': 'gpt-3.5-turbo',
+                    'gen_ai.response.id': 'chatcmpl-mock123',
+                    'gen_ai.response.finish_reasons': '["stop"]',
+                    'gen_ai.usage.input_tokens': 10,
+                    'gen_ai.usage.output_tokens': 15,
+                    'gen_ai.usage.total_tokens': 25,
+                    'openai.response.id': 'chatcmpl-mock123',
+                    'openai.response.model': 'gpt-3.5-turbo',
+                    'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
+                    'openai.usage.completion_tokens': 15,
+                    'openai.usage.prompt_tokens': 10,
+                  },
+                  op: 'gen_ai.chat',
+                  origin: 'auto.ai.openai',
+                  status: 'ok',
+                },
+              },
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        openai: '6.0.0',
+        express: 'latest',
+      },
+    },
+  );
+});
diff --git a/packages/node/src/integrations/tracing/openai/instrumentation.ts b/packages/node/src/integrations/tracing/openai/instrumentation.ts
index d71c548395b0..e0682185ff0a 100644
--- a/packages/node/src/integrations/tracing/openai/instrumentation.ts
+++ b/packages/node/src/integrations/tracing/openai/instrumentation.ts
@@ -13,7 +13,7 @@ import {
   SDK_VERSION,
 } from '@sentry/core';
 
-const supportedVersions = ['>=4.0.0 <6'];
+const supportedVersions = ['>=4.0.0 <7'];
 
 export interface OpenAiIntegration extends Integration {
   options: OpenAiOptions;