From c0a0f25d64141fd6e1ad0298250fd7727b1e2709 Mon Sep 17 00:00:00 2001
From: RulaKhaled
Date: Tue, 2 Dec 2025 13:14:31 +0100
Subject: [PATCH 1/3] refactor

---
 packages/core/src/tracing/openai/index.ts     | 43 ++++++++-----------
 .../src/integrations/tracing/openai/index.ts  |  7 ++-
 .../tracing/openai/instrumentation.ts         | 25 ++++-------
 3 files changed, 29 insertions(+), 46 deletions(-)

diff --git a/packages/core/src/tracing/openai/index.ts b/packages/core/src/tracing/openai/index.ts
index bba2ee0f5afd..c68e920daf2b 100644
--- a/packages/core/src/tracing/openai/index.ts
+++ b/packages/core/src/tracing/openai/index.ts
@@ -1,4 +1,4 @@
-import { getCurrentScope } from '../../currentScopes';
+import { getClient } from '../../currentScopes';
 import { captureException } from '../../exports';
 import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes';
 import { SPAN_STATUS_ERROR } from '../../tracing';
@@ -20,12 +20,10 @@ import {
   GEN_AI_SYSTEM_ATTRIBUTE,
 } from '../ai/gen-ai-attributes';
 import { getTruncatedJsonString } from '../ai/utils';
-import { OPENAI_INTEGRATION_NAME } from './constants';
 import { instrumentStream } from './streaming';
 import type {
   ChatCompletionChunk,
   InstrumentedMethod,
-  OpenAiIntegration,
   OpenAiOptions,
   OpenAiResponse,
   OpenAIStream,
@@ -128,18 +126,6 @@ function addRequestAttributes(span: Span, params: Record<string, unknown>): void
   }
 }
 
-function getOptionsFromIntegration(): OpenAiOptions {
-  const scope = getCurrentScope();
-  const client = scope.getClient();
-  const integration = client?.getIntegrationByName<OpenAiIntegration>(OPENAI_INTEGRATION_NAME);
-  const shouldRecordInputsAndOutputs = integration ? Boolean(client?.getOptions().sendDefaultPii) : false;
-
-  return {
-    recordInputs: integration?.options?.recordInputs ?? shouldRecordInputsAndOutputs,
-    recordOutputs: integration?.options?.recordOutputs ?? shouldRecordInputsAndOutputs,
-  };
-}
-
 /**
  * Instrument a method with Sentry spans
  * Following Sentry AI Agents Manual Instrumentation conventions
@@ -149,10 +135,9 @@ function instrumentMethod<T extends unknown[], R>(
   originalMethod: (...args: T) => Promise<R>,
   methodPath: InstrumentedMethod,
   context: unknown,
-  options?: OpenAiOptions,
+  options: OpenAiOptions,
 ): (...args: T) => Promise<R> {
   return async function instrumentedMethod(...args: T): Promise<R> {
-    const finalOptions = options || getOptionsFromIntegration();
     const requestAttributes = extractRequestAttributes(args, methodPath);
     const model = (requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] as string) || 'unknown';
     const operationName = getOperationName(methodPath);
@@ -170,8 +155,8 @@
         },
         async (span: Span) => {
           try {
-            if (finalOptions.recordInputs && args[0] && typeof args[0] === 'object') {
-              addRequestAttributes(span, args[0] as Record<string, unknown>);
+            if (options.recordInputs && params) {
+              addRequestAttributes(span, params);
             }
 
             const result = await originalMethod.apply(context, args);
             return instrumentStream(
               result as OpenAIStream,
               span,
-              finalOptions.recordOutputs ?? false,
+              options.recordOutputs ?? false,
             ) as unknown as R;
           } catch (error) {
             // For streaming requests that fail before stream creation, we still want to record
@@ -209,12 +194,12 @@
       },
       async (span: Span) => {
         try {
-          if (finalOptions.recordInputs && args[0] && typeof args[0] === 'object') {
-            addRequestAttributes(span, args[0] as Record<string, unknown>);
+          if (options.recordInputs && params) {
+            addRequestAttributes(span, params);
           }
 
           const result = await originalMethod.apply(context, args);
-          addResponseAttributes(span, result, finalOptions.recordOutputs);
+          addResponseAttributes(span, result, options.recordOutputs);
           return result;
         } catch (error) {
           captureException(error, {
@@ -237,7 +222,7 @@
 /**
  * Create a deep proxy for OpenAI client instrumentation
 */
-function createDeepProxy<T extends object>(target: T, currentPath = '', options?: OpenAiOptions): T {
+function createDeepProxy<T extends object>(target: T, currentPath = '', options: OpenAiOptions): T {
   return new Proxy(target, {
     get(obj: object, prop: string): unknown {
       const value = (obj as Record<string, unknown>)[prop];
@@ -267,5 +252,13 @@
  * Can be used across Node.js, Cloudflare Workers, and Vercel Edge
 */
 export function instrumentOpenAiClient<T extends object>(client: T, options?: OpenAiOptions): T {
-  return createDeepProxy(client, '', options);
+  const sendDefaultPii = Boolean(getClient()?.getOptions().sendDefaultPii);
+
+  const _options = {
+    recordInputs: sendDefaultPii,
+    recordOutputs: sendDefaultPii,
+    ...options,
+  };
+
+  return createDeepProxy(client, '', _options);
 }
diff --git a/packages/node/src/integrations/tracing/openai/index.ts b/packages/node/src/integrations/tracing/openai/index.ts
index 0e88d2b315cc..e7f4171f87fa 100644
--- a/packages/node/src/integrations/tracing/openai/index.ts
+++ b/packages/node/src/integrations/tracing/openai/index.ts
@@ -3,17 +3,16 @@ import { defineIntegration, OPENAI_INTEGRATION_NAME } from '@sentry/core';
 import { generateInstrumentOnce } from '@sentry/node-core';
 import { SentryOpenAiInstrumentation } from './instrumentation';
 
-export const instrumentOpenAi = generateInstrumentOnce(
+export const instrumentOpenAi = generateInstrumentOnce<OpenAiOptions>(
   OPENAI_INTEGRATION_NAME,
-  () => new SentryOpenAiInstrumentation({}),
+  options => new SentryOpenAiInstrumentation(options),
 );
 
 const _openAiIntegration = ((options: OpenAiOptions = {}) => {
   return {
     name: OPENAI_INTEGRATION_NAME,
-    options,
     setupOnce() {
-      instrumentOpenAi();
+      instrumentOpenAi(options);
     },
   };
 }) satisfies IntegrationFn;
diff --git a/packages/node/src/integrations/tracing/openai/instrumentation.ts b/packages/node/src/integrations/tracing/openai/instrumentation.ts
index b1a577f9a5f4..d88c0a8802fd 100644
--- a/packages/node/src/integrations/tracing/openai/instrumentation.ts
+++ b/packages/node/src/integrations/tracing/openai/instrumentation.ts
@@ -19,6 +19,8 @@ export interface OpenAiIntegration extends Integration {
   options: OpenAiOptions;
 }
 
+type OpenAiInstrumentationOptions = InstrumentationConfig & OpenAiOptions;
+
 /**
  * Represents the patched shape of the OpenAI module export.
  */
@@ -28,23 +30,11 @@ interface PatchedModuleExports {
   AzureOpenAI?: abstract new (...args: unknown[]) => OpenAiClient;
 }
 
-/**
- * Determines telemetry recording settings.
- */
-function determineRecordingSettings(
-  integrationOptions: OpenAiOptions | undefined,
-  defaultEnabled: boolean,
-): { recordInputs: boolean; recordOutputs: boolean } {
-  const recordInputs = integrationOptions?.recordInputs ?? defaultEnabled;
-  const recordOutputs = integrationOptions?.recordOutputs ?? defaultEnabled;
-  return { recordInputs, recordOutputs };
-}
-
 /**
  * Sentry OpenAI instrumentation using OpenTelemetry.
  */
-export class SentryOpenAiInstrumentation extends InstrumentationBase {
-  public constructor(config: InstrumentationConfig = {}) {
+export class SentryOpenAiInstrumentation extends InstrumentationBase<OpenAiInstrumentationOptions> {
+  public constructor(config: OpenAiInstrumentationOptions = {}) {
     super('@sentry/instrumentation-openai', SDK_VERSION, config);
   }
@@ -75,6 +65,8 @@ export class SentryOpenAiInstrumentation extends InstrumentationBase<OpenAiInstrumentationOptions> {
-    const integration = client?.getIntegrationByName<OpenAiIntegration>(OPENAI_INTEGRATION_NAME);
-    const integrationOpts = integration?.options;
     const defaultPii = Boolean(client?.getOptions().sendDefaultPii);
 
-    const { recordInputs, recordOutputs } = determineRecordingSettings(integrationOpts, defaultPii);
+    const recordInputs = config.recordInputs ?? defaultPii;
+    const recordOutputs = config.recordOutputs ?? defaultPii;
 
     return instrumentOpenAiClient(instance as OpenAiClient, {
       recordInputs,
       recordOutputs,
     });

From dcbcee520915723d8c365d9b3b51bbfe2db74664 Mon Sep 17 00:00:00 2001
From: RulaKhaled
Date: Tue, 2 Dec 2025 16:56:30 +0100
Subject: [PATCH 2/3] fix tests

---
 .../tracing/openai/instrument-root-span.mjs   |  11 +
 .../openai/instrument-with-options.mjs        |   6 +
 .../tracing/openai/instrument-with-pii.mjs    |   6 +
 .../suites/tracing/openai/instrument.mjs      |   6 +
 .../suites/tracing/openai/scenario-chat.mjs   | 380 ++++++++----------
 .../tracing/openai/scenario-embeddings.mjs    |  74 ++--
 .../suites/tracing/openai/test.ts             |   2 +-
 .../openai/v6/instrument-root-span.mjs        |  11 +
 .../openai/v6/instrument-with-options.mjs     |   7 +
 .../tracing/openai/v6/instrument-with-pii.mjs |   6 +
 .../suites/tracing/openai/v6/instrument.mjs   |   6 +
 .../tracing/openai/v6/scenario-chat.mjs       | 380 ++++++++----------
 .../tracing/openai/v6/scenario-embeddings.mjs |  74 ++--
 .../suites/tracing/openai/v6/test.ts          |   2 +-
 14 files changed, 483 insertions(+), 488 deletions(-)
 create mode 100644 dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs
 create mode 100644 dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs

diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs
new file mode 100644
index 000000000000..f3fbac9d1274
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs
@@ -0,0 +1,11 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: false,
+  transport: loggingTransport,
+  integrations: [Sentry.openAIIntegration()],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
index 35f97fd84093..86219de9983a 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
@@ -13,4 +13,10 @@
       recordOutputs: true,
     }),
   ],
+  beforeSendTransaction: event => {
+    if (event.transaction.includes('/openai/')) {
+      return null;
+    }
+    return event;
+  },
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
index a53a13af7738..74bc63db971b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
@@ -8,4 +8,10 @@ Sentry.init({
   sendDefaultPii: true,
   transport: loggingTransport,
   integrations: [Sentry.openAIIntegration()],
+  beforeSendTransaction: event => {
+    if (event.transaction.includes('/openai/')) {
+      return null;
+    }
+    return event;
+  },
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
index f3fbac9d1274..1ff3990a0693 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
@@ -8,4 +8,10 @@ Sentry.init({
   sendDefaultPii: false,
   transport: loggingTransport,
   integrations: [Sentry.openAIIntegration()],
+  beforeSendTransaction: event => {
+    if (event.transaction.includes('/openai/')) {
+      return null;
+    }
+    return event;
+  },
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/scenario-chat.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/scenario-chat.mjs
index fde651c3c1ff..6031b6861f5b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/scenario-chat.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/scenario-chat.mjs
@@ -1,245 +1,203 @@
-import { instrumentOpenAiClient } from '@sentry/core';
 import * as Sentry from '@sentry/node';
+import express from 'express';
+import OpenAI from 'openai';
 
-class MockOpenAI {
-  constructor(config) {
-    this.apiKey = config.apiKey;
+function startMockServer() {
+  const app = express();
+  app.use(express.json());
 
-    this.chat = {
-      completions: {
-        create: async params => {
-          // Simulate processing time
-          await new Promise(resolve => setTimeout(resolve, 10));
+  // Chat completions endpoint
+  app.post('/openai/chat/completions', (req, res) => {
+    const { model, stream } = req.body;
 
-          if (params.model === 'error-model') {
-            const error = new Error('Model not found');
-            error.status = 404;
-            error.headers = { 'x-request-id': 'mock-request-123' };
-            throw error;
-          }
+    // Handle error model
+    if (model === 'error-model') {
+      res.status(500).set('x-request-id', 'mock-request-error').end('Internal server error');
+      return;
+    }
 
-          // If stream is requested, return an async generator
-          if (params.stream) {
-            return this._createChatCompletionStream(params);
-          }
+    if (stream) {
+      // Streaming response
+      res.setHeader('Content-Type', 'text/event-stream');
+      res.setHeader('Cache-Control', 'no-cache');
+      res.setHeader('Connection', 'keep-alive');
 
-          return {
-            id: 'chatcmpl-mock123',
-            object: 'chat.completion',
-            created: 1677652288,
-            model: params.model,
-            system_fingerprint: 'fp_44709d6fcb',
-            choices: [
-              {
-                index: 0,
-                message: {
-                  role: 'assistant',
-                  content: 'Hello from OpenAI mock!',
-                },
-                finish_reason: 'stop',
-              },
-            ],
-            usage: {
-              prompt_tokens: 10,
-              completion_tokens: 15,
-              total_tokens: 25,
+      const chunks = [
+        {
+          id: 'chatcmpl-stream-123',
+          object: 'chat.completion.chunk',
+          created: 1677652300,
+          model: model,
+          choices: [{ delta: { role: 'assistant', content: '' }, index: 0 }],
+        },
+        {
+          id: 'chatcmpl-stream-123',
+          object: 'chat.completion.chunk',
+          created: 1677652300,
+          model: model,
+          choices: [{ delta: { content: 'Hello from OpenAI streaming!' }, index: 0 }],
+        },
+        {
+          id: 'chatcmpl-stream-123',
+          object: 'chat.completion.chunk',
+          created: 1677652300,
+          model: model,
+          choices: [{ delta: {}, index: 0, finish_reason: 'stop' }],
+          usage: {
+            prompt_tokens: 12,
+            completion_tokens: 18,
+            total_tokens: 30,
+          },
+        },
+      ];
+
+      chunks.forEach((chunk, index) => {
+        setTimeout(() => {
+          res.write(`data: ${JSON.stringify(chunk)}\n\n`);
+          if (index === chunks.length - 1) {
+            res.write('data: [DONE]\n\n');
+            res.end();
+          }
+        }, index * 10);
+      });
+    } else {
+      // Non-streaming response
+      res.send({
+        id: 'chatcmpl-mock123',
+        object: 'chat.completion',
+        created: 1677652288,
+        model: model,
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: 'assistant',
+              content: 'Hello from OpenAI mock!',
             },
-          };
+            finish_reason: 'stop',
           },
-        },
-      };
+        ],
+        usage: {
+          prompt_tokens: 10,
+          completion_tokens: 15,
+          total_tokens: 25,
         },
-      },
-    };
+      });
+    }
+  });
 
-    this.responses = {
-      create: async params => {
-        await new Promise(resolve => setTimeout(resolve, 10));
+  // Responses API endpoint
+  app.post('/openai/responses', (req, res) => {
+    const { model, stream } = req.body;
 
-        // If stream is requested, return an async generator
-        if (params.stream) {
-          return this._createResponsesApiStream(params);
-        }
+    // Handle error model
+    if (model === 'error-model') {
+      res.status(500).set('x-request-id', 'mock-request-error').end('Internal server error');
+      return;
+    }
 
-        return {
-          id: 'resp_mock456',
-          object: 'response',
-          created_at: 1677652290,
-          model: params.model,
-          input_text: params.input,
-          output_text: `Response to: ${params.input}`,
-          status: 'completed',
-          usage: {
-            input_tokens: 5,
-            output_tokens: 8,
-            total_tokens: 13,
-          },
-        };
-      },
-    };
-  }
+    if (stream) {
+      // Streaming response - using event-based format with 'response' field
+      res.setHeader('Content-Type', 'text/event-stream');
+      res.setHeader('Cache-Control', 'no-cache');
+      res.setHeader('Connection', 'keep-alive');
 
-  // Create a mock streaming response for chat completions
-  async *_createChatCompletionStream(params) {
-    // First chunk with basic info
-    yield {
-      id: 'chatcmpl-stream-123',
-      object: 'chat.completion.chunk',
-      created: 1677652300,
-      model: params.model,
-      system_fingerprint: 'fp_stream_123',
-      choices: [
+      const events = [
         {
-          index: 0,
-          delta: {
-            role: 'assistant',
-            content: 'Hello',
+          type: 'response.created',
+          response: {
+            id: 'resp_stream_456',
+            object: 'response',
+            created_at: 1677652310,
+            model: model,
+            status: 'in_progress',
           },
-          finish_reason: null,
         },
-      ],
-    };
-
-    // Second chunk with more content
-    yield {
-      id: 'chatcmpl-stream-123',
-      object: 'chat.completion.chunk',
-      created: 1677652300,
-      model: params.model,
-      system_fingerprint: 'fp_stream_123',
-      choices: [
         {
-          index: 0,
-          delta: {
-            content: ' from OpenAI streaming!',
+          type: 'response.output_text.delta',
+          delta: 'Streaming response to: Test streaming responses API',
+          response: {
+            id: 'resp_stream_456',
+            model: model,
+            created_at: 1677652310,
           },
-          finish_reason: 'stop',
-        },
-      ],
-      usage: {
-        prompt_tokens: 12,
-        completion_tokens: 18,
-        total_tokens: 30,
-        completion_tokens_details: {
-          accepted_prediction_tokens: 0,
-          audio_tokens: 0,
-          reasoning_tokens: 0,
-          rejected_prediction_tokens: 0,
         },
-        prompt_tokens_details: {
-          audio_tokens: 0,
-          cached_tokens: 0,
-        },
-      },
-    };
-  }
-
-  // Create a mock streaming response for responses API
-  async *_createResponsesApiStream(params) {
-    // Response created event
-    yield {
-      type: 'response.created',
-      response: {
-        id: 'resp_stream_456',
-        object: 'response',
-        created_at: 1677652310,
-        model: params.model,
-        status: 'in_progress',
-        error: null,
-        incomplete_details: null,
-        instructions: params.instructions,
-        max_output_tokens: 1000,
-        parallel_tool_calls: false,
-        previous_response_id: null,
-        reasoning: {
-          effort: null,
-          summary: null,
-        },
-        store: false,
-        temperature: 0.7,
-        text: {
-          format: {
-            type: 'text',
+        {
+          type: 'response.completed',
+          response: {
+            id: 'resp_stream_456',
+            object: 'response',
+            created_at: 1677652310,
+            model: model,
+            status: 'completed',
+            output_text: 'Test streaming responses API',
+            usage: {
+              input_tokens: 6,
+              output_tokens: 10,
+              total_tokens: 16,
+            },
           },
         },
-        tool_choice: 'auto',
-        top_p: 1.0,
-        truncation: 'disabled',
-        user: null,
-        metadata: {},
-        output: [],
-        output_text: '',
-        usage: {
-          input_tokens: 0,
-          output_tokens: 0,
-          total_tokens: 0,
-        },
-      },
-      sequence_number: 1,
-    };
-
-    // Response in progress with output text delta
-    yield {
-      type: 'response.output_text.delta',
-      delta: 'Streaming response to: ',
-      sequence_number: 2,
-    };
+      ];
 
-    yield {
-      type: 'response.output_text.delta',
-      delta: params.input,
-      sequence_number: 3,
-    };
+      events.forEach((event, index) => {
+        setTimeout(() => {
+          res.write(`data: ${JSON.stringify(event)}\n\n`);
+          if (index === events.length - 1) {
+            res.write('data: [DONE]\n\n');
+            res.end();
+          }
+        }, index * 10);
+      });
+    } else {
+      // Non-streaming response
+      res.send({
-    // Response completed event
-    yield {
-      type: 'response.completed',
-      response: {
-        id: 'resp_stream_456',
+        id: 'resp_mock456',
         object: 'response',
-        created_at: 1677652310,
-        model: params.model,
-        status: 'completed',
-        error: null,
-        incomplete_details: null,
-        instructions: params.instructions,
-        max_output_tokens: 1000,
-        parallel_tool_calls: false,
-        previous_response_id: null,
-        reasoning: {
-          effort: null,
-          summary: null,
-        },
-        store: false,
-        temperature: 0.7,
-        text: {
-          format: {
-            type: 'text',
+        created_at: 1677652290,
+        model: model,
+        output: [
+          {
+            type: 'message',
+            id: 'msg_mock_output_1',
+            status: 'completed',
+            role: 'assistant',
+            content: [
+              {
+                type: 'output_text',
+                text: `Response to: ${req.body.input}`,
+                annotations: [],
+              },
+            ],
           },
-        },
-        tool_choice: 'auto',
-        top_p: 1.0,
-        truncation: 'disabled',
-        user: null,
-        metadata: {},
-        output: [],
-        output_text: params.input,
+        ],
+        output_text: `Response to: ${req.body.input}`,
+        status: 'completed',
         usage: {
-          input_tokens: 6,
-          output_tokens: 10,
-          total_tokens: 16,
+          input_tokens: 5,
+          output_tokens: 8,
+          total_tokens: 13,
         },
-      },
-      sequence_number: 4,
-    };
-  }
+      });
+    }
+  });
+
+  return new Promise(resolve => {
+    const server = app.listen(0, () => {
+      resolve(server);
+    });
+  });
 }
 
 async function run() {
+  const server = await startMockServer();
+
   await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
-    const mockClient = new MockOpenAI({
+    const client = new OpenAI({
+      baseURL: `http://localhost:${server.address().port}/openai`,
       apiKey: 'mock-api-key',
     });
 
-    const client = instrumentOpenAiClient(mockClient);
-
     // First test: basic chat completion
     await client.chat.completions.create({
       model: 'gpt-3.5-turbo',
@@ -313,6 +271,8 @@ async function run() {
       // Error is expected and handled
     }
   });
+
+  server.close();
 }
 
 run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/scenario-embeddings.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/scenario-embeddings.mjs
index 9cdb24a42da9..f6cbe1160bf5 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/scenario-embeddings.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/scenario-embeddings.mjs
@@ -1,49 +1,55 @@
-import { instrumentOpenAiClient } from '@sentry/core';
 import * as Sentry from '@sentry/node';
+import express from 'express';
+import OpenAI from 'openai';
 
-class MockOpenAI {
-  constructor(config) {
-    this.apiKey = config.apiKey;
+function startMockServer() {
+  const app = express();
+  app.use(express.json());
 
-    this.embeddings = {
-      create: async params => {
-        await new Promise(resolve => setTimeout(resolve, 10));
+  // Embeddings endpoint
+  app.post('/openai/embeddings', (req, res) => {
+    const { model } = req.body;
 
-        if (params.model === 'error-model') {
-          const error = new Error('Model not found');
-          error.status = 404;
-          error.headers = { 'x-request-id': 'mock-request-123' };
-          throw error;
-        }
+    // Handle error model
+    if (model === 'error-model') {
+      res.status(404).set('x-request-id', 'mock-request-123').end('Model not found');
+      return;
+    }
 
-        return {
-          object: 'list',
-          data: [
-            {
-              object: 'embedding',
-              embedding: [0.1, 0.2, 0.3],
-              index: 0,
-            },
-          ],
-          model: params.model,
-          usage: {
-            prompt_tokens: 10,
-            total_tokens: 10,
-          },
-        };
+    // Return embeddings response
+    res.send({
+      object: 'list',
+      data: [
+        {
+          object: 'embedding',
+          embedding: [0.1, 0.2, 0.3],
+          index: 0,
+        },
+      ],
+      model: model,
+      usage: {
+        prompt_tokens: 10,
+        total_tokens: 10,
       },
-    };
-  }
+    });
+  });
+
+  return new Promise(resolve => {
+    const server = app.listen(0, () => {
+      resolve(server);
+    });
+  });
 }
 
 async function run() {
+  const server = await startMockServer();
+
   await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
-    const mockClient = new MockOpenAI({
+    const client = new OpenAI({
+      baseURL: `http://localhost:${server.address().port}/openai`,
       apiKey: 'mock-api-key',
     });
 
-    const client = instrumentOpenAiClient(mockClient);
-
     // First test: embeddings API
    await client.embeddings.create({
       input: 'Embedding test!',
@@ -62,6 +68,8 @@ async function run() {
       // Error is expected and handled
     }
   });
+
+  server.close();
 }
 
 run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
index a0436d9e5a8b..d56bb27f6a24 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
@@ -454,7 +454,7 @@ describe('OpenAI integration', () => {
     });
   });
 
-  createEsmAndCjsTests(__dirname, 'scenario-root-span.mjs', 'instrument.mjs', (createRunner, test) => {
+  createEsmAndCjsTests(__dirname, 'scenario-root-span.mjs', 'instrument-root-span.mjs', (createRunner, test) => {
     test('it works without a wrapping span', async () => {
       await createRunner()
         // First the span that our mock express server is emitting, unrelated to this test
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs
new file mode 100644
index 000000000000..f3fbac9d1274
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs
@@ -0,0 +1,11 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: false,
+  transport: loggingTransport,
+  integrations: [Sentry.openAIIntegration()],
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
index 35f97fd84093..51da27f73bbc 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
@@ -13,4 +13,11 @@
       recordOutputs: true,
     }),
   ],
+  debug: true,
+  beforeSendTransaction: event => {
+    if (event.transaction.includes('/openai/')) {
+      return null;
+    }
+    return event;
+  },
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
index a53a13af7738..74bc63db971b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
@@ -8,4 +8,10 @@ Sentry.init({
   sendDefaultPii: true,
   transport: loggingTransport,
   integrations: [Sentry.openAIIntegration()],
+  beforeSendTransaction: event => {
+    if (event.transaction.includes('/openai/')) {
+      return null;
+    }
+    return event;
+  },
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
index f3fbac9d1274..1ff3990a0693 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
@@ -8,4 +8,10 @@ Sentry.init({
   sendDefaultPii: false,
   transport: loggingTransport,
   integrations: [Sentry.openAIIntegration()],
+  beforeSendTransaction: event => {
+    if (event.transaction.includes('/openai/')) {
+      return null;
+    }
+    return event;
+  },
 });
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-chat.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-chat.mjs
index fde651c3c1ff..6031b6861f5b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-chat.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-chat.mjs
@@ -1,245 +1,203 @@
-import { instrumentOpenAiClient } from '@sentry/core';
 import * as Sentry from '@sentry/node';
+import express from 'express';
+import OpenAI from 'openai';
 
-class MockOpenAI {
-  constructor(config) {
-    this.apiKey = config.apiKey;
+function startMockServer() {
+  const app = express();
+  app.use(express.json());
 
-    this.chat = {
-      completions: {
-        create: async params => {
-          // Simulate processing time
-          await new Promise(resolve => setTimeout(resolve, 10));
+  // Chat completions endpoint
+  app.post('/openai/chat/completions', (req, res) => {
+    const { model, stream } = req.body;
 
-          if (params.model === 'error-model') {
-            const error = new Error('Model not found');
-            error.status = 404;
-            error.headers = { 'x-request-id': 'mock-request-123' };
-            throw error;
-          }
+    // Handle error model
+    if (model === 'error-model') {
+      res.status(500).set('x-request-id', 'mock-request-error').end('Internal server error');
+      return;
+    }
 
-          // If stream is requested, return an async generator
-          if (params.stream) {
-            return this._createChatCompletionStream(params);
-          }
+    if (stream) {
+      // Streaming response
+      res.setHeader('Content-Type', 'text/event-stream');
+      res.setHeader('Cache-Control', 'no-cache');
+      res.setHeader('Connection', 'keep-alive');
 
-          return {
-            id: 'chatcmpl-mock123',
-            object: 'chat.completion',
-            created: 1677652288,
-            model: params.model,
-            system_fingerprint: 'fp_44709d6fcb',
-            choices: [
-              {
-                index: 0,
-                message: {
-                  role: 'assistant',
-                  content: 'Hello from OpenAI mock!',
-                },
-                finish_reason: 'stop',
-              },
-            ],
-            usage: {
-              prompt_tokens: 10,
-              completion_tokens: 15,
-              total_tokens: 25,
+      const chunks = [
+        {
+          id: 'chatcmpl-stream-123',
+          object: 'chat.completion.chunk',
+          created: 1677652300,
+          model: model,
+          choices: [{ delta: { role: 'assistant', content: '' }, index: 0 }],
+        },
+        {
+          id: 'chatcmpl-stream-123',
+          object: 'chat.completion.chunk',
+          created: 1677652300,
+          model: model,
+          choices: [{ delta: { content: 'Hello from OpenAI streaming!' }, index: 0 }],
+        },
+        {
+          id: 'chatcmpl-stream-123',
+          object: 'chat.completion.chunk',
+          created: 1677652300,
+          model: model,
+          choices: [{ delta: {}, index: 0, finish_reason: 'stop' }],
+          usage: {
+            prompt_tokens: 12,
+            completion_tokens: 18,
+            total_tokens: 30,
+          },
+        },
+      ];
+
+      chunks.forEach((chunk, index) => {
+        setTimeout(() => {
+          res.write(`data: ${JSON.stringify(chunk)}\n\n`);
+          if (index === chunks.length - 1) {
+            res.write('data: [DONE]\n\n');
+            res.end();
+          }
+        }, index * 10);
+      });
+    } else {
+      // Non-streaming response
+      res.send({
+        id: 'chatcmpl-mock123',
+        object: 'chat.completion',
+        created: 1677652288,
+        model: model,
+        choices: [
+          {
+            index: 0,
+            message: {
+              role: 'assistant',
+              content: 'Hello from OpenAI mock!',
             },
-          };
+            finish_reason: 'stop',
           },
-        },
-      };
+        ],
+        usage: {
+          prompt_tokens: 10,
+          completion_tokens: 15,
+          total_tokens: 25,
         },
-      },
-    };
+      });
+    }
+  });
 
-    this.responses = {
-      create: async params => {
-        await new Promise(resolve => setTimeout(resolve, 10));
+  // Responses API endpoint
+  app.post('/openai/responses', (req, res) => {
+    const { model, stream } = req.body;
 
-        // If stream is requested, return an async generator
-        if (params.stream) {
-          return this._createResponsesApiStream(params);
-        }
+    // Handle error model
+    if (model === 'error-model') {
+      res.status(500).set('x-request-id', 'mock-request-error').end('Internal server error');
+      return;
+    }
 
-        return {
-          id: 'resp_mock456',
-          object: 'response',
-          created_at: 1677652290,
-          model: params.model,
-          input_text: params.input,
-          output_text: `Response to: ${params.input}`,
-          status: 'completed',
-          usage: {
-            input_tokens: 5,
-            output_tokens: 8,
-            total_tokens: 13,
-          },
-        };
-      },
-    };
-  }
+    if (stream) {
+      // Streaming response - using event-based format with 'response' field
+      res.setHeader('Content-Type', 'text/event-stream');
+      res.setHeader('Cache-Control', 'no-cache');
+      res.setHeader('Connection', 'keep-alive');
 
-  // Create a mock streaming response for chat completions
-  async *_createChatCompletionStream(params) {
-    // First chunk with basic info
-    yield {
-      id: 'chatcmpl-stream-123',
-      object: 'chat.completion.chunk',
-      created: 1677652300,
-      model: params.model,
-      system_fingerprint: 'fp_stream_123',
-      choices: [
+      const events = [
         {
-          index: 0,
-          delta: {
-            role: 'assistant',
-            content: 'Hello',
+          type: 'response.created',
+          response: {
+            id: 'resp_stream_456',
+            object: 'response',
+            created_at: 1677652310,
+            model: model,
+            status: 'in_progress',
           },
-          finish_reason: null,
         },
-      ],
-    };
-
-    // Second chunk with more content
-    yield {
-      id: 'chatcmpl-stream-123',
-      object: 'chat.completion.chunk',
-      created: 1677652300,
-      model: params.model,
-      system_fingerprint: 'fp_stream_123',
-      choices: [
         {
-          index: 0,
-          delta: {
-            content: ' from OpenAI streaming!',
+          type: 'response.output_text.delta',
+          delta: 'Streaming response to: Test streaming responses API',
+          response: {
+            id: 'resp_stream_456',
+            model: model,
+            created_at: 1677652310,
          },
-          finish_reason: 'stop',
-        },
-      ],
-      usage: {
-        prompt_tokens: 12,
-        completion_tokens: 18,
-        total_tokens: 30,
-        completion_tokens_details: {
-          accepted_prediction_tokens: 0,
-          audio_tokens: 0,
-          reasoning_tokens: 0,
-          rejected_prediction_tokens: 0,
        },
-        prompt_tokens_details: {
-          audio_tokens: 0,
-          cached_tokens: 0,
-        },
-      },
-    };
-  }
-
-  // Create a mock streaming response for responses API
-  async *_createResponsesApiStream(params) {
-    // Response created event
-    yield {
-      type: 'response.created',
-      response: {
-        id: 'resp_stream_456',
-        object: 'response',
-        created_at: 1677652310,
-        model: params.model,
-        status: 'in_progress',
-        error: null,
-        incomplete_details: null,
-        instructions: params.instructions,
-        max_output_tokens: 1000,
-        parallel_tool_calls: false,
-        previous_response_id: null,
-        reasoning: {
-          effort: null,
-          summary: null,
-        },
-        store: false,
-        temperature: 0.7,
-        text: {
-          format: {
-            type: 'text',
+        {
+          type: 'response.completed',
+          response: {
+            id: 'resp_stream_456',
+            object: 'response',
+            created_at: 1677652310,
+            model: model,
+            status: 'completed',
+            output_text: 'Test streaming responses API',
+            usage: {
+              input_tokens: 6,
+              output_tokens: 10,
+              total_tokens: 16,
+            },
          },
        },
-        tool_choice: 'auto',
-        top_p: 1.0,
-        truncation: 'disabled',
-        user: null,
-        metadata: {},
-        output: [],
-        output_text: '',
-        usage: {
-          input_tokens: 0,
-          output_tokens: 0,
-          total_tokens: 0,
-        },
-      },
-      sequence_number: 1,
-    };
-
-    // Response in progress with output text delta
-    yield {
-      type: 'response.output_text.delta',
-      delta: 'Streaming response to: ',
-      sequence_number: 2,
-    };
+      ];
 
-    yield {
-      type: 'response.output_text.delta',
-      delta: params.input,
-      sequence_number: 3,
-    };
+      events.forEach((event, index) => {
+        setTimeout(() => {
+          res.write(`data: ${JSON.stringify(event)}\n\n`);
+          if (index === events.length - 1) {
+            res.write('data: [DONE]\n\n');
+            res.end();
+          }
+        }, index * 10);
+      });
+    } else {
+      // Non-streaming response
+      res.send({
-    // Response completed event
-    yield {
-      type: 'response.completed',
-      response: {
-        id: 'resp_stream_456',
+        id: 'resp_mock456',
         object: 'response',
-        created_at: 1677652310,
-        model: params.model,
-        status: 'completed',
-        error: null,
-        incomplete_details: null,
-        instructions: params.instructions,
-        max_output_tokens: 1000,
-        parallel_tool_calls: false,
-        previous_response_id: null,
-        reasoning: {
-          effort: null,
-          summary: null,
-        },
-        store: false,
-        temperature: 0.7,
-        text: {
-          format: {
-            type: 'text',
+        created_at: 1677652290,
+        model: model,
+        output: [
+          {
+            type: 'message',
+            id: 'msg_mock_output_1',
+            status: 'completed',
+            role: 'assistant',
+            content: [
+              {
+                type: 'output_text',
+                text: `Response to: ${req.body.input}`,
+                annotations: [],
+              },
+            ],
           },
-        },
-        tool_choice: 'auto',
-        top_p: 1.0,
-        truncation: 'disabled',
-        user: null,
-        metadata: {},
-        output: [],
-        output_text: params.input,
+        ],
+        output_text: `Response to: ${req.body.input}`,
+        status: 'completed',
         usage: {
-          input_tokens: 6,
-          output_tokens: 10,
-          total_tokens: 16,
+          input_tokens: 5,
+          output_tokens: 8,
+          total_tokens: 13,
         },
-      },
-      sequence_number: 4,
-    };
-  }
+      });
+    }
+  });
+
+  return new Promise(resolve => {
+    const server = app.listen(0, () => {
+      resolve(server);
+    });
+  });
 }
 
 async function run() {
+  const server = await startMockServer();
+
   await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
-    const mockClient = new MockOpenAI({
+    const client = new OpenAI({
+      baseURL: `http://localhost:${server.address().port}/openai`,
       apiKey: 'mock-api-key',
     });
 
-    const client = instrumentOpenAiClient(mockClient);
-
     // First test: basic chat completion
     await client.chat.completions.create({
       model: 'gpt-3.5-turbo',
@@ -313,6 +271,8 @@ async function run() {
       // Error is expected and handled
     }
   });
+
+  server.close();
 }
 
 run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
index 9cdb24a42da9..18f6edd711ee 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
@@ -1,49 +1,55 @@
-import { instrumentOpenAiClient } from '@sentry/core';
 import * as Sentry from '@sentry/node';
+import OpenAI from 'openai';
+import express from 'express';
 
-class MockOpenAI {
-  constructor(config) {
-    this.apiKey = config.apiKey;
+function startMockServer() {
+  const app = express();
+  app.use(express.json());
 
-    this.embeddings = {
-      create: async params => {
-        await new Promise(resolve => setTimeout(resolve, 10));
+  // Embeddings endpoint
+  app.post('/openai/embeddings', (req, res) => {
+    const { model } = req.body;
 
-        if (params.model === 'error-model') {
-          const error = new Error('Model not found');
-          error.status = 404;
-          error.headers = { 'x-request-id': 'mock-request-123' };
-          throw error;
-        }
+    // Handle error model
+    if (model === 'error-model') {
+      res.status(404).set('x-request-id', 'mock-request-123').end('Model not found');
+      return;
+    }
 
-        return {
-          object: 'list',
-          data: [
-            {
-              object: 'embedding',
-              embedding: [0.1, 0.2, 0.3],
-              index: 0,
-            },
-          ],
-          model: params.model,
-          usage: {
-            prompt_tokens: 10,
-            total_tokens: 10,
-          },
-        };
+    // Return embeddings response
+    res.send({
+      object: 'list',
+      data: [
+        {
+          object: 'embedding',
+          embedding: [0.1, 0.2, 0.3],
+          index: 0,
+        },
+      ],
+      model: model,
+      usage: {
+        prompt_tokens: 10,
+        total_tokens: 10,
      },
-    };
-  }
+    });
+  });
+
+  return new Promise(resolve => {
+    const server = app.listen(0, () => {
+      resolve(server);
+    });
+  });
 }
 
 async function run() {
+  const server = await startMockServer();
+
   await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
-    const mockClient = new MockOpenAI({
+    const client = new OpenAI({
+      baseURL: `http://localhost:${server.address().port}/openai`,
       apiKey: 'mock-api-key',
     });
 
-    const client = instrumentOpenAiClient(mockClient);
-
     // First test: embeddings API
    await client.embeddings.create({
       input: 'Embedding test!',
@@ -62,6 +68,8 @@ async function run() {
       // Error is expected and handled
     }
   });
+
+  server.close();
 }
 
 run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
index 4929325c6790..23520852f070 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
@@ -508,7 +508,7 @@ describe('OpenAI integration (V6)', () => {
   createEsmAndCjsTests(
     __dirname,
     'scenario-root-span.mjs',
-    'instrument.mjs',
+    'instrument-root-span.mjs',
     (createRunner, test) => {
       test('it works without a wrapping span (v6)', async () => {
         await createRunner()

From 6f8a2b61548b9112eee5a03b02ec4861ed19f810 Mon Sep 17 00:00:00 2001
From: RulaKhaled
Date: Wed, 3 Dec 2025 13:19:51 +0100
Subject: [PATCH 3/3] fix import order

---
 .../suites/tracing/openai/v6/scenario-embeddings.mjs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
index 18f6edd711ee..f6cbe1160bf5 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/scenario-embeddings.mjs
@@ -1,6 +1,6 @@
 import * as Sentry from '@sentry/node';
-import OpenAI from 'openai';
 import express from 'express';
+import OpenAI from 'openai';
 
 function startMockServer() {
   const app = express();
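
Reviewer note — a hypothetical usage sketch, not part of the patch series. After [PATCH 1/3], recordInputs/recordOutputs are resolved once in instrumentOpenAiClient rather than per call in getOptionsFromIntegration/determineRecordingSettings: both default to the client's sendDefaultPii, and explicit integration options win via object spread. The standalone resolveRecordingOptions helper below only mirrors that spread for illustration; it is not an exported API.

  // TypeScript sketch of the resolution order the refactor converges on.
  interface OpenAiOptions {
    recordInputs?: boolean;
    recordOutputs?: boolean;
  }

  // Hypothetical helper mirroring instrumentOpenAiClient in
  // packages/core/src/tracing/openai/index.ts: defaults follow
  // sendDefaultPii; explicit options override via spread.
  function resolveRecordingOptions(sendDefaultPii: boolean, options?: OpenAiOptions): OpenAiOptions {
    return {
      recordInputs: sendDefaultPii,
      recordOutputs: sendDefaultPii,
      ...options,
    };
  }

  // resolveRecordingOptions(false, { recordOutputs: true })
  //   -> { recordInputs: false, recordOutputs: true }
  // resolveRecordingOptions(true, undefined)
  //   -> { recordInputs: true, recordOutputs: true }

One nuance worth checking in review: spreading an explicit undefined (e.g. { recordInputs: undefined }) also overrides the sendDefaultPii default, and instrumentMethod then treats it as falsy — the same edge case the patched code has.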