Commit 8f47b16
fix(tracing): Add missing attributes in vercel-ai spans (#18333)
Also, sort the fields in the integration test to make it easier to see which fields were missing or misnamed in the fixture objects.

Fix JS-1216
Fix JS-1236
1 parent f271bbb commit 8f47b16
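
The net effect on span data, as exercised by the fixtures below: the vendor-specific model-id key is renamed to the standardized gen_ai key, and a gen_ai.request.messages attribute is now derived from the prompt. Sketched before/after for one fixture (field names and values taken from the diff; surrounding fields elided):

// Before this commit:
data: {
  'vercel.ai.model.id': 'mock-model-id',
  'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
  // ...
}

// After this commit:
data: {
  'gen_ai.request.model': 'mock-model-id',
  'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
  'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
  // ...
}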

11 files changed: +366, -227 lines

dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts

Lines changed: 216 additions & 185 deletions
Large diffs are not rendered by default.

dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts

Lines changed: 18 additions & 22 deletions
@@ -13,7 +13,7 @@ describe('Vercel AI integration (V5)', () => {
   // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText',
       'vercel.ai.pipeline.name': 'generateText',
@@ -41,10 +41,9 @@ describe('Vercel AI integration (V5)', () => {
       'operation.name': 'ai.generateText.doGenerate',
       'vercel.ai.operationId': 'ai.generateText.doGenerate',
       'vercel.ai.model.provider': 'mock-provider',
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.settings.maxRetries': 2,
       'gen_ai.system': 'mock-provider',
-      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.pipeline.name': 'generateText.doGenerate',
       'vercel.ai.streaming': false,
       'vercel.ai.response.finishReason': 'stop',
@@ -66,7 +65,7 @@ describe('Vercel AI integration (V5)', () => {
   // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText',
       'vercel.ai.pipeline.name': 'generateText',
@@ -76,6 +75,7 @@ describe('Vercel AI integration (V5)', () => {
       'vercel.ai.settings.maxRetries': 2,
       'vercel.ai.streaming': false,
       'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
+      'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
       'gen_ai.response.model': 'mock-model-id',
       'gen_ai.usage.input_tokens': 10,
       'gen_ai.usage.output_tokens': 20,
@@ -97,10 +97,9 @@ describe('Vercel AI integration (V5)', () => {
       'operation.name': 'ai.generateText.doGenerate',
       'vercel.ai.operationId': 'ai.generateText.doGenerate',
       'vercel.ai.model.provider': 'mock-provider',
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.settings.maxRetries': 2,
       'gen_ai.system': 'mock-provider',
-      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.pipeline.name': 'generateText.doGenerate',
       'vercel.ai.streaming': false,
       'vercel.ai.response.finishReason': 'stop',
@@ -124,7 +123,7 @@ describe('Vercel AI integration (V5)', () => {
   // Fifth span - tool call generateText span
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText',
       'vercel.ai.pipeline.name': 'generateText',
@@ -147,7 +146,7 @@ describe('Vercel AI integration (V5)', () => {
   // Sixth span - tool call doGenerate span
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText.doGenerate',
       'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -157,7 +156,6 @@ describe('Vercel AI integration (V5)', () => {
       'vercel.ai.response.timestamp': expect.any(String),
       'vercel.ai.settings.maxRetries': 2,
       'vercel.ai.streaming': false,
-      'gen_ai.request.model': 'mock-model-id',
       'gen_ai.response.finish_reasons': ['tool-calls'],
       'gen_ai.response.id': expect.any(String),
       'gen_ai.response.model': 'mock-model-id',
@@ -202,11 +200,12 @@ describe('Vercel AI integration (V5)', () => {
   // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText',
       'vercel.ai.pipeline.name': 'generateText',
       'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
+      'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]',
       'vercel.ai.response.finishReason': 'stop',
       'gen_ai.response.text': 'First span here!',
       'vercel.ai.settings.maxRetries': 2,
@@ -228,7 +227,7 @@ describe('Vercel AI integration (V5)', () => {
   // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText.doGenerate',
       'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -240,7 +239,6 @@ describe('Vercel AI integration (V5)', () => {
       'vercel.ai.response.timestamp': expect.any(String),
       'vercel.ai.settings.maxRetries': 2,
       'vercel.ai.streaming': false,
-      'gen_ai.request.model': 'mock-model-id',
       'gen_ai.response.finish_reasons': ['stop'],
       'gen_ai.response.id': expect.any(String),
       'gen_ai.response.model': 'mock-model-id',
@@ -260,11 +258,12 @@ describe('Vercel AI integration (V5)', () => {
   // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText',
       'vercel.ai.pipeline.name': 'generateText',
       'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+      'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
       'vercel.ai.response.finishReason': 'stop',
       'gen_ai.response.text': expect.any(String),
       'vercel.ai.settings.maxRetries': 2,
@@ -291,10 +290,9 @@ describe('Vercel AI integration (V5)', () => {
       'operation.name': 'ai.generateText.doGenerate',
       'vercel.ai.operationId': 'ai.generateText.doGenerate',
       'vercel.ai.model.provider': 'mock-provider',
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.settings.maxRetries': 2,
       'gen_ai.system': 'mock-provider',
-      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.pipeline.name': 'generateText.doGenerate',
       'vercel.ai.streaming': false,
       'vercel.ai.response.finishReason': 'stop',
@@ -318,13 +316,13 @@ describe('Vercel AI integration (V5)', () => {
   // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText',
       'vercel.ai.pipeline.name': 'generateText',
       'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+      'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
       'vercel.ai.response.finishReason': 'tool-calls',
-      // 'gen_ai.response.text': 'Tool call completed!',
       'gen_ai.response.tool_calls': expect.any(String),
       'vercel.ai.settings.maxRetries': 2,
       'vercel.ai.streaming': false,
@@ -345,7 +343,7 @@ describe('Vercel AI integration (V5)', () => {
   // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
   expect.objectContaining({
     data: {
-      'vercel.ai.model.id': 'mock-model-id',
+      'gen_ai.request.model': 'mock-model-id',
       'vercel.ai.model.provider': 'mock-provider',
       'vercel.ai.operationId': 'ai.generateText.doGenerate',
       'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -360,7 +358,6 @@ describe('Vercel AI integration (V5)', () => {
       'gen_ai.response.tool_calls': expect.any(String),
       'vercel.ai.settings.maxRetries': 2,
       'vercel.ai.streaming': false,
-      'gen_ai.request.model': 'mock-model-id',
       'gen_ai.response.finish_reasons': ['tool-calls'],
       'gen_ai.response.id': expect.any(String),
       'gen_ai.response.model': 'mock-model-id',
@@ -441,7 +438,7 @@ describe('Vercel AI integration (V5)', () => {
   spans: expect.arrayContaining([
     expect.objectContaining({
       data: {
-        'vercel.ai.model.id': 'mock-model-id',
+        'gen_ai.request.model': 'mock-model-id',
         'vercel.ai.model.provider': 'mock-provider',
         'vercel.ai.operationId': 'ai.generateText',
         'vercel.ai.pipeline.name': 'generateText',
@@ -462,7 +459,7 @@ describe('Vercel AI integration (V5)', () => {
     }),
     expect.objectContaining({
      data: {
-        'vercel.ai.model.id': 'mock-model-id',
+        'gen_ai.request.model': 'mock-model-id',
         'vercel.ai.model.provider': 'mock-provider',
         'vercel.ai.operationId': 'ai.generateText.doGenerate',
         'vercel.ai.pipeline.name': 'generateText.doGenerate',
@@ -472,7 +469,6 @@ describe('Vercel AI integration (V5)', () => {
         'vercel.ai.response.timestamp': expect.any(String),
         'vercel.ai.settings.maxRetries': 2,
         'vercel.ai.streaming': false,
-        'gen_ai.request.model': 'mock-model-id',
         'gen_ai.response.finish_reasons': ['tool-calls'],
         'gen_ai.response.id': expect.any(String),
         'gen_ai.response.model': 'mock-model-id',

packages/core/src/client.ts

Lines changed: 1 addition & 1 deletion
@@ -44,7 +44,7 @@ import { merge } from './utils/merge';
 import { checkOrSetAlreadyCaught, uuid4 } from './utils/misc';
 import { parseSampleRate } from './utils/parseSampleRate';
 import { prepareEvent } from './utils/prepareEvent';
-import { makePromiseBuffer, type PromiseBuffer, SENTRY_BUFFER_FULL_ERROR } from './utils/promisebuffer';
+import { type PromiseBuffer, makePromiseBuffer, SENTRY_BUFFER_FULL_ERROR } from './utils/promisebuffer';
 import { reparentChildSpans, shouldIgnoreSpan } from './utils/should-ignore-span';
 import { showSpanDropWarning } from './utils/spanUtils';
 import { rejectedSyncPromise } from './utils/syncpromise';

packages/core/src/tracing/vercel-ai/index.ts

Lines changed: 18 additions & 11 deletions
@@ -4,18 +4,27 @@ import type { Event } from '../../types-hoist/event';
 import type { Span, SpanAttributes, SpanAttributeValue, SpanJSON, SpanOrigin } from '../../types-hoist/span';
 import { spanToJSON } from '../../utils/spanUtils';
 import {
+  GEN_AI_OPERATION_NAME_ATTRIBUTE,
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
   GEN_AI_USAGE_INPUT_TOKENS_CACHE_WRITE_ATTRIBUTE,
   GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
 } from '../ai/gen-ai-attributes';
-import { getTruncatedJsonString } from '../ai/utils';
 import { toolCallSpanMap } from './constants';
 import type { TokenSummary } from './types';
-import { accumulateTokensForParent, applyAccumulatedTokens, convertAvailableToolsToJsonString } from './utils';
+import {
+  accumulateTokensForParent,
+  applyAccumulatedTokens,
+  convertAvailableToolsToJsonString,
+  requestMessagesFromPrompt,
+} from './utils';
 import type { ProviderMetadata } from './vercel-ai-attributes';
 import {
   AI_MODEL_ID_ATTRIBUTE,
   AI_MODEL_PROVIDER_ATTRIBUTE,
-  AI_PROMPT_ATTRIBUTE,
   AI_PROMPT_MESSAGES_ATTRIBUTE,
   AI_PROMPT_TOOLS_ATTRIBUTE,
   AI_RESPONSE_OBJECT_ATTRIBUTE,
@@ -31,9 +40,7 @@ import {
   AI_USAGE_CACHED_INPUT_TOKENS_ATTRIBUTE,
   AI_USAGE_COMPLETION_TOKENS_ATTRIBUTE,
   AI_USAGE_PROMPT_TOKENS_ATTRIBUTE,
-  GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
-  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
-  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  OPERATION_NAME_ATTRIBUTE,
 } from './vercel-ai-attributes';
 
 function addOriginToSpan(span: Span, origin: SpanOrigin): void {
@@ -131,7 +138,8 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
   }
 
   // Rename AI SDK attributes to standardized gen_ai attributes
-  renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, 'gen_ai.request.messages');
+  renameAttributeKey(attributes, OPERATION_NAME_ATTRIBUTE, GEN_AI_OPERATION_NAME_ATTRIBUTE);
+  renameAttributeKey(attributes, AI_PROMPT_MESSAGES_ATTRIBUTE, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE);
   renameAttributeKey(attributes, AI_RESPONSE_TEXT_ATTRIBUTE, 'gen_ai.response.text');
   renameAttributeKey(attributes, AI_RESPONSE_TOOL_CALLS_ATTRIBUTE, 'gen_ai.response.tool_calls');
   renameAttributeKey(attributes, AI_RESPONSE_OBJECT_ATTRIBUTE, 'gen_ai.response.object');
@@ -141,6 +149,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
   renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output');
 
   renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema');
+  renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE);
 
   addProviderMetadataToAttributes(attributes);
 
@@ -203,10 +212,8 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute
     span.setAttribute('gen_ai.function_id', functionId);
   }
 
-  if (attributes[AI_PROMPT_ATTRIBUTE]) {
-    const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]);
-    span.setAttribute('gen_ai.prompt', truncatedPrompt);
-  }
+  requestMessagesFromPrompt(span, attributes);
+
   if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) {
     span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]);
  }
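
The hunks above call a renameAttributeKey helper that is defined elsewhere in the SDK and is not part of this commit. As a rough sketch of its apparent contract (an assumption read off the call sites, not the SDK's actual implementation), it moves a value from one attribute key to another:

// Sketch only; the real helper lives elsewhere in packages/core and may differ.
function renameAttributeKey(attributes: Record<string, unknown>, oldKey: string, newKey: string): void {
  if (attributes[oldKey] !== undefined) {
    attributes[newKey] = attributes[oldKey];
    delete attributes[oldKey];
  }
}

// The new renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, GEN_AI_REQUEST_MODEL_ATTRIBUTE)
// call corresponds to the fixture change from 'vercel.ai.model.id' to 'gen_ai.request.model' above.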

packages/core/src/tracing/vercel-ai/utils.ts

Lines changed: 52 additions & 2 deletions
@@ -1,8 +1,14 @@
 import type { TraceContext } from '../../types-hoist/context';
-import type { Span, SpanJSON } from '../../types-hoist/span';
-import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE } from '../ai/gen-ai-attributes';
+import type { Span, SpanAttributes, SpanJSON } from '../../types-hoist/span';
+import {
+  GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+} from '../ai/gen-ai-attributes';
+import { getTruncatedJsonString } from '../ai/utils';
 import { toolCallSpanMap } from './constants';
 import type { TokenSummary } from './types';
+import { AI_PROMPT_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE } from './vercel-ai-attributes';
 
 /**
  * Accumulates token data from a span to its parent in the token accumulator map.
@@ -87,3 +93,47 @@ export function convertAvailableToolsToJsonString(tools: unknown[]): string {
   });
   return JSON.stringify(toolObjects);
 }
+
+/**
+ * Convert the prompt string to messages array
+ */
+export function convertPromptToMessages(prompt: string): { role: string; content: string }[] {
+  try {
+    const p = JSON.parse(prompt);
+    if (!!p && typeof p === 'object') {
+      const { prompt, system } = p;
+      if (typeof prompt === 'string' || typeof system === 'string') {
+        const messages: { role: string; content: string }[] = [];
+        if (typeof system === 'string') {
+          messages.push({ role: 'system', content: system });
+        }
+        if (typeof prompt === 'string') {
+          messages.push({ role: 'user', content: prompt });
+        }
+        return messages;
+      }
+    }
+    // eslint-disable-next-line no-empty
+  } catch {}
+  return [];
+}
+
+/**
+ * Generate a request.messages JSON array from the prompt field in the
+ * invoke_agent op
+ */
+export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes): void {
+  if (attributes[AI_PROMPT_ATTRIBUTE]) {
+    const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]);
+    span.setAttribute('gen_ai.prompt', truncatedPrompt);
+  }
+  const prompt = attributes[AI_PROMPT_ATTRIBUTE];
+  if (
+    typeof prompt === 'string' &&
+    !attributes[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE] &&
+    !attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]
+  ) {
+    const messages = convertPromptToMessages(prompt);
+    if (messages.length) span.setAttribute(GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, getTruncatedJsonString(messages));
+  }
+}
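
Read off the diff above, the new helper behaves like this on the prompt strings used in the test fixtures (hypothetical calls for illustration):

convertPromptToMessages('{"prompt":"Where is the first span?"}');
// → [{ role: 'user', content: 'Where is the first span?' }]

convertPromptToMessages('{"system":"You are terse.","prompt":"Hi"}');
// → [{ role: 'system', content: 'You are terse.' }, { role: 'user', content: 'Hi' }]
// (this system prompt is an invented example, not taken from the tests)

convertPromptToMessages('not json');
// → [] (JSON.parse failures are swallowed; requestMessagesFromPrompt then sets no attribute)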

packages/core/src/transports/base.ts

Lines changed: 2 additions & 2 deletions
@@ -14,8 +14,8 @@ import {
   forEachEnvelopeItem,
   serializeEnvelope,
 } from '../utils/envelope';
-import { makePromiseBuffer, type PromiseBuffer, SENTRY_BUFFER_FULL_ERROR } from '../utils/promisebuffer';
-import { isRateLimited, type RateLimits, updateRateLimits } from '../utils/ratelimit';
+import { type PromiseBuffer, makePromiseBuffer, SENTRY_BUFFER_FULL_ERROR } from '../utils/promisebuffer';
+import { type RateLimits, isRateLimited, updateRateLimits } from '../utils/ratelimit';
 
 export const DEFAULT_TRANSPORT_BUFFER_SIZE = 64;

packages/core/test/lib/integrations/captureconsole.test.ts

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 /* eslint-disable @typescript-eslint/unbound-method */
 
-import { afterEach, beforeEach, describe, expect, it, type Mock, vi } from 'vitest';
+import { type Mock, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
 import type { Client } from '../../../src';
 import * as CurrentScopes from '../../../src/currentScopes';
 import * as SentryCore from '../../../src/exports';

packages/core/test/lib/sdk.test.ts

Lines changed: 1 addition & 1 deletion
@@ -1,4 +1,4 @@
-import { beforeEach, describe, expect, it, type Mock, test, vi } from 'vitest';
+import { type Mock, beforeEach, describe, expect, it, test, vi } from 'vitest';
 import type { Client } from '../../src/client';
 import { getCurrentScope } from '../../src/currentScopes';
 import { captureCheckIn } from '../../src/exports';

packages/core/test/lib/trpc.test.ts

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 import { beforeEach, describe, expect, test, vi } from 'vitest';
-import { type Client, setCurrentClient, type Span, trpcMiddleware } from '../../src';
+import { type Client, type Span, setCurrentClient, trpcMiddleware } from '../../src';
 import * as currentScopes from '../../src/currentScopes';
 import * as exports from '../../src/exports';
 import * as tracing from '../../src/tracing';

packages/core/test/lib/utils/featureFlags.test.ts

Lines changed: 1 addition & 1 deletion
@@ -2,9 +2,9 @@ import { afterEach, describe, expect, it, vi } from 'vitest';
 import { getCurrentScope } from '../../../src/currentScopes';
 import { debug } from '../../../src/utils/debug-logger';
 import {
+  type FeatureFlag,
   _INTERNAL_insertFlagToScope,
   _INTERNAL_insertToFlagBuffer,
-  type FeatureFlag,
 } from '../../../src/utils/featureFlags';
 
 describe('flags', () => {
