17 changes: 17 additions & 0 deletions instrument-no-truncation.mjs
@@ -0,0 +1,17 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  integrations: [
    Sentry.vercelAIIntegration({
      recordInputs: true,
      recordOutputs: true,
      enableTruncation: false,
    }),
  ],
});
16 changes: 16 additions & 0 deletions instrument-streaming-with-truncation.mjs
@@ -0,0 +1,16 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  traceLifecycle: 'stream',
  integrations: [
    Sentry.vercelAIIntegration({
      enableTruncation: true,
    }),
  ],
});
11 changes: 11 additions & 0 deletions instrument-streaming.mjs
@@ -0,0 +1,11 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  traceLifecycle: 'stream',
});
28 changes: 28 additions & 0 deletions scenario-no-truncation.mjs
@@ -0,0 +1,28 @@
import * as Sentry from '@sentry/node';
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    // Multiple messages with long content (would normally be truncated and popped to last message only)
    const longContent = 'A'.repeat(50_000);
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 5 },
          text: 'Response',
        }),
      }),
      messages: [
        { role: 'user', content: longContent },
        { role: 'assistant', content: 'Some reply' },
        { role: 'user', content: 'Follow-up question' },
      ],
    });
  });
}

run();
30 changes: 30 additions & 0 deletions scenario-streaming.mjs
@@ -0,0 +1,30 @@
import * as Sentry from '@sentry/node';
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const longContent = 'A'.repeat(50_000);
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 5 },
          text: 'Response',
        }),
      }),
      messages: [
        { role: 'user', content: longContent },
        { role: 'assistant', content: 'Some reply' },
        { role: 'user', content: 'Follow-up question' },
      ],
    });
  });

  // Flush is required when span streaming is enabled to ensure streamed spans are sent before the process exits
  await Sentry.flush(2000);
}

run();
@@ -950,4 +950,83 @@ describe('Vercel AI integration', () => {
        .completed();
    });
  });

  const longContent = 'A'.repeat(50_000);

  createEsmAndCjsTests(
    __dirname,
    'scenario-no-truncation.mjs',
    'instrument-no-truncation.mjs',
    (createRunner, test) => {
      test('does not truncate input messages when enableTruncation is false', async () => {
        await createRunner()
          .expect({
            transaction: {
              transaction: 'main',
              spans: expect.arrayContaining([
                // Multiple messages should all be preserved (no popping to last message only)
                expect.objectContaining({
                  data: expect.objectContaining({
                    [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
                      { role: 'user', content: longContent },
                      { role: 'assistant', content: 'Some reply' },
                      { role: 'user', content: 'Follow-up question' },
                    ]),
                    [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
                  }),
                }),
              ]),
            },
          })
          .start()
          .completed();
      });
    },
  );

  const streamingLongContent = 'A'.repeat(50_000);

  createEsmAndCjsTests(__dirname, 'scenario-streaming.mjs', 'instrument-streaming.mjs', (createRunner, test) => {
    test('automatically disables truncation when span streaming is enabled', async () => {
      await createRunner()
        .expect({
          span: container => {
            const spans = container.items;

            const chatSpan = spans.find(s =>
              s.attributes?.[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.includes(streamingLongContent),
            );
            expect(chatSpan).toBeDefined();
          },
        })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(
    __dirname,
    'scenario-streaming.mjs',
    'instrument-streaming-with-truncation.mjs',
    (createRunner, test) => {
      test('respects explicit enableTruncation: true even when span streaming is enabled', async () => {
        await createRunner()
          .expect({
            span: container => {
              const spans = container.items;

              // With explicit enableTruncation: true, truncation keeps only the last message
              // and drops the long content. The result should NOT contain the full 50k 'A' string.
              const chatSpan = spans.find(s =>
                s.attributes?.[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Follow-up question'),
              );
              expect(chatSpan).toBeDefined();
              expect(chatSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).not.toContain(streamingLongContent);
            },
          })
          .start()
          .completed();
      });
    },
  );
});
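Taken together, these assertions imply roughly the following attribute values for the three-message prompt (a sketch inferred from the test comments above, not captured output):

// enableTruncation: false — all three messages preserved:
// GEN_AI_INPUT_MESSAGES_ATTRIBUTE = '[{"role":"user","content":"AAA…"},{"role":"assistant","content":"Some reply"},{"role":"user","content":"Follow-up question"}]'
// enableTruncation: true — popped to the last message, the 50k-character content dropped:
// GEN_AI_INPUT_MESSAGES_ATTRIBUTE = '[{"role":"user","content":"Follow-up question"}]'
// In both cases the pre-truncation message count is recorded:
// GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE = 3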
11 changes: 10 additions & 1 deletion packages/cloudflare/src/integrations/tracing/vercelai.ts
@@ -13,9 +13,18 @@ import { addVercelAiProcessors, defineIntegration } from '@sentry/core';
 
 const INTEGRATION_NAME = 'VercelAI';
 
-const _vercelAIIntegration = (() => {
+interface VercelAiOptions {
+  /**
+   * Enable or disable truncation of recorded input messages.
+   * Defaults to `true`; when span streaming is enabled, truncation is disabled unless this option is set explicitly.
+   */
+  enableTruncation?: boolean;
+}
+
+const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
   return {
     name: INTEGRATION_NAME,
+    options,
     setup(client) {
       addVercelAiProcessors(client);
     },
11 changes: 11 additions & 0 deletions packages/core/src/tracing/ai/utils.ts
@@ -3,6 +3,7 @@
  */
 import { captureException } from '../../exports';
 import { getClient } from '../../currentScopes';
+import { hasSpanStreamingEnabled } from '../spans/hasSpanStreamingEnabled';
 import type { Span } from '../../types-hoist/span';
 import { isThenable } from '../../utils/is';
 import {
@@ -56,6 +57,16 @@ export function resolveAIRecordingOptions<T extends AIRecordingOptions>(options?
   } as T & Required<AIRecordingOptions>;
 }
 
+/**
+ * Resolves whether truncation should be enabled.
+ * If the user explicitly set `enableTruncation`, that value is used.
+ * Otherwise, truncation is disabled when span streaming is active.
+ */
+export function shouldEnableTruncation(enableTruncation: boolean | undefined): boolean {
+  const client = getClient();
+  return enableTruncation ?? !(client && hasSpanStreamingEnabled(client));
+}
+
 /**
  * Build method path from current traversal
  */
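For reference, a quick sketch of the resolution order shouldEnableTruncation implements (illustrative calls; the return values follow directly from the function body above):

// Span streaming enabled on the active client:
shouldEnableTruncation(undefined); // false — truncation auto-disabled under streaming
shouldEnableTruncation(true); // true — an explicit opt-in wins over the streaming default
shouldEnableTruncation(false); // false — an explicit opt-out also wins
// Span streaming disabled, or no client:
shouldEnableTruncation(undefined); // true — truncation remains the default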
14 changes: 11 additions & 3 deletions packages/core/src/tracing/vercel-ai/index.ts
@@ -1,6 +1,8 @@
 /* eslint-disable max-lines */
 import type { Client } from '../../client';
+import { getClient } from '../../currentScopes';
 import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes';
+import { shouldEnableTruncation } from '../ai/utils';
 import type { Event } from '../../types-hoist/event';
 import type { Span, SpanAttributes, SpanAttributeValue, SpanJSON } from '../../types-hoist/span';
 import { spanToJSON } from '../../utils/spanUtils';
@@ -114,7 +116,13 @@ function onVercelAiSpanStart(span: Span): void {
     return;
   }
 
-  processGenerateSpan(span, name, attributes);
+  const client = getClient();
+  const integration = client?.getIntegrationByName('VercelAI') as
+    | { options?: { enableTruncation?: boolean } }
+    | undefined;
+  const enableTruncation = shouldEnableTruncation(integration?.options?.enableTruncation);
+
+  processGenerateSpan(span, name, attributes, enableTruncation);
 }
 
 function vercelAiEventProcessor(event: Event): Event {
@@ -396,7 +404,7 @@ function processToolCallSpan(span: Span, attributes: SpanAttributes): void {
   }
 }
 
-function processGenerateSpan(span: Span, name: string, attributes: SpanAttributes): void {
+function processGenerateSpan(span: Span, name: string, attributes: SpanAttributes, enableTruncation: boolean): void {
   span.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, 'auto.vercelai.otel');
 
   const nameWthoutAi = name.replace('ai.', '');
@@ -408,7 +416,7 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute
     span.setAttribute('gen_ai.function_id', functionId);
   }
 
-  requestMessagesFromPrompt(span, attributes);
+  requestMessagesFromPrompt(span, attributes, enableTruncation);
 
   if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) {
     span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]);
20 changes: 12 additions & 8 deletions packages/core/src/tracing/vercel-ai/utils.ts
@@ -16,7 +16,7 @@
   GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
   GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
 } from '../ai/gen-ai-attributes';
-import { extractSystemInstructions, getTruncatedJsonString } from '../ai/utils';
+import { extractSystemInstructions, getJsonString, getTruncatedJsonString } from '../ai/utils';
 import { toolCallSpanContextMap } from './constants';
 import type { TokenSummary, ToolCallSpanContext } from './types';
 import { AI_PROMPT_ATTRIBUTE, AI_PROMPT_MESSAGES_ATTRIBUTE } from './vercel-ai-attributes';
@@ -227,7 +227,7 @@ export function convertUserInputToMessagesFormat(userInput: string): { role: str
  * Generate a request.messages JSON array from the prompt field in the
  * invoke_agent op
  */
-export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes): void {
+export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes, enableTruncation: boolean): void {
   if (
     typeof attributes[AI_PROMPT_ATTRIBUTE] === 'string' &&
     !attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] &&
@@ -247,11 +247,13 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
     }
 
     const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
-    const truncatedMessages = getTruncatedJsonString(filteredMessages);
+    const messagesJson = enableTruncation
+      ? getTruncatedJsonString(filteredMessages)
+      : getJsonString(filteredMessages);
 
     span.setAttributes({
-      [AI_PROMPT_ATTRIBUTE]: truncatedMessages,
-      [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
+      [AI_PROMPT_ATTRIBUTE]: messagesJson,
+      [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: messagesJson,
       [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
     });
   }
@@ -268,11 +270,13 @@ export function requestMessagesFromPrompt(span: Span, attributes: SpanAttributes
     }
 
     const filteredLength = Array.isArray(filteredMessages) ? filteredMessages.length : 0;
-    const truncatedMessages = getTruncatedJsonString(filteredMessages);
+    const messagesJson = enableTruncation
+      ? getTruncatedJsonString(filteredMessages)
+      : getJsonString(filteredMessages);
 
     span.setAttributes({
-      [AI_PROMPT_MESSAGES_ATTRIBUTE]: truncatedMessages,
-      [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: truncatedMessages,
+      [AI_PROMPT_MESSAGES_ATTRIBUTE]: messagesJson,
+      [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: messagesJson,
       [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: filteredLength,
     });
   }
11 changes: 10 additions & 1 deletion packages/deno/src/integrations/tracing/vercelai.ts
@@ -7,9 +7,18 @@ import { addVercelAiProcessors, defineIntegration } from '@sentry/core';
 
 const INTEGRATION_NAME = 'VercelAI';
 
-const _vercelAIIntegration = (() => {
+interface VercelAiOptions {
+  /**
+   * Enable or disable truncation of recorded input messages.
+   * Defaults to `true`; when span streaming is enabled, truncation is disabled unless this option is set explicitly.
+   */
+  enableTruncation?: boolean;
+}
+
+const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
   return {
     name: INTEGRATION_NAME,
+    options,
     setup(client) {
       addVercelAiProcessors(client);
     },
6 changes: 6 additions & 0 deletions packages/node/src/integrations/tracing/vercelai/types.ts
@@ -62,6 +62,12 @@ export interface VercelAiOptions {
    * If you want to register the span processors even when the ai package usage cannot be detected, you can set `force` to `true`.
    */
   force?: boolean;
+
+  /**
+   * Enable or disable truncation of recorded input messages.
+   * Defaults to `true`; when span streaming is enabled, truncation is disabled unless this option is set explicitly.
+   */
+  enableTruncation?: boolean;
 }
 
 export interface VercelAiIntegration extends Integration {
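For reference, opting out of truncation in a Node app mirrors the integration-test fixtures above (a minimal sketch; the DSN is a placeholder):

import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: '__PUBLIC_DSN__',
  tracesSampleRate: 1.0,
  integrations: [
    // Keep full input messages on gen_ai spans instead of truncating them
    Sentry.vercelAIIntegration({ enableTruncation: false }),
  ],
});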
11 changes: 10 additions & 1 deletion packages/vercel-edge/src/integrations/tracing/vercelai.ts
@@ -13,9 +13,18 @@ import { addVercelAiProcessors, defineIntegration } from '@sentry/core';
 
 const INTEGRATION_NAME = 'VercelAI';
 
-const _vercelAIIntegration = (() => {
+interface VercelAiOptions {
+  /**
+   * Enable or disable truncation of recorded input messages.
+   * Defaults to `true`; when span streaming is enabled, truncation is disabled unless this option is set explicitly.
+   */
+  enableTruncation?: boolean;
+}
+
+const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
   return {
     name: INTEGRATION_NAME,
+    options,
     setup(client) {
       addVercelAiProcessors(client);
     },