Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

feat(core): Improve error handling for Anthropic AI instrumentation #17535

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
RulaKhaled merged 5 commits into develop from record-errors
Sep 5, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
View file Open in desktop
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
import { instrumentAnthropicAiClient } from '@sentry/core';
import * as Sentry from '@sentry/node';

// Minimal stand-in for the Anthropic SDK client. Only the surface exercised
// by the instrumentation is implemented: messages.create and models.retrieve.
class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;
    this.messages = {
      create: this._messagesCreate.bind(this),
    };
    this.models = {
      retrieve: this._modelsRetrieve.bind(this),
    };
  }

  // Shared artificial async delay used by both mock endpoints.
  #delay(ms) {
    return new Promise(resolve => setTimeout(resolve, ms));
  }

  // Simulates client.messages.create: throws a 400-style error for the
  // 'invalid-format' model, otherwise resolves with a tool_use response.
  async _messagesCreate(params) {
    await this.#delay(5);

    // Case 1: invalid tool format error.
    if (params.model === 'invalid-format') {
      const invalidFormatError = new Error('Invalid format');
      invalidFormatError.status = 400;
      invalidFormatError.headers = { 'x-request-id': 'mock-invalid-tool-format-error' };
      throw invalidFormatError;
    }

    // Default (success) path: a tool_use block for the successful-tool-usage test.
    return {
      id: 'msg_ok',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [
        {
          type: 'tool_use',
          id: 'tool_ok_1',
          name: 'calculator',
          input: { expression: '2+2' },
        },
      ],
      stop_reason: 'tool_use',
      usage: { input_tokens: 7, output_tokens: 9 },
    };
  }

  // Simulates client.models.retrieve: throws a 404-style error for
  // 'nonexistent-model', otherwise resolves with a minimal model record.
  async _modelsRetrieve(modelId) {
    await this.#delay(5);

    if (modelId === 'nonexistent-model') {
      const notFoundError = new Error('Model not found');
      notFoundError.status = 404;
      notFoundError.headers = { 'x-request-id': 'mock-model-retrieval-error' };
      throw notFoundError;
    }

    return {
      id: modelId,
      name: modelId,
      created_at: 1715145600,
      model: modelId,
    };
  }
}

// Drives three scenarios against the instrumented mock client:
// an invalid tool format error, a model-retrieval error, and one success.
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const baseClient = new MockAnthropic({ apiKey: 'mock-api-key' });
    const client = instrumentAnthropicAiClient(baseClient);

    // 1. Invalid tool format: plain text appears before the tool_result block.
    // https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/implement-tool-use#handling-tool-use-and-tool-result-content-blocks
    await client
      .messages.create({
        model: 'invalid-format',
        messages: [
          {
            role: 'user',
            content: [
              { type: 'text', text: 'Here are the results:' }, // ❌ Text before tool_result
              { type: 'tool_result', tool_use_id: 'toolu_01' },
            ],
          },
        ],
      })
      .catch(() => {
        // Error expected
      });

    // 2. Retrieval of a model that does not exist in the mock.
    await client.models.retrieve('nonexistent-model').catch(() => {
      // Error expected
    });

    // 3. Successful tool usage, as a baseline next to the failure cases.
    await client.messages.create({
      model: 'claude-3-haiku-20240307',
      messages: [{ role: 'user', content: 'Calculate 2+2' }],
      tools: [
        {
          name: 'calculator',
          description: 'Perform calculations',
          input_schema: {
            type: 'object',
            properties: { expression: { type: 'string' } },
            required: ['expression'],
          },
        },
      ],
    });
  });
}

run();
View file Open in desktop
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
import { instrumentAnthropicAiClient } from '@sentry/core';
import * as Sentry from '@sentry/node';

// Generator for default fallback
// Builds a well-behaved mock stream: start event, one text delta, stop event.
function createMockDefaultFallbackStream() {
  return (async function* () {
    yield { type: 'content_block_start', index: 0 };
    yield {
      type: 'content_block_delta',
      index: 0,
      delta: { text: 'This stream will work fine.' },
    };
    yield { type: 'content_block_stop', index: 0 };
  })();
}

// Generator that errors midway through streaming
// Builds a mock stream that emits a start event and a single content chunk,
// then rejects with 'Stream interrupted' on the next pull.
function createMockMidwayErrorStream() {
  return (async function* () {
    // Initial event so the consumer sees the stream start successfully.
    yield {
      type: 'content_block_start',
      message: {
        id: 'msg_error_stream_1',
        type: 'message',
        role: 'assistant',
        model: 'claude-3-haiku-20240307',
        content: [],
        usage: { input_tokens: 5 },
      },
    };

    // One chunk of real content before the failure.
    yield { type: 'content_block_delta', delta: { text: 'This stream will ' } };

    // Fail midway through the stream.
    await new Promise(resolve => setTimeout(resolve, 5));
    throw new Error('Stream interrupted');
  })();
}

// Minimal stand-in for the Anthropic SDK client covering the streaming
// surface used by the instrumentation: messages.create (with stream: true)
// and messages.stream.
class MockAnthropic {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.messages = {
      create: this._messagesCreate.bind(this),
      stream: this._messagesStream.bind(this),
    };
  }

  // Simulates client.messages.create. With stream: true the 'error-stream-init'
  // model rejects immediately and 'error-stream-midway' returns a stream that
  // errors after one chunk; everything else resolves with a plain message.
  async _messagesCreate(params) {
    await new Promise(resolve => setTimeout(resolve, 5));

    if (params?.stream === true) {
      // Reject before any chunk is produced.
      if (params.model === 'error-stream-init') {
        throw new Error('Failed to initialize stream');
      }
      // Start streaming, then fail after the first content chunk.
      if (params.model === 'error-stream-midway') {
        return createMockMidwayErrorStream();
      }
    }

    // Non-streaming (or unrecognized model) fallback.
    return {
      id: 'msg_mock123',
      type: 'message',
      model: params.model,
      role: 'assistant',
      content: [{ type: 'text', text: 'Non-stream response' }],
      usage: { input_tokens: 5, output_tokens: 7 },
    };
  }

  // Simulates client.messages.stream, which always yields a stream (or throws
  // on initialization for the 'error-stream-init' model).
  async _messagesStream(params) {
    await new Promise(resolve => setTimeout(resolve, 5));

    if (params.model === 'error-stream-init') {
      throw new Error('Failed to initialize stream');
    }
    if (params.model === 'error-stream-midway') {
      return createMockMidwayErrorStream();
    }

    return createMockDefaultFallbackStream();
  }
}

// Drives four streaming-error scenarios against the instrumented mock client:
// init failures and midway failures, each via messages.create and messages.stream.
async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const baseClient = new MockAnthropic({ apiKey: 'mock-api-key' });
    const client = instrumentAnthropicAiClient(baseClient);

    // Pull every chunk from a stream; callers swallow the expected error.
    const drain = async stream => {
      for await (const _ of stream) {
        void _;
      }
    };

    // 1) Initialization failure via messages.create({ stream: true })
    await client
      .messages.create({
        model: 'error-stream-init',
        messages: [{ role: 'user', content: 'This will fail immediately' }],
        stream: true,
      })
      .catch(() => {
        // Error expected
      });

    // 2) Initialization failure via messages.stream
    await client
      .messages.stream({
        model: 'error-stream-init',
        messages: [{ role: 'user', content: 'This will also fail immediately' }],
      })
      .catch(() => {
        // Error expected
      });

    // 3) Mid-stream failure via messages.create({ stream: true })
    try {
      const stream = await client.messages.create({
        model: 'error-stream-midway',
        messages: [{ role: 'user', content: 'This will fail midway' }],
        stream: true,
      });
      await drain(stream);
    } catch {
      // Error expected
    }

    // 4) Mid-stream failure via messages.stream
    try {
      const stream = await client.messages.stream({
        model: 'error-stream-midway',
        messages: [{ role: 'user', content: 'This will also fail midway' }],
      });
      await drain(stream);
    } catch {
      // Error expected
    }
  });
}

run();
View file Open in desktop
Original file line number Diff line number Diff line change
Expand Up @@ -348,4 +348,101 @@ describe('Anthropic integration', () => {
.completed();
});
});

// Additional error scenarios - Streaming errors
// Span expectations for scenario-stream-errors.mjs: two spans where the stream
// fails at initialization (status 'internal_error') and two where it fails
// midway (status 'ok', carrying the text that streamed before the error).
const EXPECTED_STREAM_ERROR_SPANS = {
transaction: 'main',
spans: expect.arrayContaining([
// Error with messages.create on stream initialization
expect.objectContaining({
description: 'messages error-stream-init stream-response',
op: 'gen_ai.messages',
status: 'internal_error', // Actual status coming from the instrumentation
data: expect.objectContaining({
'gen_ai.request.model': 'error-stream-init',
'gen_ai.request.stream': true,
}),
}),
// Error with messages.stream on stream initialization
// (no 'gen_ai.request.stream' here: the stream API doesn't take that param)
expect.objectContaining({
description: 'messages error-stream-init stream-response',
op: 'gen_ai.messages',
status: 'internal_error', // Actual status coming from the instrumentation
data: expect.objectContaining({
'gen_ai.request.model': 'error-stream-init',
}),
}),
// Error midway with messages.create on streaming - note: The stream is started successfully
// so we get a successful span with the content that was streamed before the error
expect.objectContaining({
description: 'messages error-stream-midway stream-response',
op: 'gen_ai.messages',
status: 'ok',
data: expect.objectContaining({
'gen_ai.request.model': 'error-stream-midway',
'gen_ai.request.stream': true,
'gen_ai.response.streaming': true,
'gen_ai.response.text': 'This stream will ', // We received some data before error
}),
}),
// Error midway with messages.stream - same behavior, we get a span with the streamed data
expect.objectContaining({
description: 'messages error-stream-midway stream-response',
op: 'gen_ai.messages',
status: 'ok',
data: expect.objectContaining({
'gen_ai.request.model': 'error-stream-midway',
'gen_ai.response.streaming': true,
'gen_ai.response.text': 'This stream will ', // We received some data before error
}),
}),
]),
};

// Runs scenario-stream-errors.mjs under both ESM and CJS with PII enabled.
createEsmAndCjsTests(__dirname, 'scenario-stream-errors.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('handles streaming errors correctly', async () => {
await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_ERROR_SPANS }).start().completed();
});
});

// Additional error scenarios - Tool errors and model retrieval errors
// Span expectations for scenario-errors.mjs: both mock API errors surface as
// 'unknown_error' spans, while the successful tool call yields an 'ok' span
// with serialized tool_calls data.
const EXPECTED_ERROR_SPANS = {
transaction: 'main',
spans: expect.arrayContaining([
// Invalid tool format error (mock throws a 400)
expect.objectContaining({
description: 'messages invalid-format',
op: 'gen_ai.messages',
status: 'unknown_error',
data: expect.objectContaining({
'gen_ai.request.model': 'invalid-format',
}),
}),
// Model retrieval error (mock throws a 404)
expect.objectContaining({
description: 'models nonexistent-model',
op: 'gen_ai.models',
status: 'unknown_error',
data: expect.objectContaining({
'gen_ai.request.model': 'nonexistent-model',
}),
}),
// Successful tool usage (for comparison)
expect.objectContaining({
description: 'messages claude-3-haiku-20240307',
op: 'gen_ai.messages',
status: 'ok',
data: expect.objectContaining({
'gen_ai.request.model': 'claude-3-haiku-20240307',
'gen_ai.response.tool_calls': expect.stringContaining('tool_ok_1'),
}),
}),
]),
};

// Runs scenario-errors.mjs under both ESM and CJS with PII enabled.
createEsmAndCjsTests(__dirname, 'scenario-errors.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('handles tool errors and model retrieval errors correctly', async () => {
await createRunner().ignore('event').expect({ transaction: EXPECTED_ERROR_SPANS }).start().completed();
});
});
});
Loading
Loading

AltStyle によって変換されたページ (->オリジナル) /