Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit b225838

Browse files
quick refactor
1 parent c6c2071 commit b225838

File tree

1 file changed

+52
-147
lines changed

1 file changed

+52
-147
lines changed
Lines changed: 52 additions & 147 deletions
Original file line number | Diff line number | Diff line change
@@ -1,6 +1,6 @@
11
import { captureException } from '../../exports';
22
import { SPAN_STATUS_ERROR } from '../../tracing';
3-
import type { Span } from '../../types-hoist/span';
3+
import type { Span, SpanAttributeValue } from '../../types-hoist/span';
44
import {
55
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
66
GEN_AI_RESPONSE_ID_ATTRIBUTE,
@@ -23,15 +23,15 @@ interface StreamingState {
2323
/** Reasons for finishing the response, as reported by the API. */
2424
finishReasons: string[];
2525
/** The response ID. */
26-
responseId: string;
26+
responseId?: string;
2727
/** The model name. */
28-
responseModel: string;
28+
responseModel?: string;
2929
/** Number of prompt/input tokens used. */
30-
promptTokens: number|undefined;
30+
promptTokens?: number;
3131
/** Number of completion/output tokens used. */
32-
completionTokens: number|undefined;
32+
completionTokens?: number;
3333
/** Number of total tokens used. */
34-
totalTokens: number|undefined;
34+
totalTokens?: number;
3535
/** Accumulated tool calls (finalized) */
3636
toolCalls: Array<Record<string, unknown>>;
3737
}
@@ -43,25 +43,14 @@ interface StreamingState {
4343
* @returns Whether an error occurred
4444
*/
4545
function isErrorChunk(chunk: GoogleGenAIResponse, span: Span): boolean {
46-
// Check for errors in the response
47-
if (chunk && typeof chunk === 'object') {
48-
// Google GenAI may include error information in promptFeedback
49-
if (chunk.promptFeedback && typeof chunk.promptFeedback === 'object') {
50-
const feedback = chunk.promptFeedback;
51-
if (feedback.blockReason && typeof feedback.blockReason === 'string') {
52-
// Use blockReasonMessage if available (more descriptive), otherwise use blockReason (enum)
53-
const errorMessage = feedback.blockReasonMessage ? feedback.blockReasonMessage : feedback.blockReason;
54-
55-
span.setStatus({ code: SPAN_STATUS_ERROR, message: `Content blocked: ${errorMessage}` });
56-
captureException(`Content blocked: ${errorMessage}`, {
57-
mechanism: {
58-
handled: false,
59-
type: 'auto.ai.google_genai',
60-
},
61-
});
62-
return true;
63-
}
64-
}
46+
const feedback = chunk?.promptFeedback;
47+
if (feedback?.blockReason) {
48+
const message = feedback.blockReasonMessage ?? feedback.blockReason;
49+
span.setStatus({ code: SPAN_STATUS_ERROR, message: `Content blocked: ${message}` });
50+
captureException(`Content blocked: ${message}`, {
51+
mechanism: { handled: false, type: 'auto.ai.google_genai' },
52+
});
53+
return true;
6554
}
6655
return false;
6756
}
@@ -72,30 +61,14 @@ function isErrorChunk(chunk: GoogleGenAIResponse, span: Span): boolean {
7261
* @param state - The state of the streaming process
7362
*/
7463
function handleResponseMetadata(chunk: GoogleGenAIResponse, state: StreamingState): void {
75-
if (!chunk || typeof chunk !== 'object') return;
76-
77-
// Extract response ID
78-
if (chunk.responseId && typeof chunk.responseId === 'string') {
79-
state.responseId = chunk.responseId;
80-
}
81-
82-
// Extract model version
83-
if (chunk.modelVersion && typeof chunk.modelVersion === 'string') {
84-
state.responseModel = chunk.modelVersion;
85-
}
86-
87-
// Extract usage metadata
88-
if (chunk.usageMetadata && typeof chunk.usageMetadata === 'object') {
89-
const usage = chunk.usageMetadata;
90-
if (typeof usage.promptTokenCount === 'number') {
91-
state.promptTokens = usage.promptTokenCount;
92-
}
93-
if (typeof usage.candidatesTokenCount === 'number') {
94-
state.completionTokens = usage.candidatesTokenCount;
95-
}
96-
if (typeof usage.totalTokenCount === 'number') {
97-
state.totalTokens = usage.totalTokenCount;
98-
}
64+
if (typeof chunk.responseId === 'string') state.responseId = chunk.responseId;
65+
if (typeof chunk.modelVersion === 'string') state.responseModel = chunk.modelVersion;
66+
67+
const usage = chunk.usageMetadata;
68+
if (usage) {
69+
if (typeof usage.promptTokenCount === 'number') state.promptTokens = usage.promptTokenCount;
70+
if (typeof usage.candidatesTokenCount === 'number') state.completionTokens = usage.candidatesTokenCount;
71+
if (typeof usage.totalTokenCount === 'number') state.totalTokens = usage.totalTokenCount;
9972
}
10073
}
10174

@@ -106,46 +79,24 @@ function handleResponseMetadata(chunk: GoogleGenAIResponse, state: StreamingStat
10679
* @param recordOutputs - Whether to record outputs
10780
*/
10881
function handleCandidateContent(chunk: GoogleGenAIResponse, state: StreamingState, recordOutputs: boolean): void {
109-
// Check for direct functionCalls getter first
110-
if (chunk.functionCalls && Array.isArray(chunk.functionCalls)) {
111-
const functionCalls = chunk.functionCalls;
112-
for (const functionCall of functionCalls) {
113-
state.toolCalls.push(functionCall);
114-
}
82+
if (Array.isArray(chunk.functionCalls)) {
83+
state.toolCalls.push(...chunk.functionCalls);
11584
}
11685

117-
if (!chunk?.candidates) return;
118-
119-
for (const candidate of chunk.candidates) {
120-
if (!candidate || typeof candidate !== 'object') continue;
121-
122-
// Extract finish reason
123-
if (candidate.finishReason) {
124-
if (!state.finishReasons.includes(candidate.finishReason)) {
125-
state.finishReasons.push(candidate.finishReason);
126-
}
86+
for (const candidate of chunk.candidates ?? []) {
87+
if (candidate?.finishReason && !state.finishReasons.includes(candidate.finishReason)) {
88+
state.finishReasons.push(candidate.finishReason);
12789
}
12890

129-
// Extract content
130-
if (candidate.content) {
131-
const content = candidate.content;
132-
if (content.parts) {
133-
for (const part of content.parts) {
134-
// Extract text content for output recording
135-
if (recordOutputs && part.text) {
136-
state.responseTexts.push(part.text);
137-
}
138-
139-
// Extract function calls (fallback method)
140-
if (part.functionCall) {
141-
state.toolCalls.push({
142-
type: 'function',
143-
id: part.functionCall?.id,
144-
name: part.functionCall?.name,
145-
arguments: part.functionCall?.args,
146-
});
147-
}
148-
}
91+
for (const part of candidate?.content?.parts ?? []) {
92+
if (recordOutputs && part.text) state.responseTexts.push(part.text);
93+
if (part.functionCall) {
94+
state.toolCalls.push({
95+
type: 'function',
96+
id: part.functionCall.id,
97+
name: part.functionCall.name,
98+
arguments: part.functionCall.args,
99+
});
149100
}
150101
}
151102
}
@@ -159,14 +110,7 @@ function handleCandidateContent(chunk: GoogleGenAIResponse, state: StreamingStat
159110
* @param span - The span to update
160111
*/
161112
function processChunk(chunk: GoogleGenAIResponse, state: StreamingState, recordOutputs: boolean, span: Span): void {
162-
if (!chunk || typeof chunk !== 'object') {
163-
return;
164-
}
165-
166-
const isError = isErrorChunk(chunk, span);
167-
// No further metadata or content will be sent to process
168-
if (isError) return;
169-
113+
if (!chunk || isErrorChunk(chunk, span)) return;
170114
handleResponseMetadata(chunk, state);
171115
handleCandidateContent(chunk, state, recordOutputs);
172116
}
@@ -184,11 +128,6 @@ export async function* instrumentStream(
184128
const state: StreamingState = {
185129
responseTexts: [],
186130
finishReasons: [],
187-
responseId: '',
188-
responseModel: '',
189-
promptTokens: undefined,
190-
completionTokens: undefined,
191-
totalTokens: undefined,
192131
toolCalls: [],
193132
};
194133

@@ -198,61 +137,27 @@ export async function* instrumentStream(
198137
yield chunk;
199138
}
200139
} finally {
201-
// Set common response attributes if available once the stream is finished
202-
if (state.responseId) {
203-
span.setAttributes({
204-
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: state.responseId,
205-
});
206-
}
207-
if (state.responseModel) {
208-
span.setAttributes({
209-
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: state.responseModel,
210-
});
211-
}
212-
213-
// Set token usage attributes
214-
if (state.promptTokens !== undefined) {
215-
span.setAttributes({
216-
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: state.promptTokens,
217-
});
218-
}
219-
if (state.completionTokens !== undefined) {
220-
span.setAttributes({
221-
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: state.completionTokens,
222-
});
223-
}
224-
if (state.totalTokens !== undefined) {
225-
span.setAttributes({
226-
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: state.totalTokens,
227-
});
228-
}
229-
230-
// Mark as streaming response
231-
span.setAttributes({
140+
const attrs: Record<string, SpanAttributeValue> = {
232141
[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
233-
});
142+
};
234143

235-
// Set finish reasons if available
236-
if (state.finishReasons.length > 0) {
237-
span.setAttributes({
238-
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: JSON.stringify(state.finishReasons),
239-
});
240-
}
144+
if (state.responseId) attrs[GEN_AI_RESPONSE_ID_ATTRIBUTE] = state.responseId;
145+
if (state.responseModel) attrs[GEN_AI_RESPONSE_MODEL_ATTRIBUTE] = state.responseModel;
146+
if (state.promptTokens !== undefined) attrs[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] = state.promptTokens;
147+
if (state.completionTokens !== undefined) attrs[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] = state.completionTokens;
148+
if (state.totalTokens !== undefined) attrs[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE] = state.totalTokens;
241149

242-
// Set response text if recording outputs
243-
if (recordOutputs && state.responseTexts.length > 0) {
244-
span.setAttributes({
245-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: state.responseTexts.join(''),
246-
});
150+
if (state.finishReasons.length) {
151+
attrs[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE] = JSON.stringify(state.finishReasons);
247152
}
248-
249-
// Set tool calls if any were captured
250-
if (recordOutputs && state.toolCalls.length > 0) {
251-
span.setAttributes({
252-
[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: JSON.stringify(state.toolCalls),
253-
});
153+
if (recordOutputs && state.responseTexts.length) {
154+
attrs[GEN_AI_RESPONSE_TEXT_ATTRIBUTE] = state.responseTexts.join('');
155+
}
156+
if (recordOutputs && state.toolCalls.length) {
157+
attrs[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE] = JSON.stringify(state.toolCalls);
254158
}
255159

160+
span.setAttributes(attrs);
256161
span.end();
257162
}
258163
}

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /