import { GoogleGenAI } from '@google/genai';
import * as Sentry from '@sentry/node';
import express from 'express';

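// NOTE: Sentry is assumed to be initialized before this script runs, e.g. via a
// preloaded instrument file (`node --import ./instrument.mjs app.mjs` — file name
// hypothetical). A minimal sketch of that setup (placeholder DSN, not part of
// this test):
//
//   Sentry.init({
//     dsn: 'https://public@dsn.ingest.sentry.io/1337',
//     tracesSampleRate: 1.0,
//   });
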
const PORT = 3334;

function startMockGoogleGenAIServer() {
  const app = express();
  app.use(express.json());

  // Streaming endpoint shared by models.generateContentStream and
  // chat.sendMessageStream (both POST to models/{model}:streamGenerateContent).
  // The escaped colon keeps Express from parsing ':streamGenerateContent' as a
  // route parameter.
  app.post('/v1beta/models/:model\\:streamGenerateContent', (req, res) => {
    const model = req.params.model;

    if (model === 'error-model') {
      res.status(404).set('x-request-id', 'mock-request-123').end('Model not found');
      return;
    }

    // Set headers for streaming response
    res.setHeader('Content-Type', 'application/json');
    res.setHeader('Transfer-Encoding', 'chunked');

    // Create a mock stream
    const mockStream = createMockStream(model);

    // Send chunks
    const sendChunk = async () => {
      const { value, done } = await mockStream.next();
      if (done) {
        res.end();
        return;
      }

      res.write(`data: ${JSON.stringify(value)}\n\n`);
      setTimeout(sendChunk, 10); // Small delay between chunks
    };

    sendChunk();
  });

  return app.listen(PORT);
}
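
// For reference, each chunk is written SSE-style ("data: <json>\n\n"). On the
// wire, the first happy-path chunk looks roughly like this (illustrative only):
//
//   data: {"candidates":[{"content":{"parts":[{"text":"Hello! "}],"role":"model"},"index":0}],"responseId":"mock-response-streaming-id","modelVersion":"gemini-1.5-pro"}
//
// The endpoint can also be exercised directly, e.g. (hypothetical local invocation):
//
//   curl -X POST 'http://localhost:3334/v1beta/models/gemini-1.5-flash:streamGenerateContent' \
//     -H 'Content-Type: application/json' -d '{"contents":[]}'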

// Helper generator that yields mock streaming chunks. For 'blocked-model' the
// first chunk carries promptFeedback with a blockReason instead of candidates.
async function* createMockStream(model) {
  if (model === 'blocked-model') {
    // First chunk: Contains promptFeedback with blockReason
    yield {
      promptFeedback: {
        blockReason: 'SAFETY',
        blockReasonMessage: 'The prompt was blocked due to safety concerns',
      },
      responseId: 'mock-blocked-response-streaming-id',
      modelVersion: 'gemini-1.5-pro',
    };

    // Note: In a real blocked scenario, there would typically be no more chunks
    // But we'll add one more to test that processing stops after the error
    yield {
      candidates: [
        {
          content: {
            parts: [{ text: 'This should not be processed' }],
            role: 'model',
          },
          index: 0,
        },
      ],
    };
    return;
  }

  // First chunk: Start of response with initial text
  yield {
    candidates: [
      {
        content: {
          parts: [{ text: 'Hello! ' }],
          role: 'model',
        },
        index: 0,
      },
    ],
    responseId: 'mock-response-streaming-id',
    modelVersion: 'gemini-1.5-pro',
  };

  // Second chunk: More text content
  yield {
    candidates: [
      {
        content: {
          parts: [{ text: 'This is a streaming ' }],
          role: 'model',
        },
        index: 0,
      },
    ],
  };

  // Third chunk: Final text content
  yield {
    candidates: [
      {
        content: {
          parts: [{ text: 'response from Google GenAI!' }],
          role: 'model',
        },
        index: 0,
      },
    ],
  };

  // Final chunk: End with finish reason and usage metadata
  yield {
    candidates: [
      {
        content: {
          parts: [{ text: '' }], // Empty text in final chunk
          role: 'model',
        },
        finishReason: 'STOP',
        index: 0,
      },
    ],
    usageMetadata: {
      promptTokenCount: 10,
      candidatesTokenCount: 12,
      totalTokenCount: 22,
    },
  };
}
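
// Quick sanity check of the generator in isolation (hypothetical debugging
// snippet, not used by the tests below):
//
//   for await (const chunk of createMockStream('gemini-1.5-pro')) {
//     console.log(chunk.candidates?.[0]?.content?.parts?.[0]?.text ?? '<no text>');
//   }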

async function run() {
  const server = startMockGoogleGenAIServer();

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const client = new GoogleGenAI({
      apiKey: 'mock-api-key',
      httpOptions: { baseUrl: `http://localhost:${PORT}` },
    });

    // Test 1: models.generateContentStream (streaming)
    const streamResponse = await client.models.generateContentStream({
      model: 'gemini-1.5-flash',
      config: {
        temperature: 0.7,
        topP: 0.9,
        maxOutputTokens: 100,
      },
      contents: [
        {
          role: 'user',
          parts: [{ text: 'Tell me about streaming' }],
        },
      ],
    });

    // Consume the stream
    for await (const _ of streamResponse) {
      void _;
    }

    // Test 2: chat.sendMessageStream (streaming)
    const streamingChat = client.chats.create({
      model: 'gemini-1.5-pro',
      config: {
        temperature: 0.8,
        topP: 0.9,
        maxOutputTokens: 150,
      },
    });

    const chatStreamResponse = await streamingChat.sendMessageStream({
      message: 'Tell me a streaming joke',
    });

    // Consume the chat stream
    for await (const _ of chatStreamResponse) {
      void _;
    }

    // Test 3: Blocked content streaming (should trigger error handling)
    try {
      const blockedStreamResponse = await client.models.generateContentStream({
        model: 'blocked-model',
        config: {
          temperature: 0.7,
        },
        contents: [
          {
            role: 'user',
            parts: [{ text: 'This should be blocked' }],
          },
        ],
      });

      // Consume the blocked stream
      for await (const _ of blockedStreamResponse) {
        void _;
      }
    } catch (error) {
      // Not expected to throw: the blocked prompt is surfaced by the streaming
      // instrumentation, which marks the span with an error status rather than
      // raising an error here.
    }

    // Test 4: Error handling for streaming
    try {
      const errorStreamResponse = await client.models.generateContentStream({
        model: 'error-model',
        config: {
          temperature: 0.7,
        },
        contents: [
          {
            role: 'user',
            parts: [{ text: 'This will fail' }],
          },
        ],
      });

      // Consume the error stream
      for await (const _ of errorStreamResponse) {
        void _;
      }
    } catch (error) {
      // Expected: the mock responds with a 404 for 'error-model'
    }
  });

  server.close();
}

run();