Skip to content

Commit ab3c471

Browse files
feat: 更新测试客户端以使用 OpenAI 端点并移除 Gemini API 实现
Co-authored-by: aider (vertex_ai/gemini-2.5-pro) <aider@aider.chat>
1 parent 334b681 commit ab3c471

File tree

2 files changed

+34
-133
lines changed

2 files changed

+34
-133
lines changed

packages/mcp-server/src/bridge/bridge.ts

Lines changed: 0 additions & 106 deletions
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,6 @@ export class GcliMcpBridge {
129129
for (const tool of allTools) {
130130
this.registerGcliTool(tool);
131131
}
132-
this.registerGeminiApiTool();
133132
}
134133

135134
private registerGcliTool(tool: GcliTool) {
@@ -149,111 +148,6 @@ export class GcliMcpBridge {
149148
);
150149
}
151150

152-
private registerGeminiApiTool() {
153-
this.mcpServer.registerTool(
154-
'call_gemini_api',
155-
{
156-
title: 'Gemini API Proxy',
157-
description:
158-
"Proxies a request to the Gemini API through the CLI's authenticated client. Allows dynamic provision of tools and a system prompt for this call.",
159-
inputSchema: {
160-
messages: z
161-
.any()
162-
.describe(
163-
'The conversation history or prompt to send to the Gemini API.',
164-
),
165-
tools: z
166-
.array(z.any())
167-
.optional()
168-
.describe(
169-
'An array of tool definitions (FunctionDeclarations) to make available to the model for this call.',
170-
),
171-
systemInstruction: z
172-
.string()
173-
.optional()
174-
.describe(
175-
"A system prompt to guide the model's behavior for this call.",
176-
),
177-
},
178-
},
179-
async (args, { sendNotification, signal }) => {
180-
const { messages, tools, systemInstruction } = args as {
181-
messages: Content[];
182-
tools?: Tool[];
183-
systemInstruction?: string;
184-
};
185-
186-
const contentGenerator = this.config
187-
.getGeminiClient()
188-
.getContentGenerator();
189-
190-
// 1. Prepare the generation config with dynamic tools and system prompt.
191-
const generationConfig: GenerateContentConfig = {
192-
tools: tools,
193-
systemInstruction: systemInstruction,
194-
};
195-
196-
// The history for the one-shot chat is all messages except the last one.
197-
const history = messages.slice(0, -1);
198-
// The new prompt is the parts from the last message.
199-
const lastMessage = messages[messages.length - 1];
200-
const newPrompt = lastMessage?.parts;
201-
202-
if (!newPrompt) {
203-
// This should ideally return a proper JSON-RPC error.
204-
// For now, we'll let it proceed, which will likely fail downstream
205-
// in sendMessageStream if `newPrompt` is undefined.
206-
console.error(
207-
`${LOG_PREFIX} ❌ Invalid 'call_gemini_api' arguments: 'messages' array is empty or last message has no parts.`,
208-
);
209-
}
210-
211-
// 2. Create a new, stateless GeminiChat instance for this single call.
212-
const oneShotChat = new GeminiChat(
213-
this.config,
214-
contentGenerator,
215-
generationConfig, // Pass dynamic config here
216-
history, // Start with the provided history
217-
);
218-
219-
// 3. Call sendMessageStream on the new instance.
220-
const stream = await oneShotChat.sendMessageStream({
221-
message: newPrompt || [], // Pass only the parts of the new message
222-
});
223-
224-
let fullTextResponse = '';
225-
for await (const event of stream) {
226-
if (signal.aborted) {
227-
console.log(`${LOG_PREFIX} 🛑 Request was aborted by the client.`);
228-
break;
229-
}
230-
let chunkText = '';
231-
if (event.candidates && event.candidates.length > 0) {
232-
const parts = event.candidates[0].content?.parts || [];
233-
for (const part of parts) {
234-
if (part.text) {
235-
chunkText += part.text;
236-
}
237-
}
238-
}
239-
240-
if (chunkText) {
241-
fullTextResponse += chunkText;
242-
await sendNotification({
243-
method: 'notifications/message',
244-
params: { level: 'info', data: `[STREAM_CHUNK]${chunkText}` },
245-
});
246-
}
247-
// Note: Tool call events from the proxied call are not currently forwarded.
248-
// This could be a future enhancement if needed.
249-
}
250-
251-
return {
252-
content: [{ type: 'text', text: fullTextResponse }],
253-
};
254-
},
255-
);
256-
}
257151

258152
private convertJsonSchemaToZod(jsonSchema: any): any {
259153
// Helper to convert a single JSON schema property to a Zod type.

packages/mcp-server/src/mcp-test-client.ts

Lines changed: 34 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ import {
66
} from '@modelcontextprotocol/sdk/types.js'; // <--- 引入 Notification 类型
77
import { URL } from 'url';
88
import { z } from 'zod';
9+
import OpenAI from 'openai';
910

1011
// Define the schema for a text content block, as it's not exported by the SDK.
1112
const TextContentBlockSchema = z.object({
@@ -137,54 +138,60 @@ async function runTestClient() {
137138
JSON.stringify(tool.inputSchema, null, 2),
138139
);
139140
});
140-
141-
// New test case for call_gemini_api
142-
await testGeminiApiCall(client);
143141
} else {
144142
logWithPrefix('⚠️ Server returned an empty list of tools.');
145143
}
146144
} catch (error) {
147145
console.error(`${LOG_PREFIX} ❌ Failed to list tools:`, error);
148146
} finally {
149-
logWithPrefix('👋 Closing connection...');
147+
logWithPrefix('👋 Closing MCP connection...');
150148
await client.close();
151-
logWithPrefix('🚪 Connection closed. Test finished.');
149+
logWithPrefix('🚪 MCP Connection closed.');
152150
}
151+
152+
// Now, test the OpenAI endpoint
153+
await testOpenAIEndpoint();
154+
155+
logWithPrefix('✅ Test finished.');
153156
}
154157

155-
async function testGeminiApiCall(client: Client) {
158+
async function testOpenAIEndpoint() {
156159
logWithPrefix('-----------------------------------');
157-
logWithPrefix('🚀 Testing "call_gemini_api" tool...');
160+
logWithPrefix('🚀 Testing OpenAI compatible endpoint...');
158161

159-
try {
160-
const result = await client.request(
161-
{
162-
method: 'tools/call',
163-
params: {
164-
name: 'call_gemini_api',
165-
arguments: {
166-
messages: [
167-
{ role: 'user', parts: [{ text: 'Why is the sky blue?' }] },
168-
],
169-
},
170-
},
171-
},
172-
z.object({ content: z.array(TextContentBlockSchema) }),
173-
);
162+
const openai = new OpenAI({
163+
baseURL: 'http://localhost:8765/v1',
164+
apiKey: 'not-needed', // The API key is not used by our local server
165+
});
174166

175-
logWithPrefix('✅ Successfully received response from call_gemini_api!');
176-
const responseText = result.content[0]?.text || '';
177-
logWithPrefix(' Response Text:', responseText.substring(0, 100) + '...');
167+
try {
168+
const stream = await openai.chat.completions.create({
169+
model: 'gemini-pro', // This can be any string, it's passed to the transformer
170+
messages: [{ role: 'user', content: 'Why is the sky blue?' }],
171+
stream: true,
172+
});
173+
174+
let fullResponse = '';
175+
logWithPrefix('✅ Stream opened. Receiving response...');
176+
for await (const chunk of stream) {
177+
const content = chunk.choices[0]?.delta?.content || '';
178+
fullResponse += content;
179+
process.stdout.write(content);
180+
}
181+
console.log(''); // Newline after stream
178182

179-
if (responseText.toLowerCase().includes('scattering')) {
183+
if (fullResponse.toLowerCase().includes('scattering')) {
180184
logWithPrefix('✅ Validation successful: Response contains "scattering".');
181185
} else {
182186
console.error(
183187
`${LOG_PREFIX} ❌ Validation failed: Response did not contain "scattering".`,
184188
);
185189
}
186190
} catch (error) {
187-
console.error(`${LOG_PREFIX} ❌ Failed to call call_gemini_api:`, error);
191+
console.error(
192+
`${LOG_PREFIX} ❌ Failed to call OpenAI endpoint:`,
193+
error,
194+
);
188195
}
189196
logWithPrefix('-----------------------------------');
190197
}

0 commit comments

Comments (0)