|
7 | 7 | type Tool as GcliTool, |
8 | 8 | type ToolResult, |
9 | 9 | GeminiChat, |
| 10 | + WebFetchTool, // <--- add this import |
| 11 | + WebSearchTool, // <--- add this import |
10 | 12 | } from '@google/gemini-cli-core'; |
11 | 13 | import { |
12 | 14 | type CallToolResult, |
@@ -163,76 +165,66 @@ export class GcliMcpBridge { |
163 | 165 | } |
164 | 166 |
|
165 | 167 | private registerGcliTool(tool: GcliTool, mcpServer: McpServer) { |
166 | | - const inputSchema = this.convertJsonSchemaToZod(tool.schema.parameters); |
| 168 | + let toolInstanceForExecution = tool; // default: use the original tool instance passed in from the ToolRegistry |
| 169 | + |
| 170 | + // Check whether this is a web tool that needs special handling |
| 171 | + if (tool.name === 'google_web_search' || tool.name === 'web_fetch') { |
| 172 | + const toolModel = process.env.GEMINI_TOOLS_DEFAULT_MODEL; |
| 173 | + |
| 174 | + // If a dedicated model is configured for these tools, create new config and tool instances |
| 175 | + if (toolModel) { |
| 176 | + if (this.debugMode) { |
| 177 | + console.log( |
| 178 | + `[MCP SERVER] Using custom model "${toolModel}" for tool "${tool.name}"`, |
| 179 | + ); |
| 180 | + } |
| 181 | + |
| 182 | + // Step 1: Create a Proxy around this.config. |
| 183 | + // The proxy object intercepts calls to the getModel method. |
| 184 | + const proxyConfig = new Proxy(this.config, { |
| 185 | + get: (target, prop, receiver) => { |
| 186 | + // If the method being accessed is getModel, return our designated tool model |
| 187 | + if (prop === 'getModel') { |
| 188 | + return () => toolModel; |
| 189 | + } |
| 190 | + // Delegate all other property and method access to the original config object |
| 191 | + return Reflect.get(target, prop, receiver); |
| 192 | + }, |
| 193 | + }) as Config; |
| 194 | + |
| 195 | + // Step 2: Create the new tool instance with this proxied config, based on the tool name |
| 196 | + if (tool.name === 'google_web_search') { |
| 197 | + toolInstanceForExecution = new WebSearchTool(proxyConfig); |
| 198 | + } else { |
| 199 | + toolInstanceForExecution = new WebFetchTool(proxyConfig); |
| 200 | + } |
| 201 | + } |
| 202 | + } |
167 | 203 |
|
168 | 204 | mcpServer.registerTool( |
169 | 205 | tool.name, |
170 | 206 | { |
171 | 207 | title: tool.displayName, |
172 | 208 | description: tool.description, |
173 | | - inputSchema: inputSchema, |
| 209 | + inputSchema: this.convertJsonSchemaToZod(tool.schema.parameters), |
174 | 210 | }, |
175 | 211 | async ( |
176 | 212 | args: Record<string, unknown>, |
177 | 213 | extra: { signal: AbortSignal }, |
178 | 214 | ) => { |
179 | 215 | try { |
180 | | - // --- START: Isolation logic for tools that call the LLM --- |
181 | | - if (tool.name === 'google_web_search' || tool.name === 'web_fetch') { |
182 | | - // Create an isolated, one-shot chat session for this call |
183 | | - const oneShotChat = new GeminiChat( |
184 | | - this.config, |
185 | | - this.config.getGeminiClient().getContentGenerator(), |
186 | | - {}, // Use default generationConfig |
187 | | - [], // Start with a clean history |
188 | | - ); |
189 | | - |
190 | | - // Prepare the request for the Gemini API |
191 | | - const request = { |
192 | | - message: [{ text: args.query as string }], |
193 | | - config: { |
194 | | - tools: [{ googleSearch: {} }] as Tool[], // For web_search |
195 | | - }, |
196 | | - }; |
197 | | - |
198 | | - // Adjust tool config for web_fetch |
199 | | - if (tool.name === 'web_fetch') { |
200 | | - // web_fetch uses a different tool configuration |
201 | | - request.config.tools = [{ urlContext: {} }]; |
202 | | - } |
203 | | - |
204 | | - // Send the request using the one-shot session |
205 | | - const response = await oneShotChat.sendMessage(request); |
206 | | - const resultText = response.text || ''; |
207 | | - |
208 | | - // Convert the result to the MCP format |
209 | | - const mcpResult = this.convertGcliResultToMcpResult({ |
210 | | - llmContent: resultText, |
211 | | - returnDisplay: `Search results for "${args.query}" returned.`, |
212 | | - }); |
213 | | - |
214 | | - // Attach grounding metadata if it exists |
215 | | - if (response.candidates?.[0]?.groundingMetadata) { |
216 | | - (mcpResult as any)._meta = { |
217 | | - groundingMetadata: response.candidates[0].groundingMetadata, |
218 | | - }; |
219 | | - } |
220 | | - |
221 | | - return mcpResult; |
222 | | - } |
223 | | - // --- END: Isolation logic --- |
224 | | - |
225 | | - // For other tools that don't call the LLM, use the original execute method |
226 | | - const result = await tool.execute(args, extra.signal); |
| 216 | + // *** Key point: all tools now execute through this single, unified path *** |
| 217 | + // toolInstanceForExecution is either the original tool or a new instance carrying the custom model config |
| 218 | + const result = await toolInstanceForExecution.execute( |
| 219 | + args, |
| 220 | + extra.signal, |
| 221 | + ); |
227 | 222 | return this.convertGcliResultToMcpResult(result); |
228 | 223 | } catch (e) { |
229 | 224 | const errorMessage = e instanceof Error ? e.message : String(e); |
230 | 225 | console.error( |
231 | 226 | `${LOG_PREFIX} Error executing tool '${tool.name}': ${errorMessage}`, |
232 | 227 | ); |
233 | | - |
234 | | - // Simply throw an Error, and the MCP SDK will automatically handle it |
235 | | - // as an appropriate JSON-RPC error. |
236 | 228 | throw new Error( |
237 | 229 | `Error executing tool '${tool.name}': ${errorMessage}`, |
238 | 230 | ); |
|
0 commit comments