From 92cca195c8325502e25044b72914da7c7a94434c Mon Sep 17 00:00:00 2001
From: "google-labs-jules[bot]" <161369871+google-labs-jules[bot]@users.noreply.github.com>
Date: Mon, 9 Jun 2025 15:11:25 +0000
Subject: [PATCH] feat: Add optional debug flag to LLM providers

This commit introduces an optional `debug` boolean configuration to the
OpenAI, Gemini, and WebLlm providers.

When `debug` is set to `true` in the provider's configuration, the provider
will print useful debugging information to the console. This includes:

- For OpenAI and Gemini providers:
  - The HTTP method being used.
  - The endpoint URL (API keys in Gemini's direct mode URL are redacted).
  - Request headers (with 'Authorization' header redacted for OpenAI).
  - The request body.
- For WebLlmProvider:
  - The model being used.
  - The system message.
  - The response format.
  - The engine configuration.
  - Chat completion options.
  - The messages being processed.

This feature is intended to help users troubleshoot issues with LLM provider
integrations, especially when using proxy mode or when unexpected responses
are received.

The `debug` option defaults to `false` if not specified.
---
 src/providers/GeminiProvider.ts                   | 16 ++++++++++++++++
 src/providers/OpenaiProvider.ts                   | 12 ++++++++++++
 src/providers/WebLlmProvider.ts                   | 13 +++++++++++++
 .../provider-config/GeminiProviderConfig.ts       |  2 ++
 .../provider-config/OpenaiProviderConfig.ts       |  2 ++
 .../provider-config/WebLlmProviderConfig.ts       |  1 +
 6 files changed, 46 insertions(+)

diff --git a/src/providers/GeminiProvider.ts b/src/providers/GeminiProvider.ts
index e27137a..998b305 100644
--- a/src/providers/GeminiProvider.ts
+++ b/src/providers/GeminiProvider.ts
@@ -14,6 +14,7 @@ class GeminiProvider implements Provider {
   private systemMessage?: string;
   private responseFormat!: 'stream' | 'json';
   private messageParser?: (messages: Message[]) => GeminiProviderMessage[];
+  private debug: boolean = false;
 
   /**
    * Sets default values for the provider based on given configuration. Configuration guide here:
@@ -27,6 +28,7 @@ class GeminiProvider implements Provider {
     this.systemMessage = config.systemMessage;
     this.responseFormat = config.responseFormat ?? 'stream';
     this.messageParser = config.messageParser;
+    this.debug = config.debug ?? false;
     this.headers = {
       'Content-Type': 'application/json',
       Accept: this.responseFormat === 'stream' ? 'text/event-stream' : 'application/json',
@@ -52,6 +54,20 @@ class GeminiProvider implements Provider {
    * @param messages messages to include in the request
    */
   public async *sendMessages(messages: Message[]): AsyncGenerator {
+    if (this.debug) {
+      const sanitizedEndpoint = this.endpoint.replace(/\?key=([^&]+)/, '?key=[REDACTED]');
+      // Headers in Gemini usually don't contain sensitive info like 'Authorization'
+      // as the API key is in the URL, but we'll keep a general sanitization pattern.
+      const sanitizedHeaders = { ...this.headers };
+      // If any sensitive header were to be added in the future, it should be removed here.
+      // delete sanitizedHeaders['Some-Sensitive-Header'];
+      console.log('[GeminiProvider] Request:', {
+        method: this.method,
+        endpoint: sanitizedEndpoint,
+        headers: sanitizedHeaders,
+        body: this.constructBodyWithMessages(messages),
+      });
+    }
     const res = await fetch(this.endpoint, {
       method: this.method,
       headers: this.headers as HeadersInit,
diff --git a/src/providers/OpenaiProvider.ts b/src/providers/OpenaiProvider.ts
index 1c5ebbf..c77bcbd 100644
--- a/src/providers/OpenaiProvider.ts
+++ b/src/providers/OpenaiProvider.ts
@@ -14,6 +14,7 @@ class OpenaiProvider implements Provider {
   private systemMessage?: string;
   private responseFormat!: 'stream' | 'json';
   private messageParser?: (messages: Message[]) => OpenaiProviderMessage[];
+  private debug: boolean = false;
 
   /**
    * Sets default values for the provider based on given configuration. Configuration guide here:
@@ -27,6 +28,7 @@ class OpenaiProvider implements Provider {
     this.systemMessage = config.systemMessage;
     this.responseFormat = config.responseFormat ?? 'stream';
     this.messageParser = config.messageParser;
+    this.debug = config.debug ?? false;
     this.headers = {
       'Content-Type': 'application/json',
       Accept: this.responseFormat === 'stream' ? 'text/event-stream' : 'application/json',
@@ -54,6 +56,16 @@ class OpenaiProvider implements Provider {
    * @param messages messages to include in the request
    */
   public async *sendMessages(messages: Message[]): AsyncGenerator {
+    if (this.debug) {
+      const sanitizedHeaders = { ...this.headers };
+      delete sanitizedHeaders['Authorization'];
+      console.log('[OpenaiProvider] Request:', {
+        method: this.method,
+        endpoint: this.endpoint,
+        headers: sanitizedHeaders,
+        body: this.constructBodyWithMessages(messages),
+      });
+    }
     const res = await fetch(this.endpoint, {
       method: this.method,
       headers: this.headers as HeadersInit,
diff --git a/src/providers/WebLlmProvider.ts b/src/providers/WebLlmProvider.ts
index 21c4ed3..b052dfd 100644
--- a/src/providers/WebLlmProvider.ts
+++ b/src/providers/WebLlmProvider.ts
@@ -15,6 +15,7 @@ class WebLlmProvider implements Provider {
   private chatCompletionOptions: Record;
   private messageParser?: (messages: Message[]) => WebLlmProviderMessage[];
   private engine?: MLCEngine;
+  private debug: boolean = false;
 
   /**
    * Sets default values for the provider based on given configuration. Configuration guide here:
@@ -29,6 +30,7 @@ class WebLlmProvider implements Provider {
     this.messageParser = config.messageParser;
     this.engineConfig = config.engineConfig ?? {};
     this.chatCompletionOptions = config.chatCompletionOptions ?? {};
+    this.debug = config.debug ?? false;
     this.createEngine();
   }
 
@@ -52,6 +54,17 @@ class WebLlmProvider implements Provider {
       await this.createEngine();
     }
 
+    if (this.debug) {
+      console.log('[WebLlmProvider] Request:', {
+        model: this.model,
+        systemMessage: this.systemMessage,
+        responseFormat: this.responseFormat,
+        engineConfig: this.engineConfig,
+        chatCompletionOptions: this.chatCompletionOptions,
+        messages: this.constructBodyWithMessages(messages).messages, // Log messages being sent
+      });
+    }
+
     const result = await this.engine?.chat.completions.create(this.constructBodyWithMessages(messages));
     if (result && Symbol.asyncIterator in result) {
       for await (const chunk of result as AsyncIterable) {
diff --git a/src/types/provider-config/GeminiProviderConfig.ts b/src/types/provider-config/GeminiProviderConfig.ts
index c0bada9..58132dc 100644
--- a/src/types/provider-config/GeminiProviderConfig.ts
+++ b/src/types/provider-config/GeminiProviderConfig.ts
@@ -15,6 +15,7 @@ type DirectConfig = {
   headers?: Record;
   body?: Record;
   messageParser?: (messages: Message[]) => GeminiProviderMessage[];
+  debug?: boolean;
 };
 
 /**
@@ -30,6 +31,7 @@ type ProxyConfig = {
   headers?: Record;
   body?: Record;
   messageParser?: (messages: Message[]) => GeminiProviderMessage[];
+  debug?: boolean;
 };
 
 /**
diff --git a/src/types/provider-config/OpenaiProviderConfig.ts b/src/types/provider-config/OpenaiProviderConfig.ts
index 9b8ac1b..d92e187 100644
--- a/src/types/provider-config/OpenaiProviderConfig.ts
+++ b/src/types/provider-config/OpenaiProviderConfig.ts
@@ -15,6 +15,7 @@ type DirectConfig = {
   headers?: Record;
   body?: Record;
   messageParser?: (messages: Message[]) => OpenaiProviderMessage[];
+  debug?: boolean;
 };
 
 /**
@@ -30,6 +31,7 @@ type ProxyConfig = {
   headers?: Record;
   body?: Record;
   messageParser?: (messages: Message[]) => OpenaiProviderMessage[];
+  debug?: boolean;
 };
 
 /**
diff --git a/src/types/provider-config/WebLlmProviderConfig.ts b/src/types/provider-config/WebLlmProviderConfig.ts
index e20f566..6f0ab9b 100644
--- a/src/types/provider-config/WebLlmProviderConfig.ts
+++ b/src/types/provider-config/WebLlmProviderConfig.ts
@@ -12,6 +12,7 @@ type WebLlmProviderConfig = {
   engineConfig?: MLCEngineConfig;
   chatCompletionOptions?: Record;
   messageParser?: (messages: Message[]) => WebLlmProviderMessage[];
+  debug?: boolean;
 };
 
 export type { WebLlmProviderConfig };
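For reference, the redaction applied inside the new debug branches can be exercised on its own. The sketch below is not code from this patch: the helper name `logSanitizedRequest` and the sample values are illustrative assumptions, while the `?key=` regex and the `Authorization` deletion mirror the lines added above.

```ts
// Standalone sketch of the redaction logic used by the debug branches in this patch.
// `logSanitizedRequest` is an illustrative helper, not part of the providers.
function logSanitizedRequest(
  label: string,
  method: string,
  endpoint: string,
  headers: Record<string, string>,
  body: unknown
): void {
  // Redact an API key passed as a query parameter (Gemini direct mode).
  const sanitizedEndpoint = endpoint.replace(/\?key=([^&]+)/, '?key=[REDACTED]');

  // Drop the Authorization header (OpenAI) before printing.
  const sanitizedHeaders = { ...headers };
  delete sanitizedHeaders['Authorization'];

  console.log(`[${label}] Request:`, {
    method,
    endpoint: sanitizedEndpoint,
    headers: sanitizedHeaders,
    body,
  });
}

// Example invocation with made-up values:
logSanitizedRequest(
  'OpenaiProvider',
  'POST',
  'https://api.example.com/v1/chat/completions',
  { 'Content-Type': 'application/json', Authorization: 'Bearer sk-secret' },
  { messages: [{ role: 'user', content: 'Hello' }] }
);
```

Enabling the behaviour in the first place is just `debug: true` alongside the existing fields of the relevant provider config; as the commit message notes, it defaults to `false`.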