diff --git a/src/providers/GeminiProvider.ts b/src/providers/GeminiProvider.ts
index e27137a..998b305 100644
--- a/src/providers/GeminiProvider.ts
+++ b/src/providers/GeminiProvider.ts
@@ -14,6 +14,7 @@ class GeminiProvider implements Provider {
   private systemMessage?: string;
   private responseFormat!: 'stream' | 'json';
   private messageParser?: (messages: Message[]) => GeminiProviderMessage[];
+  private debug: boolean = false;

   /**
    * Sets default values for the provider based on given configuration. Configuration guide here:
@@ -27,6 +28,7 @@ class GeminiProvider implements Provider {
     this.systemMessage = config.systemMessage;
     this.responseFormat = config.responseFormat ?? 'stream';
     this.messageParser = config.messageParser;
+    this.debug = config.debug ?? false;
     this.headers = {
       'Content-Type': 'application/json',
       Accept: this.responseFormat === 'stream' ? 'text/event-stream' : 'application/json',
@@ -52,6 +54,20 @@ class GeminiProvider implements Provider {
    * @param messages messages to include in the request
    */
   public async *sendMessages(messages: Message[]): AsyncGenerator<string> {
+    if (this.debug) {
+      const sanitizedEndpoint = this.endpoint.replace(/\?key=([^&]+)/, '?key=[REDACTED]');
+      // Headers in Gemini usually don't contain sensitive info like 'Authorization'
+      // as the API key is in the URL, but we'll keep a general sanitization pattern.
+      const sanitizedHeaders = { ...this.headers };
+      // If any sensitive header were to be added in the future, it should be removed here.
+      // delete sanitizedHeaders['Some-Sensitive-Header'];
+      console.log('[GeminiProvider] Request:', {
+        method: this.method,
+        endpoint: sanitizedEndpoint,
+        headers: sanitizedHeaders,
+        body: this.constructBodyWithMessages(messages),
+      });
+    }
     const res = await fetch(this.endpoint, {
       method: this.method,
       headers: this.headers as HeadersInit,
diff --git a/src/providers/OpenaiProvider.ts b/src/providers/OpenaiProvider.ts
index 1c5ebbf..c77bcbd 100644
--- a/src/providers/OpenaiProvider.ts
+++ b/src/providers/OpenaiProvider.ts
@@ -14,6 +14,7 @@ class OpenaiProvider implements Provider {
   private systemMessage?: string;
   private responseFormat!: 'stream' | 'json';
   private messageParser?: (messages: Message[]) => OpenaiProviderMessage[];
+  private debug: boolean = false;

   /**
    * Sets default values for the provider based on given configuration. Configuration guide here:
@@ -27,6 +28,7 @@ class OpenaiProvider implements Provider {
     this.systemMessage = config.systemMessage;
     this.responseFormat = config.responseFormat ?? 'stream';
     this.messageParser = config.messageParser;
+    this.debug = config.debug ?? false;
     this.headers = {
       'Content-Type': 'application/json',
       Accept: this.responseFormat === 'stream' ? 'text/event-stream' : 'application/json',
@@ -54,6 +56,16 @@ class OpenaiProvider implements Provider {
    * @param messages messages to include in the request
    */
   public async *sendMessages(messages: Message[]): AsyncGenerator<string> {
+    if (this.debug) {
+      const sanitizedHeaders = { ...this.headers };
+      delete sanitizedHeaders['Authorization'];
+      console.log('[OpenaiProvider] Request:', {
+        method: this.method,
+        endpoint: this.endpoint,
+        headers: sanitizedHeaders,
+        body: this.constructBodyWithMessages(messages),
+      });
+    }
     const res = await fetch(this.endpoint, {
       method: this.method,
       headers: this.headers as HeadersInit,
diff --git a/src/providers/WebLlmProvider.ts b/src/providers/WebLlmProvider.ts
index 21c4ed3..b052dfd 100644
--- a/src/providers/WebLlmProvider.ts
+++ b/src/providers/WebLlmProvider.ts
@@ -15,6 +15,7 @@ class WebLlmProvider implements Provider {
   private chatCompletionOptions: Record<string, unknown>;
   private messageParser?: (messages: Message[]) => WebLlmProviderMessage[];
   private engine?: MLCEngine;
+  private debug: boolean = false;

   /**
    * Sets default values for the provider based on given configuration. Configuration guide here:
@@ -29,6 +30,7 @@ class WebLlmProvider implements Provider {
     this.messageParser = config.messageParser;
     this.engineConfig = config.engineConfig ?? {};
     this.chatCompletionOptions = config.chatCompletionOptions ?? {};
+    this.debug = config.debug ?? false;
     this.createEngine();
   }

@@ -52,6 +54,17 @@ class WebLlmProvider implements Provider {
       await this.createEngine();
     }

+    if (this.debug) {
+      console.log('[WebLlmProvider] Request:', {
+        model: this.model,
+        systemMessage: this.systemMessage,
+        responseFormat: this.responseFormat,
+        engineConfig: this.engineConfig,
+        chatCompletionOptions: this.chatCompletionOptions,
+        messages: this.constructBodyWithMessages(messages).messages, // Log messages being sent
+      });
+    }
+
     const result = await this.engine?.chat.completions.create(this.constructBodyWithMessages(messages));
     if (result && Symbol.asyncIterator in result) {
       for await (const chunk of result as AsyncIterable<ChatCompletionChunk>) {
diff --git a/src/types/provider-config/GeminiProviderConfig.ts b/src/types/provider-config/GeminiProviderConfig.ts
index c0bada9..58132dc 100644
--- a/src/types/provider-config/GeminiProviderConfig.ts
+++ b/src/types/provider-config/GeminiProviderConfig.ts
@@ -15,6 +15,7 @@ type DirectConfig = {
   headers?: Record<string, unknown>;
   body?: Record<string, unknown>;
   messageParser?: (messages: Message[]) => GeminiProviderMessage[];
+  debug?: boolean;
 };

 /**
@@ -30,6 +31,7 @@ type ProxyConfig = {
   headers?: Record<string, unknown>;
   body?: Record<string, unknown>;
   messageParser?: (messages: Message[]) => GeminiProviderMessage[];
+  debug?: boolean;
 };

 /**
diff --git a/src/types/provider-config/OpenaiProviderConfig.ts b/src/types/provider-config/OpenaiProviderConfig.ts
index 9b8ac1b..d92e187 100644
--- a/src/types/provider-config/OpenaiProviderConfig.ts
+++ b/src/types/provider-config/OpenaiProviderConfig.ts
@@ -15,6 +15,7 @@ type DirectConfig = {
   headers?: Record<string, unknown>;
   body?: Record<string, unknown>;
   messageParser?: (messages: Message[]) => OpenaiProviderMessage[];
+  debug?: boolean;
 };

 /**
@@ -30,6 +31,7 @@ type ProxyConfig = {
   headers?: Record<string, unknown>;
   body?: Record<string, unknown>;
   messageParser?: (messages: Message[]) => OpenaiProviderMessage[];
+  debug?: boolean;
 };

 /**
diff --git a/src/types/provider-config/WebLlmProviderConfig.ts b/src/types/provider-config/WebLlmProviderConfig.ts
index e20f566..6f0ab9b 100644
--- a/src/types/provider-config/WebLlmProviderConfig.ts
+++ b/src/types/provider-config/WebLlmProviderConfig.ts
@@ -12,6 +12,7 @@ type WebLlmProviderConfig = {
   engineConfig?: MLCEngineConfig;
   chatCompletionOptions?: Record<string, unknown>;
   messageParser?: (messages: Message[]) => WebLlmProviderMessage[];
+  debug?: boolean;
 };

 export type { WebLlmProviderConfig };
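
For review purposes, a minimal sketch of how the new flag would be switched on from the consuming side, assuming the provider is constructed directly with the config type patched above. The import path and the `apiKey` field are assumptions for illustration; only `debug` is introduced by this diff:

```ts
import GeminiProvider from './src/providers/GeminiProvider';

// A sketch, not definitive usage: apiKey is a hypothetical config field
// shown only for context; `debug` is the flag added in this diff.
const provider = new GeminiProvider({
  apiKey: 'YOUR_API_KEY', // hypothetical; not part of these hunks
  responseFormat: 'stream',
  debug: true, // logs method, key-redacted endpoint, sanitized headers, and body per request
});
```

With `debug` left unset, the `config.debug ?? false` fallback keeps logging off, so existing consumers see no behavior change.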
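
One note on the Gemini endpoint sanitization: the pattern `/\?key=([^&]+)/` only matches when `key` is the first query parameter, since it anchors on the literal `?`. A small standalone check of the behavior, using a made-up endpoint URL:

```ts
// Same redaction pattern as in the GeminiProvider hunk above.
const redact = (endpoint: string) => endpoint.replace(/\?key=([^&]+)/, '?key=[REDACTED]');

// Redacted as expected when `key` comes first:
console.log(redact('https://example.test/v1/models/demo:generateContent?key=SECRET&alt=sse'));
// -> https://example.test/v1/models/demo:generateContent?key=[REDACTED]&alt=sse

// Not redacted when `key` is not the first parameter:
console.log(redact('https://example.test/v1/models/demo:generateContent?alt=sse&key=SECRET'));
// -> https://example.test/v1/models/demo:generateContent?alt=sse&key=SECRET
```

If that ordering can occur, a pattern like `/([?&])key=([^&]+)/` with `'$1key=[REDACTED]'` as the replacement would cover both cases.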