Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 19 additions & 1 deletion services/llm-api/internal/domain/prompt/deep_research_module.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@ package prompt

import (
"context"
"fmt"
"strings"

"github.com/rs/zerolog/log"
Expand Down Expand Up @@ -37,30 +38,47 @@ func (m *DeepResearchModule) Name() string {
// 2. Module is not disabled
// ShouldApply reports whether the deep-research prompt module should run for
// this request. It returns true only when all of the following hold:
//   - the request context is live (non-nil and not cancelled),
//   - a prompt Context with non-nil Preferences is available,
//   - the module has not been disabled via preferences (isModuleDisabled),
//   - Preferences["deep_research"] is truthy: bool true, or the string
//     "true" (case-insensitive). Any other type or value yields false.
//
// The messages argument is currently unused; it is part of the module
// interface shared by all prompt modules.
func (m *DeepResearchModule) ShouldApply(ctx context.Context, promptCtx *Context, messages []openai.ChatCompletionMessage) bool {
	if ctx == nil || ctx.Err() != nil {
		log.Debug().Msg("[DEBUG] DeepResearchModule.ShouldApply: context is nil or cancelled")
		return false
	}
	if promptCtx == nil || promptCtx.Preferences == nil {
		log.Debug().Msg("[DEBUG] DeepResearchModule.ShouldApply: promptCtx or Preferences is nil")
		return false
	}

	// Check if module is disabled
	if isModuleDisabled(promptCtx.Preferences, m.Name()) {
		log.Debug().Msg("[DEBUG] DeepResearchModule.ShouldApply: module is disabled via preferences")
		return false
	}

	// Check if deep_research is enabled in preferences
	deepResearch, ok := promptCtx.Preferences["deep_research"]
	if !ok {
		log.Debug().
			Interface("preferences", promptCtx.Preferences).
			Msg("[DEBUG] DeepResearchModule.ShouldApply: deep_research not found in preferences")
		return false
	}

	log.Debug().
		Interface("deep_research_value", deepResearch).
		Str("deep_research_type", fmt.Sprintf("%T", deepResearch)).
		Msg("[DEBUG] DeepResearchModule.ShouldApply: found deep_research in preferences")

	// The preference may arrive as a JSON bool (true) or as a string
	// ("true"); anything else is treated as disabled.
	switch v := deepResearch.(type) {
	case bool:
		log.Debug().Bool("result", v).Msg("[DEBUG] DeepResearchModule.ShouldApply: returning bool value")
		return v
	case string:
		result := strings.ToLower(v) == "true"
		log.Debug().Bool("result", result).Msg("[DEBUG] DeepResearchModule.ShouldApply: returning parsed string value")
		return result
	default:
		log.Debug().
			Str("type", fmt.Sprintf("%T", deepResearch)).
			Msg("[DEBUG] DeepResearchModule.ShouldApply: unsupported type, returning false")
		return false
	}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ import (
"fmt"
"strings"

"github.com/rs/zerolog/log"

"jan-server/services/llm-api/internal/config"
domainmodel "jan-server/services/llm-api/internal/domain/model"
"jan-server/services/llm-api/internal/utils/crypto"
Expand All @@ -22,12 +24,29 @@ func NewInferenceProvider() *InferenceProvider {
}

// GetChatCompletionClient constructs a chat-completion client for the given
// provider. It first builds the underlying resty HTTP client (which may fail,
// e.g. on credential decryption), then wraps it together with the provider's
// display name and base URL. Debug logs trace each step for troubleshooting.
func (ip *InferenceProvider) GetChatCompletionClient(ctx context.Context, provider *domainmodel.Provider) (*chatclient.ChatCompletionClient, error) {
	log.Debug().
		Str("provider_id", provider.PublicID).
		Str("provider_name", provider.DisplayName).
		Str("base_url", provider.BaseURL).
		Str("provider_kind", string(provider.Kind)).
		Msg("[DEBUG] GetChatCompletionClient: creating client for provider")

	restClient, err := ip.createRestyClient(ctx, provider)
	if err != nil {
		// Surface the failure with provider identity attached, then propagate.
		log.Error().Err(err).
			Str("provider_id", provider.PublicID).
			Str("provider_name", provider.DisplayName).
			Msg("[DEBUG] GetChatCompletionClient: failed to create resty client")
		return nil, err
	}

	name := provider.DisplayName
	log.Debug().
		Str("provider_name", name).
		Str("base_url", provider.BaseURL).
		Msg("[DEBUG] GetChatCompletionClient: client created successfully")

	return chatclient.NewChatCompletionClient(restClient, name, provider.BaseURL), nil
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -255,6 +255,24 @@ func (h *ChatHandler) CreateChatCompletion(
if request.DeepResearch != nil && *request.DeepResearch {
preferences["deep_research"] = true
observability.AddSpanAttributes(ctx, attribute.Bool("chat.deep_research", true))
// Debug logging for deep research
debugLog := logger.GetLogger()
debugLog.Debug().
Bool("deep_research", true).
Str("conversation_id", conversationID).
Str("model", request.Model).
Msg("[DEBUG] deep_research flag enabled, adding to preferences")
} else {
// Debug logging when deep research is NOT enabled
debugLog := logger.GetLogger()
deepResearchValue := "nil"
if request.DeepResearch != nil {
deepResearchValue = fmt.Sprintf("%v", *request.DeepResearch)
}
debugLog.Debug().
Str("deep_research_value", deepResearchValue).
Str("conversation_id", conversationID).
Msg("[DEBUG] deep_research flag not enabled")
}

var profileSettings *usersettings.ProfileSettings
Expand Down Expand Up @@ -412,20 +430,53 @@ func (h *ChatHandler) CreateChatCompletion(

observability.AddSpanEvent(ctx, "calling_llm")

// Debug logging before LLM call
log := logger.GetLogger()
log.Debug().
Str("model", request.Model).
Str("conversation_id", conversationID).
Bool("stream", request.Stream).
Int("message_count", len(llmRequest.Messages)).
Msg("[DEBUG] calling LLM provider")

llmStartTime := time.Now()
if request.Stream {
log.Debug().
Str("model", request.Model).
Str("conversation_id", conversationID).
Msg("[DEBUG] starting streaming completion")
response, err = h.streamCompletion(ctx, reqCtx, chatClient, conv, llmRequest)
} else {
log.Debug().
Str("model", request.Model).
Str("conversation_id", conversationID).
Msg("[DEBUG] starting non-streaming completion")
response, err = h.callCompletion(ctx, chatClient, llmRequest)
}
llmDuration := time.Since(llmStartTime)

if err != nil {
// Enhanced error logging for debugging
log.Error().
Err(err).
Str("model", request.Model).
Str("conversation_id", conversationID).
Bool("stream", request.Stream).
Dur("duration", llmDuration).
Str("error_type", fmt.Sprintf("%T", err)).
Msg("[DEBUG] LLM completion failed - returning fallback response")

observability.AddSpanEvent(ctx, "completion_fallback",
attribute.String("error", err.Error()),
)
response = h.BuildFallbackResponse(request.Model)
err = nil
} else {
log.Debug().
Str("model", request.Model).
Str("conversation_id", conversationID).
Dur("duration", llmDuration).
Msg("[DEBUG] LLM completion succeeded")
}

// Add LLM response metrics
Expand Down Expand Up @@ -518,11 +569,27 @@ func (h *ChatHandler) callCompletion(
chatClient *chat.ChatCompletionClient,
request chat.CompletionRequest,
) (*openai.ChatCompletionResponse, error) {
log := logger.GetLogger()
log.Debug().
Str("model", request.Model).
Int("message_count", len(request.Messages)).
Msg("[DEBUG] callCompletion: calling CreateChatCompletion")

chatCompletion, err := chatClient.CreateChatCompletion(ctx, "", request)
if err != nil {
log.Error().
Err(err).
Str("model", request.Model).
Str("error_type", fmt.Sprintf("%T", err)).
Msg("[DEBUG] callCompletion: CreateChatCompletion failed")
return nil, platformerrors.AsError(ctx, platformerrors.LayerHandler, err, "chat completion failed")
}

log.Debug().
Str("model", request.Model).
Int("total_tokens", chatCompletion.Usage.TotalTokens).
Msg("[DEBUG] callCompletion: completed successfully")

return chatCompletion, nil
}

Expand Down Expand Up @@ -567,11 +634,26 @@ func (h *ChatHandler) streamCompletion(
}

// Stream completion response to context with callback
log := logger.GetLogger()
log.Debug().
Str("model", request.Model).
Int("message_count", len(request.Messages)).
Msg("[DEBUG] streamCompletion: calling StreamChatCompletionToContextWithCallback")

resp, err := chatClient.StreamChatCompletionToContextWithCallback(reqCtx, "", request, beforeDoneCallback)
if err != nil {
log.Error().
Err(err).
Str("model", request.Model).
Str("error_type", fmt.Sprintf("%T", err)).
Msg("[DEBUG] streamCompletion: streaming failed")
return nil, platformerrors.AsError(ctx, platformerrors.LayerHandler, err, "streaming completion failed")
}

log.Debug().
Str("model", request.Model).
Msg("[DEBUG] streamCompletion: completed successfully")

return resp, nil
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -62,9 +62,12 @@ func (p *FlexibleContentPart) ToOpenAIChatMessagePart() openai.ChatMessagePart {
Text: p.Text,
}
}
// Return empty part - will be filtered out by caller
// Note: We can't return nil, so we return an empty image part which will be filtered
// because empty text parts with omitempty cause {"type": "text"} without text field
// which fails validation on some LLM providers
return openai.ChatMessagePart{
Type: openai.ChatMessagePartTypeText,
Text: "",
Type: openai.ChatMessagePartTypeImageURL, // Will be filtered out by caller
}
Comment on lines +65 to 71
Copy link

Copilot AI Dec 19, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Returning an empty ImageURL part as a workaround is confusing and fragile. The comment acknowledges this is intentional for later filtering, but this creates tight coupling between this function and its callers. Consider refactoring to return an Optional/Maybe type, or modify the signature to return a pointer that can be nil, allowing explicit representation of "no valid part".

Copilot uses AI. Check for mistakes.
}
}
Expand All @@ -85,6 +88,12 @@ func parseFlexibleContentParts(jsonContent string) ([]openai.ChatMessagePart, er
log.Warn().Str("original_type", fp.Type).Msg("Skipping empty image part with no URL/data")
continue
}
// Filter out empty text parts (empty Text field would cause validation errors
// because go-openai uses omitempty, resulting in {"type": "text"} without text field)
if part.Type == openai.ChatMessagePartTypeText && part.Text == "" {
log.Warn().Str("original_type", fp.Type).Msg("Skipping empty text part with no content")
continue
}
result = append(result, part)
}

Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
package chat

import (
"fmt"
"net/http"

"github.com/gin-gonic/gin"
Expand Down Expand Up @@ -103,15 +104,43 @@ func (chatCompletionRoute *ChatCompletionRoute) PostCompletion(reqCtx *gin.Conte
Msg("chat completion request received")

// Delegate to chat handler
log.Debug().
Str("route", "/v1/chat/completions").
Str("model", request.Model).
Str("conversation_id", conversationID).
Bool("stream", request.Stream).
Msg("[DEBUG] delegating to chat handler")

result, err := chatCompletionRoute.chatHandler.CreateChatCompletion(reqCtx.Request.Context(), reqCtx, user.ID, request)
if err != nil {
// Log the full error for debugging
log.Error().
Err(err).
Str("route", "/v1/chat/completions").
Str("model", request.Model).
Str("conversation_id", conversationID).
Bool("stream", request.Stream).
Str("error_type", fmt.Sprintf("%T", err)).
Msg("[DEBUG] chat completion failed with error")

// Check if it's a validation error (user input too large)
if platformerrors.IsValidationError(err) {
log.Debug().
Err(err).
Str("route", "/v1/chat/completions").
Msg("[DEBUG] returning validation error response")
responses.HandleError(reqCtx, err, err.Error())
return
}

// For other errors, return fallback response
log.Warn().
Err(err).
Str("route", "/v1/chat/completions").
Str("model", request.Model).
Str("conversation_id", conversationID).
Msg("[DEBUG] returning fallback response due to error")

fallback := chatCompletionRoute.chatHandler.BuildFallbackResponse(request.Model)
chatResponse := chatresponses.NewChatCompletionResponse(fallback, "", nil, false)
reqCtx.JSON(http.StatusOK, chatResponse)
Expand Down
Loading
Loading