diff --git a/Dockerfile b/Dockerfile
index c8d3d0c..51e065f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,7 +2,7 @@ FROM node:18-alpine
 WORKDIR /app
 
-RUN apk add --no-cache libc6-compat
+RUN apk add --no-cache libc6-compat openssl
 RUN apk update
 
 # Install pnpm
@@ -27,4 +27,4 @@ RUN pnpm run build
 EXPOSE 3000
 
 # Start the application
-CMD ["sh", "./start.sh"]
\ No newline at end of file
+CMD ["sh", "./start.sh"]
diff --git a/package.json b/package.json
index 1876a3b..f3cc388 100644
--- a/package.json
+++ b/package.json
@@ -17,6 +17,15 @@
   "prisma": {
     "seed": "tsx prisma/seed.ts"
   },
+  "pnpm": {
+    "onlyBuiltDependencies": [
+      "@prisma/client",
+      "@prisma/engines",
+      "esbuild",
+      "prisma",
+      "sqlite3"
+    ]
+  },
   "dependencies": {
     "@ai-sdk/amazon-bedrock": "^0.0.17",
     "@ai-sdk/anthropic": "^0.0.46",
diff --git a/src/app/[...openai]/route.ts b/src/app/[...openai]/route.ts
index 2c023f7..c772ac2 100644
--- a/src/app/[...openai]/route.ts
+++ b/src/app/[...openai]/route.ts
@@ -18,35 +18,173 @@ const openaiClient = new OpenAI({
 
 // Allow streaming responses up to 30 seconds
 export const maxDuration = 30;
 
+function transformCursorMessages(messages: any[]): any[] {
+  if (!Array.isArray(messages)) {
+    console.warn('Messages is not an array, returning empty array');
+    return [];
+  }
+
+  return messages.map((message) => {
+    if (!message || typeof message !== 'object') {
+      console.warn('Invalid message object, skipping:', message);
+      return null;
+    }
+
+    // Handle tool role messages from Cursor
+    if (message.role === "tool") {
+      // Transform tool messages to assistant messages with tool results
+      // For Anthropic, we should not include tool_call_id or name in the main message
+      return {
+        role: "assistant",
+        content: `Tool response (${message.name || 'unknown'}): ${message.content || ""}`,
+      };
+    }
+
+    // Handle complex content arrays (e.g., from Claude's thinking feature)
+    let transformedContent = message.content;
+    if (Array.isArray(message.content)) {
+      // Extract text content from content arrays, filtering out thinking and other unsupported types
+      const textParts = message.content
+        .filter((item: any) => item.type === "text" || item.type === "tool_result")
+        .map((item: any) => {
+          if (item.type === "text") {
+            return item.text;
+          } else if (item.type === "tool_result") {
+            // Handle nested content in tool_result
+            const toolContent = Array.isArray(item.content)
+              ? item.content.map((c: any) => c.text || '').join('\n')
+              : item.content;
+            return `Tool result (${item.tool_use_id || 'unknown'}): ${toolContent}`;
+          }
+          return '';
+        })
+        .filter(Boolean)
+        .join('\n\n');
+
+      // If we have tool_use items, we might need to handle them differently
+      const toolUses = message.content.filter((item: any) => item.type === "tool_use");
+      if (toolUses.length > 0 && message.role === "assistant") {
+        // This is an assistant message with tool calls
+        return {
+          role: "assistant",
+          content: textParts || "",
+          // Note: We're not passing tool_calls for now as they need special formatting
+        };
+      }
+
+      transformedContent = textParts || "";
+    }
+
+    // Handle assistant messages with tool_calls
+    if (message.role === "assistant" && message.tool_calls) {
+      return {
+        role: "assistant",
+        content: transformedContent || "",
+        // Note: tool_calls might need different handling for Anthropic
+        tool_calls: message.tool_calls,
+      };
+    }
+
+    // Ensure only valid roles are passed
+    const validRoles = ["system", "user", "assistant"];
+    if (!validRoles.includes(message.role)) {
+      console.warn(`Invalid role "${message.role}", defaulting to "user"`);
+      return {
+        role: "user",
+        content: transformedContent || "",
+      };
+    }
+
+    // Pass through other messages as-is, but ensure they have required fields
+    return {
+      role: message.role,
+      content: transformedContent || "",
+      ...(message.name && message.role === "system" && { name: message.name }),
+    };
+  }).filter(Boolean); // Remove any null entries
+}
+
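Reviewer note on `transformCursorMessages` above: every incoming Cursor message is collapsed into a plain `{ role, content }` pair so the downstream providers (Anthropic in particular) never see `tool` roles or content-part arrays. A minimal sketch of the intended mapping; the sample shapes are assumed, not captured from real Cursor traffic:

```ts
// Illustrative input for transformCursorMessages (shapes assumed).
const cursorMessages = [
  { role: "system", content: "You are a helpful assistant." },
  {
    role: "assistant",
    content: [
      { type: "thinking", thinking: "..." }, // dropped by the type filter
      { type: "text", text: "Let me check that file." },
    ],
  },
  { role: "tool", name: "read_file", content: "export const x = 1;" },
];

// Expected output: everything flattened to { role, content } strings.
// [
//   { role: "system",    content: "You are a helpful assistant." },
//   { role: "assistant", content: "Let me check that file." },
//   { role: "assistant", content: "Tool response (read_file): export const x = 1;" },
// ]
```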
+function transformTools(openaiTools: any[]): Record<string, any> | undefined {
+  if (!Array.isArray(openaiTools) || openaiTools.length === 0) {
+    return undefined;
+  }
+
+  try {
+    const transformedTools: Record<string, any> = {};
+
+    for (const tool of openaiTools) {
+      if (tool?.type === "function" && tool?.function?.name) {
+        // Create a proper CoreTool structure for AI SDK
+        transformedTools[tool.function.name] = {
+          description: tool.function.description || "",
+          parameters: {
+            type: "object",
+            properties: tool.function.parameters?.properties || {},
+            required: tool.function.parameters?.required || [],
+            ...tool.function.parameters
+          },
+        };
+      }
+    }
+
+    return Object.keys(transformedTools).length > 0 ? transformedTools : undefined;
+  } catch (error) {
+    console.error("Error transforming tools:", error);
+    return undefined;
+  }
+}
+
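Reviewer note on `transformTools`: this converts the OpenAI wire format (an array of `{ type: "function", function: {...} }` entries) into the keyed record the AI SDK expects. One caveat, flagged as an assumption about the pinned SDK version: `generateText`/`streamText` usually want each tool's `parameters` to be a Zod schema or a value wrapped in the SDK's `jsonSchema()` helper, so handing over a raw JSON Schema object as done here may be related to the tools-disabled workaround further down. A sketch of the mapping, with an invented tool definition:

```ts
// Invented OpenAI-style tool definition, for illustration only.
const openaiTools = [
  {
    type: "function",
    function: {
      name: "get_weather",
      description: "Look up the weather for a city",
      parameters: {
        type: "object",
        properties: { city: { type: "string" } },
        required: ["city"],
      },
    },
  },
];

// transformTools(openaiTools) produces:
// {
//   get_weather: {
//     description: "Look up the weather for a city",
//     parameters: {
//       type: "object",
//       properties: { city: { type: "string" } },
//       required: ["city"],
//     },
//   },
// }
```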
 async function getAIModelClient(provider: string, model: string) {
+  if (!provider || !model) {
+    throw new Error("Provider and model are required");
+  }
+
   switch (provider.toLowerCase()) {
     case "openai":
+      if (!env.OPENAI_API_KEY) {
+        throw new Error("OpenAI API key is not configured");
+      }
       return openai(model);
     case "anthropic": {
+      if (!env.ANTHROPIC_API_KEY) {
+        throw new Error("Anthropic API key is not configured");
+      }
       const anthropicClient = createAnthropic({
         apiKey: env.ANTHROPIC_API_KEY,
       });
       return anthropicClient(model);
     }
     case "anthropiccached": {
+      if (!env.ANTHROPIC_API_KEY) {
+        throw new Error("Anthropic API key is not configured");
+      }
       const anthropicClient = createAnthropic({
         apiKey: env.ANTHROPIC_API_KEY,
       });
       return anthropicClient(model, { cacheControl: true });
     }
     case "cohere": {
+      if (!env.COHERE_API_KEY) {
+        throw new Error("Cohere API key is not configured");
+      }
       const cohereClient = createCohere({
         apiKey: env.COHERE_API_KEY,
       });
       return cohereClient(model);
     }
     case "mistral": {
+      if (!env.MISTRAL_API_KEY) {
+        throw new Error("Mistral API key is not configured");
+      }
       const mistralClient = createMistral({
         apiKey: env.MISTRAL_API_KEY,
       });
       return mistralClient(model);
     }
     case "groq": {
+      if (!env.GROQ_API_KEY) {
+        throw new Error("Groq API key is not configured");
+      }
       const groqClient = createOpenAI({
         apiKey: env.GROQ_API_KEY,
       });
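With the guards added above, a misconfigured deployment now fails at model construction with a named error instead of an opaque provider failure mid-request. Illustrative call site (the model id is only an example):

```ts
try {
  // Throws "Anthropic API key is not configured" when env.ANTHROPIC_API_KEY
  // is unset, before any network call is made.
  const aiModel = await getAIModelClient("anthropic", "claude-3-5-sonnet-20240620");
  // ... pass aiModel to generateText / streamText
} catch (err) {
  console.error("Model setup failed:", err);
}
```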
@@ -65,13 +203,21 @@ export async function POST(
   request: NextRequest,
   { params }: { params: { openai: string[] } },
 ) {
+  const startTime = Date.now(); // Track conversation start time
   const endpoint = params.openai.join("/");
+  console.log("POST request received:", {
+    endpoint,
+    url: request.url,
+    headers: Object.fromEntries(request.headers),
+  });
+
   if (endpoint !== "chat/completions" && endpoint !== "v1/chat/completions") {
     return NextResponse.json({ error: "Not found", endpoint }, { status: 404 });
   }
 
   const body = await request.json();
-  const { messages, model: cursorModel, stream = false, ...otherParams } = body;
+  console.log("Request body:", JSON.stringify(body, null, 2));
+  const { messages, model: cursorModel, stream = false, tools, ...otherParams } = body;
 
   try {
     const defaultConfig = await getDefaultConfiguration();
@@ -90,20 +236,43 @@ export async function POST(
       presencePenalty,
     } = defaultConfig;
 
-    if (!provider) {
+    if (!provider || typeof provider !== 'string') {
       throw new Error("Provider is not defined in the default configuration");
     }
 
+    if (!model || typeof model !== 'string') {
+      throw new Error("Model is not defined in the default configuration");
+    }
+
     const aiModel = await getAIModelClient(provider, model);
 
-    let modifiedMessages = messages;
+    // Validate and transform messages
+    if (!messages || !Array.isArray(messages)) {
+      throw new Error("Invalid messages format");
+    }
+
+    // Transform Cursor messages to AI SDK format
+    console.log("Original messages:", JSON.stringify(messages, null, 2));
+    let modifiedMessages = transformCursorMessages(messages);
+    console.log("Transformed messages:", JSON.stringify(modifiedMessages, null, 2));
+    console.log("Provider:", provider, "Model:", model);
+
+    if (modifiedMessages.length === 0) {
+      throw new Error("No valid messages found after transformation");
+    }
+
+    // Transform tools from OpenAI array format to AI SDK Record format
+    console.log("Tools parameter received:", tools);
+
+    // Temporarily disable tools for GPT models to debug the core issue
+    const validatedTools = provider.toLowerCase() === "openai" ? undefined : transformTools(tools);
 
     if (provider.toLowerCase() === "anthropiccached") {
-      const hasPotentialContext = messages.some(
+      const hasPotentialContext = modifiedMessages.some(
         (message: any) => message.name === "potential_context",
       );
 
-      modifiedMessages = messages.map((message: any) => {
+      modifiedMessages = modifiedMessages.map((message: any) => {
         if (message.name === "potential_context") {
           return {
             ...message,
@@ -133,7 +302,7 @@ export async function POST(
       )
         ? 8192
         : undefined,
-      // Add other parameters from defaultConfig if needed
+      ...(validatedTools && { tools: validatedTools }),
     };
 
     const logEntry = {
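The `...(validatedTools && { tools: validatedTools })` spread used in the request params above deserves a note: it omits the `tools` key entirely when nothing validated (including the temporary OpenAI bypass), rather than sending `tools: undefined`. A standalone illustration of the pattern:

```ts
// Conditional-spread pattern, as used for the generateText params.
const validatedTools = undefined; // e.g. provider === "openai" during debugging

const params = {
  model: "example-model", // placeholder
  ...(validatedTools && { tools: validatedTools }),
};

// The key is absent rather than set to undefined, which matters to SDKs
// that treat the mere presence of `tools` as "tool calling enabled".
console.log("tools" in params); // false
```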
@@ -186,6 +355,9 @@ export async function POST(
             (outputTokens / 1000000) * modelCost.outputTokenCost;
           const totalCost = inputCost + outputCost;
 
+          console.log('Streaming onFinish - toolCalls:', toolCalls);
+          console.log('Streaming onFinish - toolResults:', toolResults);
+
           logEntry.response = {
             text,
             toolCalls,
@@ -237,15 +409,22 @@ export async function POST(
           "Content-Type": "text/event-stream",
           "Cache-Control": "no-cache",
           Connection: "keep-alive",
+          "Access-Control-Allow-Origin": "*",
+          "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+          "Access-Control-Allow-Headers": "Content-Type, Authorization, x-api-key, ngrok-skip-browser-warning",
         },
       });
     }
 
     // For non-streaming requests, use the AI SDK
     const result = await generateText({
       model: aiModel,
-      messages,
+      messages: modifiedMessages,
+      ...(validatedTools && { tools: validatedTools }),
     });
 
+    console.log('Non-streaming result - toolCalls:', result.toolCalls);
+    console.log('Non-streaming result - toolResults:', result.toolResults);
+
     const inputTokens = result.usage?.promptTokens ?? 0;
     const outputTokens = result.usage?.completionTokens ?? 0;
     const totalTokens = result.usage?.totalTokens ?? 0;
@@ -267,7 +446,13 @@ export async function POST(
     };
 
     await insertLog(logEntry);
-    return NextResponse.json(result);
+    return NextResponse.json(result, {
+      headers: {
+        "Access-Control-Allow-Origin": "*",
+        "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+        "Access-Control-Allow-Headers": "Content-Type, Authorization, x-api-key, ngrok-skip-browser-warning",
+      },
+    });
   } catch (error) {
     console.error("Error in chat completion:", error);
     const errorMessage = error instanceof Error ? error.message : String(error);
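The streaming and non-streaming paths above share the same cost accounting: token counts from `usage` scaled by per-million-token rates from `modelCost`. Worked out with invented numbers:

```ts
// Cost math as in the onFinish callback (rates invented for illustration).
const inputTokens = 12_500;
const outputTokens = 1_800;
const modelCost = { inputTokenCost: 3.0, outputTokenCost: 15.0 }; // $ per 1M tokens

const inputCost = (inputTokens / 1_000_000) * modelCost.inputTokenCost;    // 0.0375
const outputCost = (outputTokens / 1_000_000) * modelCost.outputTokenCost; // 0.027
const totalCost = inputCost + outputCost;

console.log(totalCost.toFixed(4)); // "0.0645"
```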
@@ -293,28 +478,48 @@ export async function GET(
   { params }: { params: { openai: string[] } },
 ) {
   const endpoint = params.openai.join("/");
-
-  // Existing 'models' endpoint
-  if (endpoint === "models") {
+  console.log("GET request received:", {
+    endpoint,
+    url: request.url,
+    headers: Object.fromEntries(request.headers),
+  });
+
+  // Handle both 'models' and 'v1/models' endpoints
+  if (endpoint === "models" || endpoint === "v1/models") {
     const logEntry = {
       method: "GET",
-      url: "/api/v1/models",
+      url: `/api/${endpoint}`,
       headers: Object.fromEntries(request.headers),
       body: {},
       response: {},
       timestamp: new Date(),
+      metadata: {}, // Add empty metadata object to satisfy Prisma schema
     };
 
     try {
       const models = await openaiClient.models.list();
       logEntry.response = models;
       await insertLog(logEntry);
-      return NextResponse.json(models);
+      return NextResponse.json(models, {
+        headers: {
+          "Access-Control-Allow-Origin": "*",
+          "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+          "Access-Control-Allow-Headers": "Content-Type, Authorization, x-api-key, ngrok-skip-browser-warning",
+        },
+      });
     } catch (error) {
       console.error("Error fetching models:", error);
       logEntry.response = { error: String(error) };
+      logEntry.metadata = { error: String(error) }; // Add error to metadata
       await insertLog(logEntry);
-      return NextResponse.json({ error: String(error) }, { status: 500 });
+      return NextResponse.json({ error: String(error) }, {
+        status: 500,
+        headers: {
+          "Access-Control-Allow-Origin": "*",
+          "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+          "Access-Control-Allow-Headers": "Content-Type, Authorization, x-api-key, ngrok-skip-browser-warning",
+        },
+      });
     }
   }
 
@@ -333,7 +538,9 @@ export async function GET(
     return testGroq();
   }
 
-  return NextResponse.json({ error: "Not found" }, { status: 404 });
+  // Log any unmatched endpoints
+  console.log("Unmatched GET endpoint:", endpoint);
+  return NextResponse.json({ error: "Not found", endpoint }, { status: 404 });
 }
 
 async function testOpenAI() {
@@ -436,3 +643,26 @@ async function testGroq() {
     return NextResponse.json({ error: String(error) }, { status: 500 });
   }
 }
+
+// Handle OPTIONS requests for CORS
+export async function OPTIONS(
+  request: NextRequest,
+  { params }: { params: { openai: string[] } },
+) {
+  const endpoint = params.openai.join("/");
+  console.log("OPTIONS request received:", {
+    endpoint,
+    url: request.url,
+    headers: Object.fromEntries(request.headers),
+  });
+
+  return new NextResponse(null, {
+    status: 204,
+    headers: {
+      "Access-Control-Allow-Origin": "*",
+      "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
+      "Access-Control-Allow-Headers": "Content-Type, Authorization, x-api-key, ngrok-skip-browser-warning",
+      "Access-Control-Max-Age": "86400",
+    },
+  });
+}
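With the OPTIONS handler and the `Access-Control-*` headers now on every response, cross-origin callers (e.g. a browser-based client going through ngrok) should clear preflight. A quick smoke test against a local dev server; the URL and origin are assumed:

```ts
// CORS preflight smoke test (URL and origin assumed).
const res = await fetch("http://localhost:3000/v1/chat/completions", {
  method: "OPTIONS",
  headers: {
    Origin: "https://example-client.test",
    "Access-Control-Request-Method": "POST",
    "Access-Control-Request-Headers": "content-type, authorization",
  },
});

console.log(res.status); // expect 204
console.log(res.headers.get("access-control-allow-origin"));  // expect "*"
console.log(res.headers.get("access-control-allow-headers")); // expect the list above
```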
diff --git a/src/app/page.tsx b/src/app/page.tsx
index 99ac6af..8f4d8b1 100644
--- a/src/app/page.tsx
+++ b/src/app/page.tsx
@@ -71,7 +71,7 @@ export default function Home() {
       setLogs(logsData as unknown as Log[]); // Type assertion
       setStats(statsData);
       setAIConfigurations(configData as AIConfiguration[]); // Type assertion
-      const defaultConfig = configData.find((config) => config.isDefault);
+      const defaultConfig = configData?.find((config) => config.isDefault);
       setSelectedConfig(defaultConfig ? defaultConfig.name : "");
       setLoading(false);
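The one-character fix above matters because `configData` can be undefined when the fetch fails, and `.find` on undefined throws in render. In isolation:

```ts
// Why configData?.find(...) is safer (illustrative).
type AIConfiguration = { name: string; isDefault: boolean };

function pickDefault(configData?: AIConfiguration[]): string {
  const defaultConfig = configData?.find((config) => config.isDefault);
  return defaultConfig ? defaultConfig.name : ""; // falls back to ""
}

console.log(pickDefault(undefined)); // "" instead of a TypeError
console.log(pickDefault([{ name: "anthropic-default", isDefault: true }])); // "anthropic-default"
```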
diff --git a/src/components/LogsList.tsx b/src/components/LogsList.tsx
index 8f52536..ce143fb 100644
--- a/src/components/LogsList.tsx
+++ b/src/components/LogsList.tsx
@@ -42,16 +42,16 @@ const LogsListComponent: React.FC<LogsListProps> = ({
   return (
       {logs.map((log) => {
-        const totalTokens = log.metadata.totalTokens || 0;
-        const totalCost = log.metadata.totalCost || 0;
+        const totalTokens = log.metadata?.totalTokens || 0;
+        const totalCost = log.metadata?.totalCost || 0;
         const firstUserMessage =
-          log.body.messages.find((m) => m.role === "user" && !("name" in m))
+          log.body?.messages?.find((m) => m.role === "user" && !("name" in m))
             ?.content || "No message available";
         const truncatedMessage =
           firstUserMessage.slice(0, 100) +
           (firstUserMessage.length > 100 ? "..." : "");
         const isSelected = selectedLogId === log.id;
-        const providerColorClass = getProviderColor(log.metadata.provider);
+        const providerColorClass = getProviderColor(log.metadata?.provider || "other");
 
         return (
@@ ... @@ const LogsListComponent: React.FC<LogsListProps> = ({
-              {log.metadata.provider}
+              {log.metadata?.provider || "unknown"}
-              {log.metadata.model}
+              {log.metadata?.model || "unknown"}
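All of the optional chains in this component guard against rows logged before `metadata` was recorded. A narrower `Log` type would let the compiler flag every unsafe access; the field names below are inferred from what this component reads, so treat the exact shape as an assumption:

```ts
// Log shape inferred from this component's usage (assumed, not authoritative).
interface LogMessage {
  role: string;
  content: string;
  name?: string;
}

interface Log {
  id: string;
  body?: { messages?: LogMessage[] };
  metadata?: {
    provider?: string;
    model?: string;
    totalTokens?: number;
    totalCost?: number;
  };
}

// With body and metadata optional, `log.metadata.totalTokens` no longer
// type-checks, which is exactly the class of runtime crash patched here.
```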
diff --git a/src/middleware.ts b/src/middleware.ts
new file mode 100644
index 0000000..2ed7d71
--- /dev/null
+++ b/src/middleware.ts
@@ -0,0 +1,26 @@
+import { NextResponse } from 'next/server'
+import type { NextRequest } from 'next/server'
+
+export function middleware(request: NextRequest) {
+  console.log("=== INCOMING REQUEST ===");
+  console.log("Method:", request.method);
+  console.log("URL:", request.url);
+  console.log("Path:", new URL(request.url).pathname);
+  console.log("Headers:", Object.fromEntries(request.headers));
+  console.log("=======================");
+
+  return NextResponse.next();
+}
+
+// Configure which paths the middleware runs on
+export const config = {
+  matcher: [
+    /*
+     * Match all request paths except for the ones starting with:
+     * - _next/static (static files)
+     * - _next/image (image optimization files)
+     * - favicon.ico (favicon file)
+     */
+    '/((?!_next/static|_next/image|favicon.ico).*)',
+  ],
+}
\ No newline at end of file
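The matcher above is a negative lookahead: the request logger runs for API and page routes and skips Next's static asset paths. Roughly equivalent behavior as a plain regex, with invented sample paths:

```ts
// Approximation of the middleware matcher (sample paths invented).
const matcher = /^\/((?!_next\/static|_next\/image|favicon\.ico).*)$/;

console.log(matcher.test("/v1/chat/completions"));   // true  -> logged
console.log(matcher.test("/api/logs"));              // true  -> logged
console.log(matcher.test("/_next/static/chunk.js")); // false -> skipped
console.log(matcher.test("/favicon.ico"));           // false -> skipped
```

One caveat worth raising in review: logging full headers on every request captures `Authorization` and `x-api-key` values in plain text, so this middleware is best kept to development environments.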