diff --git a/CHANGELOG.md b/CHANGELOG.md index 70bf260296..5d47e29653 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,12 @@ and this project adheres to [#3702](https://github.com/OpenFn/lightning/issues/3702) - Reintroduce the impeded project with hopefully better performance characteristics [#3542](https://github.com/OpenFn/lightning/issues/3542) +- **AI Assistant Streaming**: AI responses now stream in real-time with status updates + - Users see AI responses appear word-by-word as they're generated + - Status indicators show thinking progress (e.g., "Researching...", "Generating code...") + - Automatic error recovery with retry/cancel options + - Configurable timeout based on Apollo settings + [#3585](https://github.com/OpenFn/lightning/issues/3585) ### Changed @@ -66,6 +72,13 @@ and this project adheres to unauthorized edits when user roles change during active collaboration sessions [#3749](https://github.com/OpenFn/lightning/issues/3749) +### Technical + +- Added `Lightning.ApolloClient.SSEStream` for Server-Sent Events handling +- Enhanced `MessageProcessor` to support streaming responses +- Updated AI Assistant component with real-time markdown rendering +- Improved error handling for network failures and timeouts + ## [2.14.11] - 2025-10-15 ## [2.14.11-pre1] - 2025-10-15 diff --git a/assets/js/hooks/index.ts b/assets/js/hooks/index.ts index 4f4e2c82ae..fa9c50b103 100644 --- a/assets/js/hooks/index.ts +++ b/assets/js/hooks/index.ts @@ -4,6 +4,7 @@ import tippy, { } from 'tippy.js'; import { format, formatRelative } from 'date-fns'; import { enUS } from 'date-fns/locale'; +import { marked } from 'marked'; import type { PhoenixHook } from './PhoenixHook'; import LogLineHighlight from './LogLineHighlight'; @@ -684,9 +685,38 @@ export const BlurDataclipEditor = { export const ScrollToMessage = { mounted() { + this.shouldAutoScroll = true; + + this.handleScrollThrottled = this.throttle(() => { + const isAtBottom = this.isAtBottom(); + this.shouldAutoScroll = isAtBottom; + }, 100); + + this.el.addEventListener('scroll', this.handleScrollThrottled); this.handleScroll(); }, + destroyed() { + if (this.handleScrollThrottled) { + this.el.removeEventListener('scroll', this.handleScrollThrottled); + } + if (this.throttleTimeout !== undefined) { + clearTimeout(this.throttleTimeout); + } + }, + + throttle(func: () => void, wait: number): () => void { + return () => { + if (this.throttleTimeout !== undefined) { + clearTimeout(this.throttleTimeout); + } + this.throttleTimeout = setTimeout(() => { + func(); + this.throttleTimeout = undefined; + }, wait) as unknown as number; + }; + }, + updated() { this.handleScroll(); }, @@ -696,7 +726,7 @@ export const ScrollToMessage = { if (targetMessageId) { this.scrollToSpecificMessage(targetMessageId); - } else { + } else if (this.shouldAutoScroll) { this.scrollToBottom(); } }, @@ -717,18 +747,25 @@ export const ScrollToMessage = { } }, + isAtBottom() { + const threshold = 50; + const position = this.el.scrollTop + this.el.clientHeight; + const height = this.el.scrollHeight; + return height - position <= threshold; + }, + scrollToBottom() { - setTimeout(() => { - this.el.scrollTo({ - top: this.el.scrollHeight, - behavior: 'smooth', - }); - }, 600); + this.el.scrollTop = this.el.scrollHeight; }, } as PhoenixHook<{ + shouldAutoScroll: boolean; + handleScrollThrottled?: () => void; + throttleTimeout?: number; + throttle: (func: () => void, wait: number) => () => void; handleScroll: () => void; scrollToSpecificMessage: (messageId: string) => void; 
scrollToBottom: () => void; + isAtBottom: () => boolean; }>; export const Copy = { @@ -1020,3 +1057,95 @@ export const LocalTimeConverter = { convertDateTime: () => void; convertToDisplayTime: (isoTimestamp: string, display: string) => void; }>; + +export const StreamingText = { + mounted() { + this.lastContent = ''; + this.renderer = this.createCustomRenderer(); + this.parseCount = 0; + this.pendingUpdate = undefined; + this.updateContent(); + }, + + updated() { + // Debounce updates by 50ms to batch rapid chunk arrivals + if (this.pendingUpdate !== undefined) { + clearTimeout(this.pendingUpdate); + } + + this.pendingUpdate = setTimeout(() => { + this.updateContent(); + this.pendingUpdate = undefined; + }, 50) as unknown as number; + }, + + destroyed() { + if (this.pendingUpdate !== undefined) { + clearTimeout(this.pendingUpdate); + } + }, + + createCustomRenderer() { + const renderer = new marked.Renderer(); + + renderer.code = (code, language) => { + const lang = language ? ` class="${language}"` : ''; + return `
<pre><code${lang}>${code}</code></pre>
`; + }; + + renderer.link = (href, title, text) => { + return `${text}`; + }; + + renderer.heading = (text, level) => { + const classes = level === 1 ? 'text-2xl font-bold mb-6' : 'text-xl font-semibold mb-4 mt-8'; + return `${text}`; + }; + + renderer.list = (body, ordered) => { + const tag = ordered ? 'ol' : 'ul'; + const classes = ordered ? 'list-decimal pl-8 space-y-1' : 'list-disc pl-8 space-y-1'; + return `<${tag} class="${classes}">${body}`; + }; + + renderer.listitem = (text) => { + return `
<li>${text}</li>`; + }; + + renderer.paragraph = (text) => { + return `<p>${text}</p>
    `; + }; + + return renderer; + }, + + updateContent() { + const start = performance.now(); + const newContent = this.el.dataset.streamingContent || ''; + + if (newContent !== this.lastContent) { + this.parseCount++; + + const htmlContent = marked.parse(newContent, { + renderer: this.renderer, + breaks: true, + gfm: true, + }); + + this.el.innerHTML = htmlContent; + this.lastContent = newContent; + + const duration = performance.now() - start; + console.debug( + `[StreamingText] Parse #${this.parseCount}: ${duration.toFixed(2)}ms for ${newContent.length} chars` + ); + } + }, +} as PhoenixHook<{ + lastContent: string; + renderer: marked.Renderer; + parseCount: number; + pendingUpdate?: number; + createCustomRenderer: () => marked.Renderer; + updateContent: () => void; +}>; diff --git a/lib/lightning/ai_assistant/ai_assistant.ex b/lib/lightning/ai_assistant/ai_assistant.ex index 8c9389f592..6e2fb714fd 100644 --- a/lib/lightning/ai_assistant/ai_assistant.ex +++ b/lib/lightning/ai_assistant/ai_assistant.ex @@ -569,12 +569,16 @@ defmodule Lightning.AiAssistant do ## Returns - List of `%ChatMessage{}` structs with `:role` of `:user` and `:status` of `:pending`. + List of `%ChatMessage{}` structs with `:role` of `:user` and `:status` of `:pending` or `:processing`. """ @spec find_pending_user_messages(ChatSession.t()) :: [ChatMessage.t()] def find_pending_user_messages(session) do messages = session.messages || [] - Enum.filter(messages, &(&1.role == :user && &1.status == :pending)) + + Enum.filter( + messages, + &(&1.role == :user && &1.status in [:pending, :processing]) + ) end @doc """ diff --git a/lib/lightning/ai_assistant/message_processor.ex b/lib/lightning/ai_assistant/message_processor.ex index 1c5c3d6feb..80b6cf5a01 100644 --- a/lib/lightning/ai_assistant/message_processor.ex +++ b/lib/lightning/ai_assistant/message_processor.ex @@ -13,6 +13,7 @@ defmodule Lightning.AiAssistant.MessageProcessor do alias Lightning.AiAssistant alias Lightning.AiAssistant.ChatMessage alias Lightning.AiAssistant.ChatSession + alias Lightning.ApolloClient.SSEStream alias Lightning.Repo require Logger @@ -38,11 +39,11 @@ defmodule Lightning.AiAssistant.MessageProcessor do @impl Oban.Worker @spec perform(Oban.Job.t()) :: :ok def perform(%Oban.Job{args: %{"message_id" => message_id}}) do - Logger.info("[MessageProcessor] Processing message: #{message_id}") + Logger.debug("[MessageProcessor] Processing message: #{message_id}") case process_message(message_id) do {:ok, _updated_session} -> - Logger.info( + Logger.debug( "[MessageProcessor] Successfully processed message: #{message_id}" ) @@ -77,7 +78,7 @@ defmodule Lightning.AiAssistant.MessageProcessor do @doc false @spec process_message(String.t()) :: - {:ok, AiAssistant.ChatSession.t()} | {:error, String.t()} + {:ok, AiAssistant.ChatSession.t() | :streaming} | {:error, String.t()} defp process_message(message_id) do {:ok, session, message} = ChatMessage @@ -94,6 +95,9 @@ defmodule Lightning.AiAssistant.MessageProcessor do end case result do + {:ok, :streaming} -> + {:ok, session} + {:ok, _} -> {:ok, updated_session, _updated_message} = update_message_status(message, :success) @@ -110,7 +114,7 @@ defmodule Lightning.AiAssistant.MessageProcessor do @doc false @spec process_job_message(AiAssistant.ChatSession.t(), ChatMessage.t()) :: - {:ok, AiAssistant.ChatSession.t()} | {:error, String.t()} + {:ok, :streaming | AiAssistant.ChatSession.t()} | {:error, String.t()} defp process_job_message(session, message) do enriched_session = 
AiAssistant.enrich_session_with_job_context(session) @@ -123,15 +127,148 @@ defmodule Lightning.AiAssistant.MessageProcessor do [] end - AiAssistant.query(enriched_session, message.content, options) + stream_job_message(enriched_session, message.content, options) + end + + @doc false + @spec stream_job_message(AiAssistant.ChatSession.t(), String.t(), keyword()) :: + {:ok, :streaming | AiAssistant.ChatSession.t()} | {:error, String.t()} + defp stream_job_message(session, content, options) do + start_streaming_request(session, content, options) + {:ok, :streaming} + rescue + _ -> + AiAssistant.query(session, content, options) + end + + @doc false + @spec start_streaming_request( + AiAssistant.ChatSession.t(), + String.t(), + keyword() + ) :: :ok + defp start_streaming_request(session, content, options) do + context = build_context(session, options) + history = get_chat_history(session) + + payload = %{ + "api_key" => Lightning.Config.apollo(:ai_assistant_api_key), + "content" => content, + "context" => context, + "history" => history, + "meta" => session.meta || %{}, + "stream" => true + } + + sse_payload = Map.put(payload, "lightning_session_id", session.id) + apollo_url = get_apollo_url("job_chat") + + case SSEStream.start_stream(apollo_url, sse_payload) do + {:ok, _pid} -> + Logger.debug( + "[MessageProcessor] Started Apollo SSE stream for session #{session.id}" + ) + + {:error, reason} -> + Logger.error( + "[MessageProcessor] Failed to start Apollo stream: #{inspect(reason)}" + ) + + Logger.debug("[MessageProcessor] Falling back to HTTP client") + raise "SSE stream failed, falling back to HTTP (not implemented yet)" + end + + :ok + end + + defp get_apollo_url(service) do + "#{Lightning.Config.apollo(:endpoint)}/services/#{service}/stream" + end + + defp get_chat_history(session) do + session.messages + |> Enum.map(fn message -> + %{ + "role" => to_string(message.role), + "content" => message.content + } + end) + end + + defp build_context(session, options) do + base_context = %{ + expression: session.expression, + adaptor: session.adaptor, + log: session.logs + } + + Enum.reduce(options, base_context, fn + {:code, false}, acc -> + Map.drop(acc, [:expression]) + + {:logs, false}, acc -> + Map.drop(acc, [:log]) + + _opt, acc -> + acc + end) end @doc false @spec process_workflow_message(AiAssistant.ChatSession.t(), ChatMessage.t()) :: - {:ok, AiAssistant.ChatSession.t()} | {:error, String.t()} + {:ok, :streaming | AiAssistant.ChatSession.t()} | {:error, String.t()} defp process_workflow_message(session, message) do code = message.code || workflow_code_from_session(session) - AiAssistant.query_workflow(session, message.content, code: code) + + try do + start_workflow_streaming_request(session, message.content, code) + {:ok, :streaming} + rescue + _ -> + AiAssistant.query_workflow(session, message.content, code: code) + end + end + + @doc false + @spec start_workflow_streaming_request( + AiAssistant.ChatSession.t(), + String.t(), + String.t() | nil + ) :: :ok + defp start_workflow_streaming_request(session, content, code) do + history = get_chat_history(session) + + payload = + %{ + "api_key" => Lightning.Config.apollo(:ai_assistant_api_key), + "content" => content, + "existing_yaml" => code, + "history" => history, + "meta" => session.meta || %{}, + "stream" => true + } + |> Enum.reject(fn {_, v} -> is_nil(v) end) + |> Enum.into(%{}) + + sse_payload = Map.put(payload, "lightning_session_id", session.id) + apollo_url = get_apollo_url("workflow_chat") + + case 
SSEStream.start_stream(apollo_url, sse_payload) do + {:ok, _pid} -> + Logger.debug( + "[MessageProcessor] Started Apollo SSE stream for workflow session #{session.id}" + ) + + {:error, reason} -> + Logger.error( + "[MessageProcessor] Failed to start Apollo workflow stream: #{inspect(reason)}" + ) + + Logger.debug("[MessageProcessor] Falling back to HTTP client") + raise "SSE stream failed, triggering fallback to HTTP" + end + + :ok end @doc false @@ -248,7 +385,7 @@ defmodule Lightning.AiAssistant.MessageProcessor do |> case do %ChatMessage{id: message_id, status: status} = message when status in [:pending, :processing] -> - Logger.info( + Logger.debug( "[AI Assistant] Updating message #{message_id} to error status after exception" ) @@ -325,7 +462,7 @@ defmodule Lightning.AiAssistant.MessageProcessor do |> case do %ChatMessage{id: message_id, status: status} = message when status in [:pending, :processing] -> - Logger.info( + Logger.debug( "[AI Assistant] Updating message #{message_id} to error status after stop=#{other}" ) diff --git a/lib/lightning/apollo_client/sse_stream.ex b/lib/lightning/apollo_client/sse_stream.ex new file mode 100644 index 0000000000..da2f06ec39 --- /dev/null +++ b/lib/lightning/apollo_client/sse_stream.ex @@ -0,0 +1,288 @@ +defmodule Lightning.ApolloClient.SSEStream do + @moduledoc """ + Server-Sent Events (SSE) client for streaming AI responses from Apollo server. + + This module handles HTTP streaming connections to Apollo's SSE endpoints, + parsing incoming events and forwarding them to the appropriate channels. + """ + use GenServer + + require Logger + + @doc """ + Starts a streaming SSE connection to Apollo server. + + ## Parameters + + - `url` - HTTP URL for Apollo streaming endpoint + - `payload` - Request payload to send to Apollo + + ## Returns + + - `{:ok, pid}` - SSE stream process started successfully + - `{:error, reason}` - Failed to establish connection + """ + def start_stream(url, payload) do + GenServer.start_link(__MODULE__, {url, payload}) + end + + @impl GenServer + def init({url, payload}) do + lightning_session_id = payload["lightning_session_id"] + apollo_payload = Map.delete(payload, "lightning_session_id") + + apollo_timeout = Lightning.Config.apollo(:timeout) || 30_000 + stream_timeout = apollo_timeout + 10_000 + + timeout_ref = Process.send_after(self(), :stream_timeout, stream_timeout) + + parent = self() + + spawn_link(fn -> + stream_request(url, apollo_payload, parent, lightning_session_id) + end) + + {:ok, + %{ + session_id: lightning_session_id, + timeout_ref: timeout_ref, + completed: false + }} + end + + @impl GenServer + def handle_info({:sse_event, event_type, data}, state) do + handle_sse_event(event_type, data, state) + {:noreply, state} + end + + def handle_info(:stream_timeout, %{completed: false} = state) do + Logger.error("[SSEStream] Stream timeout for session #{state.session_id}") + broadcast_error(state.session_id, "Request timed out. 
Please try again.") + {:stop, :timeout, state} + end + + def handle_info(:stream_timeout, state) do + {:noreply, state} + end + + def handle_info({:sse_complete}, state) do + if state.timeout_ref, do: Process.cancel_timer(state.timeout_ref) + Logger.info("[SSEStream] Stream completed for session #{state.session_id}") + {:stop, :normal, %{state | completed: true}} + end + + def handle_info({:sse_error, reason}, state) do + if state.timeout_ref, do: Process.cancel_timer(state.timeout_ref) + + Logger.error( + "[SSEStream] Stream error for session #{state.session_id}: #{inspect(reason)}" + ) + + error_message = + case reason do + :timeout -> "Connection timed out" + :closed -> "Connection closed unexpectedly" + {:shutdown, _} -> "Server shut down" + {:http_error, status} -> "Server returned error status #{status}" + _ -> "Connection error: #{inspect(reason)}" + end + + broadcast_error(state.session_id, error_message) + {:stop, :normal, %{state | completed: true}} + end + + defp stream_request(url, payload, parent, session_id) do + Logger.info("[SSEStream] Starting SSE connection to #{url}") + Logger.debug("[SSEStream] Payload: #{inspect(payload)}") + + headers = [ + {"Content-Type", "application/json"}, + {"Accept", "text/event-stream"}, + {"Authorization", + "Bearer #{Lightning.Config.apollo(:ai_assistant_api_key)}"} + ] + + case Finch.build(:post, url, headers, Jason.encode!(payload)) + |> Finch.stream(Lightning.Finch, %{}, fn + {:status, status}, acc -> + Logger.debug("[SSEStream] Response status: #{status}") + + if status >= 400 do + send(parent, {:sse_error, {:http_error, status}}) + end + + Map.put(acc, :status, status) + + {:headers, headers}, acc -> + Logger.debug("[SSEStream] Response headers: #{inspect(headers)}") + acc + + {:data, chunk}, acc -> + Logger.debug("[SSEStream] Raw chunk received: #{inspect(chunk)}") + + if Map.get(acc, :status, 200) in 200..299 do + parse_sse_chunk(chunk, parent, session_id) + end + + acc + end) do + {:ok, %{status: status}} when status in 200..299 -> + Logger.info("[SSEStream] Stream completed successfully") + send(parent, {:sse_complete}) + + {:ok, %{status: status}} -> + Logger.error("[SSEStream] Stream failed with status: #{status}") + send(parent, {:sse_error, {:http_error, status}}) + + {:error, reason, _acc} -> + Logger.error( + "[SSEStream] Stream failed before response: #{inspect(reason)}" + ) + + send(parent, {:sse_error, reason}) + end + end + + defp parse_sse_chunk(chunk, parent, _session_id) do + chunk + |> String.split("\n") + |> Enum.reduce(%{event: nil, data: nil}, fn line, acc -> + cond do + String.starts_with?(line, "event:") -> + event = line |> String.trim_leading("event:") |> String.trim() + %{acc | event: event} + + String.starts_with?(line, "data:") -> + data = line |> String.trim_leading("data:") |> String.trim() + %{acc | data: data} + + (line == "" and acc.event) && acc.data -> + send(parent, {:sse_event, acc.event, acc.data}) + %{event: nil, data: nil} + + true -> + acc + end + end) + end + + defp handle_sse_event(event_type, data, state) do + case event_type do + "content_block_delta" -> + handle_content_block_delta(data, state.session_id) + + "message_stop" -> + Logger.debug("[SSEStream] Received message_stop, broadcasting complete") + broadcast_complete(state.session_id) + + "complete" -> + handle_complete_event(data, state.session_id) + + "error" -> + handle_error_event(data, state.session_id) + + "log" -> + Logger.debug("[SSEStream] Apollo log: #{inspect(data)}") + + _ -> + Logger.debug("[SSEStream] Unhandled event type: 
#{event_type}") + :ok + end + end + + defp handle_content_block_delta(data, session_id) do + case Jason.decode(data) do + {:ok, %{"delta" => %{"type" => "text_delta", "text" => text}}} -> + Logger.debug("[SSEStream] Broadcasting chunk: #{inspect(text)}") + broadcast_chunk(session_id, text) + + {:ok, %{"delta" => %{"type" => "thinking_delta", "thinking" => thinking}}} -> + Logger.debug("[SSEStream] Broadcasting status: #{inspect(thinking)}") + broadcast_status(session_id, thinking) + + _ -> + :ok + end + end + + defp handle_complete_event(data, session_id) do + Logger.debug("[SSEStream] Received complete event with payload") + + case Jason.decode(data) do + {:ok, payload} -> + Logger.debug( + "[SSEStream] Broadcasting complete payload: #{inspect(Map.keys(payload))}" + ) + + broadcast_payload_complete(session_id, payload) + + {:error, error} -> + Logger.error( + "[SSEStream] Failed to parse complete event payload: #{inspect(error)}" + ) + end + + :ok + end + + defp handle_error_event(data, session_id) do + Logger.error("[SSEStream] Received error event: #{inspect(data)}") + + error_message = + case Jason.decode(data) do + {:ok, %{"message" => msg}} -> msg + {:ok, %{"error" => err}} -> err + _ -> "An error occurred while streaming" + end + + broadcast_error(session_id, error_message) + end + + defp broadcast_chunk(session_id, data) do + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_chunk, %{content: data, session_id: session_id}} + ) + end + + defp broadcast_status(session_id, data) do + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :status_update, %{status: data, session_id: session_id}} + ) + end + + defp broadcast_complete(session_id) do + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_complete, %{session_id: session_id}} + ) + end + + defp broadcast_payload_complete(session_id, payload) do + payload_data = %{ + session_id: session_id, + usage: Map.get(payload, "usage"), + meta: Map.get(payload, "meta"), + code: Map.get(payload, "response_yaml") + } + + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_payload_complete, payload_data} + ) + end + + defp broadcast_error(session_id, error_message) do + payload_data = %{ + session_id: session_id, + error: error_message + } + + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_error, payload_data} + ) + end +end diff --git a/lib/lightning_web/live/ai_assistant/component.ex b/lib/lightning_web/live/ai_assistant/component.ex index 1b8e5dfcac..b27a4563e8 100644 --- a/lib/lightning_web/live/ai_assistant/component.ex +++ b/lib/lightning_web/live/ai_assistant/component.ex @@ -43,7 +43,10 @@ defmodule LightningWeb.AiAssistant.Component do callbacks: %{}, selected_message: nil, registered_session_id: nil, - registered_component_id: nil + registered_component_id: nil, + streaming_content: "", + streaming_status: nil, + streaming_error: nil }) |> assign_async(:endpoint_available, fn -> {:ok, %{endpoint_available: AiAssistant.endpoint_available?()}} @@ -55,6 +58,26 @@ defmodule LightningWeb.AiAssistant.Component do {:ok, handle_message_status(status, socket)} end + def update(%{id: _id, streaming_chunk: chunk_data}, socket) do + {:ok, handle_streaming_chunk(chunk_data, socket)} + end + + def update(%{id: _id, status_update: status_data}, socket) do + {:ok, handle_status_update(status_data, socket)} + end + + def update(%{id: _id, streaming_complete: _}, socket) do + {:ok, handle_streaming_complete(socket)} + 
end + + def update(%{id: _id, streaming_payload_complete: payload_data}, socket) do + {:ok, handle_streaming_payload_complete(payload_data, socket)} + end + + def update(%{id: _id, streaming_error: error_data}, socket) do + {:ok, handle_streaming_error(error_data, socket)} + end + def update(%{action: :code_error} = assigns, socket) do {:ok, handle_code_error(socket, assigns)} end @@ -167,7 +190,9 @@ defmodule LightningWeb.AiAssistant.Component do session: session, pending_message: AsyncResult.ok(nil), selected_message: nil, - code_error: nil + code_error: nil, + streaming_content: Map.get(socket.assigns, :streaming_content, ""), + streaming_status: Map.get(socket.assigns, :streaming_status, nil) ) |> delegate_to_handler(:on_message_received, [session]) end @@ -175,7 +200,158 @@ defmodule LightningWeb.AiAssistant.Component do defp handle_message_status({:error, session}, socket) do assign(socket, session: session, - pending_message: AsyncResult.ok(nil) + pending_message: AsyncResult.ok(nil), + streaming_content: Map.get(socket.assigns, :streaming_content, ""), + streaming_status: Map.get(socket.assigns, :streaming_status, nil) + ) + end + + defp handle_streaming_chunk(chunk_data, socket) do + current_content = socket.assigns.streaming_content + new_content = current_content <> chunk_data.content + assign(socket, streaming_content: new_content) + end + + defp handle_status_update(status_data, socket) do + assign(socket, streaming_status: status_data.status) + end + + defp handle_streaming_complete(socket) do + # Keep streamed content and status in memory until payload arrives + # This allows saving content + code together in one operation + socket + end + + defp handle_streaming_payload_complete(payload_data, socket) do + require Logger + session = socket.assigns.session + content = socket.assigns.streaming_content + + Logger.debug( + "[Component] Processing complete payload for session #{session.id}" + ) + + # Save the assistant message with ALL data at once (content + code + usage + meta) + # This matches the non-streaming approach + message_attrs = %{ + role: :assistant, + content: content, + status: :success + } + + opts = [ + usage: payload_data[:usage] || %{}, + meta: payload_data[:meta], + code: payload_data[:code] + ] + + case AiAssistant.save_message(session, message_attrs, opts) do + {:ok, _updated_session} -> + Logger.debug( + "[Component] Successfully saved complete message with payload data" + ) + + # Mark all pending/processing user messages as success + # Need to reload first to get current state + {:ok, fresh_session} = AiAssistant.get_session(session.id) + + pending_user_messages = + AiAssistant.find_pending_user_messages(fresh_session) + + Logger.debug( + "[Component] Found #{length(pending_user_messages)} pending user messages to mark as success" + ) + + results = + Enum.map(pending_user_messages, fn user_message -> + Logger.debug( + "[Component] Updating user message #{user_message.id} from #{user_message.status} to :success" + ) + + AiAssistant.update_message_status( + fresh_session, + user_message, + :success + ) + end) + + Logger.debug("[Component] Update results: #{inspect(results)}") + + # Reload session again to get fresh user message statuses after updates + {:ok, final_session} = AiAssistant.get_session(session.id) + + Logger.debug( + "[Component] Final user message statuses: #{inspect(Enum.filter(final_session.messages, &(&1.role == :user)) |> Enum.map(&{&1.id, &1.status}))}" + ) + + # Clear loading state and streaming content + Logger.debug("[Component] 
Clearing pending_message and streaming state") + + socket = + socket + |> assign( + session: final_session, + pending_message: AsyncResult.ok(nil), + streaming_content: "", + streaming_status: nil + ) + + # Always call callback to notify message received (sets sending_ai_message: false) + case socket.assigns.callbacks[:on_message_received] do + callback when is_function(callback, 2) -> + code = payload_data[:code] + last_message = final_session.messages |> List.last() + callback.(code, last_message) + socket + + _ -> + socket + end + + {:error, error} -> + Logger.error( + "[Component] Failed to save complete message: #{inspect(error)}" + ) + + socket + |> assign( + pending_message: AsyncResult.ok(nil), + streaming_content: "", + streaming_status: nil + ) + end + end + + defp handle_streaming_error(error_data, socket) do + require Logger + session = socket.assigns.session + + Logger.error( + "[Component] Streaming error for session #{session.id}: #{error_data.error}" + ) + + # Find the user message that was being processed + user_messages = + Enum.filter( + session.messages, + &(&1.role == :user && &1.status == :processing) + ) + + # Mark user messages as failed + Enum.each(user_messages, fn msg -> + AiAssistant.update_message_status(session, msg, :error) + end) + + # Reload session with updated statuses + {:ok, updated_session} = AiAssistant.get_session(session.id) + + socket + |> assign( + session: updated_session, + pending_message: AsyncResult.loading(), + streaming_content: "", + streaming_status: nil, + streaming_error: error_data.error ) end @@ -201,6 +377,9 @@ defmodule LightningWeb.AiAssistant.Component do |> assign_new(:changeset, fn _ -> handler.validate_form(%{"content" => nil}) end) + |> assign_new(:streaming_content, fn -> "" end) + |> assign_new(:streaming_status, fn -> nil end) + |> assign_new(:streaming_error, fn -> nil end) end defp extract_message_id(%ChatSession{messages: messages}) do @@ -341,6 +520,47 @@ defmodule LightningWeb.AiAssistant.Component do end end + def handle_event("retry_streaming", _params, socket) do + # Re-submit the last user message + session = socket.assigns.session + + last_user_msg = + Enum.reverse(session.messages) + |> Enum.find(&(&1.role == :user)) + + if last_user_msg do + # Reset error state + socket = assign(socket, streaming_error: nil) + + # Resubmit message + case AiAssistant.retry_message(last_user_msg) do + {:ok, {_message, _oban_job}} -> + {:ok, updated_session} = AiAssistant.get_session(session.id) + + {:noreply, + assign(socket, + session: updated_session, + pending_message: AsyncResult.loading() + )} + + {:error, _} -> + {:noreply, put_flash(socket, :error, "Failed to retry request")} + end + else + {:noreply, socket} + end + end + + def handle_event("cancel_streaming", _params, socket) do + # Just clear the error state + socket = + socket + |> assign(streaming_error: nil, pending_message: AsyncResult.ok(nil)) + |> put_flash(:info, "Request cancelled") + + {:noreply, socket} + end + def handle_event( "select_assistant_message", %{"message-id" => message_id}, @@ -422,14 +642,23 @@ defmodule LightningWeb.AiAssistant.Component do end defp save_message(socket, action, content) do + require Logger + + Logger.debug( + "[AI Component] save_message called with action: #{inspect(action)}" + ) + result = case action do :new -> create_new_session(socket, content) :show -> add_to_existing_session(socket, content) end + Logger.debug("[AI Component] save_message result: #{inspect(result)}") + case result do {:ok, session} -> + Logger.debug("[AI 
Component] Calling handle_successful_save") handle_successful_save(socket, session, action) {:error, error} -> @@ -446,6 +675,11 @@ defmodule LightningWeb.AiAssistant.Component do end defp handle_successful_save(socket, session, :new) do + # Parent LiveView handles PubSub subscription via component registration + # Component receives updates via send_update from parent + require Logger + Logger.debug("[AI Component] New session created: #{session.id}") + socket |> assign(:session, session) |> assign(:pending_message, AsyncResult.loading()) @@ -453,6 +687,11 @@ defmodule LightningWeb.AiAssistant.Component do end defp handle_successful_save(socket, session, :show) do + # Parent LiveView handles PubSub subscription via component registration + # Component receives updates via send_update from parent + require Logger + Logger.debug("[AI Component] Message added to session: #{session.id}") + socket |> assign(:session, session) |> assign(:pending_message, AsyncResult.loading()) @@ -583,6 +822,9 @@ defmodule LightningWeb.AiAssistant.Component do handler={@handler} code_error={@code_error} mode={@mode} + streaming_status={@streaming_status} + streaming_content={@streaming_content} + streaming_error={@streaming_error} /> <% end %> @@ -1121,6 +1363,9 @@ defmodule LightningWeb.AiAssistant.Component do attr :handler, :any, required: true attr :code_error, :any, required: true attr :mode, :atom, required: true + attr :streaming_status, :string, default: nil + attr :streaming_content, :string, default: "" + attr :streaming_error, :string, default: nil defp render_individual_session(assigns) do assigns = assign(assigns, ai_feedback: ai_feedback()) @@ -1187,7 +1432,15 @@ defmodule LightningWeb.AiAssistant.Component do <.async_result assign={@pending_message}> <:loading> - <.assistant_typing_indicator handler={@handler} /> + <%= if @streaming_error do %> + <.streaming_error_state error={@streaming_error} target={@target} /> + <% else %> + <.assistant_typing_indicator + handler={@handler} + streaming_status={@streaming_status} + streaming_content={@streaming_content} + /> + <% end %> <:failed :let={failure}> @@ -1405,6 +1658,49 @@ defmodule LightningWeb.AiAssistant.Component do """ end + attr :error, :string, required: true + attr :target, :any, required: true + + defp streaming_error_state(assigns) do + ~H""" +
+    <div>
+      <.icon name="hero-exclamation-triangle" class="text-red-600 h-5 w-5" />
+      <span>{@error}</span>
+      <button type="button" phx-click="retry_streaming" phx-target={@target}>
+        Retry
+      </button>
+      <button type="button" phx-click="cancel_streaming" phx-target={@target}>
+        Cancel
+      </button>
+    </div>
    + """ + end + + attr :handler, :any, required: true + attr :streaming_status, :string, default: nil + attr :streaming_content, :string, default: "" + defp assistant_typing_indicator(assigns) do assigns = assign(assigns, animation_delay: @typing_animation_delay_ms) @@ -1417,8 +1713,8 @@ defmodule LightningWeb.AiAssistant.Component do -
-            Processing...
+            {@streaming_status || "Processing..."}
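A minimal sketch of how one streamed chunk travels through the pieces added in this diff (SSEStream -> PubSub -> parent LiveView -> component); `session_id`, `component_id` and the "Hello" text are placeholders, not values from the diff:

    # SSEStream parses a `content_block_delta` SSE event and broadcasts it on the
    # per-session topic (see broadcast_chunk/2 in sse_stream.ex above):
    Lightning.broadcast(
      "ai_session:#{session_id}",
      {:ai_assistant, :streaming_chunk, %{content: "Hello", session_id: session_id}}
    )

    # The Edit LiveView is subscribed to that topic; handle_streaming_update/3 looks
    # the session up in ai_assistant_registry and forwards the event to the component:
    send_update(
      LightningWeb.AiAssistant.Component,
      %{id: component_id, streaming_chunk: %{content: "Hello"}}
    )

    # The component's update/2 clause for :streaming_chunk appends the text to
    # streaming_content, which the StreamingText hook renders as markdown:
    #   handle_streaming_chunk/2 -> assign(socket, streaming_content: current <> "Hello")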
    diff --git a/lib/lightning_web/live/workflow_live/edit.ex b/lib/lightning_web/live/workflow_live/edit.ex index 98818f78c3..38a3a002a6 100644 --- a/lib/lightning_web/live/workflow_live/edit.ex +++ b/lib/lightning_web/live/workflow_live/edit.ex @@ -2361,11 +2361,57 @@ defmodule LightningWeb.WorkflowLive.Edit do :unregister_component -> handle_component_unregistration(socket, payload) + + action + when action in [ + :streaming_chunk, + :status_update, + :streaming_complete, + :streaming_payload_complete, + :streaming_error + ] -> + handle_streaming_update(socket, payload, action) end end - def handle_info(%{}, socket) do - {:noreply, socket} + defp handle_streaming_update( + socket, + %{session_id: session_id} = payload, + update_type + ) do + registry = socket.assigns.ai_assistant_registry + require Logger + + Logger.info( + "[Edit LiveView] Received #{update_type} for session #{session_id}" + ) + + Logger.info("[Edit LiveView] Registry: #{inspect(Map.keys(registry))}") + + case Map.get(registry, session_id) do + nil -> + Logger.warning( + "[Edit LiveView] No component registered for session #{session_id}" + ) + + {:noreply, socket} + + component_id -> + Logger.info( + "[Edit LiveView] Forwarding #{update_type} to component #{component_id}" + ) + + # Remove session_id from payload and wrap in update_type key + data = Map.delete(payload, :session_id) + update_map = Map.put(%{id: component_id}, update_type, data) + + send_update( + LightningWeb.AiAssistant.Component, + update_map + ) + + {:noreply, socket} + end end defp get_workflow_by_id(workflow_id) do @@ -3657,13 +3703,23 @@ defmodule LightningWeb.WorkflowLive.Edit do session_id: session_id }) do registry = socket.assigns.ai_assistant_registry + require Logger + + Logger.info( + "[Edit LiveView] Registering component #{component_id} for session #{session_id}" + ) if connected?(socket) && !Map.has_key?(registry, session_id) do + Logger.info("[Edit LiveView] Subscribing to ai_session:#{session_id}") Lightning.subscribe("ai_session:#{session_id}") end updated_registry = Map.put(registry, session_id, component_id) + Logger.info( + "[Edit LiveView] Updated registry: #{inspect(Map.keys(updated_registry))}" + ) + {:noreply, assign(socket, :ai_assistant_registry, updated_registry)} end diff --git a/lib/lightning_web/live/workflow_live/workflow_ai_chat_component.ex b/lib/lightning_web/live/workflow_live/workflow_ai_chat_component.ex index dc5a0a6ad0..a3a8ca419a 100644 --- a/lib/lightning_web/live/workflow_live/workflow_ai_chat_component.ex +++ b/lib/lightning_web/live/workflow_live/workflow_ai_chat_component.ex @@ -31,10 +31,16 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponent do }, socket ) do - {:ok, - socket - |> assign(session_or_message: session_or_message) - |> push_event("template_selected", %{template: code})} + socket = assign(socket, session_or_message: session_or_message) + + socket = + if code != nil and code != "" do + push_event(socket, "template_selected", %{template: code}) + else + socket + end + + {:ok, socket} end def update(assigns, socket) do diff --git a/mix.lock b/mix.lock index 1f05e0d3d3..2a85535f39 100644 --- a/mix.lock +++ b/mix.lock @@ -153,6 +153,7 @@ "unsafe": {:hex, :unsafe, "1.0.2", "23c6be12f6c1605364801f4b47007c0c159497d0446ad378b5cf05f1855c0581", [:mix], [], "hexpm", "b485231683c3ab01a9cd44cb4a79f152c6f3bb87358439c6f68791b85c2df675"}, "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", 
"6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"}, "websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"}, + "websockex": {:hex, :websockex, "0.4.3", "92b7905769c79c6480c02daacaca2ddd49de936d912976a4d3c923723b647bf0", [:mix], [], "hexpm", "95f2e7072b85a3a4cc385602d42115b73ce0b74a9121d0d6dbbf557645ac53e4"}, "y_ex": {:hex, :y_ex, "0.8.0", "e1591d97a487a15fe93eb29b88685d0ccb6f76403cdd2b8c60e9cebb9a2d204e", [:mix], [{:rustler, ">= 0.0.0", [hex: :rustler, repo: "hexpm", optional: true]}, {:rustler_precompiled, ">= 0.6.0", [hex: :rustler_precompiled, repo: "hexpm", optional: false]}], "hexpm", "d2ce875481c28896d5d9037d8cb5d859ddbcfb047dcfebdcd0d33c6ebfd3d506"}, "yex": {:hex, :yex, "0.0.1", "99ad1448ac9f7482b40fea8fc5ba23c92933a435b96935b079854e362e8b2353", [:mix], [{:rustler, "~> 0.32.1", [hex: :rustler, repo: "hexpm", optional: false]}], "hexpm", "8304c754ea0856f88f5f1f089191641393fae2791780a8b8865f7b4f9c6069b6"}, } diff --git a/test/lightning/ai_assistant/message_processor_test.exs b/test/lightning/ai_assistant/message_processor_test.exs new file mode 100644 index 0000000000..6bca2d79fe --- /dev/null +++ b/test/lightning/ai_assistant/message_processor_test.exs @@ -0,0 +1,458 @@ +defmodule Lightning.AiAssistant.MessageProcessorTest do + use Lightning.DataCase, async: false + use Mimic + + alias Lightning.AiAssistant + alias Lightning.AiAssistant.MessageProcessor + alias Lightning.Repo + + import Lightning.Factories + import Mox, only: [] + + @moduletag :capture_log + + setup_all do + Mimic.copy(Lightning.ApolloClient.SSEStream) + Mimic.copy(Lightning.ApolloClient) + Mimic.copy(Lightning.AiAssistant) + :ok + end + + setup do + Mox.set_mox_global() + Mimic.set_mimic_global() + Mox.verify_on_exit!() + :ok + end + + setup do + Mox.stub(Lightning.MockConfig, :apollo, fn + :timeout -> 30_000 + :endpoint -> "http://localhost:3000" + :ai_assistant_api_key -> "test_key" + end) + + :ok + end + + describe "MessageProcessor worker functions" do + setup do + user = insert(:user) + workflow = insert(:simple_workflow, project: build(:project)) + job = hd(workflow.jobs) + + Oban.Testing.with_testing_mode(:manual, fn -> + {:ok, session} = AiAssistant.create_session(job, user, "Test message") + + message = + session.messages + |> Enum.find(&(&1.role == :user)) + + {:ok, user: user, session: session, message: message, job: job} + end) + end + + test "processes job message with streaming", %{message: message} do + # Stub streaming to succeed + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + {:ok, self()} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + end + + test "handles streaming fallback on exception", %{ + message: message, + session: session + } do + # Update session meta to include options + session + |> Ecto.Changeset.change( + meta: %{"message_options" => %{"include_logs" => "false"}} + ) + |> Repo.update!() + + # Stub streaming to fail + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + raise "Streaming failed" + end) + + # 
Stub the fallback AiAssistant.query + Mimic.stub(Lightning.AiAssistant, :query, fn _session, _content, _opts -> + {:ok, session} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + end + + test "calculates timeout with buffer" do + job = %Oban.Job{args: %{}} + timeout = MessageProcessor.timeout(job) + + # Should be at least 33_000 (30_000 + 10%) + assert timeout >= 33_000 + end + + test "updates message status through lifecycle", %{message: message} do + # Test status progression + {:ok, _session, updated_message} = + MessageProcessor.update_message_status(message, :processing) + + assert updated_message.status == :processing + + {:ok, _session, updated_message} = + MessageProcessor.update_message_status(updated_message, :success) + + assert updated_message.status == :success + end + + test "handles error status updates", %{message: message} do + {:ok, _session, updated_message} = + MessageProcessor.update_message_status(message, :error) + + assert updated_message.status == :error + end + + test "handles SSEStream.start_stream error for job message", %{ + message: message + } do + # Stub streaming to return error + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + {:error, :connection_failed} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + + # Should catch the raised exception from start_streaming_request + assert :ok = MessageProcessor.perform(job) + end + + test "handles failed message processing", %{message: message} do + # Stub streaming to succeed but return error on query fallback + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + raise "Streaming failed" + end) + + # Stub the fallback to return error + Mimic.stub(Lightning.AiAssistant, :query, fn _session, _content, _opts -> + {:error, "Processing failed"} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + + # Message should be marked as error + updated_message = Repo.reload(message) + assert updated_message.status == :error + end + + test "logs successful message processing", %{message: message} do + # Stub streaming to fail, then fallback to succeed + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + raise "Streaming failed" + end) + + # Stub the fallback to return success (not :streaming) + Mimic.stub(Lightning.AiAssistant, :query, fn session, _content, _opts -> + {:ok, session} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + + # Message should be marked as success + updated_message = Repo.reload(message) + assert updated_message.status == :success + end + + test "logs successful SSE stream start for job message", %{message: message} do + # Stub streaming to succeed and verify logging happens + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + {:ok, self()} + end) + + # Call start_streaming_request directly (it's private but we can test via perform) + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + end + end + + describe "workflow message processing" do + setup do + user = insert(:user) + project = insert(:project) + workflow = insert(:simple_workflow, project: project) + + Oban.Testing.with_testing_mode(:manual, fn -> + {:ok, session} = + AiAssistant.create_workflow_session( + project, + workflow, + user, + "Generate workflow", + meta: 
%{"code" => "workflow: test"} + ) + + message = + session.messages + |> Enum.find(&(&1.role == :user)) + + {:ok, user: user, session: session, message: message, workflow: workflow} + end) + end + + test "processes workflow message with streaming", %{message: message} do + # Stub streaming to succeed + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + {:ok, self()} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + end + + test "handles SSEStream.start_stream error for workflow message", %{ + message: message + } do + # Stub streaming to return error + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + {:error, :connection_failed} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + + # Should catch the raised exception from start_workflow_streaming_request + assert :ok = MessageProcessor.perform(job) + end + + test "falls back to query_workflow on streaming failure", %{ + message: message, + session: session + } do + # Stub streaming to fail + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + raise "Streaming failed" + end) + + # Stub the fallback query_workflow + Mimic.stub(Lightning.AiAssistant, :query_workflow, fn _session, + _content, + _opts -> + {:ok, session} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + end + + test "uses code from previous assistant message when not in message", %{ + user: user, + workflow: workflow + } do + Oban.Testing.with_testing_mode(:manual, fn -> + {:ok, session} = + AiAssistant.create_workflow_session( + workflow.project, + workflow, + user, + "First message", + meta: %{"code" => "workflow: test"} + ) + + # Add an assistant response with code + assistant_msg = + insert(:chat_message, + chat_session: session, + role: :assistant, + content: "Here's a workflow", + code: "workflow:\n jobs:\n - id: job1" + ) + + # Add a new user message without code + message = + insert(:chat_message, + chat_session: session, + role: :user, + content: "Update the workflow" + ) + + # Stub streaming to succeed + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + payload -> + # Verify it used the code from the previous assistant message + assert payload["existing_yaml"] == assistant_msg.code + {:ok, self()} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + end) + end + + test "logs successful SSE stream start for workflow message", %{ + message: message + } do + # Stub streaming to succeed + Mimic.stub(Lightning.ApolloClient.SSEStream, :start_stream, fn _url, + _payload -> + {:ok, self()} + end) + + job = %Oban.Job{args: %{"message_id" => message.id}} + assert :ok = MessageProcessor.perform(job) + end + end + + describe "telemetry event handlers" do + setup do + user = insert(:user) + workflow = insert(:simple_workflow, project: build(:project)) + job = hd(workflow.jobs) + + Oban.Testing.with_testing_mode(:manual, fn -> + {:ok, session} = AiAssistant.create_session(job, user, "Test message") + + message = + session.messages + |> Enum.find(&(&1.role == :user)) + + {:ok, session: session, message: message} + end) + end + + test "handle_ai_assistant_exception logs error", %{message: message} do + measure = %{duration: 1_000_000, memory: 1000, reductions: 100} + + job = %Oban.Job{ + id: 123, + worker: "Lightning.AiAssistant.MessageProcessor", + queue: :ai_assistant, + 
args: %{"message_id" => message.id} + } + + meta = %{ + error: %RuntimeError{message: "test error"}, + stacktrace: [], + job: job + } + + # Should not crash and should update message to error + MessageProcessor.handle_ai_assistant_exception(measure, meta) + + # Verify message was updated to error + updated_message = Repo.reload(message) + assert updated_message.status == :error + end + + test "handle_ai_assistant_stop with non-success state", %{message: message} do + measure = %{duration: 1_000_000, memory: 1000, reductions: 100} + + job = %Oban.Job{ + id: 123, + worker: "Lightning.AiAssistant.MessageProcessor", + queue: :ai_assistant, + args: %{"message_id" => message.id} + } + + meta = %{ + state: :cancelled, + job: job + } + + MessageProcessor.handle_ai_assistant_stop(measure, meta) + + # Verify message was updated to error + updated_message = Repo.reload(message) + assert updated_message.status == :error + end + + test "handle_ai_assistant_stop with success state", %{message: message} do + measure = %{duration: 1_000_000, memory: 1000, reductions: 100} + + job = %Oban.Job{ + id: 123, + worker: "Lightning.AiAssistant.MessageProcessor", + queue: :ai_assistant, + args: %{"message_id" => message.id} + } + + meta = %{ + state: :success, + job: job + } + + # Success state should be ignored + assert :ok = MessageProcessor.handle_ai_assistant_stop(measure, meta) + end + + test "handle_ai_assistant_exception skips message already in final state", %{ + message: message + } do + # Update message to success (final state) + {:ok, _session, updated_message} = + MessageProcessor.update_message_status(message, :success) + + measure = %{duration: 1_000_000, memory: 1000, reductions: 100} + + job = %Oban.Job{ + id: 123, + worker: "Lightning.AiAssistant.MessageProcessor", + queue: :ai_assistant, + args: %{"message_id" => updated_message.id} + } + + meta = %{ + error: %RuntimeError{message: "test error"}, + stacktrace: [], + job: job + } + + # Should not update message since it's already in success state + MessageProcessor.handle_ai_assistant_exception(measure, meta) + + # Verify message is still in success state + final_message = Repo.reload(updated_message) + assert final_message.status == :success + end + + test "handle_ai_assistant_stop with message already in final state", %{ + message: message + } do + # Update message to success (final state) + {:ok, _session, updated_message} = + MessageProcessor.update_message_status(message, :success) + + measure = %{duration: 1_000_000, memory: 1000, reductions: 100} + + job = %Oban.Job{ + id: 123, + worker: "Lightning.AiAssistant.MessageProcessor", + queue: :ai_assistant, + args: %{"message_id" => updated_message.id} + } + + meta = %{ + state: :cancelled, + job: job + } + + # Should not update message since it's already in success state + MessageProcessor.handle_ai_assistant_stop(measure, meta) + + # Verify message is still in success state + final_message = Repo.reload(updated_message) + assert final_message.status == :success + end + end +end diff --git a/test/lightning/apollo_client/sse_stream_test.exs b/test/lightning/apollo_client/sse_stream_test.exs new file mode 100644 index 0000000000..43c7dead49 --- /dev/null +++ b/test/lightning/apollo_client/sse_stream_test.exs @@ -0,0 +1,638 @@ +defmodule Lightning.ApolloClient.SSEStreamTest do + use Lightning.DataCase, async: false + use Mimic + + alias Lightning.ApolloClient.SSEStream + + import Mox, only: [] + + @moduletag :capture_log + + setup do + Mox.set_mox_global() + Mimic.set_mimic_global() + # Verify Mox 
expectations on exit + Mox.verify_on_exit!() + :ok + end + + setup do + # Stub Apollo config for all tests - set_mox_global allows this to work in spawned processes + # Use Mox.stub explicitly since both Mox and Mimic export stub/3 + Mox.stub(Lightning.MockConfig, :apollo, fn + :timeout -> 30_000 + :endpoint -> "http://localhost:3000" + :ai_assistant_api_key -> "test_key" + end) + + # Subscribe to PubSub to receive broadcasted messages + session_id = Ecto.UUID.generate() + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session_id}") + %{session_id: session_id} + end + + describe "start_stream/2" do + test "successfully starts streaming GenServer", %{session_id: session_id} do + # This test verifies that SSEStream GenServer can be started + # The actual HTTP connection will fail but the GenServer starts successfully + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "api_key" => "test_key", + "content" => "test", + "lightning_session_id" => session_id, + "stream" => true + } + + # Start the stream (it will fail to connect but GenServer starts) + {:ok, pid} = SSEStream.start_stream(url, payload) + + # GenServer starts successfully + assert Process.alive?(pid) + end + + test "handles successful streaming with SSE data chunks", %{ + session_id: session_id + } do + # Test the full successful streaming path with properly formatted SSE chunks + + # Stub Finch to simulate successful streaming with SSE chunks + Mimic.stub(Finch, :stream, fn _request, _finch_name, acc, fun -> + # Simulate status callback + acc = fun.({:status, 200}, acc) + + # Simulate headers callback + acc = fun.({:headers, [{"content-type", "text/event-stream"}]}, acc) + + # Simulate SSE data chunks + chunk1 = + "event: content_block_delta\ndata: {\"delta\":{\"type\":\"text_delta\",\"text\":\"Hello\"}}\n\n" + + acc = fun.({:data, chunk1}, acc) + + chunk2 = "event: message_stop\ndata: {}\n\n" + acc = fun.({:data, chunk2}, acc) + + {:ok, acc} + end) + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, _pid} = SSEStream.start_stream(url, payload) + + # Should receive the text chunk + assert_receive {:ai_assistant, :streaming_chunk, + %{content: "Hello", session_id: ^session_id}}, + 1000 + + # Should receive completion + assert_receive {:ai_assistant, :streaming_complete, + %{session_id: ^session_id}}, + 1000 + end + + test "handles streaming with thinking_delta status updates", %{ + session_id: session_id + } do + # Test status update streaming + Mimic.stub(Finch, :stream, fn _request, _finch_name, acc, fun -> + acc = fun.({:status, 200}, acc) + acc = fun.({:headers, []}, acc) + + chunk = + "event: content_block_delta\ndata: {\"delta\":{\"type\":\"thinking_delta\",\"thinking\":\"Analyzing...\"}}\n\n" + + acc = fun.({:data, chunk}, acc) + + {:ok, acc} + end) + + url = "http://localhost:3000/services/job_chat/stream" + payload = %{"lightning_session_id" => session_id, "stream" => true} + + {:ok, _pid} = SSEStream.start_stream(url, payload) + + # Should receive status update + assert_receive {:ai_assistant, :status_update, + %{status: "Analyzing...", session_id: ^session_id}}, + 1000 + end + + test "handles HTTP 4xx/5xx error during streaming", %{session_id: session_id} do + # Test handling of HTTP error status codes during streaming + Mimic.stub(Finch, :stream, fn _request, _finch_name, acc, fun -> + # Simulate 500 status + acc = fun.({:status, 500}, acc) + + {:ok, acc} + end) + + url = 
"http://localhost:3000/services/job_chat/stream" + payload = %{"lightning_session_id" => session_id, "stream" => true} + + {:ok, _pid} = SSEStream.start_stream(url, payload) + + # Should receive error about HTTP 500 + assert_receive {:ai_assistant, :streaming_error, + %{session_id: ^session_id, error: error}}, + 1000 + + assert error =~ "500" + end + + test "handles error events from Apollo", %{session_id: session_id} do + # Simulate receiving an error event by sending it directly to a GenServer + # In a real implementation, this would come from Apollo via SSE + + # Start a stream + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send an error event to the GenServer + error_data = Jason.encode!(%{"message" => "Test error from Apollo"}) + send(pid, {:sse_event, "error", error_data}) + + # Wait for broadcast + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Test error from Apollo" + }}, + 500 + end + + @tag timeout: 5000 + test "times out hanging streams and broadcasts error", %{ + session_id: session_id + } do + # Test timeout handling by stubbing Finch to hang + # This prevents connection errors from interfering with the timeout test + + # Trap exits so we don't crash when the GenServer stops with :timeout + Process.flag(:trap_exit, true) + + # Stub Finch to block indefinitely, simulating a hanging request + Mimic.stub(Finch, :stream, fn _request, _finch_name, acc, _fun -> + # Just block forever (or until killed) + Process.sleep(:infinity) + {:ok, Map.put(acc, :status, 200)} + end) + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Give the spawned Finch process time to start + Process.sleep(50) + + # Send timeout message to simulate stream timeout + send(pid, :stream_timeout) + + # Should receive timeout error broadcast + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Request timed out. Please try again." 
+ }}, + 1000 + end + + test "ignores timeout after stream completes", %{session_id: session_id} do + # Test that timeout is ignored if stream already completed + # Note: Stream will fail with connection error, simulating completion + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Monitor BEFORE sending completion to catch the shutdown + ref = Process.monitor(pid) + + # Send completion message - this will complete the stream + send(pid, {:sse_complete}) + + # Process should stop normally + assert_receive {:DOWN, ^ref, :process, ^pid, :normal}, 1000 + + # If we send timeout after completion, process is already dead so it's ignored + end + + test "handles completion message and cancels timeout", %{ + session_id: session_id + } do + # Test that :sse_complete properly cancels the timeout timer + # Note: Stream will attempt connection but we complete it before that matters + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Monitor BEFORE sending completion to catch the shutdown + ref = Process.monitor(pid) + + # Send completion message + send(pid, {:sse_complete}) + + # Process should stop normally (not from timeout) + assert_receive {:DOWN, ^ref, :process, ^pid, :normal}, 1000 + end + + test "handles connection failures with econnrefused", %{ + session_id: session_id + } do + # When Finch cannot connect, the stream should broadcast an error + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Simulate a connection failure by sending the error message + send(pid, {:sse_error, :econnrefused}) + + # Should receive an error broadcast + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: error + }}, + 500 + + assert error =~ "Connection error" + end + + test "handles timeout error from Finch", %{session_id: session_id} do + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + send(pid, {:sse_error, :timeout}) + + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Connection timed out" + }}, + 500 + end + + test "handles closed connection error", %{session_id: session_id} do + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + send(pid, {:sse_error, :closed}) + + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Connection closed unexpectedly" + }}, + 500 + end + + test "handles shutdown error", %{session_id: session_id} do + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + send(pid, {:sse_error, {:shutdown, :some_reason}}) + + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Server shut down" + }}, + 500 + end + + test "handles HTTP error responses", %{session_id: session_id} do + # Test 
that HTTP error status codes result in error broadcasts + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Simulate HTTP 500 error + send(pid, {:sse_error, {:http_error, 500}}) + + # Should receive an error broadcast + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Server returned error status 500" + }}, + 500 + end + + test "broadcasts content chunks correctly", %{session_id: session_id} do + # Test that content_block_delta events are broadcast + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send a content chunk event + chunk_data = + Jason.encode!(%{ + "delta" => %{"type" => "text_delta", "text" => "Hello from AI"} + }) + + send(pid, {:sse_event, "content_block_delta", chunk_data}) + + # Should receive the chunk broadcast + assert_receive {:ai_assistant, :streaming_chunk, + %{ + session_id: ^session_id, + content: "Hello from AI" + }}, + 500 + end + + test "broadcasts status updates correctly", %{session_id: session_id} do + # Test that thinking_delta events are broadcast as status updates + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send a status update event + status_data = + Jason.encode!(%{ + "delta" => %{"type" => "thinking_delta", "thinking" => "Analyzing..."} + }) + + send(pid, {:sse_event, "content_block_delta", status_data}) + + # Should receive the status broadcast + assert_receive {:ai_assistant, :status_update, + %{ + session_id: ^session_id, + status: "Analyzing..." 
+ }}, + 500 + end + + test "broadcasts completion events", %{session_id: session_id} do + # Test that message_stop events broadcast completion + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send completion event + send(pid, {:sse_event, "message_stop", ""}) + + # Should receive completion broadcast + assert_receive {:ai_assistant, :streaming_complete, + %{ + session_id: ^session_id + }}, + 500 + end + + test "broadcasts complete payload with metadata", %{session_id: session_id} do + # Test that complete events with payload are broadcast + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send complete event with payload + complete_data = + Jason.encode!(%{ + "usage" => %{"input_tokens" => 100, "output_tokens" => 50}, + "meta" => %{"model" => "claude-3"}, + "response_yaml" => "workflow: test" + }) + + send(pid, {:sse_event, "complete", complete_data}) + + # Should receive payload complete broadcast + assert_receive {:ai_assistant, :streaming_payload_complete, payload_data}, + 500 + + assert payload_data.session_id == session_id + assert payload_data.usage["input_tokens"] == 100 + assert payload_data.meta["model"] == "claude-3" + assert payload_data.code == "workflow: test" + end + + test "handles complete event with invalid JSON", %{session_id: session_id} do + # Test that malformed complete payloads are handled gracefully + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send complete event with invalid JSON + send(pid, {:sse_event, "complete", "not valid json {"}) + + # Give it a moment to process - should not crash from invalid JSON itself + # (though it may eventually stop due to connection error) + Process.sleep(50) + end + + test "handles log events", %{session_id: session_id} do + # Test that log events are handled (just logged, no broadcast) + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send log event + send(pid, {:sse_event, "log", "Some log message"}) + + # Give it a moment to process - should not crash from the log event itself + # (though it may eventually stop due to connection error) + Process.sleep(50) + end + + test "handles unknown event types", %{session_id: session_id} do + # Test that unknown event types are handled gracefully + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send unknown event type + send(pid, {:sse_event, "some_unknown_event", "data"}) + + # Give it a moment to process - should not crash from unknown event itself + # (though it may eventually stop due to connection error) + Process.sleep(50) + end + + test "handles content_block_delta with invalid JSON", %{ + session_id: session_id + } do + # Test that malformed delta events don't crash + + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, 
payload) + + # Send invalid delta data + send(pid, {:sse_event, "content_block_delta", "invalid json"}) + + # Give it a moment to process - should not crash from invalid JSON itself + # (though it may eventually stop due to connection error) + Process.sleep(50) + end + + test "handles error event with message field", %{session_id: session_id} do + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + error_data = Jason.encode!(%{"message" => "Custom error message"}) + send(pid, {:sse_event, "error", error_data}) + + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Custom error message" + }}, + 500 + end + + test "handles error event with error field", %{session_id: session_id} do + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + error_data = Jason.encode!(%{"error" => "Another error format"}) + send(pid, {:sse_event, "error", error_data}) + + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "Another error format" + }}, + 500 + end + + test "handles error event with invalid JSON", %{session_id: session_id} do + url = "http://localhost:3000/services/job_chat/stream" + + payload = %{ + "lightning_session_id" => session_id, + "stream" => true + } + + {:ok, pid} = SSEStream.start_stream(url, payload) + + # Send malformed error data + send(pid, {:sse_event, "error", "not json"}) + + # Should use fallback error message + assert_receive {:ai_assistant, :streaming_error, + %{ + session_id: ^session_id, + error: "An error occurred while streaming" + }}, + 500 + end + end +end diff --git a/test/lightning_web/live/ai_assistant/component_test.exs b/test/lightning_web/live/ai_assistant/component_test.exs new file mode 100644 index 0000000000..e1afbb7668 --- /dev/null +++ b/test/lightning_web/live/ai_assistant/component_test.exs @@ -0,0 +1,2069 @@ +defmodule LightningWeb.AiAssistant.ComponentTest do + @moduledoc """ + Comprehensive tests for AI Assistant Component. + + This file contains both unit tests and integration tests organized into + logical sections: + + ## Unit Tests + - Component functions (formatted_content, error_message, etc.) 
+ - Form validation + - Streaming handlers (chunk, status, complete, error) + - Message status updates + - Event handlers (retry, cancel) + + ## Integration Tests + - Template rendering in LiveView context + - User interactions and event flows + - Edge cases and error handling + """ + use LightningWeb.ConnCase, async: false + + import Phoenix.LiveViewTest + import Lightning.Factories + import Mox + + alias Lightning.AiAssistant + alias LightningWeb.AiAssistant.Component, as: AiAssistantComponent + alias LightningWeb.Live.AiAssistant.Modes.{JobCode, WorkflowTemplate} + alias Phoenix.LiveView.AsyncResult + + # ============================================================================ + # UNIT TESTS + # ============================================================================ + + describe "formatted_content/1" do + test "renders assistant messages with properly styled links" do + content = """ + Here are some links: + - [Apollo Repo](https://github.com/OpenFn/apollo) + - Plain text + - [Lightning Repo](https://github.com/OpenFn/lightning) + """ + + html = + render_component( + &AiAssistantComponent.formatted_content/1, + id: "formatted-content", + content: content + ) + + parsed_html = Floki.parse_document!(html) + links = Floki.find(parsed_html, "a") + + apollo_link = + Enum.find( + links, + &(Floki.attribute(&1, "href") == ["https://github.com/OpenFn/apollo"]) + ) + + assert apollo_link != nil + + assert Floki.attribute(apollo_link, "class") == [ + "text-primary-400 hover:text-primary-600" + ] + + assert Floki.attribute(apollo_link, "target") == ["_blank"] + + lightning_link = + Enum.find( + links, + &(Floki.attribute(&1, "href") == [ + "https://github.com/OpenFn/lightning" + ]) + ) + + assert lightning_link != nil + + assert Floki.attribute(lightning_link, "class") == [ + "text-primary-400 hover:text-primary-600" + ] + + assert Floki.attribute(lightning_link, "target") == ["_blank"] + + list_items = Floki.find(parsed_html, "li") + + assert Enum.any?(list_items, fn li -> + Floki.text(li) |> String.trim() == "Plain text" + end) + end + + test "handles content with invalid markdown links" do + content = """ + Broken [link(test.com + [Another](working.com) + """ + + html = + render_component( + &AiAssistantComponent.formatted_content/1, + id: "formatted-content", + content: content + ) + + parsed_html = Floki.parse_document!(html) + assert Floki.text(parsed_html) =~ "Broken [link(test.com" + + working_link = + Floki.find(parsed_html, "a") + |> Enum.find(&(Floki.attribute(&1, "href") == ["working.com"])) + + assert working_link != nil + + assert Floki.attribute(working_link, "class") == [ + "text-primary-400 hover:text-primary-600" + ] + + assert Floki.attribute(working_link, "target") == ["_blank"] + end + + test "elements without defined styles remain unchanged" do + content = """ + <weirdo>Some code</weirdo> + <pierdo>Preformatted text</pierdo> + [A link](https://weirdopierdo.com) + """ + + html = + render_component(&AiAssistantComponent.formatted_content/1, + id: "formatted-content", + content: content + ) + + parsed_html = Floki.parse_document!(html) + + code = Floki.find(parsed_html, "weirdo") + pre = Floki.find(parsed_html, "pierdo") + assert Floki.attribute(code, "class") == [] + assert Floki.attribute(pre, "class") == [] + + link = + Floki.find(parsed_html, "a") + |> Enum.find( + &(Floki.attribute(&1, "href") == ["https://weirdopierdo.com"]) + ) + + assert link != nil + + assert Floki.attribute(link, "class") == [ + "text-primary-400 hover:text-primary-600" + ] + + assert Floki.attribute(link, "target") ==
["_blank"] + end + + test "handles content that cannot be parsed as AST" do + content = """ +
<div>Unclosed div + <span>Unclosed span + Some text + """ + + html = + render_component(&AiAssistantComponent.formatted_content/1, + id: "formatted-content", + content: content + ) + + parsed_html = Floki.parse_document!(html) + + assert Floki.text(parsed_html) =~ "Unclosed div" + assert Floki.text(parsed_html) =~ "Unclosed span" + assert Floki.text(parsed_html) =~ "Some text" + end + + test "applies styles to elements not defined in the default styles" do + content = """ + <custom-tag>Custom styled content</custom-tag> + """ + + custom_attributes = %{ + "custom-tag" => %{class: "custom-class text-green-700"} + } + + html = + render_component(&AiAssistantComponent.formatted_content/1, %{ + id: "formatted-content", + content: content, + attributes: custom_attributes + }) + + parsed_html = Floki.parse_document!(html) + custom_tag = Floki.find(parsed_html, "custom-tag") |> hd() + + assert custom_tag != nil + + assert Floki.attribute(custom_tag, "class") == [ + "custom-class text-green-700" + ] + end + end + + describe "error_message/1" do + test "renders string error message" do + assert JobCode.error_message({:error, "Something went wrong"}) == + "Something went wrong" + end + + test "renders changeset error message" do + changeset = %Ecto.Changeset{ + valid?: false, + errors: [content: {"is invalid", []}], + data: %Lightning.AiAssistant.ChatSession{} + } + + assert JobCode.error_message({:error, changeset}) == + "Content is invalid" + end + + test "renders text message from map" do + error_data = %{text: "Specific error message"} + + assert JobCode.error_message({:error, :custom_reason, error_data}) == + "Specific error message" + end + + test "renders default error message for unhandled cases" do + assert JobCode.error_message({:error, :unknown_reason}) == + "An error occurred: unknown_reason. Please try again." + + assert JobCode.error_message(:unexpected_error) == + "Oops! Something went wrong. Please try again." + end + end + + describe "form validation" do + test "JobCode Form validates empty content" do + changeset = JobCode.Form.changeset(%{"content" => ""}) + + assert changeset.valid? == false + assert Keyword.has_key?(changeset.errors, :content) + {msg, _opts} = changeset.errors[:content] + assert msg == "Please enter a message before sending" + end + + test "JobCode validate_form includes content validation" do + changeset = JobCode.validate_form(%{"content" => nil}) + + assert changeset.valid? == false + assert Keyword.has_key?(changeset.errors, :content) + end + + test "WorkflowTemplate DefaultForm validates empty content" do + changeset = WorkflowTemplate.DefaultForm.changeset(%{"content" => ""}) + + assert changeset.valid? == false + assert Keyword.has_key?(changeset.errors, :content) + {msg, _opts} = changeset.errors[:content] + assert msg == "Please enter a message before sending" + end + + test "form validation accepts valid content" do + # JobCode + changeset = JobCode.validate_form(%{"content" => "Help me with my code"}) + assert changeset.valid? == true + + # WorkflowTemplate + changeset = + WorkflowTemplate.validate_form(%{"content" => "Create a workflow"}) + + assert changeset.valid? == true + end + + test "validate_form with empty/whitespace content returns error" do + changeset = JobCode.validate_form(%{"content" => " "}) + + assert changeset.valid?
== false + assert Keyword.has_key?(changeset.errors, :content) + {msg, _opts} = changeset.errors[:content] + assert msg == "Please enter a message before sending" + end + + test "validate_form with nil content returns error" do + changeset = JobCode.validate_form(%{"content" => nil}) + assert changeset.valid? == false + end + + test "validate_form with valid content passes" do + changeset = JobCode.validate_form(%{"content" => "Valid message"}) + assert changeset.valid? == true + end + end + + describe "streaming update handlers" do + setup do + user = insert(:user) + project = insert(:project) + workflow = insert(:workflow, project: project) + job = insert(:job, workflow: workflow) + + session = + insert(:job_chat_session, + user: user, + job: job + ) + + socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + session: session, + streaming_content: "", + streaming_status: nil, + streaming_error: nil + } + } + + %{socket: socket, session: session, user: user} + end + + test "update with streaming_chunk appends content to streaming_content", + %{socket: socket} do + chunk_data = %{content: "Hello "} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_chunk: chunk_data}, + socket + ) + + assert updated_socket.assigns.streaming_content == "Hello " + + # Append more content + chunk_data2 = %{content: "world!"} + + {:ok, updated_socket2} = + AiAssistantComponent.update( + %{id: "test-component", streaming_chunk: chunk_data2}, + updated_socket + ) + + assert updated_socket2.assigns.streaming_content == "Hello world!" + end + + test "update with status_update sets streaming_status", + %{socket: socket} do + status_data = %{status: "Processing your request..."} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", status_update: status_data}, + socket + ) + + assert updated_socket.assigns.streaming_status == + "Processing your request..." 
+ end + + test "update with streaming_complete keeps socket unchanged", + %{socket: socket} do + original_content = "Some content" + socket = put_in(socket.assigns.streaming_content, original_content) + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_complete: true}, + socket + ) + + # Should keep the content as is until payload arrives + assert updated_socket.assigns.streaming_content == original_content + end + end + + describe "handle_streaming_payload_complete" do + setup do + user = insert(:user) + project = insert(:project) + workflow = insert(:workflow, project: project) + job = insert(:job, workflow: workflow) + + session = + insert(:job_chat_session, + user: user, + job: job + ) + + # Create a user message in processing state + user_message = + insert(:chat_message, + role: :user, + chat_session: session, + user: user, + status: :processing, + content: "Help me with this" + ) + + session = AiAssistant.get_session!(session.id) + + socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + session: session, + streaming_content: "This is the streamed response", + streaming_status: "Complete", + streaming_error: nil, + pending_message: AsyncResult.loading(), + callbacks: %{} + } + } + + %{ + socket: socket, + session: session, + user: user, + user_message: user_message + } + end + + test "saves assistant message with streamed content and payload data", + %{socket: socket} do + payload_data = %{ + usage: %{"prompt_tokens" => 100, "completion_tokens" => 50}, + meta: %{"model" => "claude-3"}, + code: nil + } + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_payload_complete: payload_data}, + socket + ) + + # Verify the assistant message was saved + updated_session = updated_socket.assigns.session + + assistant_messages = + Enum.filter( + updated_session.messages, + &(&1.role == :assistant) + ) + + assert length(assistant_messages) == 1 + assistant_message = hd(assistant_messages) + assert assistant_message.content == "This is the streamed response" + assert assistant_message.status == :success + # Usage is tracked at the session level via AI usage tracking + end + + test "marks pending user messages as success", %{socket: socket} do + payload_data = %{usage: %{}, meta: nil, code: nil} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_payload_complete: payload_data}, + socket + ) + + # Verify user messages are marked as success + updated_session = updated_socket.assigns.session + + user_messages = + Enum.filter( + updated_session.messages, + &(&1.role == :user) + ) + + assert Enum.all?(user_messages, &(&1.status == :success)) + end + + test "clears streaming state after completion", %{socket: socket} do + payload_data = %{usage: %{}, meta: nil, code: nil} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_payload_complete: payload_data}, + socket + ) + + assert updated_socket.assigns.streaming_content == "" + assert updated_socket.assigns.streaming_status == nil + assert updated_socket.assigns.pending_message == AsyncResult.ok(nil) + end + + test "invokes callback when provided with code", %{socket: socket} do + test_pid = self() + + callback = fn code, message -> + send(test_pid, {:callback_invoked, code, message}) + end + + socket = put_in(socket.assigns.callbacks, %{on_message_received: callback}) + + payload_data = %{ + usage: %{}, + meta: nil, + code: Jason.encode!(%{"some" => "code"}) + } + + 
{:ok, _updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_payload_complete: payload_data}, + socket + ) + + # Callback should be invoked with code (as JSON string) and message + expected_code = Jason.encode!(%{"some" => "code"}) + assert_receive {:callback_invoked, ^expected_code, _message}, 2000 + end + + test "handles error when saving message fails", %{socket: socket} do + # Test that errors are handled gracefully by using empty content + # which should pass validation but we can verify error handling + socket_with_empty_content = put_in(socket.assigns.streaming_content, "") + + payload_data = %{usage: %{}, meta: nil, code: nil} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_payload_complete: payload_data}, + socket_with_empty_content + ) + + # Should clear state after attempt + assert updated_socket.assigns.streaming_content == "" + assert updated_socket.assigns.streaming_status == nil + assert updated_socket.assigns.pending_message == AsyncResult.ok(nil) + end + end + + describe "handle_streaming_error" do + setup do + user = insert(:user) + project = insert(:project) + workflow = insert(:workflow, project: project) + job = insert(:job, workflow: workflow) + + session = + insert(:job_chat_session, + user: user, + job: job + ) + + # Create a user message in processing state + user_message = + insert(:chat_message, + role: :user, + chat_session: session, + user: user, + status: :processing, + content: "Help me with this" + ) + + session = AiAssistant.get_session!(session.id) + + socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + session: session, + streaming_content: "Partial content", + streaming_status: "Processing", + streaming_error: nil, + pending_message: AsyncResult.ok(nil) + } + } + + %{ + socket: socket, + session: session, + user_message: user_message + } + end + + test "marks user messages as error", %{socket: socket} do + error_data = %{error: "Connection timeout"} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_error: error_data}, + socket + ) + + # Verify user messages are marked as error + updated_session = updated_socket.assigns.session + + user_messages = + Enum.filter( + updated_session.messages, + &(&1.role == :user) + ) + + assert Enum.all?(user_messages, &(&1.status == :error)) + end + + test "sets streaming_error in assigns", %{socket: socket} do + error_data = %{error: "Network connection failed"} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_error: error_data}, + socket + ) + + assert updated_socket.assigns.streaming_error == + "Network connection failed" + end + + test "clears streaming content and status", %{socket: socket} do + error_data = %{error: "Something went wrong"} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_error: error_data}, + socket + ) + + assert updated_socket.assigns.streaming_content == "" + assert updated_socket.assigns.streaming_status == nil + end + + test "sets pending_message to loading state", %{socket: socket} do + error_data = %{error: "Error occurred"} + + {:ok, updated_socket} = + AiAssistantComponent.update( + %{id: "test-component", streaming_error: error_data}, + socket + ) + + assert updated_socket.assigns.pending_message.loading == true + end + end + + describe "update with message_status_changed" do + setup do + user = insert(:user) + project = insert(:project) + workflow = 
insert(:workflow, project: project) + job = insert(:job, workflow: workflow) + session = insert(:job_chat_session, user: user, job: job) + + socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + session: session, + streaming_content: "Existing streaming content", + streaming_status: "Processing...", + pending_message: AsyncResult.loading(), + handler: JobCode, + callbacks: %{} + } + } + + %{socket: socket, session: session} + end + + test "update with message_status_changed :success preserves streaming state", + %{socket: socket, session: session} do + # This tests lines 193-196: handle_message_status({:success, session}) + # through the public update/2 function + {:ok, updated_socket} = + AiAssistantComponent.update( + %{message_status_changed: {:success, session}}, + socket + ) + + assert updated_socket.assigns.streaming_content == + "Existing streaming content" + + assert updated_socket.assigns.streaming_status == "Processing..." + assert updated_socket.assigns.pending_message == AsyncResult.ok(nil) + end + + test "update with message_status_changed :error preserves streaming state", + %{socket: socket, session: session} do + # This tests lines 200-205: handle_message_status({:error, session}) + # through the public update/2 function + {:ok, updated_socket} = + AiAssistantComponent.update( + %{message_status_changed: {:error, session}}, + socket + ) + + assert updated_socket.assigns.streaming_content == + "Existing streaming content" + + assert updated_socket.assigns.streaming_status == "Processing..." + assert updated_socket.assigns.pending_message == AsyncResult.ok(nil) + end + end + + describe "streaming event handlers - testing through handle_event" do + setup do + user = insert(:user) + project = insert(:project) + workflow = insert(:workflow, project: project) + job = insert(:job, workflow: workflow) + + session = insert(:job_chat_session, user: user, job: job) + + user_message = + insert(:chat_message, + role: :user, + chat_session: session, + user: user, + status: :pending, + content: "Help me" + ) + + session = AiAssistant.get_session!(session.id) + + %{ + user: user, + project: project, + job: job, + session: session, + user_message: user_message + } + end + + test "retry_streaming resubmits last user message and clears error", + %{session: session} do + # Lines 523-552: Testing retry_streaming handler + socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + session: session, + streaming_error: "Connection timeout", + pending_message: AsyncResult.ok(nil) + } + } + + {:noreply, updated_socket} = + AiAssistantComponent.handle_event("retry_streaming", %{}, socket) + + # Should clear error and set loading state + assert updated_socket.assigns.streaming_error == nil + assert updated_socket.assigns.pending_message.loading == true + end + + test "retry_streaming returns unchanged socket when no user message exists" do + # Test the else branch (line 550) + session_without_user_msg = insert(:job_chat_session) + + socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + session: session_without_user_msg, + streaming_error: "Some error", + pending_message: AsyncResult.ok(nil) + } + } + + {:noreply, updated_socket} = + AiAssistantComponent.handle_event("retry_streaming", %{}, socket) + + # Socket should be returned unchanged + assert updated_socket.assigns.session == session_without_user_msg + end + + test "cancel_streaming clears error state and pending message", + %{session: session} do + # Lines 554-562: Testing cancel_streaming handler + 
socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + session: session, + streaming_error: "Network failure", + pending_message: AsyncResult.loading(), + flash: %{} + } + } + + {:noreply, updated_socket} = + AiAssistantComponent.handle_event("cancel_streaming", %{}, socket) + + # Should clear both error and pending state + assert updated_socket.assigns.streaming_error == nil + assert updated_socket.assigns.pending_message == AsyncResult.ok(nil) + end + end + + describe "handle_save_error - testing error path through send_message" do + setup do + user = insert(:user) + project = insert(:project) + workflow = insert(:workflow, project: project) + job = insert(:job, workflow: workflow) + + %{user: user, project: project, job: job, workflow: workflow} + end + + test "send_message with empty content triggers validation error", + %{user: user, project: project, job: job} do + # Lines 705-709: handle_save_error is called when save fails + # We trigger this by sending empty/whitespace content + socket = %Phoenix.LiveView.Socket{ + assigns: %{ + __changed__: %{}, + user: user, + project: project, + job: job, + action: :new, + can_edit: true, + handler: JobCode, + ai_limit_result: :ok, + pending_message: AsyncResult.ok(nil), + callbacks: %{}, + changeset: JobCode.validate_form(%{"content" => nil}) + } + } + + params = %{"assistant" => %{"content" => " "}} + + {:noreply, updated_socket} = + AiAssistantComponent.handle_event("send_message", params, socket) + + # Should have alert set (from handle_save_error if save failed) + # Or validation error in changeset + assert updated_socket.assigns.alert != nil || + !updated_socket.assigns.changeset.valid? + end + end + + describe "component initialization - testing assign_new" do + test "mount initializes all streaming fields" do + # Lines 380-382: assign_new for streaming fields + {:ok, socket} = + AiAssistantComponent.mount(%Phoenix.LiveView.Socket{ + assigns: %{__changed__: %{}} + }) + + # Verify streaming fields are initialized + assert socket.assigns.streaming_content == "" + assert socket.assigns.streaming_status == nil + assert socket.assigns.streaming_error == nil + end + end + + # ============================================================================ + # INTEGRATION TESTS + # ============================================================================ + + setup :set_mox_global + setup :register_and_log_in_user + setup :create_project_for_current_user + setup :verify_on_exit! 
+ + setup %{project: project, user: user} do + # Stub Apollo as enabled and online + Mox.stub(Lightning.MockConfig, :apollo, fn + :endpoint -> "http://localhost:4001" + :ai_assistant_api_key -> "test_api_key" + :timeout -> 5_000 + end) + + workflow = insert(:simple_workflow, project: project) + {:ok, _snapshot} = Lightning.Workflows.Snapshot.create(workflow) + job = workflow.jobs |> List.first() + + # Skip disclaimer for most tests + skip_disclaimer(user) + + %{workflow: workflow, job: job} + end + + defp skip_disclaimer(user, read_at \\ DateTime.utc_now() |> DateTime.to_unix()) do + Ecto.Changeset.change(user, %{ + preferences: %{"ai_assistant.disclaimer_read_at" => read_at} + }) + |> Lightning.Repo.update!() + end + + describe "template rendering - onboarding and AI disabled states" do + test "renders onboarding when user hasn't read disclaimer", + %{conn: conn, project: project, workflow: workflow, user: user} do + # Reset disclaimer + skip_disclaimer(user, nil) + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + # Should show onboarding/disclaimer + assert has_element?(view, "#get-started-with-ai-btn") + + html = render(view) + assert html =~ "AI Assistant is a chat agent" + assert html =~ "responsible for how its output is used" + + # Click to accept disclaimer + view + |> element("#get-started-with-ai-btn") + |> render_click() + + # Should now show chat interface + refute has_element?(view, "#get-started-with-ai-btn") + assert has_element?(view, "form[phx-submit='send_message']") + end + + test "renders AI not configured message when AI is disabled", + %{conn: conn, project: project, workflow: workflow} do + # Stub AI as disabled + Mox.stub(Lightning.MockConfig, :apollo, fn + :endpoint -> nil + :ai_assistant_api_key -> nil + :timeout -> 5_000 + end) + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + # Should show "not configured" message (covers + # render_ai_not_configured) + html = render(view) + assert html =~ "AI Assistant Not Available" + assert html =~ "AI Assistant has not been configured" + assert html =~ "app.openfn.org" + assert html =~ "Configure the Apollo endpoint URL" + end + + test "disclaimer modal can be opened with link", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + html = render(view) + # Should have link to open disclaimer + assert html =~ "About the AI Assistant" + assert html =~ "OpenFn Responsible AI Policy" + + # Disclaimer content should be in the DOM (hidden) + assert html =~ "Claude Sonnet 3.7" + assert html =~ "Anthropic" + assert html =~ "docs.openfn.org" + end + end + + describe "template rendering - chat history (action :new)" do + test "renders empty state when no sessions exist", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + html = render(view) + # Should show empty state (covers render_all_sessions empty branch) + assert html =~ "No chat history yet" + assert html =~ "Start a conversation" + end + + test "renders chat history with sessions", + %{conn: conn, project: project, workflow: workflow, user: user} do + # Create sessions with different characteristics + session1 = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "First chat session" + ) + + insert(:chat_message, 
chat_session: session1, user: user, content: "Hello") + + session2 = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Second chat session" + ) + + insert(:chat_message, chat_session: session2, user: user, content: "World") + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + html = render(view) + + # Should show chat history header + assert html =~ "Chat History" + + # Should show sessions + assert html =~ "First chat" + assert html =~ "Second chat" + + # Should have session elements + assert has_element?(view, "[id='session-#{session1.id}']") + assert has_element?(view, "[id='session-#{session2.id}']") + + # Should show sort toggle + assert has_element?(view, "button[phx-click='toggle_sort']") + assert html =~ "Latest" || html =~ "Oldest" + end + + test "renders session with long title showing ellipsis", + %{conn: conn, project: project, workflow: workflow, user: user} do + max_length = AiAssistant.title_max_length() + long_title = String.duplicate("A", max_length + 10) + + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: long_title + ) + + insert(:chat_message, chat_session: session, user: user) + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + html = render_async(view) + + # Should show ellipsis for long titles (covers maybe_show_ellipsis) + # Note: Current implementation shows full title + "..." without + # truncation + assert html =~ "..." + assert html =~ String.slice(long_title, 0, 20) + end + + test "renders session preview with message count", + %{conn: conn, project: project, workflow: workflow, user: user} do + # Session with 0 messages + _session0 = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Empty" + ) + + # Session with 1 message + session1 = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "One" + ) + + insert(:chat_message, chat_session: session1, user: user) + + # Session with multiple messages + session_many = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Many" + ) + + insert(:chat_message, chat_session: session_many, user: user) + insert(:chat_message, chat_session: session_many, user: user) + insert(:chat_message, chat_session: session_many, user: user) + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + html = render(view) + + # Should show different message count formats (covers + # format_message_count branches) + assert html =~ "New conversation" || html =~ "0" + assert html =~ "1 message" + assert html =~ "3 messages" + end + + test "toggle sort direction changes order", + %{conn: conn, project: project, workflow: workflow, user: user} do + # Create sessions to have something to sort + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Session 1" + ) + + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Session 2" + ) + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + initial_html = render(view) + initial_has_latest = initial_html =~ "Latest" + + # Click toggle sort + view + |> element("button[phx-click='toggle_sort']") + |> render_click() + + render_async(view) + + updated_html = 
render(view) + + # Sort direction should change + if initial_has_latest do + assert updated_html =~ "Oldest" + else + assert updated_html =~ "Latest" + end + end + end + + describe "template rendering - individual session (action :show)" do + test "renders individual session with messages", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Test session" + ) + + _user_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + content: "Help me with this job", + status: :success + ) + + assistant_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :assistant, + content: "I can help you with that", + status: :success + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should show session header (covers render_individual_session header) + assert html =~ "Test session" + assert html =~ "messages" + + # Should show close button + assert has_element?( + view, + "[id='close-chat-session-btn-#{session.id}']" + ) + + # Should show user message (covers user_message template) + assert html =~ "Help me with this job" + + # Should show assistant message (covers assistant_message template) + assert html =~ "I can help you with that" + + # Should show copy button + assert has_element?( + view, + "[id='copy-message-#{assistant_msg.id}-content-btn']" + ) + + # Should show user avatar with initials + first_initial = String.first(user.first_name) + last_initial = String.first(user.last_name) + assert html =~ "#{first_initial}#{last_initial}" + end + + test "renders user message with different statuses", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + # Success message + _success_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :success, + content: "Success message" + ) + + # Pending message + _pending_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :pending, + content: "Pending message" + ) + + # Error message + error_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :error, + content: "Error message" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should show all messages + assert html =~ "Success message" + assert html =~ "Pending message" + assert html =~ "Error message" + + # Should show status indicators (covers message_status_display) + assert html =~ "Sent" || html =~ "Sending" || html =~ "Failed" + + # Error message should show retry button (covers retry/cancel buttons) + assert has_element?(view, "[id='retry-message-#{error_msg.id}']") + + # Should show cancel button for error message with multiple user + # messages + assert has_element?(view, "[id='cancel-message-#{error_msg.id}']") + end + + test "renders assistant message with code indicator", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + code_data = Jason.encode!(%{"jobs" => [], "triggers" => []}) + + _assistant_msg = + 
insert(:chat_message, + chat_session: session, + user: user, + role: :assistant, + content: "Heres workflow code", + status: :success, + code: code_data + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + html = render_async(view) + + # Should show code indicator banner + assert html =~ "Click to restore workflow to here" + assert html =~ "Heres workflow code" + end + + test "renders formatted markdown content in assistant messages", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + markdown_content = """ + # Heading + + Here's some **bold** text and a [link](https://example.com). + + - Item 1 + - Item 2 + + ```js + console.log('code block'); + ``` + """ + + _assistant_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :assistant, + content: markdown_content, + status: :success + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should render markdown (covers formatted_content) + assert html =~ "Heading" + assert html =~ "bold" + assert html =~ "href=\"https://example.com\"" + assert html =~ "Item 1" + assert html =~ "console.log" + end + + test "renders loading state for pending message", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + # Create pending user message + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :processing, + content: "Help me" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should show loading indicator (covers assistant_typing_indicator) + assert html =~ "animate-bounce" || html =~ "Processing" + end + end + + describe "form validation and interaction" do + test "validates empty message and shows error", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + # Try to submit empty content (covers send_message validation) + view + |> element("form[phx-submit='send_message']") + |> render_submit(%{"assistant" => %{"content" => " "}}) + + # Should show validation error + html = render(view) + assert html =~ "Please enter a message before sending" + end + + test "form shows disabled state when endpoint not available", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + # The form should render with conditional classes based on disabled + # state + html = render(view) + + # Should show PII warning (covers chat_input template) + assert html =~ "Do not paste PII or sensitive data" + + # Should have submit button + assert has_element?(view, "button[type='submit']") + end + + test "creates new session when sending first message", + %{conn: conn, project: project, workflow: workflow} do + Lightning.AiAssistantHelpers.stub_online() + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + # Submit a message (covers save_message :new action) + view + |> 
element("form[phx-submit='send_message']") + |> render_submit(%{ + "assistant" => %{"content" => "Create a new workflow"} + }) + + # Should redirect to show the new session + assert_patch(view) + + # Verify session was created + sessions = AiAssistant.list_sessions(project, :desc, workflow: workflow) + assert length(sessions.sessions) >= 1 + end + end + + describe "event handlers through UI interactions" do + test "clicking close button returns to history", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + insert(:chat_message, chat_session: session, user: user) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + # Click close button (covers navigation) + view + |> element("[id='close-chat-session-btn-#{session.id}']") + |> render_click() + + # Should navigate back to history + assert_patch(view, ~p"/projects/#{project}/w/#{workflow}?method=ai") + end + + test "retry button on error message triggers retry", + %{conn: conn, project: project, workflow: workflow, user: user} do + Lightning.AiAssistantHelpers.stub_online() + + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + failed_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :error, + content: "Retry me" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + # Click retry (covers handle_event "retry_message") + view + |> element("[id='retry-message-#{failed_msg.id}']") + |> render_click() + + # Should show loading state + html = render(view) + assert html =~ "Processing" || html =~ "animate-bounce" + end + + test "cancel button on error message marks as cancelled", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + # Need multiple messages for cancel button to appear + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :success + ) + + error_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :error, + content: "Cancel me" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + # Click cancel (covers handle_event "cancel_message") + view + |> element("[id='cancel-message-#{error_msg.id}']") + |> render_click() + + # Message should be marked cancelled + reloaded_msg = Lightning.Repo.reload(error_msg) + assert reloaded_msg.status == :cancelled + end + + test "validate event updates changeset", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + # Trigger validation (covers handle_event "validate") + view + |> element("form[phx-submit='send_message']") + |> render_change(%{"assistant" => %{"content" => "Valid content"}}) + + # Form should process the validation + html = render(view) + refute html =~ "Please enter a message" + end + end + + describe "helper function coverage through rendering" do + test "session time formatting handles different time ranges", + %{conn: conn, project: project, workflow: workflow, user: user} do + # 
Create sessions at different times to cover all format_session_time + # branches + times = [ + DateTime.add(DateTime.utc_now(), -30, :second), + # < 60s + DateTime.add(DateTime.utc_now(), -15 * 60, :second), + # < 1 hour + DateTime.add(DateTime.utc_now(), -5 * 3600, :second), + # < 24 hours + DateTime.add(DateTime.utc_now(), -3 * 86400, :second), + # < 7 days + DateTime.add(DateTime.utc_now(), -10 * 86400, :second) + # >= 7 days + ] + + for time <- times do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + updated_at: time + ) + + insert(:chat_message, chat_session: session, user: user) + end + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + html = render(view) + + # Should show different time formats (covers all format_session_time + # branches) + assert html =~ "ago" || + html =~ "Just now" || + html =~ "m ago" || + html =~ "h ago" || + html =~ "d ago" || + String.match?(html, ~r/\w{3} \d{2}/) + end + + test "message preview truncates long content", + %{conn: conn, project: project, workflow: workflow, user: user} do + long_content = String.duplicate("x", 100) + + # Create session with ONLY a long message (no title) + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: nil + ) + + # Insert message with long content that will be used for preview + insert(:chat_message, + chat_session: session, + user: user, + content: long_content + ) + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + html = render_async(view) + + # Should be truncated with ellipsis (covers format_last_message) + # The format depends on how the session lists messages + assert String.contains?(html, "x") || String.contains?(html, "message") + end + + test "message timestamps are formatted correctly", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + insert(:chat_message, + chat_session: session, + user: user, + content: "Test" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should show formatted time (covers format_message_time) + # Format is like "02:30 PM" + assert html =~ ~r/\d{2}:\d{2}\s+(AM|PM)/ + end + + test "session preview with empty message content", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Empty content" + ) + + # Empty content message (covers add_ellipsis_if_needed empty branch) + insert(:chat_message, chat_session: session, user: user, content: "") + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + html = render_async(view) + + # Should show "New conversation" for empty content + assert html =~ "New conversation" || html =~ "Empty content" + end + end + + describe "streaming state handling" do + test "retry_streaming triggers last user message retry with error state", + %{conn: conn, project: project, workflow: workflow, user: user} do + Lightning.AiAssistantHelpers.stub_online() + + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + # Message that had an error during streaming + error_msg = + insert(:chat_message, + 
chat_session: session, + user: user, + role: :user, + status: :error, + content: "Test message" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + # Should show the message with error status + html = render(view) + assert html =~ "Test message" + + # Now test that retrying works (covers handle_event "retry_message" path) + # The retry_streaming event can be tested if a streaming_error is in + # state but we need actual streaming to occur in parent LiveView + # So we verify the error message rendering exists + assert has_element?(view, "[id='retry-message-#{error_msg.id}']") + end + + test "renders loading state during streaming (processing status)", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + # Create processing user message (indicates streaming in progress) + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :processing, + content: "Help me" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should show loading indicator (covers assistant_typing_indicator) + assert html =~ "animate-bounce" || + html =~ "Processing" || + html =~ "rounded-full bg-gray-400" + end + end + + describe "edge cases and error handling" do + test "form validation prevents empty message submission", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + # Try to send empty message (covers authorization and validation paths) + view + |> element("form[phx-submit='send_message']") + |> render_submit(%{"assistant" => %{"content" => ""}}) + + html = render(view) + + # Should show validation error (covers empty content validation) + assert html =~ "Please enter a message" + end + + test "handles async result states for endpoint check", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + # Wait for async result to complete + render_async(view) + + _html = render(view) + + # Should show the form (endpoint_available async result is handled) + assert has_element?(view, "form[phx-submit='send_message']") + end + + test "renders assistant message with code (clickable)", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + code_data = Jason.encode!(%{"jobs" => [], "triggers" => []}) + + assistant_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :assistant, + content: "Heres a workflow template", + status: :success, + code: code_data + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should show code indicator banner (covers message with code rendering) + assert html =~ "Click to restore workflow to here" + assert html =~ "Heres a workflow template" + + # Message should be clickable to select + assert has_element?(view, "[data-message-id='#{assistant_msg.id}']") + end + + test "handles retry_message with changeset error", + %{conn: conn, project: project, workflow: 
workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + error_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :user, + status: :error, + content: "Retry me" + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + # Stub retry to fail with validation error + Mox.expect(Lightning.MockConfig, :apollo, 0, fn + :endpoint -> nil + _ -> nil + end) + + # This should trigger the error path + view + |> element("[id='retry-message-#{error_msg.id}']") + |> render_click() + + # Should handle gracefully + html = render(view) + assert html =~ "Retry me" || html =~ "Failed" + end + + test "handles form_content_empty with various edge cases", + %{conn: conn, project: project, workflow: workflow} do + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + # Test nil content + view + |> element("form[phx-submit='send_message']") + |> render_submit(%{"assistant" => %{"content" => nil}}) + + html = render(view) + assert html =~ "Please enter a message" + + # Test whitespace only + view + |> element("form[phx-submit='send_message']") + |> render_submit(%{"assistant" => %{"content" => " \n\t "}}) + + html = render(view) + assert html =~ "Please enter a message" + end + + test "load_more_sessions extends session list", + %{conn: conn, project: project, workflow: workflow, user: user} do + # Create more sessions than default page size + for i <- 1..25 do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Session #{i}" + ) + + insert(:chat_message, chat_session: session, user: user) + end + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + initial_html = render(view) + + # Should show pagination (covers pagination rendering) + assert initial_html =~ "remaining" + assert has_element?(view, "button[phx-click='load_more_sessions']") + + # Click load more (covers handle_event "load_more_sessions") + view + |> element("button[phx-click='load_more_sessions']") + |> render_click() + + render_async(view) + + # Should load more sessions + final_html = render(view) + assert final_html =~ "Session" + end + + test "loads sessions successfully", + %{conn: conn, project: project, workflow: workflow, user: user} do + # Create a session to test successful loading + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user, + title: "Test session" + ) + + {:ok, view, _html} = + live(conn, ~p"/projects/#{project}/w/#{workflow}?method=ai") + + render_async(view) + + html = render(view) + + # Should show sessions (covers successful async loading) + assert html =~ "Chat History" || html =~ "Test session" + assert has_element?(view, "form[phx-submit='send_message']") + end + + test "select_assistant_message event on code message", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + code_data = Jason.encode!(%{"jobs" => [], "triggers" => []}) + + assistant_msg = + insert(:chat_message, + chat_session: session, + user: user, + role: :assistant, + content: "Workflow content", + code: code_data + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + 
render_async(view) + + # Verify message element exists with data attribute + assert has_element?(view, "[data-message-id='#{assistant_msg.id}']") + + # Message with code should be clickable (has phx-click attribute set by + # template). We cannot test the actual click without a real handler + # callback but we verify the element is set up correctly for interaction + html = render(view) + assert html =~ "Workflow content" + assert html =~ "Click to restore workflow to here" + end + end + + describe "markdown formatting edge cases" do + test "handles markdown with code blocks with language", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + # Test code with language class (covers apply_attributes for code) + content = """ + ```javascript + const x = 1; + ``` + """ + + insert(:chat_message, + chat_session: session, + user: user, + role: :assistant, + content: content + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should render with language class + assert html =~ "javascript" + assert html =~ "const x" + end + + test "handles invalid markdown gracefully", + %{conn: conn, project: project, workflow: workflow, user: user} do + session = + insert(:workflow_chat_session, + project: project, + workflow: workflow, + user: user + ) + + # Content that might fail markdown parsing + invalid_content = "This is [valid( markdown" + + insert(:chat_message, + chat_session: session, + user: user, + role: :assistant, + content: invalid_content + ) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project}/w/#{workflow}?method=ai&w-chat=#{session.id}" + ) + + render_async(view) + + html = render(view) + + # Should still render something (covers error case in formatted_content) + assert html =~ "not" + end + end +end diff --git a/test/lightning_web/live/ai_assistant_live_test.exs b/test/lightning_web/live/ai_assistant_live_test.exs index 1fec4578b2..14f733f375 100644 --- a/test/lightning_web/live/ai_assistant_live_test.exs +++ b/test/lightning_web/live/ai_assistant_live_test.exs @@ -1,17 +1,26 @@ defmodule LightningWeb.AiAssistantLiveTest do - use LightningWeb.ConnCase, async: true + use LightningWeb.ConnCase, async: false + use Mimic import Lightning.Factories import Lightning.WorkflowLive.Helpers - import Mox + import Mox, except: [verify_on_exit!: 1] + import Eventually + import Ecto.Query use Oban.Testing, repo: Lightning.Repo import Phoenix.Component import Phoenix.LiveViewTest - setup :verify_on_exit! 
+ setup :set_mox_global setup :register_and_log_in_user setup :create_project_for_current_user + setup do + Mox.verify_on_exit!() + Mimic.set_mimic_global() + :ok + end + defp skip_disclaimer(user, read_at \\ DateTime.utc_now() |> DateTime.to_unix()) do Ecto.Changeset.change(user, %{ preferences: %{"ai_assistant.disclaimer_read_at" => read_at} @@ -484,35 +493,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - # Simply return the response immediately - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "history" => [ - %{"role" => "user", "content" => "Ping"}, - %{"role" => "assistant", "content" => "Pong"} - ] - } - }} - end - ) + Lightning.AiAssistantHelpers.stub_online() {:ok, view, _html} = live( @@ -528,6 +509,12 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping"}) + # Simulate streaming response + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + job_id: job_1.id, + response: "Pong" + ) + assert_patch(view) # In test environment with inline Oban, response appears immediately @@ -542,40 +529,11 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) + Lightning.AiAssistantHelpers.stub_online() expected_question = "Can you help me with this?" 
expected_answer = "No, I am a robot" - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "history" => [ - %{"role" => "user", "content" => "Ping"}, - %{"role" => "assistant", "content" => "Pong"}, - %{"role" => "user", "content" => expected_question}, - %{"role" => "assistant", "content" => expected_answer} - ] - } - }} - end - ) - session = insert(:job_chat_session, user: user, @@ -607,6 +565,12 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: expected_question}) + # Simulate streaming response + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + job_id: job_1.id, + response: expected_answer + ) + # In test environment with inline Oban, the response appears immediately html = render(view) assert html =~ expected_answer @@ -619,25 +583,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, %Tesla.Env{status: 400, body: %{"message" => "Bad request"}}} - end - ) + Lightning.AiAssistantHelpers.stub_online() {:ok, view, _html} = live( @@ -653,6 +599,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping"}) + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: "Bad request" + ) + assert_patch(view) # Error appears immediately in test environment @@ -671,30 +622,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - # Return an error response - {:ok, - %Tesla.Env{ - status: 500, - body: %{"message" => "Internal server error"} - }} - end - ) + Lightning.AiAssistantHelpers.stub_online() {:ok, view, _html} = live( @@ -711,9 +639,14 @@ defmodule LightningWeb.AiAssistantLiveTest do view |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping"}) + + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: "Internal server error" + ) end) - assert log =~ "AI query failed" + assert log =~ "Streaming error for session" assert log =~ "Internal server error" assert_patch(view) @@ -784,35 +717,10 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) + Lightning.AiAssistantHelpers.stub_online() error_message = 
"Server is temporarily unavailable" - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 503, - body: %{ - "code" => 503, - "message" => error_message - } - }} - end - ) - {:ok, view, _html} = live( conn, @@ -827,9 +735,14 @@ defmodule LightningWeb.AiAssistantLiveTest do view |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping"}) + + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: error_message + ) end) - assert log =~ "AI query failed for session" + assert log =~ "Streaming error for session" assert log =~ "Server is temporarily unavailable" assert_patch(view) @@ -849,25 +762,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:error, :timeout} - end - ) + Lightning.AiAssistantHelpers.stub_online() {:ok, view, _html} = live( @@ -884,9 +779,14 @@ defmodule LightningWeb.AiAssistantLiveTest do view |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping"}) + + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: "Request timed out. Please try again." + ) end) - assert log =~ "AI query timed out for session" + assert log =~ "Streaming error for session" assert log =~ "Request timed out. Please try again." assert_patch(view) @@ -906,25 +806,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:error, :econnrefused} - end - ) + Lightning.AiAssistantHelpers.stub_online() {:ok, view, _html} = live( @@ -942,9 +824,14 @@ defmodule LightningWeb.AiAssistantLiveTest do view |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping"}) + + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: "Unable to reach the AI server. Please try again later." + ) end) - assert log =~ "Connection refused to AI server for session" + assert log =~ "Streaming error for session" assert log =~ "Unable to reach the AI server. Please try again later." 
html = render_async(view) @@ -962,25 +849,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub( - Lightning.Tesla.Mock, - :call, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:error, :unknown_error} - end - ) + Lightning.AiAssistantHelpers.stub_online() {:ok, view, _html} = live( @@ -995,6 +864,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping"}) + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: "An unexpected error occurred" + ) + assert_patch(view) render_async(view) @@ -1198,21 +1072,7 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, %Tesla.Env{status: 500}} - end) + Lightning.AiAssistantHelpers.stub_online() session = insert(:job_chat_session, @@ -1251,24 +1111,6 @@ defmodule LightningWeb.AiAssistantLiveTest do "#cancel-message-#{List.first(session.messages).id}" ) - # Update the mock for successful response - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "history" => [ - %{"role" => "user", "content" => "Hello"}, - %{"role" => "assistant", "content" => "Hi there!"} - ] - } - }} - end) - # Click retry view |> element("#retry-message-#{List.first(session.messages).id}") @@ -1284,10 +1126,35 @@ defmodule LightningWeb.AiAssistantLiveTest do assert job.args["message_id"] == List.first(session.messages).id - # Process the job + # Process the job and simulate streaming response assert %{success: 1} = Oban.drain_queue(Lightning.Oban, queue: :ai_assistant) + # Subscribe to wait for completion + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session.id}") + + Lightning.AiAssistantHelpers.simulate_streaming_response( + session.id, + "Hi there!" 
+ ) + + # Wait for streaming to complete + assert_receive {:ai_assistant, :streaming_payload_complete, _}, 1000 + + # Poll until LiveView has processed the message + Eventually.eventually( + fn -> + session + |> Lightning.Repo.reload() + |> Lightning.Repo.preload(:messages, force: true) + |> then(& &1.messages) + |> Enum.any?(fn msg -> msg.status == :success end) + end, + true, + 1000, + 10 + ) + # Re-render to see the updated state html = render(view) @@ -1773,42 +1640,10 @@ defmodule LightningWeb.AiAssistantLiveTest do ) insert(:log_line, run: run) - log1 = insert(:log_line, run: run, step: hd(run.steps)) - log2 = insert(:log_line, run: run, step: hd(run.steps)) + _log1 = insert(:log_line, run: run, step: hd(run.steps)) + _log2 = insert(:log_line, run: run, step: hd(run.steps)) - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Lightning.Tesla.Mock - |> expect( - :call, - 2, - fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post, body: json_body}, _opts -> - body = Jason.decode!(json_body) - assert Map.has_key?(body["context"], "log") - assert body["context"]["log"] == log1.message <> "\n" <> log2.message - - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "history" => [ - %{"role" => "user", "content" => "Ping"}, - %{"role" => "assistant", "content" => "Pong"} - ] - } - }} - end - ) + Lightning.AiAssistantHelpers.stub_online() {:ok, view, _html} = live( @@ -1825,10 +1660,16 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Ping", options: %{logs: "true"}}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + job_id: job_1.id, + response: "Pong" + ) + assert_patch(view) - render_async(view) - end + html = render_async(view) + assert html =~ "Pong" + end end describe "AI Assistant - Workflow Template Mode" do @@ -1874,35 +1715,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, user: user } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "I'll help you create a Salesforce sync workflow", - "response_yaml" => nil, - "usage" => %{}, - "history" => [ - %{ - "role" => "user", - "content" => "Create a Salesforce sync workflow" - } - ] - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -1917,6 +1730,12 @@ defmodule LightningWeb.AiAssistantLiveTest do assistant: %{content: "Create a Salesforce sync workflow"} ) + # Simulate streaming response - workflow mode uses project_id as workflow_id + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow_id: project.id, + response: "I'll help you create a Salesforce sync workflow" + ) + assert_patch(view) render_async(view) @@ -1930,13 +1749,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, user: user } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - 
:timeout -> 5_000 - end) + Lightning.AiAssistantHelpers.stub_online() workflow_yaml = """ name: "Salesforce Sync Workflow" @@ -1961,22 +1774,6 @@ defmodule LightningWeb.AiAssistantLiveTest do enabled: true """ - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "Here's your Salesforce sync workflow:", - "response_yaml" => workflow_yaml, - "usage" => %{} - } - }} - end) - skip_disclaimer(user) {:ok, view, _html} = @@ -1990,6 +1787,12 @@ defmodule LightningWeb.AiAssistantLiveTest do assistant: %{content: "Create a Salesforce sync workflow"} ) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow_id: project.id, + response: "Here's your Salesforce sync workflow:", + code: workflow_yaml + ) + assert_patch(view) render_async(view) @@ -2087,25 +1890,7 @@ defmodule LightningWeb.AiAssistantLiveTest do project: project, user: user } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 503, - body: %{"message" => "Service temporarily unavailable"} - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -2118,6 +1903,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-new-workflow-panel-assistant") |> render_submit(assistant: %{content: "Create a workflow"}) + Lightning.AiAssistantHelpers.submit_and_simulate_error( + workflow_id: project.id, + error: "Service temporarily unavailable" + ) + assert_patch(view) html = render(view) @@ -2518,21 +2308,7 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:error, :timeout} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -2549,9 +2325,14 @@ defmodule LightningWeb.AiAssistantLiveTest do job_view |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Help with code"}) + + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: "Request timed out. Please try again." + ) end) - assert log =~ "AI query timed out for session" + assert log =~ "Streaming error for session" assert log =~ "Request timed out. Please try again." html = render_async(job_view) @@ -2571,9 +2352,14 @@ defmodule LightningWeb.AiAssistantLiveTest do workflow_view |> form("#ai-assistant-form-new-workflow-panel-assistant") |> render_submit(assistant: %{content: "Create workflow"}) + + Lightning.AiAssistantHelpers.submit_and_simulate_error( + workflow_id: project.id, + error: "Request timed out. Please try again." + ) end) - assert log =~ "AI query timed out for session" + assert log =~ "Streaming error for session" assert log =~ "Request timed out. Please try again." 
html = render_async(workflow_view) @@ -2813,13 +2599,7 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) + Lightning.AiAssistantHelpers.stub_online() markdown_response = """ Here's your solution: @@ -2838,23 +2618,6 @@ defmodule LightningWeb.AiAssistantLiveTest do 2. Deploy to production """ - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => markdown_response, - "history" => [ - %{"role" => "assistant", "content" => markdown_response} - ] - } - }} - end) - skip_disclaimer(user) {:ok, job_view, _html} = @@ -2869,6 +2632,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Help me"}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + job_id: job_1.id, + response: markdown_response + ) + render_async(job_view) job_html = render(job_view) @@ -2885,6 +2653,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-new-workflow-panel-assistant") |> render_submit(assistant: %{content: "Create workflow"}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow_id: project.id, + response: markdown_response + ) + render_async(workflow_view) workflow_html = render(workflow_view) @@ -2899,35 +2672,10 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) + Lightning.AiAssistantHelpers.stub_online() response_content = "Here's some code you can copy" - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => response_content, - "response_yaml" => nil, - "usage" => %{}, - "history" => [ - %{"role" => "assistant", "content" => response_content} - ] - } - }} - end) - skip_disclaimer(user) {:ok, job_view, _html} = @@ -2942,6 +2690,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Help"}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + job_id: job_1.id, + response: response_content + ) + assert_patch(job_view) render_async(job_view) @@ -2967,6 +2720,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-new-workflow-panel-assistant") |> render_submit(assistant: %{content: "Create"}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow_id: project.id, + response: response_content + ) + assert_patch(workflow_view) render_async(workflow_view) @@ -3226,54 +2984,7 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - 
Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post, url: ^apollo_endpoint <> "/query"}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "history" => [ - %{"role" => "assistant", "content" => "Response content"} - ] - } - }} - - %{method: :post, url: ^apollo_endpoint <> "/workflow_chat"}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "Response content", - "response_yaml" => nil, - "usage" => %{} - } - }} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "history" => [ - %{"role" => "assistant", "content" => "Response content"} - ], - "response" => "Response content", - "response_yaml" => nil, - "usage" => %{} - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -3290,6 +3001,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Help with debugging"}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + job_id: job_1.id, + response: "Response content" + ) + # This creates a session and navigates to include j-chat parameter current_path = assert_patch(job_view) render_async(job_view) @@ -3308,6 +3024,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-new-workflow-panel-assistant") |> render_submit(assistant: %{content: "Create new workflow"}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow_id: project.id, + response: "Response content" + ) + assert_patch(workflow_view) render_async(workflow_view) @@ -3333,32 +3054,7 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "Delayed response", - "response_yaml" => nil, - "usage" => %{}, - "history" => [ - %{"role" => "assistant", "content" => "Delayed response"} - ] - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -3374,6 +3070,11 @@ defmodule LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Test async"}) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + job_id: job_1.id, + response: "Delayed response" + ) + assert_patch(view) html = render(view) @@ -3388,29 +3089,7 @@ defmodule LightningWeb.AiAssistantLiveTest do user: user, workflow: %{jobs: [job_1 | _]} = workflow } do - apollo_endpoint = "http://localhost:4001" - - Mox.stub(Lightning.MockConfig, :apollo, fn - :endpoint -> apollo_endpoint - :ai_assistant_api_key -> "ai_assistant_api_key" - :timeout -> 5_000 - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: ^apollo_endpoint <> "/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - # Return a server error - {:ok, - %Tesla.Env{ - status: 500, - body: %{ - "error" => "Internal server error", - "message" => "Service crashed" - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -3426,6 +3105,11 @@ defmodule 
LightningWeb.AiAssistantLiveTest do |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant") |> render_submit(assistant: %{content: "Trigger error"}) + Lightning.AiAssistantHelpers.submit_and_simulate_error( + job_id: job_1.id, + error: "Service crashed" + ) + assert_patch(view) # In test environment, error appears immediately @@ -3444,6 +3128,371 @@ defmodule LightningWeb.AiAssistantLiveTest do # User should be able to retry assert has_element?(view, "[phx-click='retry_message']") end + + @tag email: "user@openfn.org" + test "users can retry streaming errors", %{ + conn: conn, + project: project, + workflow: %{jobs: [job_1 | _]} = workflow, + user: user + } do + Lightning.AiAssistantHelpers.stub_online() + skip_disclaimer(user) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project.id}/w/#{workflow.id}?s=#{job_1.id}&m=expand" + ) + + render_async(view) + + # Create session manually without processing the message + {:ok, session} = + Lightning.AiAssistant.create_session(job_1, user, "Test query") + + # Subscribe to the session PubSub topic + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session.id}") + + # Simulate error WITHOUT waiting for message save + Lightning.AiAssistantHelpers.simulate_streaming_error( + session.id, + "Connection timeout" + ) + + # Wait for LiveView to receive the error + assert_receive {:ai_assistant, :streaming_error, _}, 1000 + + render_async(view) + + # Wait for UI and click retry button while it's visible + eventually( + fn -> + if has_element?(view, "[phx-click='retry_streaming']") and + has_element?(view, "[phx-click='cancel_streaming']") do + # UI is visible, click retry button + view + |> element("[phx-click='retry_streaming']") + |> render_click() + + true + else + false + end + end, + true, + 5000, + 50 + ) + + Lightning.AiAssistantHelpers.simulate_streaming_response( + session.id, + "Successfully retried" + ) + + # Wait for streaming to complete + assert_receive {:ai_assistant, :streaming_payload_complete, _}, 1000 + + # Poll until the response appears in the UI + eventually( + fn -> + render_async(view) + html = render(view) + html =~ "Successfully retried" + end, + true, + 5000, + 50 + ) + end + + @tag email: "user@openfn.org" + test "users can cancel streaming errors", %{ + conn: conn, + project: project, + workflow: %{jobs: [job_1 | _]} = workflow, + user: user + } do + Lightning.AiAssistantHelpers.stub_online() + skip_disclaimer(user) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project.id}/w/#{workflow.id}?s=#{job_1.id}&m=expand" + ) + + render_async(view) + + # Create session manually without processing the message + {:ok, session} = + Lightning.AiAssistant.create_session(job_1, user, "Test query") + + # Subscribe to the session PubSub topic + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session.id}") + + # Simulate error WITHOUT waiting for message save + Lightning.AiAssistantHelpers.simulate_streaming_error( + session.id, + "Server unavailable" + ) + + # Wait for LiveView to receive the error + assert_receive {:ai_assistant, :streaming_error, _}, 1000 + + render_async(view) + + # Wait for UI and click cancel button while it's visible + eventually( + fn -> + if has_element?(view, "[phx-click='cancel_streaming']") do + # UI is visible, click cancel button + view + |> element("[phx-click='cancel_streaming']") + |> render_click() + + true + else + false + end + end, + true, + 5000, + 50 + ) + + render_async(view) + + assert has_element?( + view, + "#ai-assistant-form-job-#{job_1.id}-ai-assistant" + ) 
+ end + + @tag email: "user@openfn.org" + test "streaming error UI is rendered correctly", %{ + conn: conn, + project: project, + workflow: %{jobs: [job_1 | _]} = workflow, + user: user + } do + Lightning.AiAssistantHelpers.stub_online() + skip_disclaimer(user) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project.id}/w/#{workflow.id}?s=#{job_1.id}&m=expand" + ) + + render_async(view) + + # Send a message to trigger pending_message loading state + view + |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant", %{ + assistant: %{content: "Test query"} + }) + |> render_submit() + + render_async(view) + + # Get the session that was created + session_id = + receive do + {:ai_assistant, :register_component, %{session_id: sid}} -> sid + after + 2000 -> + # Fallback: find the session manually + {:ok, session} = + Lightning.AiAssistant.create_session(job_1, user, "Test") + + session.id + end + + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session_id}") + + # Simulate streaming error while message is still processing (pending_message loading) + Lightning.AiAssistantHelpers.simulate_streaming_error( + session_id, + "Custom error message" + ) + + # Wait for LiveView to receive the error + assert_receive {:ai_assistant, :streaming_error, _}, 1000 + + render_async(view) + + # Check error UI elements are present (streaming_error_state template should render) + eventually( + fn -> + html = render(view) + + html =~ "hero-exclamation-triangle" and + html =~ "Custom error message" and + html =~ "Retry" and + html =~ "Cancel" and + html =~ "bg-red-50" and + html =~ "text-red-800" + end, + true, + 5000, + 50 + ) + end + + @tag email: "user@openfn.org" + test "retry_streaming when no user message exists", %{ + conn: conn, + project: project, + workflow: %{jobs: [job_1 | _]} = workflow, + user: user + } do + Lightning.AiAssistantHelpers.stub_online() + skip_disclaimer(user) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project.id}/w/#{workflow.id}?s=#{job_1.id}&m=expand" + ) + + render_async(view) + + {:ok, session} = Lightning.AiAssistant.create_session(job_1, user, "Test") + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session.id}") + + # Trigger streaming error + Lightning.AiAssistantHelpers.simulate_streaming_error( + session.id, + "Error" + ) + + assert_receive {:ai_assistant, :streaming_error, _}, 1000 + render_async(view) + + # Delete all user messages to trigger the "else" branch + Lightning.Repo.delete_all( + from m in Lightning.AiAssistant.ChatMessage, + where: m.chat_session_id == ^session.id and m.role == :user + ) + + # Click retry - should handle gracefully (else branch: no user message) + eventually( + fn -> + if has_element?(view, "[phx-click='retry_streaming']") do + view + |> element("[phx-click='retry_streaming']") + |> render_click() + + true + else + false + end + end, + true, + 5000, + 50 + ) + + # Should not crash + refute_receive {:EXIT, _, _}, 100 + end + + @tag email: "user@openfn.org" + test "retry_streaming with retry_message error", %{ + conn: conn, + project: project, + workflow: %{jobs: [job_1 | _]} = workflow, + user: user + } do + Lightning.AiAssistantHelpers.stub_online() + skip_disclaimer(user) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project.id}/w/#{workflow.id}?s=#{job_1.id}&m=expand" + ) + + render_async(view) + + {:ok, session} = Lightning.AiAssistant.create_session(job_1, user, "Test") + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session.id}") + + # Trigger streaming error + 
Lightning.AiAssistantHelpers.simulate_streaming_error(session.id, "Error") + assert_receive {:ai_assistant, :streaming_error, _}, 1000 + render_async(view) + + # Copy and stub retry_message to return error + Mimic.copy(Lightning.AiAssistant) + + Mimic.stub(Lightning.AiAssistant, :retry_message, fn _msg -> + {:error, %Ecto.Changeset{}} + end) + + # Click retry - should show error flash + eventually( + fn -> + if has_element?(view, "[phx-click='retry_streaming']") do + html = + view + |> element("[phx-click='retry_streaming']") + |> render_click() + + html =~ "Failed to retry request" + else + false + end + end, + true, + 5000, + 50 + ) + end + + @tag email: "user@openfn.org" + test "does not unregister component when switching with pending operation", + %{ + conn: conn, + project: project, + workflow: %{jobs: [job_1 | _]} = workflow, + user: user + } do + Lightning.AiAssistantHelpers.stub_online() + skip_disclaimer(user) + + {:ok, view, _html} = + live( + conn, + ~p"/projects/#{project.id}/w/#{workflow.id}?s=#{job_1.id}&m=expand" + ) + + render_async(view) + + # Submit a message but DON'T simulate completion - keep pending + view + |> form("#ai-assistant-form-job-#{job_1.id}-ai-assistant", %{ + assistant: %{content: "Test query"} + }) + |> render_submit() + + # The component should now have a pending operation + # If we were to switch sessions now, handle_unregistration line 135 would match + # and return the socket without unregistering + + # We can verify this by checking that the form is disabled during pending + eventually( + fn -> + html = render(view) + # The form should be disabled while processing + html =~ "phx-disable-with" or html =~ "disabled" + end, + true, + 2000, + 100 + ) + end end defp create_project_for_user(%{user: user}) do diff --git a/test/lightning_web/live/workflow_live/ai_assistant_component_test.exs b/test/lightning_web/live/workflow_live/ai_assistant_component_test.exs deleted file mode 100644 index 9d0f654443..0000000000 --- a/test/lightning_web/live/workflow_live/ai_assistant_component_test.exs +++ /dev/null @@ -1,333 +0,0 @@ -defmodule LightningWeb.WorkflowLive.AiAssistant.ComponentTest do - use ExUnit.Case, async: true - - import Phoenix.LiveViewTest - - alias LightningWeb.Live.AiAssistant.Modes.JobCode - alias LightningWeb.AiAssistant - - describe "formatted_content/1" do - test "renders assistant messages with properly styled links" do - content = """ - Here are some links: - - [Apollo Repo](https://github.com/OpenFn/apollo) - - Plain text - - [Lightning Repo](https://github.com/OpenFn/lightning) - """ - - html = - render_component( - &AiAssistant.Component.formatted_content/1, - id: "formatted-content", - content: content - ) - - parsed_html = Floki.parse_document!(html) - links = Floki.find(parsed_html, "a") - - apollo_link = - Enum.find( - links, - &(Floki.attribute(&1, "href") == ["https://github.com/OpenFn/apollo"]) - ) - - assert apollo_link != nil - - assert Floki.attribute(apollo_link, "class") == [ - "text-primary-400 hover:text-primary-600" - ] - - assert Floki.attribute(apollo_link, "target") == ["_blank"] - - lightning_link = - Enum.find( - links, - &(Floki.attribute(&1, "href") == [ - "https://github.com/OpenFn/lightning" - ]) - ) - - assert lightning_link != nil - - assert Floki.attribute(lightning_link, "class") == [ - "text-primary-400 hover:text-primary-600" - ] - - assert Floki.attribute(lightning_link, "target") == ["_blank"] - - list_items = Floki.find(parsed_html, "li") - - assert Enum.any?(list_items, fn li -> - Floki.text(li) |> 
String.trim() == "Plain text" - end) - end - - test "handles content with invalid markdown links" do - content = """ - Broken [link(test.com - [Another](working.com) - """ - - html = - render_component( - &AiAssistant.Component.formatted_content/1, - id: "formatted-content", - content: content - ) - - parsed_html = Floki.parse_document!(html) - assert Floki.text(parsed_html) =~ "Broken [link(test.com" - - working_link = - Floki.find(parsed_html, "a") - |> Enum.find(&(Floki.attribute(&1, "href") == ["working.com"])) - - assert working_link != nil - - assert Floki.attribute(working_link, "class") == [ - "text-primary-400 hover:text-primary-600" - ] - - assert Floki.attribute(working_link, "target") == ["_blank"] - end - - test "elements without defined styles remain unchanged" do - content = """ - Some code - Preformatted text - [A link](https://weirdopierdo.com) - """ - - html = - render_component(&AiAssistant.Component.formatted_content/1, - id: "formatted-content", - content: content - ) - - parsed_html = Floki.parse_document!(html) - - code = Floki.find(parsed_html, "weirdo") - pre = Floki.find(parsed_html, "pierdo") - assert Floki.attribute(code, "class") == [] - assert Floki.attribute(pre, "class") == [] - - link = - Floki.find(parsed_html, "a") - |> Enum.find( - &(Floki.attribute(&1, "href") == ["https://weirdopierdo.com"]) - ) - - assert link != nil - - assert Floki.attribute(link, "class") == [ - "text-primary-400 hover:text-primary-600" - ] - - assert Floki.attribute(link, "target") == ["_blank"] - end - - test "handles content that cannot be parsed as AST" do - content = """ -
    Unclosed div - Unclosed span - Some text - """ - - html = - render_component(&AiAssistant.Component.formatted_content/1, - id: "formatted-content", - content: content - ) - - parsed_html = Floki.parse_document!(html) - - assert Floki.text(parsed_html) =~ "Unclosed div" - assert Floki.text(parsed_html) =~ "Unclosed span" - assert Floki.text(parsed_html) =~ "Some text" - end - - test "applies styles to elements not defined in the default styles" do - content = """ - Custom styled content - """ - - custom_attributes = %{ - "custom-tag" => %{class: "custom-class text-green-700"} - } - - html = - render_component(&AiAssistant.Component.formatted_content/1, %{ - id: "formatted-content", - content: content, - attributes: custom_attributes - }) - - parsed_html = Floki.parse_document!(html) - custom_tag = Floki.find(parsed_html, "custom-tag") |> hd() - - assert custom_tag != nil - - assert Floki.attribute(custom_tag, "class") == [ - "custom-class text-green-700" - ] - end - end - - describe "error_message/1" do - test "renders string error message" do - assert JobCode.error_message({:error, "Something went wrong"}) == - "Something went wrong" - end - - test "renders changeset error message" do - changeset = %Ecto.Changeset{ - valid?: false, - errors: [content: {"is invalid", []}], - data: %Lightning.AiAssistant.ChatSession{} - } - - assert JobCode.error_message({:error, changeset}) == - "Content is invalid" - end - - test "renders text message from map" do - error_data = %{text: "Specific error message"} - - assert JobCode.error_message({:error, :custom_reason, error_data}) == - "Specific error message" - end - - test "renders default error message for unhandled cases" do - assert JobCode.error_message({:error, :unknown_reason}) == - "An error occurred: unknown_reason. Please try again." - - assert JobCode.error_message(:unexpected_error) == - "Oops! Something went wrong. Please try again." - end - - test "elements without defined styles remain unchanged" do - content = """ - Some code - Preformatted text - [A link](https://weirdopierdo.com) - """ - - html = - render_component(&AiAssistant.Component.formatted_content/1, - id: "formatted-content", - content: content - ) - - parsed_html = Floki.parse_document!(html) - - code = Floki.find(parsed_html, "weirdo") - pre = Floki.find(parsed_html, "pierdo") - - assert Floki.attribute(code, "class") == [] - assert Floki.attribute(pre, "class") == [] - - link = - Floki.find(parsed_html, "a") - |> Enum.find( - &(Floki.attribute(&1, "href") == ["https://weirdopierdo.com"]) - ) - - assert link != nil - - assert Floki.attribute(link, "class") == [ - "text-primary-400 hover:text-primary-600" - ] - - assert Floki.attribute(link, "target") == ["_blank"] - end - - test "handles content that cannot be parsed as AST" do - content = """ -
    Unclosed div - Unclosed span - Some text - """ - - html = - render_component(&AiAssistant.Component.formatted_content/1, - id: "formatted-content", - content: content - ) - - parsed_html = Floki.parse_document!(html) - - text = Floki.text(parsed_html) - assert text =~ "Unclosed div" - assert text =~ "Unclosed span" - assert text =~ "Some text" - end - - test "applies styles to elements not defined in the default styles" do - content = """ - Custom styled content - """ - - custom_attributes = %{ - "custom-tag" => %{class: "custom-class text-green-700"} - } - - html = - render_component(&AiAssistant.Component.formatted_content/1, %{ - id: "formatted-content", - content: content, - attributes: custom_attributes - }) - - parsed_html = Floki.parse_document!(html) - - custom_tag = Floki.find(parsed_html, "custom-tag") |> hd() - - assert custom_tag != nil - - assert Floki.attribute(custom_tag, "class") == [ - "custom-class text-green-700" - ] - end - end - - describe "form validation" do - alias LightningWeb.Live.AiAssistant.Modes.WorkflowTemplate - - test "JobCode Form validates empty content" do - changeset = JobCode.Form.changeset(%{"content" => ""}) - - assert changeset.valid? == false - assert Keyword.has_key?(changeset.errors, :content) - {msg, _opts} = changeset.errors[:content] - assert msg == "Please enter a message before sending" - end - - test "JobCode validate_form includes content validation" do - changeset = JobCode.validate_form(%{"content" => nil}) - - assert changeset.valid? == false - assert Keyword.has_key?(changeset.errors, :content) - end - - test "WorkflowTemplate DefaultForm validates empty content" do - changeset = WorkflowTemplate.DefaultForm.changeset(%{"content" => ""}) - - assert changeset.valid? == false - assert Keyword.has_key?(changeset.errors, :content) - {msg, _opts} = changeset.errors[:content] - assert msg == "Please enter a message before sending" - end - - test "form validation accepts valid content" do - # JobCode - changeset = JobCode.validate_form(%{"content" => "Help me with my code"}) - assert changeset.valid? == true - - # WorkflowTemplate - changeset = - WorkflowTemplate.validate_form(%{"content" => "Create a workflow"}) - - assert changeset.valid? 
== true - end - end -end diff --git a/test/lightning_web/live/workflow_live/new_workflow_component_test.exs b/test/lightning_web/live/workflow_live/new_workflow_component_test.exs index a0e350b33e..0a50a9139f 100644 --- a/test/lightning_web/live/workflow_live/new_workflow_component_test.exs +++ b/test/lightning_web/live/workflow_live/new_workflow_component_test.exs @@ -1,10 +1,12 @@ defmodule LightningWeb.WorkflowLive.NewWorkflowComponentTest do - use LightningWeb.ConnCase, async: true + use LightningWeb.ConnCase, async: false import Phoenix.LiveViewTest import Lightning.Factories import Lightning.WorkflowLive.Helpers + import Mox + setup :set_mox_global setup :register_and_log_in_user setup :create_project_for_current_user diff --git a/test/lightning_web/live/workflow_live/workflow_ai_chat_component_test.exs b/test/lightning_web/live/workflow_live/workflow_ai_chat_component_test.exs index 770311d3e8..2bdc01e73e 100644 --- a/test/lightning_web/live/workflow_live/workflow_ai_chat_component_test.exs +++ b/test/lightning_web/live/workflow_live/workflow_ai_chat_component_test.exs @@ -1,11 +1,12 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do - use LightningWeb.ConnCase, async: true + use LightningWeb.ConnCase, async: false import Phoenix.LiveViewTest import Lightning.Factories import Mox import Ecto.Query + setup :set_mox_global setup :register_and_log_in_user setup :create_project_for_current_user setup :verify_on_exit! @@ -74,28 +75,7 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do condition_type: always """ - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: "http://localhost:4001/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "I'll update your workflow", - "response_yaml" => valid_workflow_yaml, - "usage" => %{}, - "history" => [ - %{"role" => "user", "content" => "Add a fetch data job"}, - %{ - "role" => "assistant", - "content" => "I'll update your workflow" - } - ] - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -104,10 +84,18 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do render_async(view) + # Submit the form - this will create a session, message and start Oban job view |> element("#ai-assistant-form-workflow-ai-chat-panel-assistant") |> render_submit(%{"assistant" => %{"content" => "Add a fetch data job"}}) + # Wait and simulate streaming response + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow.id, + response: "I'll update your workflow", + code: valid_workflow_yaml + ) + assert_push_event(view, "template_selected", %{template: template}) assert template =~ "name: Updated Workflow" assert template =~ "fetch_data" @@ -168,21 +156,7 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do body: | """ - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: "http://localhost:4001/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "Here's your workflow", - "response_yaml" => invalid_yaml, - "usage" => %{} - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -195,6 +169,13 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do |> element("#ai-assistant-form-workflow-ai-chat-panel-assistant") |> render_submit(%{"assistant" => %{"content" => "Create a bad workflow"}}) + # Wait and simulate streaming 
response + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow.id, + response: "Here's your workflow", + code: invalid_yaml + ) + assert_push_event(view, "template_selected", %{template: template}) assert template =~ "Bad Workflow" @@ -235,28 +216,16 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do workflow: workflow, user: user } do - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: "http://localhost:4001/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} + invalid_workflow_yaml = """ + name: "" + jobs: + empty_job: + name: "" + adaptor: "" + body: "" + """ - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "Here's a workflow with validation issues", - "response_yaml" => """ - name: "" - jobs: - empty_job: - name: "" - adaptor: "" - body: "" - """, - "usage" => %{} - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -271,6 +240,13 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do "assistant" => %{"content" => "Create invalid workflow"} }) + # Wait and simulate streaming response + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow.id, + response: "Here's a workflow with validation issues", + code: invalid_workflow_yaml + ) + assert_push_event(view, "template_selected", %{template: _}) render_async(view) @@ -404,39 +380,7 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do target_job: first_job """ - Mox.stub(Lightning.MockConfig, :apollo, fn key -> - case key do - :endpoint -> "http://localhost:3000" - :ai_assistant_api_key -> "api_key" - :timeout -> 5_000 - end - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: "http://localhost:3000/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "Here's a workflow with validation issues", - "response_yaml" => workflow_yaml, - "usage" => %{}, - "history" => [ - %{ - "role" => "user", - "content" => "Create workflow with errors" - }, - %{ - "role" => "assistant", - "content" => "Here's a workflow with validation issues" - } - ] - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -451,6 +395,13 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do assistant: %{content: "Create workflow with errors"} }) + # Simulate streaming response (job runs inline in test mode) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow.id, + response: "Here's a workflow with validation issues", + code: workflow_yaml + ) + render_async(view) assert_push_event(view, "template_selected", %{template: template}) @@ -540,39 +491,7 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do Oban.Testing.with_testing_mode(:manual, fn -> workflow_yaml = "unparseable workflow" - Mox.stub(Lightning.MockConfig, :apollo, fn key -> - case key do - :endpoint -> "http://localhost:3000" - :ai_assistant_api_key -> "api_key" - :timeout -> 5_000 - end - end) - - Mox.stub(Lightning.Tesla.Mock, :call, fn - %{method: :get, url: "http://localhost:3000/"}, _opts -> - {:ok, %Tesla.Env{status: 200}} - - %{method: :post}, _opts -> - {:ok, - %Tesla.Env{ - status: 200, - body: %{ - "response" => "Here's a workflow with validation issues", - "response_yaml" => workflow_yaml, - "usage" => %{}, - "history" => [ - %{ - "role" => "user", - "content" => "Create workflow with errors" - }, - %{ - "role" => "assistant", - "content" => "Here's a 
workflow with validation issues" - } - ] - } - }} - end) + Lightning.AiAssistantHelpers.stub_online() skip_disclaimer(user) @@ -587,6 +506,13 @@ defmodule LightningWeb.WorkflowLive.WorkflowAiChatComponentTest do assistant: %{content: "Create workflow with errors"} }) + # Simulate streaming response (job runs inline in test mode) + Lightning.AiAssistantHelpers.submit_and_simulate_stream( + workflow.id, + response: "Here's a workflow with validation issues", + code: workflow_yaml + ) + render_async(view) assert_push_event(view, "template_selected", %{template: template}) diff --git a/test/support/ai_assistant_helpers.ex b/test/support/ai_assistant_helpers.ex index 45f31b2e08..531c37b07c 100644 --- a/test/support/ai_assistant_helpers.ex +++ b/test/support/ai_assistant_helpers.ex @@ -1,5 +1,7 @@ defmodule Lightning.AiAssistantHelpers do require Logger + import ExUnit.Assertions + import Eventually @apollo_endpoint "http://localhost:4001" @@ -7,6 +9,7 @@ defmodule Lightning.AiAssistantHelpers do Mox.stub(Lightning.MockConfig, :apollo, fn :endpoint -> @apollo_endpoint :ai_assistant_api_key -> "ai_assistant_api_key" + :timeout -> 30_000 end) Mox.stub(Lightning.Tesla.Mock, :call, fn @@ -15,12 +18,235 @@ defmodule Lightning.AiAssistantHelpers do %{method: :post} = request, _opts -> Logger.warning(""" - Unexpected request sent to Apollo: + Unexpected Tesla HTTP request sent to Apollo (streaming should be used): #{inspect(request, pretty: true)} """) {:error, :unknown} end) + + :ok + end + + @doc """ + Waits for a chat session to be created and then simulates a streaming response. + + This is useful in tests where you've submitted a form and need to simulate + the AI response. + + ## Examples + + # For workflow-based assistant: + submit_and_simulate_stream(workflow_id: workflow.id, + response: "I'll create your workflow", + code: valid_yaml + ) + + # For job-based assistant: + submit_and_simulate_stream(job_id: job.id, + response: "Here's your answer" + ) + """ + def submit_and_simulate_stream(opts) when is_list(opts) do + response = Keyword.get(opts, :response, "AI response") + code = Keyword.get(opts, :code) + workflow_id = Keyword.get(opts, :workflow_id) + job_id = Keyword.get(opts, :job_id) + timeout = Keyword.get(opts, :timeout, 1000) + + # Drain the ai_assistant Oban queue to execute jobs synchronously + Oban.drain_queue(Lightning.Oban, queue: :ai_assistant) + + # Get the session based on workflow_id or job_id + session = + cond do + workflow_id -> + # For workflow template mode (new workflows), check project_id first + # as sessions are created with project_id not workflow_id + Lightning.AiAssistant.ChatSession + |> Lightning.Repo.get_by(project_id: workflow_id) || + Lightning.AiAssistant.ChatSession + |> Lightning.Repo.get_by(workflow_id: workflow_id) + + job_id -> + Lightning.AiAssistant.ChatSession + |> Lightning.Repo.get_by(job_id: job_id) + + true -> + nil + end + + if session do + # Subscribe to the session's PubSub topic to wait for completion + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session.id}") + + simulate_streaming_response(session.id, response, code: code) + + # Wait for the streaming_payload_complete message to arrive + assert_receive {:ai_assistant, :streaming_payload_complete, _}, timeout + + # Poll until message status is updated in database (indicates LiveView processed it) + eventually( + fn -> + session + |> Lightning.Repo.reload() + |> Lightning.Repo.preload(:messages, force: true) + |> then(& &1.messages) + |> Enum.any?(fn msg -> msg.status == :success 
end) + end, + true, + timeout, + 10 + ) + end + end + + # Backward compatibility: support old function signature + def submit_and_simulate_stream(workflow_id, opts) + when is_binary(workflow_id) do + submit_and_simulate_stream([workflow_id: workflow_id] ++ opts) + end + + @doc """ + Simulates SSE streaming by broadcasting events directly via PubSub. + + This mocks the behavior of Lightning.ApolloClient.SSEStream without + actually making HTTP requests to Apollo. + + Note: This function broadcasts messages but does not wait for them to be processed. + Use submit_and_simulate_stream/1 which includes synchronization. + """ + def simulate_streaming_response(session_id, content, opts \\ []) do + code = Keyword.get(opts, :code) + + # Broadcast thinking status + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :status_update, + %{ + status: "Analyzing your request...", + session_id: session_id + }} + ) + + # Broadcast content chunks + words = String.split(content, " ") + + words + |> Enum.with_index() + |> Enum.each(fn {word, index} -> + # Add space after each word except the last one + chunk = if index < length(words) - 1, do: word <> " ", else: word + + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_chunk, + %{ + content: chunk, + session_id: session_id + }} + ) + end) + + # Broadcast completion + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_complete, %{session_id: session_id}} + ) + + # Broadcast payload with usage and code + payload_data = %{ + session_id: session_id, + usage: %{"input_tokens" => 100, "output_tokens" => 50}, + meta: %{}, + code: code + } + + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_payload_complete, payload_data} + ) + end + + @doc """ + Simulates a streaming error by broadcasting an error event via PubSub. + + This mocks error scenarios from Apollo without making actual HTTP requests. + + Note: This function broadcasts the error but does not wait for it to be processed. + Use submit_and_simulate_error/1 which includes synchronization. + """ + def simulate_streaming_error(session_id, error_message) do + # Broadcast error + Lightning.broadcast( + "ai_session:#{session_id}", + {:ai_assistant, :streaming_error, + %{ + session_id: session_id, + error: error_message + }} + ) + end + + @doc """ + Waits for a chat session to be created and then simulates a streaming error. + + This is useful in tests where you've submitted a form and need to simulate + an AI error response. 
+ """ + def submit_and_simulate_error(opts) when is_list(opts) do + error_message = + Keyword.get(opts, :error, "An error occurred") + + workflow_id = Keyword.get(opts, :workflow_id) + job_id = Keyword.get(opts, :job_id) + timeout = Keyword.get(opts, :timeout, 1000) + + # Drain the ai_assistant Oban queue to execute jobs synchronously + Oban.drain_queue(Lightning.Oban, queue: :ai_assistant) + + # Get the session based on workflow_id or job_id + session = + cond do + workflow_id -> + # For workflow template mode (new workflows), check project_id first + # as sessions are created with project_id not workflow_id + Lightning.AiAssistant.ChatSession + |> Lightning.Repo.get_by(project_id: workflow_id) || + Lightning.AiAssistant.ChatSession + |> Lightning.Repo.get_by(workflow_id: workflow_id) + + job_id -> + Lightning.AiAssistant.ChatSession + |> Lightning.Repo.get_by(job_id: job_id) + + true -> + nil + end + + if session do + # Subscribe to the session's PubSub topic to wait for the error + Phoenix.PubSub.subscribe(Lightning.PubSub, "ai_session:#{session.id}") + + simulate_streaming_error(session.id, error_message) + + # Wait for the streaming_error message to arrive + assert_receive {:ai_assistant, :streaming_error, _}, timeout + + # Poll until message status is updated in database (indicates LiveView processed it) + eventually( + fn -> + session + |> Lightning.Repo.reload() + |> Lightning.Repo.preload(:messages, force: true) + |> then(& &1.messages) + |> Enum.any?(fn msg -> msg.status == :error end) + end, + true, + timeout, + 10 + ) + end end end diff --git a/test/test_helper.exs b/test/test_helper.exs index 7b747fbd37..8af6c1df16 100644 --- a/test/test_helper.exs +++ b/test/test_helper.exs @@ -9,6 +9,7 @@ Mox.defmock(Lightning.Tesla.Mock, for: Tesla.Adapter) Mimic.copy(:hackney) Mimic.copy(File) +Mimic.copy(Finch) Mimic.copy(IO) Mimic.copy(Lightning.FailureEmail) Mimic.copy(Mix.Tasks.Lightning.InstallSchemas)