diff --git a/src/uipath/_cli/_chat/_bridge.py b/src/uipath/_cli/_chat/_bridge.py
index 2dfb95c58..37eb7f529 100644
--- a/src/uipath/_cli/_chat/_bridge.py
+++ b/src/uipath/_cli/_chat/_bridge.py
@@ -1,6 +1,7 @@
"""Chat bridge implementations for conversational agents."""
import asyncio
+import json
import logging
import os
import uuid
@@ -57,6 +58,10 @@ def __init__(
self._client: AsyncClient | None = None
self._connected_event = asyncio.Event()
+ # Set CAS_WEBSOCKET_DISABLED when using the debugger to prevent websocket errors from
+ # interrupting the debugging session. Events will be logged instead of being sent.
+ self._websocket_disabled = os.environ.get("CAS_WEBSOCKET_DISABLED") == "true"
+
async def connect(self, timeout: float = 10.0) -> None:
"""Establish WebSocket connection to the server.
@@ -89,34 +94,39 @@ async def connect(self, timeout: float = 10.0) -> None:
self._connected_event.clear()
- try:
- # Attempt to connect with timeout
- await asyncio.wait_for(
- self._client.connect(
- url=self.websocket_url,
- socketio_path=self.websocket_path,
- headers=self.headers,
- auth=self.auth,
- transports=["websocket"],
- ),
- timeout=timeout,
+ if self._websocket_disabled:
+ logger.warning(
+ "SocketIOChatBridge is in debug mode. Not connecting websocket."
)
+ else:
+ try:
+ # Attempt to connect with timeout
+ await asyncio.wait_for(
+ self._client.connect(
+ url=self.websocket_url,
+ socketio_path=self.websocket_path,
+ headers=self.headers,
+ auth=self.auth,
+ transports=["websocket"],
+ ),
+ timeout=timeout,
+ )
- await asyncio.wait_for(self._connected_event.wait(), timeout=timeout)
+ await asyncio.wait_for(self._connected_event.wait(), timeout=timeout)
- except asyncio.TimeoutError as e:
- error_message = (
- f"Failed to connect to WebSocket server within {timeout}s timeout"
- )
- logger.error(error_message)
- await self._cleanup_client()
- raise RuntimeError(error_message) from e
+ except asyncio.TimeoutError as e:
+ error_message = (
+ f"Failed to connect to WebSocket server within {timeout}s timeout"
+ )
+ logger.error(error_message)
+ await self._cleanup_client()
+ raise RuntimeError(error_message) from e
- except Exception as e:
- error_message = f"Failed to connect to WebSocket server: {e}"
- logger.error(error_message)
- await self._cleanup_client()
- raise RuntimeError(error_message) from e
+ except Exception as e:
+ error_message = f"Failed to connect to WebSocket server: {e}"
+ logger.error(error_message)
+ await self._cleanup_client()
+ raise RuntimeError(error_message) from e
async def disconnect(self) -> None:
"""Close the WebSocket connection gracefully.
@@ -149,7 +159,7 @@ async def emit_message_event(
if self._client is None:
raise RuntimeError("WebSocket client not connected. Call connect() first.")
- if not self._connected_event.is_set():
+ if not self._connected_event.is_set() and not self._websocket_disabled:
raise RuntimeError("WebSocket client not in connected state")
try:
@@ -166,7 +176,12 @@ async def emit_message_event(
mode="json", exclude_none=True, by_alias=True
)
- await self._client.emit("ConversationEvent", event_data)
+ if self._websocket_disabled:
+ logger.info(
+ f"SocketIOChatBridge is in debug mode. Not sending event: {json.dumps(event_data)}"
+ )
+ else:
+ await self._client.emit("ConversationEvent", event_data)
# Store the current message ID, used for emitting interrupt events.
self._current_message_id = message_event.message_id
@@ -184,7 +199,7 @@ async def emit_exchange_end_event(self) -> None:
if self._client is None:
raise RuntimeError("WebSocket client not connected. Call connect() first.")
- if not self._connected_event.is_set():
+ if not self._connected_event.is_set() and not self._websocket_disabled:
raise RuntimeError("WebSocket client not in connected state")
try:
@@ -200,7 +215,12 @@ async def emit_exchange_end_event(self) -> None:
mode="json", exclude_none=True, by_alias=True
)
- await self._client.emit("ConversationEvent", event_data)
+ if self._websocket_disabled:
+ logger.info(
+ f"SocketIOChatBridge is in debug mode. Not sending event: {json.dumps(event_data)}"
+ )
+ else:
+ await self._client.emit("ConversationEvent", event_data)
except Exception as e:
logger.error(f"Error sending conversation event to WebSocket: {e}")
@@ -230,7 +250,12 @@ async def emit_interrupt_event(self, runtime_result: UiPathRuntimeResult):
event_data = interrupt_event.model_dump(
mode="json", exclude_none=True, by_alias=True
)
- await self._client.emit("ConversationEvent", event_data)
+ if self._websocket_disabled:
+ logger.info(
+ f"SocketIOChatBridge is in debug mode. Not sending event: {json.dumps(event_data)}"
+ )
+ else:
+ await self._client.emit("ConversationEvent", event_data)
except Exception as e:
logger.warning(f"Error sending interrupt event: {e}")
@@ -315,6 +340,13 @@ def get_chat_bridge(
websocket_url = f"wss://{host}?conversationId={context.conversation_id}"
websocket_path = "autopilotforeveryone_/websocket_/socket.io"
+ if os.environ.get("CAS_WEBSOCKET_HOST"):
+ websocket_url = f"ws://{os.environ.get('CAS_WEBSOCKET_HOST')}?conversationId={context.conversation_id}"
+ websocket_path = "/socket.io"
+ logger.warning(
+ f"CAS_WEBSOCKET_HOST is set. Using websocket_url '{websocket_url}{websocket_path}'."
+ )
+
# Build headers from context
headers = {
"Authorization": f"Bearer {os.environ.get('UIPATH_ACCESS_TOKEN', '')}",
diff --git a/src/uipath/agent/models/agent.py b/src/uipath/agent/models/agent.py
index 916f24bcf..b67d95f0c 100644
--- a/src/uipath/agent/models/agent.py
+++ b/src/uipath/agent/models/agent.py
@@ -795,6 +795,15 @@ class AgentDefinition(BaseModel):
validate_by_name=True, validate_by_alias=True, extra="allow"
)
+ @property
+ def is_conversational(self) -> bool:
+        """Checks the metadata.is_conversational property to determine if the agent is conversational."""
+ if hasattr(self, "metadata") and self.metadata:
+ metadata = self.metadata
+ if hasattr(metadata, "is_conversational"):
+ return metadata.is_conversational
+ return False
+
@staticmethod
def _normalize_guardrails(v: Dict[str, Any]) -> None:
guards = v.get("guardrails")
diff --git a/src/uipath/agent/react/__init__.py b/src/uipath/agent/react/__init__.py
index e3f4db781..9474165fb 100644
--- a/src/uipath/agent/react/__init__.py
+++ b/src/uipath/agent/react/__init__.py
@@ -3,6 +3,10 @@
This module includes UiPath ReAct Agent Loop constructs such as prompts, tools
"""
+from .conversational_prompts import (
+ PromptUserSettings,
+ generate_conversational_agent_system_prompt,
+)
from .prompts import AGENT_SYSTEM_PROMPT_TEMPLATE
from .tools import (
END_EXECUTION_TOOL,
@@ -19,4 +23,6 @@
"RAISE_ERROR_TOOL",
"EndExecutionToolSchemaModel",
"RaiseErrorToolSchemaModel",
+ "PromptUserSettings",
+ "generate_conversational_agent_system_prompt",
]
diff --git a/src/uipath/agent/react/conversational_prompts.py b/src/uipath/agent/react/conversational_prompts.py
new file mode 100644
index 000000000..768faee03
--- /dev/null
+++ b/src/uipath/agent/react/conversational_prompts.py
@@ -0,0 +1,301 @@
+"""Conversational agent prompt generation logic."""
+
+import json
+import logging
+from dataclasses import asdict, dataclass
+from datetime import datetime, timezone
+from enum import Enum
+from typing import Optional
+
+from uipath.agent.models.agent import AgentDefinition, LowCodeAgentDefinition
+
+logger = logging.getLogger(__name__)
+
+
+class CitationType(Enum):
+ """Citation type for system prompt generation.
+
+ Some models may have issues wrapping citation tags around text.
+ In those cases, we can prompt the citation tags to be placed after the text instead.
+ We also allow disabling citations entirely, for scenarios such as voice output.
+ """
+
+ NONE = "none"
+ WRAPPED = "wrapped"
+ TRAILING = "trailing"
+
+
+@dataclass
+class PromptUserSettings:
+ """User settings for inclusion in the system prompt."""
+
+ name: Optional[str] = None
+ email: Optional[str] = None
+ role: Optional[str] = None
+ department: Optional[str] = None
+ company: Optional[str] = None
+ country: Optional[str] = None
+ timezone: Optional[str] = None
+
+
+_AGENT_SYSTEM_PROMPT_PREFIX_TEMPLATE = """You are {{CONVERSATIONAL_AGENT_SERVICE_PREFIX_agentName}}.
+The current date is: {{CONVERSATIONAL_AGENT_SERVICE_PREFIX_currentDate}}.
+Understand user goals through conversation and use appropriate tools to fulfill requests.
+
+=====================================================================
+PRECEDENCE HIERARCHY
+=====================================================================
+1. Core System Instructions (highest authority)
+2. Agent System Prompt
+3. Tool definitions and parameter schemas
+4. User instructions and follow-up messages
+
+When conflicts occur, follow the highest-precedence rule above.
+
+=====================================================================
+AGENT SYSTEM PROMPT
+=====================================================================
+{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_systemPrompt}}
+
+{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_attachmentsPrompt}}
+
+{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_userSettingsPrompt}}
+
+=====================================================================
+TOOL USAGE RULES
+=====================================================================
+Parameter Resolution Priority:
+1. Check tool definitions for pre-configured values
+2. Use conversation context
+3. Ask user only if unavailable
+
+Execution:
+- Use tools ONLY with complete, specific data for all required parameters
+- NEVER use placeholders or incomplete information
+- Call independent tools in parallel when possible
+
+On Missing Data:
+- Ask user for specifics before proceeding
+- Never attempt calls with incomplete data
+- On errors: modify parameters or change approach (never retry identical calls)
+
+=====================================================================
+TOOL RESULTS
+=====================================================================
+Tool results contain:
+- status: "success" or "error"
+- data: result payload or exception details
+
+Rules:
+- For "success": check data for actual results
+- For "error": summarize issue and adjust approach
+
+=====================================================================
+CITATION RULES
+=====================================================================
+Citations will be parsed into the user interface.
+
+WHAT TO CITE:
+- Any information drawn from web search results.
+- Any information drawn from Context Grounding documents.
+
+CITATION FORMAT:
+{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationFormatPrompt}}
+
+TOOL RESULT PATTERNS REQUIRING CITATION:
+Tool results containing these fields indicate citable sources:
+- Web results: "url", "title" fields
+- Context Grounding: objects with "reference", "source", "page_number", "content"
+
+SOURCE FORMATS:
+- URLs: {"title":"Page Title","url":"https://example.com"}
+- Context Grounding: {"title":"filename.pdf","reference":"https://ref.url","page_number":1}
+ where title is set to the document source (filename), and reference and page_number
+ are from the tool results
+
+RULES:
+- Minimum 1 source per citation (never empty array)
+- Truncate titles >48 chars
+- Never include citations in tool inputs
+
+{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationExamplePrompt}}
+
+=====================================================================
+EXECUTION CHECKLIST
+=====================================================================
+Before each tool call, verify:
+1. Pre-configured values have been checked
+2. All parameters are complete and specific
+
+If execution cannot proceed:
+- State why
+- Request missing or clarifying information"""
+
+_ATTACHMENTS_TEMPLATE = """=====================================================================
+ATTACHMENTS
+=====================================================================
+- You are capable of working with job attachments. Job attachments are file references.
+- If the user has attached files, they will be in the format of [...] in the user message. Example: [{"ID":"123","Type":"JobAttachment","FullName":"example.json","MimeType":"application/json","Metadata":{"key1":"value1","key2":"value2"}}]
+- You must send only the JobAttachment ID as the parameter values to a tool that accepts job attachments.
+- If the attachment ID is passed and not found, suggest the user to upload the file again."""
+
+_USER_CONTEXT_TEMPLATE = """=====================================================================
+USER CONTEXT
+=====================================================================
+You have the following information about the user:
+```json
+{user_settings_json}
+```"""
+
+_CITATION_FORMAT_WRAPPED = "factual claim here"
+_CITATION_FORMAT_TRAILING = "factual claim here"
+
+_CITATION_EXAMPLE_WRAPPED = """EXAMPLES OF CORRECT USAGE:
+AI adoption is growing
+
+CRITICAL ERRORS TO AVOID:
+text (empty sources)
+Some textpartmore text (spacing)
+ (empty claim)"""
+
+_CITATION_EXAMPLE_TRAILING = """EXAMPLES OF CORRECT USAGE:
+AI adoption is growing
+
+CRITICAL ERRORS TO AVOID:
+text (empty sources)
+Some textpartmore text (content between citation tags)"""
+
+
+def generate_conversational_agent_system_prompt(
+ agent_definition: LowCodeAgentDefinition,
+ user_settings: Optional[PromptUserSettings],
+) -> str:
+ """Generate a system prompt for a conversational agent.
+
+ Args:
+ agent_definition: Conversational agent definition
+ user_settings: Optional user data that is injected into the system prompt.
+
+ Returns:
+ The complete system prompt string
+ """
+ system_message = next(
+ (msg for msg in agent_definition.messages if msg.role == "system"), None
+ )
+ if system_message is None:
+ raise ValueError(
+ "Conversational agent configuration must contain exactly one system message"
+ )
+
+ # Determine citation type based on model
+ citation_type = _get_citation_type(agent_definition)
+
+ # Format date as ISO 8601 (yyyy-MM-ddTHH:mmZ)
+ formatted_date = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%MZ")
+
+ prompt = _AGENT_SYSTEM_PROMPT_PREFIX_TEMPLATE
+ prompt = prompt.replace(
+ "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_agentName}}",
+ agent_definition.name or "Unnamed Agent",
+ )
+ prompt = prompt.replace(
+ "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_currentDate}}",
+ formatted_date,
+ )
+ prompt = prompt.replace(
+ "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_systemPrompt}}",
+ system_message.content,
+ )
+ # Always include attachments prompt
+ prompt = prompt.replace(
+ "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_attachmentsPrompt}}",
+ _ATTACHMENTS_TEMPLATE,
+ )
+ prompt = prompt.replace(
+ "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_userSettingsPrompt}}",
+ _get_user_settings_template(user_settings),
+ )
+ prompt = prompt.replace(
+ "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationFormatPrompt}}",
+ _get_citation_format_prompt(citation_type),
+ )
+ prompt = prompt.replace(
+ "{{CONVERSATIONAL_AGENT_SERVICE_PREFIX_citationExamplePrompt}}",
+ _get_citation_example_prompt(citation_type),
+ )
+
+ return prompt
+
+
+def _get_citation_type(agent_definition: AgentDefinition) -> CitationType:
+ """Determine the citation type based on the agent's model.
+
+ GPT models use trailing citations due to issues with generating
+ wrapped citations around text.
+
+ Args:
+ agent_definition: The agent definition containing settings
+
+ Returns:
+ CitationType.TRAILING for GPT models, CitationType.WRAPPED otherwise
+ """
+ model = agent_definition.settings.model.lower()
+ if "gpt" in model:
+ return CitationType.TRAILING
+ return CitationType.WRAPPED
+
+
+def _get_user_settings_template(
+ user_settings: Optional[PromptUserSettings],
+) -> str:
+ """Get the user settings template section.
+
+ Args:
+ user_settings: User profile information
+
+ Returns:
+ The user context template with JSON or empty string
+ """
+ if user_settings is None:
+ return ""
+
+ # Convert to dict, filtering out None values
+ settings_dict = {k: v for k, v in asdict(user_settings).items() if v is not None}
+
+ if not settings_dict:
+ return ""
+
+ user_settings_json = json.dumps(settings_dict, ensure_ascii=False)
+ return _USER_CONTEXT_TEMPLATE.format(user_settings_json=user_settings_json)
+
+
+def _get_citation_format_prompt(citation_type: CitationType) -> str:
+ """Get the citation format prompt based on citation type.
+
+ Args:
+ citation_type: The type of citation formatting to use
+
+ Returns:
+ The citation format string or empty string for NONE
+ """
+ if citation_type == CitationType.WRAPPED:
+ return _CITATION_FORMAT_WRAPPED
+ elif citation_type == CitationType.TRAILING:
+ return _CITATION_FORMAT_TRAILING
+ return ""
+
+
+def _get_citation_example_prompt(citation_type: CitationType) -> str:
+ """Get the citation example prompt based on citation type.
+
+ Args:
+ citation_type: The type of citation formatting to use
+
+ Returns:
+ The citation examples string or empty string for NONE
+ """
+ if citation_type == CitationType.WRAPPED:
+ return _CITATION_EXAMPLE_WRAPPED
+ elif citation_type == CitationType.TRAILING:
+ return _CITATION_EXAMPLE_TRAILING
+ return ""
diff --git a/src/uipath/agent/react/prompts.py b/src/uipath/agent/react/prompts.py
index 0e806b64c..81ee4e3f6 100644
--- a/src/uipath/agent/react/prompts.py
+++ b/src/uipath/agent/react/prompts.py
@@ -8,7 +8,7 @@
{{systemPrompt}}
-Your adhere strictly to the following rules to ensure accuracy and data validity:
+You adhere strictly to the following rules to ensure accuracy and data validity:
Data Verification and Tool Analysis:
diff --git a/src/uipath/platform/common/interrupt_models.py b/src/uipath/platform/common/interrupt_models.py
index 97f775f01..3c11409b1 100644
--- a/src/uipath/platform/common/interrupt_models.py
+++ b/src/uipath/platform/common/interrupt_models.py
@@ -134,3 +134,7 @@ class WaitDocumentExtraction(BaseModel):
"""Model representing a wait document extraction task creation."""
extraction: StartExtractionResponse
+
+
+class UserMessageWait(BaseModel):
+ """Model representing a wait for a new user input message."""