Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 32 additions & 7 deletions backend/app/api/routes/responses.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,12 +102,27 @@ def get_file_search_results(response):

def get_additional_data(request: dict) -> dict:
    """Extract additional (pass-through) data from a request dict.

    The route accepts two request shapes, and which keys count as API
    parameters (to be excluded) depends on the shape:

    - async request (ResponsesAPIRequest): identified by the presence
      of ``assistant_id``; its parameter keys are excluded.
    - sync request (ResponsesSyncAPIRequest): any dict without
      ``assistant_id``; its parameter keys are excluded instead.

    Everything not excluded is returned unchanged, e.g. for use as
    response metadata.

    NOTE(review): the key sets are hard-coded because this route is
    specific to the OpenAI Responses API and these keys are specific to
    that API — confirm if the route ever becomes generic.
    """
    # Keys to exclude for async request (ResponsesAPIRequest)
    async_exclude_keys = {"assistant_id", "callback_url", "response_id", "question"}
    # Keys to exclude for sync request (ResponsesSyncAPIRequest)
    sync_exclude_keys = {
        "model",
        "instructions",
        "vector_store_ids",
        "max_num_results",
        "temperature",
        "response_id",
        "question",
    }

    # Determine which keys to exclude based on the request structure:
    # only the async shape carries "assistant_id".
    if "assistant_id" in request:
        exclude_keys = async_exclude_keys
    else:
        exclude_keys = sync_exclude_keys

    return {k: v for k, v in request.items() if k not in exclude_keys}


def process_response(
request: ResponsesAPIRequest,
Expand Down Expand Up @@ -249,7 +264,11 @@ def process_response(
exc_info=True,
)
tracer.log_error(error_message, response_id=request.response_id)
callback_response = ResponsesAPIResponse.failure_response(error=error_message)

request_dict = request.model_dump()
callback_response = ResponsesAPIResponse.failure_response(
error=error_message, metadata=get_additional_data(request_dict)
)

tracer.flush()

Expand Down Expand Up @@ -360,11 +379,13 @@ async def responses_sync(
project_id=project_id,
)
if not credentials or "api_key" not in credentials:
request_dict = request.model_dump()
logger.error(
f"[response_sync] OpenAI API key not configured for org_id={organization_id}, project_id={project_id}"
)
return APIResponse.failure_response(
error="OpenAI API key not configured for this organization."
error="OpenAI API key not configured for this organization.",
metadata=get_additional_data(request_dict),
)

client = OpenAI(api_key=credentials["api_key"])
Expand Down Expand Up @@ -457,4 +478,8 @@ async def responses_sync(
)
tracer.log_error(error_message, response_id=request.response_id)
tracer.flush()
return ResponsesAPIResponse.failure_response(error=error_message)

request_dict = request.model_dump()
return ResponsesAPIResponse.failure_response(
error=error_message, metadata=get_additional_data(request_dict)
)
6 changes: 4 additions & 2 deletions backend/app/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,13 +37,15 @@ def success_response(
return cls(success=True, data=data, error=None, metadata=metadata)

@classmethod
def failure_response(
    cls, error: str | list, metadata: Optional[Dict[str, Any]] = None
) -> "APIResponse[None]":
    """Build a failed APIResponse.

    Args:
        error: Either a message string, or a list of validation-error
            dicts (each with ``loc`` and ``msg`` keys) that are joined
            into one newline-separated message.
        metadata: Optional extra context to attach to the response
            (e.g. the request's additional pass-through data). Defaults
            to None, preserving the previous call signature.

    Returns:
        An APIResponse with ``success=False``, ``data=None``, the
        normalized error message, and the given metadata.
    """
    if isinstance(error, list):  # to handle cases when error is a list of errors
        error_message = "\n".join([f"{err['loc']}: {err['msg']}" for err in error])
    else:
        error_message = error

    return cls(success=False, data=None, error=error_message, metadata=metadata)


@dataclass
Expand Down