From d343cf8c5efb62439632be9466fbf050b221f5be Mon Sep 17 00:00:00 2001
From: letusfly85
Date: Tue, 30 Dec 2025 15:51:43 +0900
Subject: [PATCH] fix(http-agent): extract content from OpenAI-compatible API responses

HTTPAgent was returning the entire API response JSON as a string
instead of extracting the actual message content. This caused agent
validation failures when using OpenAI-compatible servers like vLLM.

Changes:
- Add detection for the OpenAI response format (checks for the 'choices' field)
- Extract content from choices[0].message.content when available
- Maintain backward compatibility by falling back to return_format

This fix enables AgentBench to work correctly with vLLM and other
OpenAI-compatible inference servers.
---
 src/client/agents/http_agent.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/src/client/agents/http_agent.py b/src/client/agents/http_agent.py
index 677f668c..d21f1c92 100644
--- a/src/client/agents/http_agent.py
+++ b/src/client/agents/http_agent.py
@@ -210,6 +210,15 @@ def inference(self, history: List[dict]) -> str:
                 pass
             else:
                 resp = resp.json()
+
+                # Extract content from OpenAI-compatible API response (vLLM)
+                if isinstance(resp, dict) and "choices" in resp and len(resp["choices"]) > 0:
+                    message = resp["choices"][0].get("message", {})
+                    content = message.get("content", "")
+                    if content:
+                        return content
+
+                # Fallback to return_format if not OpenAI format
                 return self.return_format.format(response=resp)
             time.sleep(_ + 2)
         raise Exception("Failed.")
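
Note: for reviewers, a minimal standalone sketch of the extraction this patch
adds, run against an illustrative chat-completion payload in the
OpenAI-compatible shape that servers like vLLM return. The sample payload
values are made up for illustration, not captured from a real server.

    # Sketch of the new extraction logic, outside the retry loop, so the
    # behavior can be checked in isolation. Payload values are illustrative.
    resp = {
        "id": "chatcmpl-123",
        "object": "chat.completion",
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": "Hello!"},
                "finish_reason": "stop",
            }
        ],
    }

    if isinstance(resp, dict) and "choices" in resp and len(resp["choices"]) > 0:
        # Same guarded lookups as the patch: missing keys yield "" rather
        # than raising, so non-conforming payloads fall through.
        message = resp["choices"][0].get("message", {})
        content = message.get("content", "")
        if content:
            print(content)  # -> "Hello!" (the message text, not the whole JSON)

Payloads without a non-empty "choices" list (or with an empty content string)
skip the early return and still hit the existing return_format path, which is
what keeps previously working backends unaffected.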