diff --git a/lib/crewai/src/crewai/agent/core.py b/lib/crewai/src/crewai/agent/core.py
index 10ebfd38c1..c55bbfca17 100644
--- a/lib/crewai/src/crewai/agent/core.py
+++ b/lib/crewai/src/crewai/agent/core.py
@@ -1053,11 +1053,7 @@ def create_agent_executor(
             respect_context_window=self.respect_context_window,
             request_within_rpm_limit=rpm_limit_fn,
             callbacks=[TokenCalcHandler(self._token_process)],
-            response_model=(
-                task.response_model or task.output_pydantic or task.output_json
-            )
-            if task
-            else None,
+            response_model=task.response_model if task else None,
         )
 
     def _update_executor_parameters(
@@ -1093,11 +1089,7 @@ def _update_executor_parameters(
         self.agent_executor.stop = stop_words
         self.agent_executor.tools_names = get_tool_names(tools)
         self.agent_executor.tools_description = render_text_description_and_args(tools)
-        self.agent_executor.response_model = (
-            (task.response_model or task.output_pydantic or task.output_json)
-            if task
-            else None
-        )
+        self.agent_executor.response_model = task.response_model if task else None
         self.agent_executor.tools_handler = self.tools_handler
         self.agent_executor.request_within_rpm_limit = rpm_limit_fn
 
diff --git a/lib/crewai/tests/agents/test_agent.py b/lib/crewai/tests/agents/test_agent.py
index f6101a9e06..3331069cf0 100644
--- a/lib/crewai/tests/agents/test_agent.py
+++ b/lib/crewai/tests/agents/test_agent.py
@@ -5,6 +5,9 @@
 from unittest import mock
 from unittest.mock import MagicMock, patch
 
+import pytest
+from pydantic import BaseModel
+
 from crewai.agents.crew_agent_executor import AgentFinish, CrewAgentExecutor
 from crewai.cli.constants import DEFAULT_LLM_MODEL
 from crewai.events.event_bus import crewai_event_bus
@@ -19,7 +22,6 @@
 from crewai.tools.tool_calling import InstructorToolCalling
 from crewai.tools.tool_usage import ToolUsage
 from crewai.utilities.errors import AgentRepositoryError
-import pytest
 
 from crewai import Agent, Crew, Task
 from crewai.agents.cache import CacheHandler
@@ -799,6 +801,49 @@ def test_using_system_prompt():
     assert agent.agent_executor.prompt.get("system")
 
 
+def test_output_models_do_not_become_executor_response_model():
+    """output_pydantic/json should be post-processed after the tool loop."""
+
+    class ScoreOutput(BaseModel):
+        score: int
+
+    class NativeOutput(BaseModel):
+        answer: str
+
+    agent = Agent(
+        role="test role",
+        goal="test goal",
+        backstory="test backstory",
+    )
+
+    pydantic_task = Task(
+        description="Return a score",
+        expected_output="A score",
+        output_pydantic=ScoreOutput,
+        agent=agent,
+    )
+    agent.create_agent_executor(task=pydantic_task)
+    assert agent.agent_executor.response_model is None
+
+    native_task = Task(
+        description="Return a native structured answer",
+        expected_output="A structured answer",
+        response_model=NativeOutput,
+        agent=agent,
+    )
+    agent.create_agent_executor(task=native_task)
+    assert agent.agent_executor.response_model is NativeOutput
+
+    json_task = Task(
+        description="Return JSON",
+        expected_output="A JSON score",
+        output_json=ScoreOutput,
+        agent=agent,
+    )
+    agent.create_agent_executor(task=json_task)
+    assert agent.agent_executor.response_model is None
+
+
 def test_system_and_prompt_template():
     agent = Agent(
         role="{topic} specialist",