Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 2 additions & 10 deletions lib/crewai/src/crewai/agent/core.py
Original file line number Diff line number Diff line change
Expand Up @@ -1053,11 +1053,7 @@ def create_agent_executor(
respect_context_window=self.respect_context_window,
request_within_rpm_limit=rpm_limit_fn,
callbacks=[TokenCalcHandler(self._token_process)],
response_model=(
task.response_model or task.output_pydantic or task.output_json
)
if task
else None,
response_model=task.response_model if task else None,
)

def _update_executor_parameters(
Expand Down Expand Up @@ -1093,11 +1089,7 @@ def _update_executor_parameters(
self.agent_executor.stop = stop_words
self.agent_executor.tools_names = get_tool_names(tools)
self.agent_executor.tools_description = render_text_description_and_args(tools)
self.agent_executor.response_model = (
(task.response_model or task.output_pydantic or task.output_json)
if task
else None
)
self.agent_executor.response_model = task.response_model if task else None

self.agent_executor.tools_handler = self.tools_handler
self.agent_executor.request_within_rpm_limit = rpm_limit_fn
Expand Down
47 changes: 46 additions & 1 deletion lib/crewai/tests/agents/test_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@
from unittest import mock
from unittest.mock import MagicMock, patch

import pytest
from pydantic import BaseModel

from crewai.agents.crew_agent_executor import AgentFinish, CrewAgentExecutor
from crewai.cli.constants import DEFAULT_LLM_MODEL
from crewai.events.event_bus import crewai_event_bus
Expand All @@ -19,7 +22,6 @@
from crewai.tools.tool_calling import InstructorToolCalling
from crewai.tools.tool_usage import ToolUsage
from crewai.utilities.errors import AgentRepositoryError
import pytest

from crewai import Agent, Crew, Task
from crewai.agents.cache import CacheHandler
Expand Down Expand Up @@ -799,6 +801,49 @@ def test_using_system_prompt():
assert agent.agent_executor.prompt.get("system")


def test_output_models_do_not_become_executor_response_model():
    """output_pydantic/json should be post-processed after the tool loop."""

    class ScoreOutput(BaseModel):
        score: int

    class NativeOutput(BaseModel):
        answer: str

    agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )

    # Each case pairs Task kwargs with the response_model the executor should
    # carry afterwards: only an explicit response_model flows through, while
    # output_pydantic / output_json must leave the executor's model as None.
    cases = [
        (
            {
                "description": "Return a score",
                "expected_output": "A score",
                "output_pydantic": ScoreOutput,
            },
            None,
        ),
        (
            {
                "description": "Return a native structured answer",
                "expected_output": "A structured answer",
                "response_model": NativeOutput,
            },
            NativeOutput,
        ),
        (
            {
                "description": "Return JSON",
                "expected_output": "A JSON score",
                "output_json": ScoreOutput,
            },
            None,
        ),
    ]

    for task_kwargs, expected_model in cases:
        task = Task(agent=agent, **task_kwargs)
        agent.create_agent_executor(task=task)
        assert agent.agent_executor.response_model is expected_model


def test_system_and_prompt_template():
agent = Agent(
role="{topic} specialist",
Expand Down