745 changes: 745 additions & 0 deletions demo_agent_Gigachat.ipynb

Large diffs are not rendered by default.

73 changes: 51 additions & 22 deletions sgr_deep_research/core/agents/sgr_agent.py
@@ -1,6 +1,6 @@
 from typing import Type
 
-from openai import AsyncOpenAI
+from openai import AsyncOpenAI, pydantic_function_tool
 
 from sgr_deep_research.core.agent_definition import ExecutionConfig, LLMConfig, PromptsConfig
 from sgr_deep_research.core.base_agent import BaseAgent
@@ -58,40 +58,64 @@ async def _prepare_tools(self) -> Type[NextStepToolStub]:
         return NextStepToolsBuilder.build_NextStepTools(list(tools))

     async def _reasoning_phase(self) -> NextStepToolStub:
-        async with self.openai_client.chat.completions.stream(
+        # GigaChat/Legacy path using functions to simulate Structured Outputs
+        next_step_cls = await self._prepare_tools()
+        # Create a function definition from the model
+        tool_def = pydantic_function_tool(next_step_cls, name="plan_next_step", description="Plan the next step and select a tool")
+        functions = [tool_def["function"]]
+
+        reasoning = None
+
+        messages = await self._prepare_context()
+
+        completion = await self.openai_client.chat.completions.create(
             model=self.llm_config.model,
-            response_format=await self._prepare_tools(),
-            messages=await self._prepare_context(),
+            messages=messages,
             max_tokens=self.llm_config.max_tokens,
             temperature=self.llm_config.temperature,
-        ) as stream:
-            async for event in stream:
-                if event.type == "chunk":
-                    self.streaming_generator.add_chunk(event.chunk)
-            reasoning: NextStepToolStub = (await stream.get_final_completion()).choices[0].message.parsed  # type: ignore
-        # we are not fully sure if it should be in conversation or not. Looks like not necessary data
-        # self.conversation.append({"role": "assistant", "content": reasoning.model_dump_json(exclude={"function"})})
+            functions=functions,
+            function_call={"name": "plan_next_step"},
+            stream=False
+        )
+        self._accumulate_tokens(getattr(completion, "usage", None))
+
+        message = completion.choices[0].message
+        tool_args_str = None
+
+        if message.function_call and message.function_call.name == "plan_next_step":
+            tool_args_str = message.function_call.arguments
+        elif message.tool_calls:
+            for tc in message.tool_calls:
+                if tc.function.name == "plan_next_step":
+                    tool_args_str = tc.function.arguments
+                    break
+
+        if tool_args_str:
+            reasoning = next_step_cls.model_validate_json(tool_args_str)
+        else:
+            error_msg = f"Model did not call plan_next_step. Content: {message.content}"
+            raise ValueError(f"Model failed to generate structured output. Error: {error_msg}")
+
+        # We do NOT append reasoning to conversation for SGRAgent as it is an internal thought process
+        # that resolves to a tool call in the next step, or it might be appended if desired.
+        # The original SGRAgent didn't seem to append it.
         self._log_reasoning(reasoning)
         return reasoning

     async def _select_action_phase(self, reasoning: NextStepToolStub) -> BaseTool:
         tool = reasoning.function
         if not isinstance(tool, BaseTool):
             raise ValueError("Selected tool is not a valid BaseTool instance")
+
+        # Use legacy function_call format for history
         self.conversation.append(
             {
                 "role": "assistant",
                 "content": reasoning.remaining_steps[0] if reasoning.remaining_steps else "Completing",
-                "tool_calls": [
-                    {
-                        "type": "function",
-                        "id": f"{self._context.iteration}-action",
-                        "function": {
-                            "name": tool.tool_name,
-                            "arguments": tool.model_dump_json(),
-                        },
-                    }
-                ],
+                "function_call": {
+                    "name": tool.tool_name,
+                    "arguments": tool.model_dump(),
+                }
             }
         )
         self.streaming_generator.add_tool_call(
@@ -101,8 +125,13 @@ async def _select_action_phase(self, reasoning: NextStepToolStub) -> BaseTool:

     async def _action_phase(self, tool: BaseTool) -> str:
         result = await tool(self._context)
+        # Use legacy function role for history
         self.conversation.append(
-            {"role": "tool", "content": result, "tool_call_id": f"{self._context.iteration}-action"}
+            {
+                "role": "function",
+                "name": tool.tool_name,
+                "content": result
+            }
         )
         self.streaming_generator.add_chunk_from_str(f"{result}\n")
         self._log_tool_execution(tool, result)
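The new `_reasoning_phase` above replaces the Structured Outputs stream with a plain `create()` call that forces the legacy function-calling path: build a function schema with `pydantic_function_tool`, force it via the deprecated `functions`/`function_call` parameters, and validate the returned arguments back into the Pydantic model. A minimal, self-contained sketch of that pattern follows; the `base_url`, `api_key`, model name, and the `NextStep` schema are placeholders (not taken from this PR) standing in for the GigaChat endpoint and the `NextStepToolStub` built by `_prepare_tools()`, and it assumes the endpoint honors the legacy function-calling parameters the same way the diff does.

import asyncio

from openai import AsyncOpenAI, pydantic_function_tool
from pydantic import BaseModel, Field


class NextStep(BaseModel):
    """Placeholder schema standing in for the NextStepToolStub built by _prepare_tools()."""

    reasoning: str = Field(description="Why this step comes next")
    remaining_steps: list[str] = Field(default_factory=list)


async def plan_next_step(client: AsyncOpenAI, model: str, messages: list[dict]) -> NextStep:
    # pydantic_function_tool() returns {"type": "function", "function": {...}};
    # the legacy `functions` parameter only wants the inner "function" dict.
    tool_def = pydantic_function_tool(
        NextStep, name="plan_next_step", description="Plan the next step and select a tool"
    )
    completion = await client.chat.completions.create(
        model=model,
        messages=messages,
        functions=[tool_def["function"]],
        function_call={"name": "plan_next_step"},  # force this exact function
    )
    message = completion.choices[0].message
    if not message.function_call:
        raise ValueError(f"Model did not call plan_next_step. Content: {message.content}")
    # Arguments arrive as a JSON string; validating them re-creates the schema instance.
    return NextStep.model_validate_json(message.function_call.arguments)


async def main() -> None:
    # Placeholder endpoint and model name, not taken from the PR.
    client = AsyncOpenAI(base_url="https://example.invalid/v1", api_key="dummy-key")
    step = await plan_next_step(
        client, "GigaChat-Pro", [{"role": "user", "content": "Research SGR agents"}]
    )
    print(step.model_dump_json(indent=2))


if __name__ == "__main__":
    asyncio.run(main())

Forcing `function_call={"name": ...}` is what makes this behave like Structured Outputs: the model has no choice but to emit arguments for that function, which `model_validate_json` then checks against the schema.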
90 changes: 52 additions & 38 deletions sgr_deep_research/core/agents/sgr_so_tool_calling_agent.py
@@ -1,12 +1,11 @@
 from typing import Type
 from warnings import warn
 
-from openai import AsyncOpenAI
+from openai import AsyncOpenAI, pydantic_function_tool
 
 from sgr_deep_research.core.agent_definition import ExecutionConfig, LLMConfig, PromptsConfig
-from sgr_deep_research.core.agents.sgr_tool_calling_agent import SGRToolCallingAgent
-from sgr_deep_research.core.base_tool import BaseTool
-from sgr_deep_research.core.tools import ReasoningTool
+from .sgr_tool_calling_agent import SGRToolCallingAgent
+from sgr_deep_research.core.tools import BaseTool, ReasoningTool


 class SGRSOToolCallingAgent(SGRToolCallingAgent):
@@ -39,50 +38,65 @@ def __init__(
         )

     async def _reasoning_phase(self) -> ReasoningTool:
-        async with self.openai_client.chat.completions.stream(
-            model=self.llm_config.model,
-            messages=await self._prepare_context(),
-            max_tokens=self.llm_config.max_tokens,
-            temperature=self.llm_config.temperature,
-            tools=await self._prepare_tools(),
-            tool_choice={"type": "function", "function": {"name": ReasoningTool.tool_name}},
-        ) as stream:
-            async for event in stream:
-                if event.type == "chunk":
-                    self.streaming_generator.add_chunk(event.chunk)
-            reasoning: ReasoningTool = (  # noqa
-                (await stream.get_final_completion()).choices[0].message.tool_calls[0].function.parsed_arguments  #
-            )
-        async with self.openai_client.chat.completions.stream(
+        # GigaChat/Legacy implementation
+        # Since GigaChat doesn't support response_format with Pydantic models,
+        # we simulate Structured Output by forcing a function call to ReasoningTool.
+
+        # Create function definition for ReasoningTool
+        tool_def = pydantic_function_tool(ReasoningTool, name=ReasoningTool.tool_name, description=ReasoningTool.description)
+        functions = [tool_def["function"]]
+
+        reasoning = None
+
+        messages = await self._prepare_context()
+
+        completion = await self.openai_client.chat.completions.create(
             model=self.llm_config.model,
-            response_format=ReasoningTool,
-            messages=await self._prepare_context(),
+            messages=messages,
             max_tokens=self.llm_config.max_tokens,
             temperature=self.llm_config.temperature,
-        ) as stream:
-            async for event in stream:
-                if event.type == "chunk":
-                    self.streaming_generator.add_chunk(event.chunk)
-            reasoning: ReasoningTool = (await stream.get_final_completion()).choices[0].message.parsed
+            functions=functions,
+            function_call={"name": ReasoningTool.tool_name},  # Force specific function
+            stream=False
+        )
+        self._accumulate_tokens(getattr(completion, "usage", None))
+
+        message = completion.choices[0].message
+        tool_args_str = None
+
+        if message.function_call and message.function_call.name == ReasoningTool.tool_name:
+            tool_args_str = message.function_call.arguments
+        elif message.tool_calls:
+            for tc in message.tool_calls:
+                if tc.function.name == ReasoningTool.tool_name:
+                    tool_args_str = tc.function.arguments
+                    break
+
+        if tool_args_str:
+            reasoning = ReasoningTool.model_validate_json(tool_args_str)
+        else:
+            error_msg = f"Model did not call {ReasoningTool.tool_name}. Content: {message.content}"
+            raise ValueError(f"Model failed to select ReasoningTool. Error: {error_msg}")
+
         tool_call_result = await reasoning(self._context)
+
+        # Use legacy function_call format for history
         self.conversation.append(
             {
                 "role": "assistant",
-                "content": None,
-                "tool_calls": [
-                    {
-                        "type": "function",
-                        "id": f"{self._context.iteration}-reasoning",
-                        "function": {
-                            "name": reasoning.tool_name,
-                            "arguments": "{}",
-                        },
-                    }
-                ],
+                "content": "",
+                "function_call": {
+                    "name": reasoning.tool_name,
+                    "arguments": {},  # Match original which passed empty dict
+                }
             }
         )
         self.conversation.append(
-            {"role": "tool", "content": tool_call_result, "tool_call_id": f"{self._context.iteration}-reasoning"}
+            {
+                "role": "function",
+                "name": reasoning.tool_name,
+                "content": tool_call_result
+            }
         )
         self._log_reasoning(reasoning)
         return reasoning
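Both files also rewrite how the exchange is recorded: assistant turns carry the deprecated `function_call` field instead of a `tool_calls` list, and tool output is stored under role `"function"` keyed by name rather than role `"tool"` with a `tool_call_id`. The small sketch below shows one round trip of that legacy message shape; the tool name, arguments, and result are invented for illustration, and `arguments` is serialized to a JSON string here (the conventional legacy form), whereas the diff above passes a plain dict.

import json

# Hypothetical values; the real agents fill these from the selected BaseTool.
tool_name = "web_search"
tool_args = {"query": "schema-guided reasoning"}
tool_result = "Found 3 relevant sources."

conversation: list[dict] = [
    {"role": "user", "content": "Research schema-guided reasoning agents."},
    # Assistant turn recorded with the deprecated function_call field
    # instead of the newer tool_calls list.
    {
        "role": "assistant",
        "content": "",
        "function_call": {"name": tool_name, "arguments": json.dumps(tool_args)},
    },
    # Tool output goes back under role "function", keyed by name,
    # not under role "tool" with a tool_call_id.
    {"role": "function", "name": tool_name, "content": tool_result},
]

# The next chat.completions.create() call would receive this list as `messages`.
print(json.dumps(conversation, indent=2, ensure_ascii=False))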