3 changes: 2 additions & 1 deletion backend/app/core/agents/agent.py
@@ -1,4 +1,5 @@
from app.core.llm.llm import LLM, simple_chat
from app.config.setting import settings
from app.utils.log_util import logger
from icecream import ic

@@ -11,7 +12,7 @@ def __init__(
self,
task_id: str,
model: LLM,
max_chat_turns: int = 30, # maximum chat turns for a single agent
max_chat_turns: int = settings.MAX_CHAT_TURNS, # maximum chat turns for a single agent
max_memory: int = 12, # maximum memory turns
) -> None:
self.task_id = task_id
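Here the per-agent turn limit moves from a hard-coded `30` to `settings.MAX_CHAT_TURNS`. A minimal sketch of how that setting might be declared in `app/config/setting.py`, assuming the project uses pydantic-settings; only the field name appears in the diff, so the default value and env-var behaviour below are illustrative:

```python
# Hypothetical sketch of backend/app/config/setting.py.
# Only the MAX_CHAT_TURNS name is taken from the diff; everything else is assumed.
from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    # Upper bound on chat turns for a single agent, overridable via the
    # MAX_CHAT_TURNS environment variable.
    MAX_CHAT_TURNS: int = 30


settings = Settings()
```

Centralising the limit this way keeps the coordinator, modeler, coder, and writer agents consistent instead of each one carrying its own literal (previously 30, 30, 30, and 10).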
5 changes: 3 additions & 2 deletions backend/app/core/agents/coder_agent.py
@@ -1,3 +1,4 @@
import json_repair
from app.core.agents.agent import Agent
from app.config.setting import settings
from app.utils.log_util import logger
@@ -8,7 +9,6 @@
from app.schemas.A2A import CoderToWriter
from app.core.prompts import CODER_PROMPT
from app.utils.common_utils import get_current_files
import json
from app.core.prompts import get_reflection_prompt
from app.core.functions import coder_tools

@@ -113,7 +113,7 @@ async def run(self, prompt: str, subtask_title: str) -> CoderToWriter:
),
)

code = json.loads(tool_call.function.arguments)["code"]
code = json_repair.loads(tool_call.function.arguments)["code"]

await redis_manager.publish_message(
self.task_id,
@@ -173,6 +173,7 @@ async def run(self, prompt: str, subtask_title: str) -> CoderToWriter:
"content": text_to_gpt,
}
)
retry_count = 0
# After a successful execution, continue the loop and wait for the next instruction
continue
else:
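The coder agent now parses tool-call arguments with `json_repair.loads` instead of `json.loads`, so slightly malformed JSON from the model no longer aborts the run, and `retry_count` is reset after each successful execution so only consecutive failures count toward the retry limit. A small sketch of the parsing difference, using an invented broken-arguments string (the exact repair behaviour depends on the json_repair version):

```python
import json

import json_repair

# Invented example: tool-call arguments with a trailing comma and no closing brace.
broken_arguments = '{"code": "print(1)",'

try:
    json.loads(broken_arguments)  # the old path: raises immediately
except json.JSONDecodeError as exc:
    print(f"json.loads failed: {exc}")

# json_repair repairs the string first, then parses it.
code = json_repair.loads(broken_arguments)["code"]
print(code)  # -> print(1)
```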
12 changes: 7 additions & 5 deletions backend/app/core/agents/coordinator_agent.py
@@ -1,8 +1,10 @@
import json
import re
from json_repair import repair_json
from app.config.setting import settings
from app.core.agents.agent import Agent
from app.core.llm.llm import LLM
from app.core.prompts import COORDINATOR_PROMPT
import json
import re
from app.utils.log_util import logger
from app.schemas.A2A import CoordinatorToModeler

@@ -12,7 +14,7 @@ def __init__(
self,
task_id: str,
model: LLM,
max_chat_turns: int = 30,
max_chat_turns: int = settings.MAX_CHAT_TURNS,
) -> None:
super().__init__(task_id, model, max_chat_turns)
self.system_prompt = COORDINATOR_PROMPT
@@ -39,8 +41,8 @@ async def run(self, ques_all: str) -> CoordinatorToModeler:

if not json_str:
raise ValueError("The returned JSON string is empty")

questions = json.loads(json_str)
questions = json.loads(repair_json(json_str))
ques_count = questions["ques_count"]
logger.info(f"questions:{questions}")
return CoordinatorToModeler(questions=questions, ques_count=ques_count)
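Unlike `json_repair.loads` in the coder agent, `repair_json` returns a repaired JSON *string*, which is why it is still wrapped in `json.loads` here. A short sketch with an invented model output (the exact repaired string may vary by library version):

```python
import json

from json_repair import repair_json

# Invented example: single quotes and a trailing comma make this invalid JSON.
raw = "{'ques_count': 3,}"

fixed = repair_json(raw)        # returns a valid JSON string, e.g. '{"ques_count": 3}'
questions = json.loads(fixed)   # now parses cleanly
print(questions["ques_count"])  # -> 3
```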
9 changes: 5 additions & 4 deletions backend/app/core/agents/modeler_agent.py
@@ -1,10 +1,11 @@
from app.core.agents.agent import Agent
from app.core.llm.llm import LLM
from app.config.setting import settings
from app.core.prompts import MODELER_PROMPT
from app.schemas.A2A import CoordinatorToModeler, ModelerToCoder
from app.utils.log_util import logger
import json
from icecream import ic
from json_repair import repair_json
import json

# TODO: question-asking tool

@@ -14,7 +15,7 @@ def __init__(
self,
task_id: str,
model: LLM,
max_chat_turns: int = 30, # maximum chat-turn limit
max_chat_turns: int = settings.MAX_CHAT_TURNS, # maximum chat-turn limit
) -> None:
super().__init__(task_id, model, max_chat_turns)
self.system_prompt = MODELER_PROMPT
@@ -42,7 +43,7 @@ async def run(self, coordinator_to_modeler: CoordinatorToModeler) -> ModelerToCo
if not json_str:
raise ValueError("The returned JSON string is empty; please check the input.")
try:
questions_solution = json.loads(json_str)
questions_solution = json.loads(repair_json(json_str))
ic(questions_solution)
return ModelerToCoder(questions_solution=questions_solution)
except json.JSONDecodeError as e:
5 changes: 3 additions & 2 deletions backend/app/core/agents/writer_agent.py
@@ -1,12 +1,13 @@
import json
from app.core.agents.agent import Agent
from app.core.llm.llm import LLM
from app.config.setting import settings
from app.core.prompts import get_writer_prompt
from app.schemas.enums import CompTemplate, FormatOutPut
from app.tools.openalex_scholar import OpenAlexScholar
from app.utils.log_util import logger
from app.services.redis_manager import redis_manager
from app.schemas.response import SystemMessage, WriterMessage
import json
from app.core.functions import writer_tools
from icecream import ic
from app.schemas.A2A import WriterResponse
@@ -21,7 +22,7 @@ def __init__(
self,
task_id: str,
model: LLM,
max_chat_turns: int = 10, # maximum chat-turn limit
max_chat_turns: int = settings.MAX_CHAT_TURNS, # maximum chat-turn limit
comp_template: CompTemplate = CompTemplate,
format_output: FormatOutPut = FormatOutPut.Markdown,
scholar: OpenAlexScholar = None,
5 changes: 3 additions & 2 deletions backend/app/core/prompts.py
@@ -94,7 +94,7 @@
1. Primary: Seaborn (Nature/Science style)
2. Secondary: Matplotlib
3. Always:
- Handle Chinese characters properly
- Handle Chinese characters properly, for example, use the SimHei font.
- Set semantic filenames (e.g., "feature_correlation.png")
- Save figures to working directory
- Include model evaluation printouts
@@ -119,7 +119,8 @@
- Release unused resources immediately


Key improvements:
Key improvements
0. Do not generate or fabricate data. If a file-not-found error occurs, look for the data in the question or in the current working directory.
1. **Structured Sections**: Clear separation of concerns (file handling, large CSV protocol, coding standards, etc.)
2. **Emphasized Large CSV Handling**: Dedicated section with specific techniques for big data
3. **Optimized Readability**: Bulleted lists and code examples for quick scanning
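The prompt now asks the coder agent to handle Chinese characters, for example with the SimHei font. A typical matplotlib setup for that, assuming SimHei is installed on the execution host (seaborn inherits these rcParams):

```python
import matplotlib.pyplot as plt

# Render Chinese labels with SimHei instead of empty boxes, and keep the
# minus sign displayable while a CJK font is active.
plt.rcParams["font.sans-serif"] = ["SimHei"]
plt.rcParams["axes.unicode_minus"] = False

plt.title("特征相关性 (feature correlation)")
plt.savefig("feature_correlation.png")
```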
1 change: 0 additions & 1 deletion backend/app/routers/modeling_router.py
@@ -98,7 +98,6 @@ async def validate_api_key(request: ValidateApiKeyRequest):
await litellm.acompletion(
model=request.model_id,
messages=[{"role": "user", "content": "Hi"}],
max_tokens=1,
api_key=request.api_key,
base_url=request.base_url
if request.base_url != "https://api.openai.com/v1"
1 change: 1 addition & 0 deletions backend/pyproject.toml
@@ -14,6 +14,7 @@ dependencies = [
"httpx[socks]>=0.28.1",
"icecream>=2.1.4",
"ipykernel>=6.29.5",
"json-repair>=0.52.1",
"jupyter-client>=8.6.3",
"litellm>=1.69.0",
"loguru>=0.7.3",