Skip to content

Commit 9b7f1d4

Browse files
committed
refactor: Convert config comments to English and improve model display
- Remove all Chinese comments from mindmap_ai_config.toml
- Always display both prompt and mindmap model configuration at start
- Show model name when generating each language version
- Improve visibility of which models are being used during generation
1 parent 8396bca commit 9b7f1d4

File tree

2 files changed

+116
-24
lines changed

2 files changed

+116
-24
lines changed

tools/generate_mindmaps_ai.py

Lines changed: 77 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -73,10 +73,53 @@ def load_config(config_path: Path | None = None) -> dict[str, Any]:
7373
return parse_toml_simple(config_path.read_text(encoding="utf-8"))
7474

7575

76+
def get_model_config(config: dict[str, Any], model_type: str = "mindmap") -> dict[str, Any]:
    """
    Get model configuration for a specific type (prompt or mindmap).

    Each setting falls back to the shared ``[model]`` values (``name``,
    ``temperature``, ``max_completion_tokens``) when the type-specific key
    (e.g. ``prompt_model``, ``mindmap_temperature``) is absent.

    Args:
        config: Full configuration dict
        model_type: "prompt" or "mindmap"; any value other than "prompt"
            is treated as "mindmap"

    Returns:
        Dict with model configuration (name, temperature, max_completion_tokens, api_base)
    """
    model_config = config.get("model", {})

    # Both branches read the same trio of keys and differ only in the key
    # prefix, so resolve the prefix once instead of duplicating the lookups.
    prefix = "prompt" if model_type == "prompt" else "mindmap"

    # `or` (rather than a .get default) so an empty-string model name also
    # falls back to the shared "name" setting.
    model_name = model_config.get(f"{prefix}_model") or model_config.get("name", DEFAULT_MODEL)
    temperature = float(
        model_config.get(f"{prefix}_temperature", model_config.get("temperature", 0.7))
    )
    max_tokens = int(
        model_config.get(
            f"{prefix}_max_completion_tokens",
            model_config.get("max_completion_tokens", 8000),
        )
    )

    return {
        "name": model_name,
        "temperature": temperature,
        "max_completion_tokens": max_tokens,
        # Empty string (not None) when no custom endpoint is configured.
        "api_base": model_config.get("api_base", ""),
    }
107+
108+
76109
def get_default_config() -> dict[str, Any]:
77110
"""Return default configuration."""
78111
return {
79-
"model": {"name": DEFAULT_MODEL, "temperature": 0.7, "max_completion_tokens": 8000},
112+
"model": {
113+
"name": DEFAULT_MODEL,
114+
"temperature": 0.7,
115+
"max_completion_tokens": 8000,
116+
"prompt_model": "gpt-4o",
117+
"prompt_temperature": 0.7,
118+
"prompt_max_completion_tokens": 8000,
119+
"mindmap_model": DEFAULT_MODEL,
120+
"mindmap_temperature": 0.7,
121+
"mindmap_max_completion_tokens": 8000,
122+
},
80123
"output": {"directory": "docs/mindmaps", "prefix": "ai_generated"},
81124
"ontology": {
82125
"api_kernels": True, "patterns": True, "algorithms": True,
@@ -669,11 +712,12 @@ def generate_with_openai(
669712
config: dict[str, Any],
670713
) -> str:
671714
"""Call OpenAI API to generate mind map."""
672-
model_config = config.get("model", {})
673-
model = model_config.get("name", DEFAULT_MODEL)
674-
temperature = float(model_config.get("temperature", 0.7))
675-
max_completion_tokens = int(model_config.get("max_completion_tokens", 8000))
676-
api_base = model_config.get("api_base", "")
715+
# Use mindmap model configuration
716+
model_config = get_model_config(config, "mindmap")
717+
model = model_config["name"]
718+
temperature = model_config["temperature"]
719+
max_completion_tokens = model_config["max_completion_tokens"]
720+
api_base = model_config["api_base"]
677721

678722
# Get API key
679723
api_key = get_api_key()
@@ -792,11 +836,12 @@ def optimize_prompt_with_ai(
792836
print("⚠️ OpenAI library not installed. Cannot optimize prompt.")
793837
return existing_system_prompt, existing_user_prompt
794838

795-
model_config = config.get("model", {})
796-
model = model_config.get("name", DEFAULT_MODEL)
797-
temperature = float(model_config.get("temperature", 0.7))
798-
max_completion_tokens = int(model_config.get("max_completion_tokens", 8000))
799-
api_base = model_config.get("api_base", "")
839+
# Use prompt model configuration
840+
model_config = get_model_config(config, "prompt")
841+
model = model_config["name"]
842+
temperature = model_config["temperature"]
843+
max_completion_tokens = model_config["max_completion_tokens"]
844+
api_base = model_config["api_base"]
800845

801846
# Get API key
802847
api_key = get_api_key()
@@ -999,7 +1044,8 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
9991044
elif prompt_action == "optimize":
10001045
# Optimize existing prompt with AI
10011046
if existing_prompt_file:
1002-
print(f"\n🤖 Optimizing existing prompt with AI...")
1047+
prompt_model_config = get_model_config(config, "prompt")
1048+
print(f"\n🤖 Optimizing existing prompt with AI (using {prompt_model_config['name']})...")
10031049
prompt_content = existing_prompt_file.read_text(encoding="utf-8")
10041050

10051051
# Parse existing prompt
@@ -1032,7 +1078,8 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
10321078
print(f"📄 AI-generated prompt saved: {prompt_file}")
10331079
else:
10341080
# First time: Generate base prompt, then optimize with AI
1035-
print(f"\n🤖 Generating prompt with AI...")
1081+
prompt_model_config = get_model_config(config, "prompt")
1082+
print(f"\n🤖 Generating prompt with AI (using {prompt_model_config['name']})...")
10361083
print(" Step 1: Building base prompt from config...")
10371084
base_system_prompt = build_system_prompt(config)
10381085
base_user_prompt = build_user_prompt(
@@ -1050,14 +1097,15 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
10501097
print(f"📄 AI-generated prompt saved: {prompt_file}")
10511098
elif prompt_action == "regenerate_and_optimize":
10521099
# Regenerate from config, then optimize with AI
1100+
prompt_model_config = get_model_config(config, "prompt")
10531101
print("\n📝 Regenerating prompt from config and data...")
10541102
print(" Step 1: Building prompt from config...")
10551103
base_system_prompt = build_system_prompt(config)
10561104
base_user_prompt = build_user_prompt(
10571105
ontology_data, docs_patterns, meta_patterns, problems_data, config
10581106
)
10591107

1060-
print(" Step 2: Optimizing with AI...")
1108+
print(f" Step 2: Optimizing with AI (using {prompt_model_config['name']})...")
10611109
# Let AI optimize the regenerated prompt
10621110
system_prompt, user_prompt = optimize_prompt_with_ai(
10631111
base_system_prompt, base_user_prompt, config
@@ -1082,8 +1130,13 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
10821130
output_config = config.get("output", {})
10831131
output_dir = Path(output_config.get("directory", "docs/mindmaps"))
10841132

1085-
model_name = config.get("model", {}).get("name", DEFAULT_MODEL)
1086-
print(f"\n🤖 Generating with {model_name}...")
1133+
# Show which models are being used
1134+
prompt_model_config = get_model_config(config, "prompt")
1135+
mindmap_model_config = get_model_config(config, "mindmap")
1136+
1137+
print(f"\n🤖 Model Configuration:")
1138+
print(f" 📝 Prompt optimization: {prompt_model_config['name']}")
1139+
print(f" 🗺️ Mind map generation: {mindmap_model_config['name']}")
10871140

10881141
if not HAS_OPENAI:
10891142
print("\n⚠️ OpenAI library not installed.")
@@ -1128,6 +1181,10 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
11281181
for lang in languages:
11291182
print(f"\n🌐 Generating {lang} version...")
11301183

1184+
# Show which model is being used for this generation
1185+
current_mindmap_model = get_model_config(config, "mindmap")
1186+
print(f" 🤖 Using model: {current_mindmap_model['name']}")
1187+
11311188
try:
11321189
# Create language-specific config
11331190
lang_config = config.copy()
@@ -1287,7 +1344,11 @@ def main() -> int:
12871344
if args.style:
12881345
config.setdefault("generation", {})["style"] = args.style
12891346
if args.model:
1347+
# If --model is specified, set both prompt and mindmap models to the same value
1348+
# User can still override individually in config file
12901349
config.setdefault("model", {})["name"] = args.model
1350+
config.setdefault("model", {})["prompt_model"] = args.model
1351+
config.setdefault("model", {})["mindmap_model"] = args.model
12911352

12921353
# Show config if requested
12931354
if args.list_config:

tools/mindmap_ai_config.toml

Lines changed: 39 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -8,20 +8,51 @@
88
# =============================================================================
99

1010
[model]
11-
# LLM model to use
12-
# GPT-5.2 series: gpt-5.2, gpt-5.2-coding, gpt-5.2-instant, gpt-5.2-thinking
13-
# GPT-5.1 series: gpt-5.1, gpt-5.1-codex, gpt-5.1-instant, gpt-5.1-thinking
14-
# GPT-4 series: gpt-4o, gpt-4o-mini, gpt-4-turbo
15-
# Reasoning models: o1, o1-mini, o3-mini
16-
name = "gpt-5.1-codex"
17-
temperature = 0.7
11+
# =============================================================================
12+
# Model Configuration
13+
# =============================================================================
14+
# You can separately configure models for prompt generation/optimization and mind map generation
15+
16+
# -----------------------------------------------------------------------------
17+
# Prompt Generation/Optimization Model
18+
# -----------------------------------------------------------------------------
19+
# This model is used for:
20+
# - Optimizing existing prompts (when selecting [o] Optimize option)
21+
# - First-time prompt generation (when selecting [o] Generate prompt with AI option)
22+
#
23+
# Recommended: Models good at understanding and optimizing text (e.g., GPT-4 series)
24+
prompt_model = "gpt-4o"
25+
prompt_temperature = 0.7
26+
prompt_max_completion_tokens = 8000
27+
28+
# -----------------------------------------------------------------------------
29+
# Mind Map Generation Model
30+
# -----------------------------------------------------------------------------
31+
# This model is used for:
32+
# - Generating final mind map content based on prompts
33+
# - This is the main generation task and consumes more tokens
34+
#
35+
# Recommended: Models good at creative generation and long text output (e.g., GPT-5.1-codex, GPT-5.2)
36+
mindmap_model = "gpt-5.1-codex"
37+
mindmap_temperature = 0.7
1838
# GPT-5.1-codex uses max_completion_tokens (older models use max_tokens)
19-
max_completion_tokens = 10000
39+
mindmap_max_completion_tokens = 10000
2040

41+
# -----------------------------------------------------------------------------
42+
# Common Configuration
43+
# -----------------------------------------------------------------------------
2144
# API Key: Read from OPENAI_API_KEY environment variable at runtime, or interactive input
2245
# API Base URL (optional, for custom endpoint or proxy)
2346
# api_base = "https://api.openai.com/v1"
2447

48+
# -----------------------------------------------------------------------------
49+
# Model Series Notes
50+
# -----------------------------------------------------------------------------
51+
# GPT-5.2 series: gpt-5.2, gpt-5.2-coding, gpt-5.2-instant, gpt-5.2-thinking
52+
# GPT-5.1 series: gpt-5.1, gpt-5.1-codex, gpt-5.1-instant, gpt-5.1-thinking
53+
# GPT-4 series: gpt-4o, gpt-4o-mini, gpt-4-turbo
54+
# Reasoning models: o1, o1-mini, o3-mini
55+
2556
[output]
2657
# Output directory
2758
directory = "docs/mindmaps"

0 commit comments

Comments
 (0)