@@ -73,10 +73,53 @@ def load_config(config_path: Path | None = None) -> dict[str, Any]:
7373 return parse_toml_simple (config_path .read_text (encoding = "utf-8" ))
7474
7575
def get_model_config(config: dict[str, Any], model_type: str = "mindmap") -> dict[str, Any]:
    """
    Get model configuration for a specific type (prompt or mindmap).

    Both types use identical lookup logic: a type-specific key (e.g.
    "prompt_model") overrides the shared key (e.g. "name"), which in turn
    falls back to the module default. The logic is written once, keyed by
    prefix, instead of duplicated per branch.

    Args:
        config: Full configuration dict.
        model_type: "prompt" or "mindmap". Any value other than "prompt"
            selects the mindmap settings (matches the original else-branch).

    Returns:
        Dict with model configuration (name, temperature,
        max_completion_tokens, api_base).
    """
    model_config = config.get("model", {})
    # Anything that is not "prompt" falls through to the mindmap settings.
    prefix = "prompt" if model_type == "prompt" else "mindmap"

    # `or` (not a .get default) so an empty/None type-specific name also
    # falls back to the shared "name" key — same semantics as the original.
    model_name = model_config.get(f"{prefix}_model") or model_config.get("name", DEFAULT_MODEL)
    temperature = float(
        model_config.get(f"{prefix}_temperature", model_config.get("temperature", 0.7))
    )
    max_tokens = int(
        model_config.get(
            f"{prefix}_max_completion_tokens",
            model_config.get("max_completion_tokens", 8000),
        )
    )

    return {
        "name": model_name,
        "temperature": temperature,
        "max_completion_tokens": max_tokens,
        "api_base": model_config.get("api_base", ""),
    }
107+
108+
76109def get_default_config () -> dict [str , Any ]:
77110 """Return default configuration."""
78111 return {
79- "model" : {"name" : DEFAULT_MODEL , "temperature" : 0.7 , "max_completion_tokens" : 8000 },
112+ "model" : {
113+ "name" : DEFAULT_MODEL ,
114+ "temperature" : 0.7 ,
115+ "max_completion_tokens" : 8000 ,
116+ "prompt_model" : "gpt-4o" ,
117+ "prompt_temperature" : 0.7 ,
118+ "prompt_max_completion_tokens" : 8000 ,
119+ "mindmap_model" : DEFAULT_MODEL ,
120+ "mindmap_temperature" : 0.7 ,
121+ "mindmap_max_completion_tokens" : 8000 ,
122+ },
80123 "output" : {"directory" : "docs/mindmaps" , "prefix" : "ai_generated" },
81124 "ontology" : {
82125 "api_kernels" : True , "patterns" : True , "algorithms" : True ,
@@ -669,11 +712,12 @@ def generate_with_openai(
669712 config : dict [str , Any ],
670713) -> str :
671714 """Call OpenAI API to generate mind map."""
672- model_config = config .get ("model" , {})
673- model = model_config .get ("name" , DEFAULT_MODEL )
674- temperature = float (model_config .get ("temperature" , 0.7 ))
675- max_completion_tokens = int (model_config .get ("max_completion_tokens" , 8000 ))
676- api_base = model_config .get ("api_base" , "" )
715+ # Use mindmap model configuration
716+ model_config = get_model_config (config , "mindmap" )
717+ model = model_config ["name" ]
718+ temperature = model_config ["temperature" ]
719+ max_completion_tokens = model_config ["max_completion_tokens" ]
720+ api_base = model_config ["api_base" ]
677721
678722 # Get API key
679723 api_key = get_api_key ()
@@ -792,11 +836,12 @@ def optimize_prompt_with_ai(
792836 print ("⚠️ OpenAI library not installed. Cannot optimize prompt." )
793837 return existing_system_prompt , existing_user_prompt
794838
795- model_config = config .get ("model" , {})
796- model = model_config .get ("name" , DEFAULT_MODEL )
797- temperature = float (model_config .get ("temperature" , 0.7 ))
798- max_completion_tokens = int (model_config .get ("max_completion_tokens" , 8000 ))
799- api_base = model_config .get ("api_base" , "" )
839+ # Use prompt model configuration
840+ model_config = get_model_config (config , "prompt" )
841+ model = model_config ["name" ]
842+ temperature = model_config ["temperature" ]
843+ max_completion_tokens = model_config ["max_completion_tokens" ]
844+ api_base = model_config ["api_base" ]
800845
801846 # Get API key
802847 api_key = get_api_key ()
@@ -999,7 +1044,8 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
9991044 elif prompt_action == "optimize" :
10001045 # Optimize existing prompt with AI
10011046 if existing_prompt_file :
1002- print (f"\n 🤖 Optimizing existing prompt with AI..." )
1047+ prompt_model_config = get_model_config (config , "prompt" )
1048+ print (f"\n 🤖 Optimizing existing prompt with AI (using { prompt_model_config ['name' ]} )..." )
10031049 prompt_content = existing_prompt_file .read_text (encoding = "utf-8" )
10041050
10051051 # Parse existing prompt
@@ -1032,7 +1078,8 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
10321078 print (f"📄 AI-generated prompt saved: { prompt_file } " )
10331079 else :
10341080 # First time: Generate base prompt, then optimize with AI
1035- print (f"\n 🤖 Generating prompt with AI..." )
1081+ prompt_model_config = get_model_config (config , "prompt" )
1082+ print (f"\n 🤖 Generating prompt with AI (using { prompt_model_config ['name' ]} )..." )
10361083 print (" Step 1: Building base prompt from config..." )
10371084 base_system_prompt = build_system_prompt (config )
10381085 base_user_prompt = build_user_prompt (
@@ -1050,14 +1097,15 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
10501097 print (f"📄 AI-generated prompt saved: { prompt_file } " )
10511098 elif prompt_action == "regenerate_and_optimize" :
10521099 # Regenerate from config, then optimize with AI
1100+ prompt_model_config = get_model_config (config , "prompt" )
10531101 print ("\n 📝 Regenerating prompt from config and data..." )
10541102 print (" Step 1: Building prompt from config..." )
10551103 base_system_prompt = build_system_prompt (config )
10561104 base_user_prompt = build_user_prompt (
10571105 ontology_data , docs_patterns , meta_patterns , problems_data , config
10581106 )
10591107
1060- print (" Step 2: Optimizing with AI..." )
1108+ print (f" Step 2: Optimizing with AI (using { prompt_model_config ['name' ]} )..." )
10611109 # Let AI optimize the regenerated prompt
10621110 system_prompt , user_prompt = optimize_prompt_with_ai (
10631111 base_system_prompt , base_user_prompt , config
@@ -1082,8 +1130,13 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
10821130 output_config = config .get ("output" , {})
10831131 output_dir = Path (output_config .get ("directory" , "docs/mindmaps" ))
10841132
1085- model_name = config .get ("model" , {}).get ("name" , DEFAULT_MODEL )
1086- print (f"\n 🤖 Generating with { model_name } ..." )
1133+ # Show which models are being used
1134+ prompt_model_config = get_model_config (config , "prompt" )
1135+ mindmap_model_config = get_model_config (config , "mindmap" )
1136+
1137+ print (f"\n 🤖 Model Configuration:" )
1138+ print (f" 📝 Prompt optimization: { prompt_model_config ['name' ]} " )
1139+ print (f" 🗺️ Mind map generation: { mindmap_model_config ['name' ]} " )
10871140
10881141 if not HAS_OPENAI :
10891142 print ("\n ⚠️ OpenAI library not installed." )
@@ -1128,6 +1181,10 @@ def generate_mindmap_ai(config: dict[str, Any]) -> str:
11281181 for lang in languages :
11291182 print (f"\n 🌐 Generating { lang } version..." )
11301183
1184+ # Show which model is being used for this generation
1185+ current_mindmap_model = get_model_config (config , "mindmap" )
1186+ print (f" 🤖 Using model: { current_mindmap_model ['name' ]} " )
1187+
11311188 try :
11321189 # Create language-specific config
11331190 lang_config = config .copy ()
@@ -1287,7 +1344,11 @@ def main() -> int:
12871344 if args .style :
12881345 config .setdefault ("generation" , {})["style" ] = args .style
12891346 if args .model :
1347+ # If --model is specified, set both prompt and mindmap models to the same value
1348+ # User can still override individually in config file
12901349 config .setdefault ("model" , {})["name" ] = args .model
1350+ config .setdefault ("model" , {})["prompt_model" ] = args .model
1351+ config .setdefault ("model" , {})["mindmap_model" ] = args .model
12911352
12921353 # Show config if requested
12931354 if args .list_config :
0 commit comments