
Commit f7c4122

refactor(ai-markmap-agent): remove V2, rename V3 to standard names
Config:
- Remove V2 models: generalist, specialist, optimizer, summarizer, judges
- Remove V2 workflow settings
- Update debug output phase names

Files:
- Rename writer_v3.py → writer.py (WriterAgentV3 → WriterAgent)
- Rename graph_v3.py → graph.py (run_pipeline_v3 → run_pipeline)
- Update all imports in main.py, agents/__init__.py
1 parent 3b42aaf commit f7c4122

5 files changed: +80 additions, -224 deletions

tools/ai-markmap-agent/config/config.yaml

Lines changed: 32 additions & 176 deletions
@@ -172,7 +172,7 @@ prompt_mode:
  mode: "static"

  # Model to use for generating dynamic prompts (only used when mode="dynamic")
- generator_model: "gpt-4" # ORIGINAL: gpt-5.2
+ generator_model: "gpt-4"

  # Meta-prompts for dynamic generation
  meta_prompts:
@@ -188,37 +188,7 @@ prompt_mode:
  # Model Configuration
  # -----------------------------------------------------------------------------
  models:
- # Generalist - Broad understanding, knowledge organization
- generalist:
- en:
- model: "gpt-4o" # 128K context window
- persona_prompt: "prompts/generators/generalist_persona.md"
- behavior_prompt: "prompts/generators/generalist_behavior.md"
- temperature: 0.7
- max_tokens: 4096
- zh:
- model: "gpt-4o" # 128K context window
- persona_prompt: "prompts/generators/generalist_persona.md"
- behavior_prompt: "prompts/generators/generalist_behavior.md"
- temperature: 0.7
- max_tokens: 4096
-
- # Specialist - Engineering details, structural rigor
- specialist:
- en:
- model: "gpt-4o" # 128K context window
- persona_prompt: "prompts/generators/specialist_persona.md"
- behavior_prompt: "prompts/generators/specialist_behavior.md"
- temperature: 0.5
- max_tokens: 4096
- zh:
- model: "gpt-4o" # 128K context window
- persona_prompt: "prompts/generators/specialist_persona.md"
- behavior_prompt: "prompts/generators/specialist_behavior.md"
- temperature: 0.5
- max_tokens: 4096
-
- # Planners (V3) - Structure Specification generators
+ # Planners - Structure Specification generators
  generalist_planner:
  en:
  model: "gpt-4o"
@@ -247,7 +217,7 @@ models:
  temperature: 0.5
  max_tokens: 4096

- # Content Strategists (V3) - Three distinct expert perspectives for debate
+ # Content Strategists - Three distinct expert perspectives for discussion
  # Uses Structure Spec (YAML), not Markdown
  content_strategist:
  - id: "architect_strategist"
@@ -277,73 +247,15 @@ models:
  max_tokens: 4096
  focus: "user_experience"

- # Optimizers (V2, legacy) - Three distinct expert perspectives for debate
- optimizer:
- # Top-tier Software Architect (Dr. Alexander Chen)
- - id: "optimizer_architect"
- name: "The Software Architect"
- persona_name: "Dr. Alexander Chen"
- model: "gpt-4" # ORIGINAL: gpt-5.1
- persona_prompt: "prompts/optimizers/optimizer_architect_persona.md"
- behavior_prompt: "prompts/optimizers/optimizer_architect_behavior.md"
- temperature: 0.6
- max_tokens: 4096
- focus: "architecture_modularity"
- # For dynamic mode:
- dynamic_config:
- role_description: "Top-tier Software Architect"
- focus_area: "system design, modularity, clean architecture, design patterns"
- perspective: "structural and organizational excellence"
-
- # Senior Algorithm Professor (Prof. David Knuth Jr.)
- - id: "optimizer_professor"
- name: "The Algorithm Professor"
- persona_name: "Prof. David Knuth Jr."
- model: "gpt-4" # ORIGINAL: gpt-5.1
- persona_prompt: "prompts/optimizers/optimizer_professor_persona.md"
- behavior_prompt: "prompts/optimizers/optimizer_professor_behavior.md"
- temperature: 0.6
- max_tokens: 4096
- focus: "correctness_completeness"
- # For dynamic mode:
- dynamic_config:
- role_description: "Distinguished Algorithm Professor and Computer Scientist"
- focus_area: "algorithms, data structures, computational complexity, formal methods"
- perspective: "academic rigor and correctness"
-
- # Senior Technical Architect / API Designer (James Patterson)
- - id: "optimizer_apidesigner"
- name: "The Technical API Architect"
- persona_name: "James Patterson"
- model: "gpt-4" # ORIGINAL: gpt-5.1
- persona_prompt: "prompts/optimizers/optimizer_apidesigner_persona.md"
- behavior_prompt: "prompts/optimizers/optimizer_apidesigner_behavior.md"
- temperature: 0.7
- max_tokens: 4096
- focus: "developer_experience"
- # For dynamic mode:
- dynamic_config:
- role_description: "Senior Technical Architect and API Designer"
- focus_area: "API design, developer experience, documentation, interface patterns"
- perspective: "usability and developer-centric design"
-
- # Integrator (V3) - Consolidates strategist suggestions
+ # Integrator - Consolidates strategist suggestions
  integrator:
  model: "gpt-4o"
  persona_prompt: "prompts/integrator/integrator_persona.md"
  behavior_prompt: "prompts/integrator/integrator_behavior.md"
  temperature: 0.5
  max_tokens: 4096

- # Summarizer (V2, legacy) - Consolidates each round's discussion
- summarizer:
- model: "gpt-4o" # ORIGINAL: gpt-5.2
- persona_prompt: "prompts/summarizer/summarizer_persona.md"
- behavior_prompt: "prompts/summarizer/summarizer_behavior.md"
- temperature: 0.5
- max_tokens: 4096
-
- # Evaluators (V3) - Structure Specification evaluation
+ # Evaluators - Structure Specification evaluation
  # Uses Structure Spec (YAML), not Markdown
  evaluator:
  - id: "structure_evaluator"
@@ -368,43 +280,13 @@ models:
  - "learning_progression"
  - "practical_value"

- # Judges (V2, legacy) - Evaluation and selection
- judges:
- - id: "judge_structure"
- name: "Structure Judge"
- persona_name: "Dr. Sarah Chen"
- model: "gpt-4"
- persona_prompt: "prompts/judges/judge_quality_persona.md"
- behavior_prompt: "prompts/judges/judge_quality_behavior.md"
- temperature: 0.4
- max_tokens: 4096
- criteria:
- - "hierarchy_quality"
- - "depth_balance"
- - "logical_grouping"
- - "naming_consistency"
-
- - id: "judge_completeness"
- name: "Completeness Judge"
- persona_name: "Prof. Michael Torres"
- model: "gpt-4"
- persona_prompt: "prompts/judges/judge_completeness_persona.md"
- behavior_prompt: "prompts/judges/judge_completeness_behavior.md"
- temperature: 0.4
- max_tokens: 4096
- criteria:
- - "coverage"
- - "practical_value"
- - "learning_path"
- - "technical_accuracy"
-
- # Writer - Final Markmap generation (V2 NEW)
+ # Writer - Final Markmap generation
  # Responsible for:
- # 1. Applying judge feedback and suggestions
+ # 1. Applying evaluator feedback and suggestions
  # 2. Generating proper links (GitHub/LeetCode)
  # 3. Applying Markmap formatting (checkboxes, KaTeX, fold, etc.)
  writer:
- model: "gpt-4o" # 128K context window (gpt-4 only has 8K)
+ model: "gpt-4o" # 128K context window
  persona_prompt: "prompts/writer/writer_persona.md"
  behavior_prompt: "prompts/writer/writer_behavior.md"
  format_guide: "prompts/writer/markmap_format_guide.md"
@@ -413,7 +295,7 @@ models:

  # Translator - For translate mode languages
  translator:
- model: "gpt-4" # ORIGINAL: gpt-4o
+ model: "gpt-4"
  temperature: 0.3
  max_tokens: 8192

@@ -428,37 +310,18 @@ models:
  # Workflow Configuration
  # -----------------------------------------------------------------------------
  workflow:
- # Number of optimization rounds (Phase 2)
- # NOTE: Recommended setting is 3 rounds for production quality
- # Currently set to 1 for faster iteration during development
- optimization_rounds: 1 # Production: 3
+ # Maximum discussion rounds for strategists
+ max_discussion_rounds: 3

- # Number of optimizers (must match models.optimizer count)
- optimizer_count: 3
+ # Consensus threshold (0.0-1.0)
+ # If strategists agree above this threshold, discussion ends early
+ consensus_threshold: 0.8

  # Token threshold to trigger compression
  max_tokens_before_compress: 8000

- # Enable parallel baseline generation (Phase 1)
- parallel_baseline_generation: true
-
- # ---------------------------------------------------------------------------
- # Evaluation & Debate Settings (Phase 3)
- # ---------------------------------------------------------------------------
- # Number of judges (minimum 2 required, must match models.judges count)
- judge_count: 2
-
- # Enable debate between judges for consensus
- # When enabled, judges will discuss and debate to reach agreement
- enable_debate: true # Recommended: true for production
-
- # Maximum debate rounds before forcing a decision
- max_debate_rounds: 1 # Production: 2-3
-
- # Consensus threshold (0.0-1.0)
- # If judges agree above this threshold, debate ends early
- # 0.8 = 80% agreement required for consensus
- debate_consensus_threshold: 0.8
+ # Enable parallel structure generation (Phase 1)
+ parallel_generation: true

  # ---------------------------------------------------------------------------
  # Post-Processing Settings (applied by program, not LLM)
@@ -496,29 +359,28 @@ debug_output:

  # Save outputs for each phase
  phases:
- # Phase 1: Baseline generation
+ # Phase 1: Structure generation
  baseline:
  enabled: true
- save_each_generator: true # Save output from each generator (generalist, specialist)
+ save_each_generator: true # Save output from each planner

- # Phase 2: Optimization rounds
+ # Phase 2: Strategy discussion rounds
  optimization:
  enabled: true
- save_each_round: true # Save markmap after each round
- save_optimizer_suggestions: true # Save each optimizer's suggestions
- save_summarizer_output: true # Save summarizer's consolidated output
+ save_each_round: true # Save structure after each round
+ save_strategist_suggestions: true # Save each strategist's suggestions
+ save_integrator_output: true # Save integrator's consolidated output

- # Phase 3: Judge evaluation & debate
- judging:
+ # Phase 3: Evaluation
+ evaluation:
  enabled: true
- save_initial_evaluations: true # Save each judge's initial evaluation
- save_debate_rounds: true # Save each debate round's discussion
- save_final_consensus: true # Save final consensus and selected winner
+ save_evaluations: true # Save each evaluator's assessment
+ save_final_consensus: true # Save final consensus

  # Phase 4: Writer
  writer:
  enabled: true
- save_writer_input: true # Save input to writer (selected markmap + feedback)
+ save_writer_input: true # Save input to writer (structure + feedback)
  save_writer_output: true # Save writer's final output

  # Phase 5: Translation
@@ -574,15 +436,15 @@ output:
  markdown: "../../docs/mindmaps" # .md files
  html: "../../docs/pages/mindmaps" # .html files

- # Naming convention - generates 4 final outputs (2 types × 2 languages)
+ # Naming convention
  # Output files: neetcode_{type}_ai_{lang}.md / .html
  naming:
  prefix: "neetcode"

  # Languages to generate
  # Each language can use one of two modes:
- # "generate" - Run full optimization pipeline from scratch (slow)
- # "translate" - Translate from another language's output (fast, DEFAULT for non-primary)
+ # "generate" - Run full pipeline from scratch (slow)
+ # "translate" - Translate from another language's output (fast)
  #
  languages:
  en:
@@ -591,26 +453,20 @@ output:

  zh-TW:
  enabled: true
- mode: "translate" # DEFAULT: translate from English (fast)
- # mode: "generate" # Alternative: run full pipeline independently (slow)
+ mode: "translate" # Translate from English (fast)
  source_lang: "en" # Source language to translate from
- translator_model: "gpt-4" # ORIGINAL: gpt-4o
+ translator_model: "gpt-4"

  # Output types
  types:
  general:
  description: "Broad understanding, knowledge organization"
  generator: "generalist"
- specialist:
- description: "Engineering details, structural rigor"
- generator: "specialist"

  # File naming template: {prefix}_{type}_ai_{lang}.{ext}
  # Examples:
  # neetcode_general_ai_en.md
  # neetcode_general_ai_zh-TW.html
- # neetcode_specialist_ai_en.md
- # neetcode_specialist_ai_zh-TW.html
  template: "{prefix}_{type}_ai_{lang}"

  # Intermediate files

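With the V2 judge and debate keys gone, the workflow block is small enough to read directly from the YAML. A minimal sketch of consuming the renamed keys, assuming the file is parsed with PyYAML; the path and loading code here are illustrative, not the project's actual config loader:

import yaml  # PyYAML, assumed here purely for illustration

# Hypothetical path, relative to the repository root
with open("tools/ai-markmap-agent/config/config.yaml") as f:
    config = yaml.safe_load(f)

workflow = config["workflow"]
print(workflow["max_discussion_rounds"])       # 3
print(workflow["consensus_threshold"])         # 0.8
print(workflow["parallel_generation"])         # True
print(workflow["max_tokens_before_compress"])  # 8000

# Removed V2 keys are no longer present, so old lookups now fail:
# workflow["optimization_rounds"]        -> KeyError
# workflow["debate_consensus_threshold"] -> KeyError
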
tools/ai-markmap-agent/main.py

Lines changed: 2 additions & 2 deletions
@@ -28,7 +28,7 @@
  get_api_key,
  )
  from src.data_sources import DataSourcesLoader, load_data_sources
- from src.graph_v3 import run_pipeline_v3, build_markmap_graph_v3
+ from src.graph import run_pipeline, build_markmap_graph


  def print_banner() -> None:
@@ -185,7 +185,7 @@ def main() -> int:
  print(" 4. Render final Markmap (Writer)")
  print(" 5. Translate if needed")
  print(" 6. Post-process and save")
- result = run_pipeline_v3(data, config)
+ result = run_pipeline(data, config)

  # Report results
  print("\n" + "=" * 60)

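For any caller besides main.py the migration is a mechanical rename. A short sketch under the assumption that data and config are already loaded (their loaders are unchanged by this commit); generate_markmap is a hypothetical wrapper, not project code:

# Old entry points (removed by this commit):
#   from src.graph_v3 import run_pipeline_v3, build_markmap_graph_v3
# New entry points (build_markmap_graph is also re-exported from src.graph):
from src.graph import run_pipeline


def generate_markmap(data: dict, config: dict):
    """Hypothetical helper: same call shape as the old run_pipeline_v3(data, config)."""
    return run_pipeline(data, config)
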
tools/ai-markmap-agent/src/agents/__init__.py

Lines changed: 3 additions & 3 deletions
@@ -38,7 +38,7 @@
  create_evaluators,
  aggregate_evaluations,
  )
- from .writer_v3 import WriterAgentV3, create_writer_v3
+ from .writer import WriterAgent, create_writer
  from .translator import TranslatorAgent, create_translators

  __all__ = [
@@ -62,8 +62,8 @@
  "ContentEvaluator",
  "create_evaluators",
  "aggregate_evaluations",
- "WriterAgentV3",
- "create_writer_v3",
+ "WriterAgent",
+ "create_writer",
  "TranslatorAgent",
  "create_translators",
  ]

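Downstream imports change the same way. A minimal sketch of the new public surface of src/agents; only names visible in __all__ above are used, and no factory is called because create_writer's arguments are not part of this diff:

# New exports after this commit:
from src.agents import WriterAgent, create_writer

print(WriterAgent.__name__, create_writer.__name__)  # WriterAgent create_writer

# Removed exports (importing them now raises ImportError):
# from src.agents import WriterAgentV3, create_writer_v3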