
Commit a326935

fix(pipeline): ensure all LLM calls persist inputs/outputs and adjust run ordering
- Persist input/output for async LLM calls in base_agent.ainvoke()
- Ensure compressor LLM calls save inputs and outputs:
  - compress_content()
  - _summarize_history()
- Confirm all LLM entry points now persist request/response data
- Change resume run ordering from newest-first to oldest-first
- Update resume UI to label the ordering and mark the latest run explicitly

This makes every LLM API invocation traceable and every run resumable, improving debugging, replay, and resume reliability across the pipeline.
1 parent f014b57 commit a326935

File tree

3 files changed: +78 −5 lines

tools/ai-markmap-agent/src/agents/base_agent.py

Lines changed: 7 additions & 0 deletions
```diff
@@ -341,8 +341,15 @@ async def ainvoke(self, input_data: dict[str, Any]) -> str:
         )
 
         messages = self._build_messages(formatted_prompt)
+
+        # Save LLM input if debug enabled
+        self._save_llm_call_input(messages, "invoke")
+
         response = await self.llm.ainvoke(messages)
 
+        # Save LLM output if debug enabled
+        self._save_llm_call_output(response.content, "invoke")
+
         return response.content
 
     @abstractmethod
```
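The `_save_llm_call_input()` / `_save_llm_call_output()` helpers called here are defined elsewhere in base_agent.py and are not part of this hunk. A minimal sketch of what they plausibly do, modeled on the compressor's `_save_llm_call()` below; the method bodies, the `agent_id` attribute, and the `_write_debug_file()` helper are illustrative assumptions, not the repo's actual code:

```python
# Hypothetical sketch only: mirrors the gating and file naming that
# compressor._save_llm_call (below) uses. _write_debug_file and agent_id
# are illustrative stand-ins, not real members of this repo's BaseAgent.
import json
from typing import Any


class BaseAgent:
    agent_id: str  # assumed attribute identifying the agent

    def _write_debug_file(self, name: str, text: str) -> None:
        ...  # assumed: writes into the current debug run directory when enabled

    def _save_llm_call_input(self, messages: list[Any], call_type: str) -> None:
        # Serialize the messages list the same way the compressor does.
        payload = json.dumps(
            [{"role": getattr(m, "type", "unknown"),
              "content": getattr(m, "content", str(m))} for m in messages],
            indent=2, ensure_ascii=False,
        )
        self._write_debug_file(f"llm_input_{self.agent_id}_{call_type}.md", payload)

    def _save_llm_call_output(self, content: str, call_type: str) -> None:
        # Persist the raw response text alongside the input.
        self._write_debug_file(f"llm_output_{self.agent_id}_{call_type}.md", str(content))
```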

tools/ai-markmap-agent/src/compression/compressor.py

Lines changed: 64 additions & 0 deletions
```diff
@@ -56,6 +56,56 @@ def __init__(self, config: dict[str, Any] | None = None):
             "max_tokens_before_compress", 8000
         )
 
+    def _save_llm_call(self, agent_id: str, call_type: str, content: Any, is_input: bool = True):
+        """Save LLM call input or output for debugging."""
+        try:
+            from ..debug_output import get_debug_manager
+            debug = get_debug_manager(self.config)
+
+            if not debug.enabled:
+                return
+
+            # Get LLM debug config
+            llm_config = self.config.get("debug_output", {}).get("llm_calls", {})
+            if not llm_config.get("enabled", False):
+                return
+
+            if is_input and not llm_config.get("save_input", False):
+                return
+            if not is_input and not llm_config.get("save_output", False):
+                return
+
+            # Format content
+            if is_input:
+                # messages list
+                import json
+                if isinstance(content, list):
+                    content_str = json.dumps(
+                        [{"role": msg.type if hasattr(msg, "type") else "unknown",
+                          "content": msg.content if hasattr(msg, "content") else str(msg)}
+                         for msg in content],
+                        indent=2,
+                        ensure_ascii=False
+                    )
+                else:
+                    content_str = str(content)
+                filename = f"llm_input_{agent_id}_{call_type}"
+            else:
+                # response string
+                content_str = str(content)
+                filename = f"llm_output_{agent_id}_{call_type}"
+
+            # Save to debug directory
+            ext = "md"
+            filepath = debug.run_dir / f"{filename}.{ext}"
+            filepath.write_text(content_str, encoding="utf-8")
+            prefix = "📝" if is_input else "📤"
+            print(f"  {prefix} LLM {'input' if is_input else 'output'} saved: {filepath.name}")
+
+        except Exception:
+            # Silently fail if debug output is not available
+            pass
+
     def _load_prompt(self, prompt_path: str) -> str:
         """Load prompt from file."""
         if not prompt_path:
```
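The captures are gated three ways: the debug manager must be enabled, and the `debug_output.llm_calls` config must opt in per direction. A sketch of the dict shape `_save_llm_call()` reads; the `llm_calls` keys are taken verbatim from the diff above, while everything else about the surrounding config is an assumption:

```python
# Shape of the config dict consumed by _save_llm_call. The llm_calls keys
# (enabled / save_input / save_output) appear verbatim in the diff above;
# how get_debug_manager decides debug.enabled is not shown here.
config = {
    "debug_output": {
        "llm_calls": {
            "enabled": True,      # master switch for LLM call capture
            "save_input": True,   # write llm_input_<agent_id>_<call_type>.md
            "save_output": True,  # write llm_output_<agent_id>_<call_type>.md
        },
    },
}
```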
```diff
@@ -134,8 +184,15 @@ def compress(
             HumanMessage(content=prompt),
         ]
 
+        # Save LLM input
+        self._save_llm_call("compressor", "compress_content", messages, is_input=True)
+
         try:
             response = self.llm.invoke(messages)
+
+            # Save LLM output
+            self._save_llm_call("compressor", "compress_content", response.content, is_input=False)
+
             return response.content
         except Exception as e:
             print(f"Warning: Compression failed: {e}")
```
```diff
@@ -202,8 +259,15 @@ def _summarize_history(self, history: list[dict[str, Any]]) -> str:
             HumanMessage(content=prompt),
         ]
 
+        # Save LLM input
+        self._save_llm_call("compressor", "summarize_history", messages, is_input=True)
+
         try:
             response = self.llm.invoke(messages)
+
+            # Save LLM output
+            self._save_llm_call("compressor", "summarize_history", response.content, is_input=False)
+
             return response.content
         except Exception:
             return f"Summary of {len(history)} earlier rounds."
```
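Between the two call sites above, a fully instrumented run leaves four artifacts in the debug run directory. Their names follow directly from the f-strings in `_save_llm_call()`:

```python
# Filenames implied by _save_llm_call's f-strings (extension is always "md").
for call_type in ("compress_content", "summarize_history"):
    print(f"llm_input_compressor_{call_type}.md")
    print(f"llm_output_compressor_{call_type}.md")
# llm_input_compressor_compress_content.md
# llm_output_compressor_compress_content.md
# llm_input_compressor_summarize_history.md
# llm_output_compressor_summarize_history.md
```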

tools/ai-markmap-agent/src/resume.py

Lines changed: 7 additions & 5 deletions
```diff
@@ -143,7 +143,7 @@ def scan_previous_runs(debug_output_dir: Path) -> list[RunInfo]:
         debug_output_dir: Path to debug outputs directory
 
     Returns:
-        List of RunInfo objects, sorted by timestamp (newest first)
+        List of RunInfo objects, sorted by timestamp (oldest first)
     """
     if not debug_output_dir.exists():
         return []
```
```diff
@@ -154,8 +154,8 @@ def scan_previous_runs(debug_output_dir: Path) -> list[RunInfo]:
         # Skip regeneration runs (they will be handled separately)
         runs.append(RunInfo(item))
 
-    # Sort by timestamp (newest first)
-    runs.sort(key=lambda r: r.timestamp or datetime.min, reverse=True)
+    # Sort by timestamp (oldest first) - so newest prints at the bottom
+    runs.sort(key=lambda r: r.timestamp or datetime.min, reverse=False)
 
     return runs
```
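A standalone sketch (not repo code) of what the new sort key does, including the `datetime.min` fallback that pushes runs with no parsable timestamp to the front of the oldest-first list:

```python
from datetime import datetime

# Mirrors runs.sort(key=lambda r: r.timestamp or datetime.min, reverse=False):
# oldest first, with a missing timestamp treated as datetime.min.
timestamps = [datetime(2024, 5, 2), None, datetime(2024, 5, 1)]
ordered = sorted(timestamps, key=lambda t: t or datetime.min)
print(ordered)  # None first (sorts as datetime.min), then 2024-05-01, then 2024-05-02
```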
```diff
@@ -175,13 +175,15 @@ def select_run_interactive(runs: list[RunInfo]) -> RunInfo | None:
         return None
 
     print("\n" + "=" * 60)
-    print("Available Previous Runs")
+    print("Available Previous Runs (oldest to newest)")
     print("=" * 60)
 
     for i, run in enumerate(runs, 1):
         timestamp_str = run.timestamp.strftime("%Y-%m-%d %H:%M:%S") if run.timestamp else "Unknown"
         file_count = sum(len(files) for files in run.files.values())
-        print(f"\n[{i}] {run.run_id}")
+        is_latest = (i == len(runs))  # Last one is newest
+        marker = " ← Latest" if is_latest else ""
+        print(f"\n[{i}] {run.run_id}{marker}")
         print(f"    Last modified: {timestamp_str}")
         print(f"    Files: {file_count} total")
```
