Skip to content

Commit 545aff6

Browse files
committed
feat(translator): add prompt size validation and improve error messages
- Add prompt size checking with token estimation
- Warn if prompt exceeds 80% of the model context limit
- Enhance empty-response error messages with prompt size info
- Add documentation clarifying this is prompt-driven, not agent-driven
- Include prompt format issues as a possible cause of empty responses
1 parent 35f342e commit 545aff6

File tree

1 file changed

+25
-4
lines changed

1 file changed

+25
-4
lines changed

tools/ai-markmap-agent/src/agents/translator.py

Lines changed: 25 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -22,6 +22,11 @@ class TranslatorAgent(BaseAgent):
2222
"""
2323
Translator agent for converting Markmaps between languages.
2424
25+
NOTE: This is a PROMPT-DRIVEN translator, not a true agent architecture.
26+
It loads prompt templates from external .md files, combines them with content,
27+
and makes a single LLM API call. No multi-turn conversation, tool usage, or
28+
state management is involved.
29+
2530
Translates the content while preserving structure, links, and formatting.
2631
Prompts are loaded from external .md files for easy customization.
2732
"""
@@ -158,14 +163,23 @@ def translate(self, content: str, output_type: str) -> str:
158163
f" Combined prompt length: {len(prompt)} chars (after strip: {len(prompt_str)} chars)"
159164
)
160165

166+
# Check prompt size (warn if too large)
167+
prompt_size = len(prompt)
168+
max_tokens = self.model_config.get("max_tokens", 8192)
169+
# Rough estimate: 1 token ≈ 4 characters
170+
estimated_tokens = prompt_size / 4
171+
172+
if estimated_tokens > max_tokens * 0.8: # Warn if > 80% of max_tokens
173+
print(f" ⚠️ Warning: Prompt size ({prompt_size:,} chars, ~{estimated_tokens:.0f} tokens) "
174+
f"may exceed model context limit (max_tokens: {max_tokens})")
175+
161176
messages = self._build_messages(prompt)
162177

163178
# Save LLM input
164179
self._save_llm_call_input(messages, "translate")
165180

166181
# Show progress info
167182
model_name = self.model_config.get("model", "unknown")
168-
prompt_size = len(prompt)
169183
content_size = len(content_str)
170184
print(f" 📤 Sending request to {model_name}...")
171185
print(f" Prompt: {prompt_size:,} chars, Content: {content_size:,} chars")
@@ -227,17 +241,24 @@ def translate(self, content: str, output_type: str) -> str:
227241
# Validate content is not empty
228242
if not content_str or len(content_str.strip()) == 0:
229243
model_name = self.model_config.get('model', 'unknown')
244+
prompt_size = len(prompt) if 'prompt' in locals() else 0
245+
max_tokens = self.model_config.get("max_tokens", 8192)
246+
estimated_tokens = prompt_size / 4 if prompt_size > 0 else 0
247+
230248
error_msg = (
231249
f"LLM returned empty response.\n"
232250
f" Model: {model_name}\n"
233251
f" Source: {self.source_language} → Target: {self.target_language}\n"
234252
f" Response length: {len(content_str)} chars\n"
253+
f" Prompt size: {prompt_size:,} chars (~{estimated_tokens:.0f} tokens, max_tokens: {max_tokens})\n"
235254
f" Debug output has been saved (check debug files for actual API response).\n"
236255
f" Possible causes:\n"
237256
f" 1. Invalid model name '{model_name}' (verify it's a valid model for your API provider)\n"
238-
f" 2. API quota/rate limit exceeded\n"
239-
f" 3. API returned empty content due to content filtering or safety checks\n"
240-
f" 4. Network/API connection issue"
257+
f" 2. Prompt too large: {prompt_size:,} chars may exceed model context limit\n"
258+
f" 3. API quota/rate limit exceeded\n"
259+
f" 4. API returned empty content due to content filtering or safety checks\n"
260+
f" 5. Prompt format issue causing model to reject the request\n"
261+
f" 6. Network/API connection issue"
241262
)
242263
raise ValueError(error_msg)
243264

0 commit comments

Comments (0)