
Commit 815c084

Fix the model config issue, OpenRouter OAuth issue, and model refresh … (#45)
New Features
- Per-task model-size options (mini/nano) across agents, tools, and evaluations.
- New LLM context support for tool executions and a new ObjectiveDrivenAction tool.
- Automatic fetch of OpenRouter models after OAuth, with UI update and event dispatch.
- OpenRouter model caching with TTL, auto-refresh, and a programmatic update hook.

Changes
- Many agent/tool flows now prefer or require mini/nano models; evaluation/UI accept explicit main/mini/nano.
- Tool execute signatures extended to accept LLM context; reduced verbose tool logging.
- Centralized model/provider sentinels and placeholders; stricter JSON parsing for extractors.

Tests
- Added unit tests covering OpenRouter model cache and refresh behavior.
1 parent d0ce4d2 commit 815c084

29 files changed: +917 −250 lines
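The OpenRouter model cache and auto-refresh called out in the summary are not among the four files shown below (only the new unit test, SettingsDialogOpenRouterCache.test.ts, appears in BUILD.gn). As a loose sketch of the kind of TTL cache the summary describes — the class and method names here are hypothetical, not the committed API:

```ts
// Hypothetical sketch of a TTL-based model cache; not the actual implementation in this commit.
interface CachedModels {
  models: string[];   // model identifiers returned by the provider
  fetchedAt: number;  // epoch millis of the last successful fetch
}

class OpenRouterModelCache {
  private cache: CachedModels | null = null;

  constructor(
    private readonly fetchModels: () => Promise<string[]>, // e.g. wraps the OpenRouter /models request
    private readonly ttlMs: number = 60 * 60 * 1000,       // assumed 1h TTL, for illustration only
  ) {}

  /** Returns cached models while fresh, otherwise refetches (auto-refresh on expiry). */
  async getModels(): Promise<string[]> {
    const now = Date.now();
    if (this.cache && now - this.cache.fetchedAt < this.ttlMs) {
      return this.cache.models;
    }
    const models = await this.fetchModels();
    this.cache = { models, fetchedAt: now };
    return models;
  }

  /** Programmatic update hook, e.g. invoked right after the OAuth flow completes. */
  setModels(models: string[]): void {
    this.cache = { models, fetchedAt: Date.now() };
  }
}
```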

front_end/panels/ai_chat/BUILD.gn

Lines changed: 1 addition & 0 deletions
@@ -337,6 +337,7 @@ ts_library("unittests") {
     "ui/__tests__/ChatViewAgentSessionsOrder.test.ts",
     "ui/__tests__/ChatViewSequentialSessionsTransition.test.ts",
     "ui/__tests__/ChatViewInputClear.test.ts",
+    "ui/__tests__/SettingsDialogOpenRouterCache.test.ts",
     "ui/input/__tests__/InputBarClear.test.ts",
     "ui/message/__tests__/MessageCombiner.test.ts",
     "ui/message/__tests__/StructuredResponseController.test.ts",

front_end/panels/ai_chat/agent_framework/AgentRunner.ts

Lines changed: 36 additions & 5 deletions
@@ -18,6 +18,7 @@ import { sanitizeMessagesForModel } from '../LLM/MessageSanitizer.js';
 const logger = createLogger('AgentRunner');
 
 import { ConfigurableAgentTool, ToolRegistry, type ConfigurableAgentArgs, type ConfigurableAgentResult, type AgentRunTerminationReason, type HandoffConfig /* , HandoffContextTransform, ContextFilterRegistry*/ } from './ConfigurableAgentTool.js';
+import { MODEL_SENTINELS } from '../core/Constants.js';
 
 /**
  * Configuration for the AgentRunner
@@ -33,6 +34,10 @@ export interface AgentRunnerConfig {
   provider: LLMProvider;
   /** Optional vision capability check. Defaults to false (no vision). */
   getVisionCapability?: (modelName: string) => Promise<boolean> | boolean;
+  /** Mini model for smaller/faster operations */
+  miniModel?: string;
+  /** Nano model for smallest/fastest operations */
+  nanoModel?: string;
 }
 
 /**
@@ -218,6 +223,8 @@ export class AgentRunner {
     parentSession?: AgentSession, // For natural nesting
     defaultProvider?: LLMProvider,
     defaultGetVisionCapability?: (modelName: string) => Promise<boolean> | boolean,
+    miniModel?: string, // Mini model for smaller/faster operations
+    nanoModel?: string, // Nano model for smallest/fastest operations
     overrides?: { sessionId?: string; parentSessionId?: string; traceId?: string }
   ): Promise<ConfigurableAgentResult & { agentSession: AgentSession }> {
     const targetAgentName = handoffConfig.targetAgentName;
@@ -286,12 +293,28 @@
     // Enhance the target agent's system prompt with page context
     const enhancedSystemPrompt = await enhancePromptWithPageContext(targetConfig.systemPrompt);
 
+    // Resolve model name for the target agent
+    let resolvedModelName: string;
+    if (typeof targetConfig.modelName === 'function') {
+      resolvedModelName = targetConfig.modelName();
+    } else if (targetConfig.modelName === MODEL_SENTINELS.USE_MINI) {
+      if (!miniModel) {
+        throw new Error(`Mini model not provided for handoff to agent '${targetAgentName}'. Ensure miniModel is passed in context.`);
+      }
+      resolvedModelName = miniModel;
+    } else if (targetConfig.modelName === MODEL_SENTINELS.USE_NANO) {
+      if (!nanoModel) {
+        throw new Error(`Nano model not provided for handoff to agent '${targetAgentName}'. Ensure nanoModel is passed in context.`);
+      }
+      resolvedModelName = nanoModel;
+    } else {
+      resolvedModelName = targetConfig.modelName || defaultModelName;
+    }
+
     // Construct Runner Config & Hooks for the target agent
     const targetRunnerConfig: AgentRunnerConfig = {
       apiKey,
-      modelName: typeof targetConfig.modelName === 'function'
-        ? targetConfig.modelName()
-        : (targetConfig.modelName || defaultModelName),
+      modelName: resolvedModelName,
       systemPrompt: enhancedSystemPrompt,
       tools: targetConfig.tools
         .map(toolName => ToolRegistry.getRegisteredTool(toolName))
@@ -300,6 +323,8 @@
       temperature: targetConfig.temperature ?? defaultTemperature,
       provider: defaultProvider as LLMProvider,
       getVisionCapability: defaultGetVisionCapability,
+      miniModel,
+      nanoModel,
     };
     const targetRunnerHooks: AgentRunnerHooks = {
       prepareInitialMessages: undefined, // History already formed by transform or passthrough
@@ -845,6 +870,8 @@
         currentSession, // Pass current session for natural nesting
         config.provider,
         config.getVisionCapability,
+        config.miniModel,
+        config.nanoModel,
         { sessionId: nestedSessionId, parentSessionId: currentSession.sessionId, traceId: getCurrentTracingContext()?.traceId }
       );
 
@@ -947,11 +974,13 @@
       }
 
      try {
-        logger.info(`${agentName} Executing tool: ${toolToExecute.name} with args:`, toolArgs);
+        logger.info(`${agentName} Executing tool: ${toolToExecute.name}`);
        const execTracingContext = getCurrentTracingContext();
        toolResultData = await toolToExecute.execute(toolArgs as any, ({
          provider: config.provider,
          model: modelName,
+          miniModel: config.miniModel,
+          nanoModel: config.nanoModel,
          getVisionCapability: config.getVisionCapability,
          overrideSessionId: preallocatedChildId,
          overrideParentSessionId: currentSession.sessionId,
@@ -1210,7 +1239,9 @@
        undefined, // No llmToolArgs for max iterations handoff
        currentSession, // Pass current session for natural nesting
        config.provider,
-        config.getVisionCapability
+        config.getVisionCapability,
+        config.miniModel,
+        config.nanoModel
      );
      // Extract the result and session
      const { agentSession: childSession, ...actualResult } = handoffResult;
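The handoff path above now resolves the target agent's model in a fixed order: a modelName function wins, then the USE_MINI/USE_NANO sentinels (which require miniModel/nanoModel to be threaded through the runner config), then an explicit model string, then the default model. A standalone sketch of that precedence, assuming MODEL_SENTINELS holds string sentinels as in ../core/Constants.js; this restates the hunks above rather than reproducing the committed code:

```ts
import { MODEL_SENTINELS } from '../core/Constants.js';

// Sketch of the handoff model-resolution order shown in the diff above.
type ModelNameConfig = string | (() => string) | undefined;

function resolveModelName(
  configured: ModelNameConfig,
  context: { defaultModelName: string, miniModel?: string, nanoModel?: string },
): string {
  if (typeof configured === 'function') {
    return configured();                          // 1. explicit factory always wins
  }
  if (configured === MODEL_SENTINELS.USE_MINI) {
    if (!context.miniModel) { throw new Error('miniModel missing from runner config'); }
    return context.miniModel;                     // 2. mini sentinel -> runner's mini model
  }
  if (configured === MODEL_SENTINELS.USE_NANO) {
    if (!context.nanoModel) { throw new Error('nanoModel missing from runner config'); }
    return context.nanoModel;                     // 3. nano sentinel -> runner's nano model
  }
  return configured || context.defaultModelName;  // 4. explicit string, else the default model
}
```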

front_end/panels/ai_chat/agent_framework/ConfigurableAgentTool.ts

Lines changed: 60 additions & 11 deletions
@@ -4,16 +4,30 @@
 
 import { AgentService } from '../core/AgentService.js';
 import type { Tool } from '../tools/Tools.js';
-import { AIChatPanel } from '../ui/AIChatPanel.js';
 import { ChatMessageEntity, type ChatMessage } from '../models/ChatTypes.js';
 import { createLogger } from '../core/Logger.js';
 import { getCurrentTracingContext } from '../tracing/TracingConfig.js';
+import { MODEL_SENTINELS } from '../core/Constants.js';
 import type { AgentSession } from './AgentSessionTypes.js';
+import type { LLMProvider } from '../LLM/LLMTypes.js';
 
 const logger = createLogger('ConfigurableAgentTool');
 
 import { AgentRunner, type AgentRunnerConfig, type AgentRunnerHooks } from './AgentRunner.js';
 
+// Context passed along with agent/tool calls
+export interface CallCtx {
+  provider?: LLMProvider,
+  model?: string,
+  miniModel?: string,
+  nanoModel?: string,
+  mainModel?: string,
+  getVisionCapability?: (modelName: string) => Promise<boolean> | boolean,
+  overrideSessionId?: string,
+  overrideParentSessionId?: string,
+  overrideTraceId?: string,
+}
+
 /**
  * Defines the possible reasons an agent run might terminate.
  */
@@ -412,27 +426,62 @@ export class ConfigurableAgentTool implements Tool<ConfigurableAgentArgs, Config
 
     // Initialize
     const maxIterations = this.config.maxIterations || 10;
-    const modelName = typeof this.config.modelName === 'function'
-      ? this.config.modelName()
-      : (this.config.modelName || AIChatPanel.instance().getSelectedModel());
+
+    // Parse execution context first
+    const callCtx = (_ctx || {}) as CallCtx;
+
+    // Resolve model name from context or configuration
+    let modelName: string;
+    if (this.config.modelName === MODEL_SENTINELS.USE_MINI) {
+      if (!callCtx.miniModel) {
+        throw new Error(`Mini model not provided in context for agent '${this.name}'. Ensure context includes miniModel.`);
+      }
+      modelName = callCtx.miniModel;
+    } else if (this.config.modelName === MODEL_SENTINELS.USE_NANO) {
+      if (!callCtx.nanoModel) {
+        throw new Error(`Nano model not provided in context for agent '${this.name}'. Ensure context includes nanoModel.`);
+      }
+      modelName = callCtx.nanoModel;
+    } else if (typeof this.config.modelName === 'function') {
+      modelName = this.config.modelName();
+    } else if (this.config.modelName) {
+      modelName = this.config.modelName;
+    } else {
+      // Use main model from context, or fallback to context model
+      const contextModel = callCtx.mainModel || callCtx.model;
+      if (!contextModel) {
+        throw new Error(`No model provided for agent '${this.name}'. Ensure context includes model or mainModel.`);
+      }
+      modelName = contextModel;
+    }
+
+    // Override with context model only if agent doesn't have its own model configuration
+    if (callCtx.model && !this.config.modelName) {
+      modelName = callCtx.model;
+    }
+
+    // Validate required context
+    if (!callCtx.provider) {
+      throw new Error(`Provider not provided in context for agent '${this.name}'. Ensure context includes provider.`);
+    }
+
     const temperature = this.config.temperature ?? 0;
-
     const systemPrompt = this.config.systemPrompt;
     const tools = this.getToolInstances();
-
+
     // Prepare initial messages
     const internalMessages = this.prepareInitialMessages(args);
-
-    // Prepare runner config and hooks
     const runnerConfig: AgentRunnerConfig = {
       apiKey,
       modelName,
       systemPrompt,
       tools,
       maxIterations,
       temperature,
-      provider: AIChatPanel.getProviderForModel(modelName),
-      getVisionCapability: (m: string) => AIChatPanel.isVisionCapable(m),
+      provider: callCtx.provider,
+      getVisionCapability: callCtx.getVisionCapability ?? (() => false),
+      miniModel: callCtx.miniModel,
+      nanoModel: callCtx.nanoModel,
     };
 
     const runnerHooks: AgentRunnerHooks = {
@@ -446,7 +495,7 @@ export class ConfigurableAgentTool implements Tool<ConfigurableAgentArgs, Config
     };
 
     // Run the agent
-    const ctx: any = _ctx || {};
+    const ctx: any = callCtx || {};
    const result = await AgentRunner.run(
      internalMessages,
      args,
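With CallCtx in place, the panel-level singletons (AIChatPanel.instance(), getProviderForModel, isVisionCapable) are no longer reached for from inside the tool; everything the run needs arrives through the second execute argument. A hedged usage sketch — the agent name, model ids, and args are placeholders, not values from this commit:

```ts
// Hypothetical call site; names and values are illustrative only.
import type { LLMProvider } from '../LLM/LLMTypes.js';
import { ToolRegistry, type CallCtx } from './ConfigurableAgentTool.js';

declare const provider: LLMProvider; // supplied by the host app's LLM configuration (placeholder)

async function runExampleAgent(): Promise<void> {
  const agentTool = ToolRegistry.getRegisteredTool('research_agent'); // hypothetical agent name

  const ctx: CallCtx = {
    provider,
    model: 'main-model-id',      // used when the agent config has no modelName of its own
    miniModel: 'mini-model-id',  // consumed by configs set to MODEL_SENTINELS.USE_MINI
    nanoModel: 'nano-model-id',  // consumed by configs set to MODEL_SENTINELS.USE_NANO
    getVisionCapability: () => false,
  };

  const result = await agentTool?.execute({ query: 'example task' } as any, ctx);
  console.log(result);
}
```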

front_end/panels/ai_chat/agent_framework/implementation/ConfiguredAgents.ts

Lines changed: 11 additions & 13 deletions
@@ -10,13 +10,13 @@ import { BookmarkStoreTool } from '../../tools/BookmarkStoreTool.js';
 import { DocumentSearchTool } from '../../tools/DocumentSearchTool.js';
 import { NavigateURLTool, PerformActionTool, GetAccessibilityTreeTool, SearchContentTool, NavigateBackTool, NodeIDsToURLsTool, TakeScreenshotTool, ScrollPageTool } from '../../tools/Tools.js';
 import { HTMLToMarkdownTool } from '../../tools/HTMLToMarkdownTool.js';
-import { AIChatPanel } from '../../ui/AIChatPanel.js';
 import { ChatMessageEntity, type ChatMessage } from '../../models/ChatTypes.js';
 import {
   ConfigurableAgentTool,
   ToolRegistry, type AgentToolConfig, type ConfigurableAgentArgs
 } from '../ConfigurableAgentTool.js';
 import { WaitTool } from '../../tools/Tools.js';
+import { MODEL_SENTINELS } from '../../core/Constants.js';
 import { ThinkingTool } from '../../tools/ThinkingTool.js';
 import type { Tool } from '../../tools/Tools.js';
 
@@ -70,7 +70,6 @@ If the page does not match the expected content, retry with a different URL patt
 Remember: Always use navigate_url to actually go to the constructed URLs. Return easy-to-read markdown reports.`,
   tools: ['navigate_url', 'get_page_content'],
   maxIterations: 5,
-  modelName: () => AIChatPanel.instance().getSelectedModel(),
   temperature: 0.1,
   schema: {
     type: 'object',
@@ -322,7 +321,7 @@ Remember: You gather data, content_writer_agent writes the report. Always hand o
     'document_search'
   ],
   maxIterations: 15,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0,
   schema: {
     type: 'object',
@@ -423,7 +422,7 @@ Your process should follow these steps:
 The final output should be in markdown format, and it should be lengthy and detailed. Aim for 5-10 pages of content, at least 1000 words.`,
   tools: [],
   maxIterations: 3,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.3,
   schema: {
     type: 'object',
@@ -531,7 +530,7 @@ Conclusion: Fix the args format and retry with proper syntax: { "method": "fill"
     'take_screenshot',
   ],
   maxIterations: 10,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.5,
   schema: {
     type: 'object',
@@ -640,7 +639,7 @@ Remember that verification is time-sensitive - the page state might change durin
     'take_screenshot'
   ],
   maxIterations: 3,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.2,
   schema: {
     type: 'object',
@@ -725,7 +724,7 @@ When selecting an element to click, prioritize:
     'node_ids_to_urls',
   ],
   maxIterations: 5,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.7,
   schema: {
     type: 'object',
@@ -805,7 +804,7 @@ When selecting a form field to fill, prioritize:
     'schema_based_extractor',
   ],
   maxIterations: 5,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.7,
   schema: {
     type: 'object',
@@ -881,7 +880,7 @@ When selecting an element for keyboard input, prioritize:
     'schema_based_extractor',
   ],
   maxIterations: 5,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.7,
   schema: {
     type: 'object',
@@ -966,7 +965,7 @@ When selecting an element to hover over, prioritize:
     'schema_based_extractor',
   ],
   maxIterations: 5,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.7,
   schema: {
     type: 'object',
@@ -1048,7 +1047,7 @@ The accessibility tree includes information about scrollable containers. Look fo
     'schema_based_extractor',
   ],
   maxIterations: 5,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.7,
   schema: {
     type: 'object',
@@ -1283,7 +1282,6 @@ Remember: **Plan adaptively, execute systematically, validate continuously, and
     'thinking',
   ],
   maxIterations: 15,
-  modelName: () => AIChatPanel.instance().getSelectedModel(),
   temperature: 0.3,
   schema: {
     type: 'object',
@@ -1422,7 +1420,7 @@ Remember to adapt your analysis based on the product category - different attrib
     'get_page_content',
   ],
   maxIterations: 5,
-  modelName: () => AIChatPanel.getMiniModel(),
+  modelName: MODEL_SENTINELS.USE_MINI,
   temperature: 0.2,
   schema: {
     type: 'object',
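The net effect in this file is that agent configs no longer call into AIChatPanel for a model: they either declare a size preference through a sentinel or omit modelName entirely and inherit the main model from the call context. A minimal, hypothetical config in that style — the prompt, tools, and values are placeholders, and only fields visible in the hunks above are used:

```ts
// Hypothetical agent config in the sentinel style introduced by this commit.
import { MODEL_SENTINELS } from '../../core/Constants.js';

const exampleAgentConfig = {
  systemPrompt: 'You verify that a page action had the intended effect...', // placeholder prompt
  tools: ['get_page_content', 'take_screenshot'],
  maxIterations: 3,
  modelName: MODEL_SENTINELS.USE_MINI, // resolved to callCtx.miniModel when the agent executes
  temperature: 0.2,
  schema: {
    type: 'object',
  },
};
```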
