@@ -2,13 +2,14 @@ import {readFileSync} from 'node:fs';
 import {z} from 'zod';
 import {prepareContextFilesMessage} from '../../orchestration/codegen.js';
 import {Environment} from '../../configuration/environment.js';
+import {IndividualAssessmentState, LlmResponseFile, Usage} from '../../shared-interfaces.js';
 import {
-  IndividualAssessment,
-  IndividualAssessmentState,
-  LlmResponseFile,
-  SkippedIndividualAssessment,
-} from '../../shared-interfaces.js';
-import {AutoRateResult, getCoefficient, MAX_RATING} from './auto-rate-shared.js';
+  AutoRateResult,
+  ExecutorAutoRateResponse,
+  getCoefficient,
+  MAX_RATING,
+  MIN_RATING,
+} from './auto-rate-shared.js';
 import {GenkitRunner} from '../../codegen/genkit/genkit-runner.js';
 import defaultCodeRaterPrompt from './code-rating-prompt.js';
 import {RatingsResult} from '../rating-types.js';
@@ -46,13 +47,7 @@ export async function autoRateCode(
   appPrompt: string,
   ratingsResult: RatingsResult,
 ): Promise<AutoRateResult> {
-  const contextMessage = prepareContextFilesMessage(
-    files.map(o => ({
-      relativePath: o.filePath,
-      content: o.code,
-    })),
-  );
-
+  const contextFiles = files.map(o => ({relativePath: o.filePath, content: o.code}));
   let promptText: string;
 
   if (environment.codeRatingPromptPath) {
@@ -80,32 +75,56 @@ export async function autoRateCode(
     SAFETY_WEB_RESULTS_JSON: safetyWebResultsJson,
   }).result;
 
-  const result = await llm.generateConstrained({
-    abortSignal,
-    messages: contextMessage ? [contextMessage] : [],
-    model,
-    prompt,
-    skipMcp: true,
-    schema: z.object({
-      rating: z.number().describe(`Rating from 1-${MAX_RATING}. Best is ${MAX_RATING}.`),
-      summary: z.string().describe('Summary of the overall code quality.'),
-      categories: z.array(
-        z.object({
-          name: z.string().describe('Category name'),
-          message: z.string().describe('Short description of the problem.'),
-        }),
-      ),
-    }),
-  });
+  let output: ExecutorAutoRateResponse;
+  let usage: Usage | null;
+
+  if (environment.executor.autoRateCode) {
+    output = await environment.executor.autoRateCode(
+      {
+        ratingPrompt: prompt,
+        files: contextFiles,
+        minRating: MIN_RATING,
+        maxRating: MAX_RATING,
+      },
+      abortSignal,
+    );
+    usage = output.usage || null;
+  } else {
+    // TODO(crisbeto): move this into the local executor once
+    // `Executor.autoRateVisuals` becomes a required method.
+    const contextMessage = prepareContextFilesMessage(contextFiles);
+    const result = await llm.generateConstrained({
+      abortSignal,
+      messages: contextMessage ? [contextMessage] : [],
+      model,
+      prompt,
+      skipMcp: true,
+      schema: z.object({
+        rating: z
+          .number()
+          .describe(`Rating from ${MIN_RATING}-${MAX_RATING}. Best is ${MAX_RATING}.`),
+        summary: z.string().describe('Summary of the overall code quality.'),
+        categories: z.array(
+          z.object({
+            name: z.string().describe('Category name'),
+            message: z.string().describe('Short description of the problem.'),
+          }),
+        ),
+      }),
+    });
+
+    output = result.output!;
+    usage = result.usage || null;
+  }
 
   return {
-    coefficient: getCoefficient(result.output!.rating),
+    coefficient: getCoefficient(output.rating, MAX_RATING),
     usage: {
-      inputTokens: result.usage?.inputTokens ?? 0,
-      outputTokens: result.usage?.outputTokens ?? 0,
-      totalTokens: result.usage?.totalTokens ?? 0,
-      thinkingTokens: result.usage?.thinkingTokens ?? 0,
+      inputTokens: usage?.inputTokens ?? 0,
+      outputTokens: usage?.outputTokens ?? 0,
+      totalTokens: usage?.totalTokens ?? 0,
+      thinkingTokens: usage?.thinkingTokens ?? 0,
     },
-    details: result.output!,
+    details: output,
   };
 }
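
For reference, a minimal sketch of what a custom executor's `autoRateCode` hook could look like, inferred only from how it is called in the diff above. The request/response field names (ratingPrompt, files, minRating, maxRating, rating, summary, categories, usage) come from the change itself; the `AutoRateCodeRequest` and `ExampleExecutor` names and the stub body are illustrative assumptions, not part of this change.

    // Shapes mirroring what autoRateCode() above passes in and reads back.
    // The real ExecutorAutoRateResponse lives in './auto-rate-shared.js'; this
    // local copy is an assumption made so the example is self-contained.
    interface AutoRateCodeRequest {
      ratingPrompt: string;
      files: {relativePath: string; content: string}[];
      minRating: number;
      maxRating: number;
    }

    interface ExecutorAutoRateResponse {
      rating: number;
      summary: string;
      categories: {name: string; message: string}[];
      usage?: {
        inputTokens?: number;
        outputTokens?: number;
        totalTokens?: number;
        thinkingTokens?: number;
      };
    }

    // Hypothetical executor; the actual Executor interface may differ.
    class ExampleExecutor {
      async autoRateCode(
        request: AutoRateCodeRequest,
        abortSignal: AbortSignal,
      ): Promise<ExecutorAutoRateResponse> {
        abortSignal.throwIfAborted();
        // A real implementation would rate request.files with its own model and
        // keep the score within [request.minRating, request.maxRating]; this
        // stub just returns the top rating with no findings.
        return {
          rating: request.maxRating,
          summary: 'Stub rating; replace with a real model call.',
          categories: [],
        };
      }
    }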