Skip to content

Commit abde8f3

Browse files
authored
Make automodel batch size configurable (#2985)
1 parent 8d5574e commit abde8f3

File tree

3 files changed

+18
-5
lines changed

3 files changed

+18
-5
lines changed

extensions/ql-vscode/src/config.ts

Lines changed: 12 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -702,6 +702,10 @@ export function showQueriesPanel(): boolean {
702702
const MODEL_SETTING = new Setting("model", ROOT_SETTING);
703703
const FLOW_GENERATION = new Setting("flowGeneration", MODEL_SETTING);
704704
const LLM_GENERATION = new Setting("llmGeneration", MODEL_SETTING);
705+
const LLM_GENERATION_BATCH_SIZE = new Setting(
706+
"llmGenerationBatchSize",
707+
MODEL_SETTING,
708+
);
705709
const EXTENSIONS_DIRECTORY = new Setting("extensionsDirectory", MODEL_SETTING);
706710
const SHOW_MULTIPLE_MODELS = new Setting("showMultipleModels", MODEL_SETTING);
707711

@@ -725,6 +729,14 @@ export class ModelConfigListener extends ConfigListener implements ModelConfig {
725729
return !!LLM_GENERATION.getValue<boolean>();
726730
}
727731

732+
/**
733+
* Limits the number of candidates we send to the model in each request to avoid long requests.
734+
* Note that the model may return fewer than this number of candidates.
735+
*/
736+
public get llmGenerationBatchSize(): number {
737+
return LLM_GENERATION_BATCH_SIZE.getValue<number | null>() || 10;
738+
}
739+
728740
public getExtensionsDirectory(languageId: string): string | undefined {
729741
return EXTENSIONS_DIRECTORY.getValue<string>({
730742
languageId,

extensions/ql-vscode/src/model-editor/auto-modeler.ts

Lines changed: 5 additions & 5 deletions
Original file line number · Diff line number · Diff line change
@@ -17,11 +17,7 @@ import { DatabaseItem } from "../databases/local-databases";
1717
import { Mode } from "./shared/mode";
1818
import { CancellationTokenSource } from "vscode";
1919
import { ModelingStore } from "./modeling-store";
20-
21-
// Limit the number of candidates we send to the model in each request
22-
// to avoid long requests.
23-
// Note that the model may return fewer than this number of candidates.
24-
const candidateBatchSize = 20;
20+
import { ModelConfigListener } from "../config";
2521

2622
/**
2723
* The auto-modeler holds state around auto-modeling jobs and allows
@@ -36,6 +32,7 @@ export class AutoModeler {
3632
private readonly app: App,
3733
private readonly cliServer: CodeQLCliServer,
3834
private readonly queryRunner: QueryRunner,
35+
private readonly modelConfig: ModelConfigListener,
3936
private readonly modelingStore: ModelingStore,
4037
private readonly queryStorageDir: string,
4138
private readonly databaseItem: DatabaseItem,
@@ -109,6 +106,9 @@ export class AutoModeler {
109106
cancellationTokenSource: CancellationTokenSource,
110107
): Promise<void> {
111108
void extLogger.log(`Modeling package ${packageName}`);
109+
110+
const candidateBatchSize = this.modelConfig.llmGenerationBatchSize;
111+
112112
await withProgress(async (progress) => {
113113
// Fetch the candidates to send to the model
114114
const allCandidateMethods = getCandidates(mode, methods, modeledMethods);

extensions/ql-vscode/src/model-editor/model-editor-view.ts

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -76,6 +76,7 @@ export class ModelEditorView extends AbstractWebview<
7676
app,
7777
cliServer,
7878
queryRunner,
79+
this.modelConfig,
7980
modelingStore,
8081
queryStorageDir,
8182
databaseItem,

0 commit comments

Comments (0)