Skip to content

Commit 9923dab

Browse files
authored
Merge pull request #544 from majiayu000/fix/issue-529-max-completion-tokens
fix: use max_completion_tokens for reasoning models (o1/o3/o4/gpt-5)
2 parents db8a22b + f74ba2d commit 9923dab

File tree

3 files changed

+39
-5
lines changed

3 files changed

+39
-5
lines changed

jest.config.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ const config: Config = {
1919
'<rootDir>/test/e2e/prompt-module/data/'
2020
],
2121
transformIgnorePatterns: [
22-
'node_modules/(?!(cli-testing-library|@clack|cleye)/.*)'
22+
'node_modules/(?!(cli-testing-library|@clack|cleye|chalk)/.*)'
2323
],
2424
transform: {
2525
'^.+\\.(ts|tsx|js|jsx|mjs)$': [

src/engine/openAi.ts

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -43,12 +43,18 @@ export class OpenAiEngine implements AiEngine {
4343
public generateCommitMessage = async (
4444
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
4545
): Promise<string | null> => {
46+
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
47+
4648
const params = {
4749
model: this.config.model,
4850
messages,
49-
temperature: 0,
50-
top_p: 0.1,
51-
max_tokens: this.config.maxTokensOutput
51+
...(isReasoningModel
52+
? { max_completion_tokens: this.config.maxTokensOutput }
53+
: {
54+
temperature: 0,
55+
top_p: 0.1,
56+
max_tokens: this.config.maxTokensOutput
57+
})
5258
};
5359

5460
try {
@@ -62,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
6268
)
6369
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
6470

65-
const completion = await this.client.chat.completions.create(params);
71+
const completion = await this.client.chat.completions.create(
72+
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
73+
);
6674

6775
const message = completion.choices[0].message;
6876
let content = message?.content;

test/unit/openAi.test.ts

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
// Test the reasoning model detection regex used in OpenAiEngine.
2+
// Integration test with the engine is not possible because mistral.ts
3+
// uses require() which is unavailable in the ESM test environment.
4+
const REASONING_MODEL_RE = /^(o[1-9]|gpt-5)/;
5+
6+
describe('OpenAiEngine reasoning model detection', () => {
7+
it.each([
8+
['o1', true],
9+
['o1-preview', true],
10+
['o1-mini', true],
11+
['o3', true],
12+
['o3-mini', true],
13+
['o4-mini', true],
14+
['gpt-5', true],
15+
['gpt-5-nano', true],
16+
['gpt-4o', false],
17+
['gpt-4o-mini', false],
18+
['gpt-4', false],
19+
['gpt-3.5-turbo', false]
20+
])(
21+
'model "%s" isReasoning=%s',
22+
(model, expected) => {
23+
expect(REASONING_MODEL_RE.test(model)).toBe(expected);
24+
}
25+
);
26+
});

0 commit comments

Comments (0)