diff --git a/jest.config.ts b/jest.config.ts
index 001e0ef9..b71693ee 100644
--- a/jest.config.ts
+++ b/jest.config.ts
@@ -19,7 +19,7 @@ const config: Config = {
     '/test/e2e/prompt-module/data/'
   ],
   transformIgnorePatterns: [
-    'node_modules/(?!(cli-testing-library|@clack|cleye)/.*)'
+    'node_modules/(?!(cli-testing-library|@clack|cleye|chalk)/.*)'
   ],
   transform: {
     '^.+\\.(ts|tsx|js|jsx|mjs)$': [
diff --git a/src/engine/openAi.ts b/src/engine/openAi.ts
index 4d7d4cb8..3c407c32 100644
--- a/src/engine/openAi.ts
+++ b/src/engine/openAi.ts
@@ -43,12 +43,18 @@ export class OpenAiEngine implements AiEngine {
   public generateCommitMessage = async (
     messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
   ): Promise<string | undefined> => {
+    const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
+
     const params = {
       model: this.config.model,
       messages,
-      temperature: 0,
-      top_p: 0.1,
-      max_tokens: this.config.maxTokensOutput
+      ...(isReasoningModel
+        ? { max_completion_tokens: this.config.maxTokensOutput }
+        : {
+            temperature: 0,
+            top_p: 0.1,
+            max_tokens: this.config.maxTokensOutput
+          })
     };
 
     try {
@@ -62,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
       )
         throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
 
-      const completion = await this.client.chat.completions.create(params);
+      const completion = await this.client.chat.completions.create(
+        params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
+      );
 
       const message = completion.choices[0].message;
       let content = message?.content;
diff --git a/test/unit/openAi.test.ts b/test/unit/openAi.test.ts
new file mode 100644
index 00000000..c1defd5f
--- /dev/null
+++ b/test/unit/openAi.test.ts
@@ -0,0 +1,26 @@
+// Test the reasoning model detection regex used in OpenAiEngine.
+// Integration test with the engine is not possible because mistral.ts
+// uses require() which is unavailable in the ESM test environment.
+const REASONING_MODEL_RE = /^(o[1-9]|gpt-5)/;
+
+describe('OpenAiEngine reasoning model detection', () => {
+  it.each([
+    ['o1', true],
+    ['o1-preview', true],
+    ['o1-mini', true],
+    ['o3', true],
+    ['o3-mini', true],
+    ['o4-mini', true],
+    ['gpt-5', true],
+    ['gpt-5-nano', true],
+    ['gpt-4o', false],
+    ['gpt-4o-mini', false],
+    ['gpt-4', false],
+    ['gpt-3.5-turbo', false]
+  ])(
+    'model "%s" isReasoning=%s',
+    (model, expected) => {
+      expect(REASONING_MODEL_RE.test(model)).toBe(expected);
+    }
+  );
+});