File tree (Expand / Collapse) — 3 files changed: +39 additions, −5 deletions
lines changed Original file line number Diff line number Diff line change @@ -19,7 +19,7 @@ const config: Config = {
1919 '<rootDir>/test/e2e/prompt-module/data/'
2020 ] ,
2121 transformIgnorePatterns : [
22- 'node_modules/(?!(cli-testing-library|@clack|cleye)/.*)'
22+ 'node_modules/(?!(cli-testing-library|@clack|cleye|chalk )/.*)'
2323 ] ,
2424 transform : {
2525 '^.+\\.(ts|tsx|js|jsx|mjs)$' : [
Original file line number Diff line number Diff line change @@ -43,12 +43,18 @@ export class OpenAiEngine implements AiEngine {
4343 public generateCommitMessage = async (
4444 messages : Array < OpenAI . Chat . Completions . ChatCompletionMessageParam >
4545 ) : Promise < string | null > => {
46+ const isReasoningModel = / ^ ( o [ 1 - 9 ] | g p t - 5 ) / . test ( this . config . model ) ;
47+
4648 const params = {
4749 model : this . config . model ,
4850 messages,
49- temperature : 0 ,
50- top_p : 0.1 ,
51- max_tokens : this . config . maxTokensOutput
51+ ...( isReasoningModel
52+ ? { max_completion_tokens : this . config . maxTokensOutput }
53+ : {
54+ temperature : 0 ,
55+ top_p : 0.1 ,
56+ max_tokens : this . config . maxTokensOutput
57+ } )
5258 } ;
5359
5460 try {
@@ -62,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
6268 )
6369 throw new Error ( GenerateCommitMessageErrorEnum . tooMuchTokens ) ;
6470
65- const completion = await this . client . chat . completions . create ( params ) ;
71+ const completion = await this . client . chat . completions . create (
72+ params as OpenAI . Chat . Completions . ChatCompletionCreateParamsNonStreaming
73+ ) ;
6674
6775 const message = completion . choices [ 0 ] . message ;
6876 let content = message ?. content ;
Original file line number Diff line number Diff line change 1+ // Test the reasoning model detection regex used in OpenAiEngine.
2+ // Integration test with the engine is not possible because mistral.ts
3+ // uses require() which is unavailable in the ESM test environment.
4+ const REASONING_MODEL_RE = / ^ ( o [ 1 - 9 ] | g p t - 5 ) / ;
5+
6+ describe ( 'OpenAiEngine reasoning model detection' , ( ) => {
7+ it . each ( [
8+ [ 'o1' , true ] ,
9+ [ 'o1-preview' , true ] ,
10+ [ 'o1-mini' , true ] ,
11+ [ 'o3' , true ] ,
12+ [ 'o3-mini' , true ] ,
13+ [ 'o4-mini' , true ] ,
14+ [ 'gpt-5' , true ] ,
15+ [ 'gpt-5-nano' , true ] ,
16+ [ 'gpt-4o' , false ] ,
17+ [ 'gpt-4o-mini' , false ] ,
18+ [ 'gpt-4' , false ] ,
19+ [ 'gpt-3.5-turbo' , false ]
20+ ] ) (
21+ 'model "%s" isReasoning=%s' ,
22+ ( model , expected ) => {
23+ expect ( REASONING_MODEL_RE . test ( model ) ) . toBe ( expected ) ;
24+ }
25+ ) ;
26+ } ) ;
You can’t perform that action at this time.
0 commit comments