Skip to content

Commit 6982e76

Browse files
committed
fix: improve type safety for max_completion_tokens params
Remove the `Record<string, unknown>` type annotation and let TypeScript infer the params object type, preserving type checking on all properties. Cast to `ChatCompletionCreateParamsNonStreaming` at the `create()` call site to accommodate the SDK's missing `max_completion_tokens` type. Add a unit test for the reasoning-model detection regex.

Signed-off-by: majiayu000 <1835304752@qq.com>
1 parent dc7f7f6 commit 6982e76

File tree

2 files changed

+34
-6
lines changed

2 files changed

+34
-6
lines changed

src/engine/openAi.ts

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -45,16 +45,16 @@ export class OpenAiEngine implements AiEngine {
4545
): Promise<string | null> => {
4646
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
4747

48-
const params: Record<string, unknown> = {
48+
const params = {
4949
model: this.config.model,
5050
messages,
5151
...(isReasoningModel
5252
? { max_completion_tokens: this.config.maxTokensOutput }
5353
: {
54-
temperature: 0,
55-
top_p: 0.1,
56-
max_tokens: this.config.maxTokensOutput
57-
})
54+
temperature: 0,
55+
top_p: 0.1,
56+
max_tokens: this.config.maxTokensOutput
57+
})
5858
};
5959

6060
try {
@@ -68,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
6868
)
6969
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
7070

71-
const completion = await this.client.chat.completions.create(params);
71+
const completion = await this.client.chat.completions.create(
72+
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
73+
);
7274

7375
const message = completion.choices[0].message;
7476
let content = message?.content;

test/unit/openAi.test.ts

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
// Test the reasoning model detection regex used in OpenAiEngine.
2+
// Integration test with the engine is not possible because mistral.ts
3+
// uses require() which is unavailable in the ESM test environment.
4+
const REASONING_MODEL_RE = /^(o[1-9]|gpt-5)/;
5+
6+
describe('OpenAiEngine reasoning model detection', () => {
7+
it.each([
8+
['o1', true],
9+
['o1-preview', true],
10+
['o1-mini', true],
11+
['o3', true],
12+
['o3-mini', true],
13+
['o4-mini', true],
14+
['gpt-5', true],
15+
['gpt-5-nano', true],
16+
['gpt-4o', false],
17+
['gpt-4o-mini', false],
18+
['gpt-4', false],
19+
['gpt-3.5-turbo', false]
20+
])(
21+
'model "%s" isReasoning=%s',
22+
(model, expected) => {
23+
expect(REASONING_MODEL_RE.test(model)).toBe(expected);
24+
}
25+
);
26+
});

0 commit comments

Comments
 (0)