Skip to content

Commit 5a153c3

Browse files
committed
fix: improve type safety for max_completion_tokens params
Remove the Record<string, unknown> type annotation to let TypeScript infer the params object type, preserving type checking on all properties. Cast to ChatCompletionCreateParamsNonStreaming at the create() call site to accommodate the max_completion_tokens property that is missing from the SDK's types. Add a unit test for the reasoning-model detection regex.

Signed-off-by: majiayu000 <1835304752@qq.com>
1 parent 4d8cdf5 commit 5a153c3

File tree

4 files changed

+56
-18
lines changed

4 files changed

+56
-18
lines changed

out/cli.cjs

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -67158,18 +67158,23 @@ var { OpenAIError: OpenAIError2, APIError: APIError4, APIConnectionError: APICon
6715867158
var OpenAiEngine = class {
6715967159
constructor(config7) {
6716067160
this.generateCommitMessage = async (messages) => {
67161+
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
6716167162
const params = {
6716267163
model: this.config.model,
6716367164
messages,
67164-
temperature: 0,
67165-
top_p: 0.1,
67166-
max_tokens: this.config.maxTokensOutput
67165+
...isReasoningModel ? { max_completion_tokens: this.config.maxTokensOutput } : {
67166+
temperature: 0,
67167+
top_p: 0.1,
67168+
max_tokens: this.config.maxTokensOutput
67169+
}
6716767170
};
6716867171
try {
6716967172
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a4, b7) => a4 + b7, 0);
6717067173
if (REQUEST_TOKENS > this.config.maxTokensInput - this.config.maxTokensOutput)
6717167174
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
67172-
const completion = await this.client.chat.completions.create(params);
67175+
const completion = await this.client.chat.completions.create(
67176+
params
67177+
);
6717367178
const message = completion.choices[0].message;
6717467179
let content = message?.content;
6717567180
return removeContentTags(content, "think");
@@ -67277,8 +67282,8 @@ var MLXEngine = class {
6727767282
var DeepseekEngine = class extends OpenAiEngine {
6727867283
constructor(config7) {
6727967284
super({
67280-
...config7,
67281-
baseURL: "https://api.deepseek.com/v1"
67285+
baseURL: "https://api.deepseek.com/v1",
67286+
...config7
6728267287
});
6728367288
// Identical method from OpenAiEngine, re-implemented here
6728467289
this.generateCommitMessage = async (messages) => {

out/github-action.cjs

Lines changed: 11 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -87943,18 +87943,23 @@ var { OpenAIError: OpenAIError2, APIError: APIError4, APIConnectionError: APICon
8794387943
var OpenAiEngine = class {
8794487944
constructor(config6) {
8794587945
this.generateCommitMessage = async (messages) => {
87946+
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
8794687947
const params = {
8794787948
model: this.config.model,
8794887949
messages,
87949-
temperature: 0,
87950-
top_p: 0.1,
87951-
max_tokens: this.config.maxTokensOutput
87950+
...isReasoningModel ? { max_completion_tokens: this.config.maxTokensOutput } : {
87951+
temperature: 0,
87952+
top_p: 0.1,
87953+
max_tokens: this.config.maxTokensOutput
87954+
}
8795287955
};
8795387956
try {
8795487957
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a4, b4) => a4 + b4, 0);
8795587958
if (REQUEST_TOKENS > this.config.maxTokensInput - this.config.maxTokensOutput)
8795687959
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
87957-
const completion = await this.client.chat.completions.create(params);
87960+
const completion = await this.client.chat.completions.create(
87961+
params
87962+
);
8795887963
const message = completion.choices[0].message;
8795987964
let content = message?.content;
8796087965
return removeContentTags(content, "think");
@@ -88062,8 +88067,8 @@ var MLXEngine = class {
8806288067
var DeepseekEngine = class extends OpenAiEngine {
8806388068
constructor(config6) {
8806488069
super({
88065-
...config6,
88066-
baseURL: "https://api.deepseek.com/v1"
88070+
baseURL: "https://api.deepseek.com/v1",
88071+
...config6
8806788072
});
8806888073
// Identical method from OpenAiEngine, re-implemented here
8806988074
this.generateCommitMessage = async (messages) => {

src/engine/openAi.ts

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -38,16 +38,16 @@ export class OpenAiEngine implements AiEngine {
3838
): Promise<string | null> => {
3939
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
4040

41-
const params: Record<string, unknown> = {
41+
const params = {
4242
model: this.config.model,
4343
messages,
4444
...(isReasoningModel
4545
? { max_completion_tokens: this.config.maxTokensOutput }
4646
: {
47-
temperature: 0,
48-
top_p: 0.1,
49-
max_tokens: this.config.maxTokensOutput
50-
})
47+
temperature: 0,
48+
top_p: 0.1,
49+
max_tokens: this.config.maxTokensOutput
50+
})
5151
};
5252

5353
try {
@@ -61,7 +61,9 @@ export class OpenAiEngine implements AiEngine {
6161
)
6262
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
6363

64-
const completion = await this.client.chat.completions.create(params);
64+
const completion = await this.client.chat.completions.create(
65+
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
66+
);
6567

6668
const message = completion.choices[0].message;
6769
let content = message?.content;

test/unit/openAi.test.ts

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
// Test the reasoning model detection regex used in OpenAiEngine.
2+
// Integration test with the engine is not possible because mistral.ts
3+
// uses require() which is unavailable in the ESM test environment.
4+
const REASONING_MODEL_RE = /^(o[1-9]|gpt-5)/;
5+
6+
describe('OpenAiEngine reasoning model detection', () => {
7+
it.each([
8+
['o1', true],
9+
['o1-preview', true],
10+
['o1-mini', true],
11+
['o3', true],
12+
['o3-mini', true],
13+
['o4-mini', true],
14+
['gpt-5', true],
15+
['gpt-5-nano', true],
16+
['gpt-4o', false],
17+
['gpt-4o-mini', false],
18+
['gpt-4', false],
19+
['gpt-3.5-turbo', false]
20+
])(
21+
'model "%s" isReasoning=%s',
22+
(model, expected) => {
23+
expect(REASONING_MODEL_RE.test(model)).toBe(expected);
24+
}
25+
);
26+
});

0 commit comments

Comments (0)