Skip to content

Commit 3a80d13

Browse files
authored
update comments
1 parent 074e8b2 commit 3a80d13

File tree

2 files changed

+6
-4
lines changed

2 files changed

+6
-4
lines changed

action.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,11 +43,11 @@ inputs:
4343
required: false
4444
default: ''
4545
max-tokens:
46-
description: Maximum tokens to generate (deprecated)
46+
description: The maximum tokens to generate (deprecated)
4747
required: false
4848
default: '200'
4949
max-completion-tokens:
50-
description: Maximum tokens to generate
50+
description: The maximum tokens to generate
5151
required: false
5252
default: ''
5353
temperature:

src/inference.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ export interface InferenceResponse {
3535
}
3636

3737
/**
38-
* Build the token limit params for a chat completion request.
38+
* Build the token limit param according to which input was passed, defaulting to max_tokens.
3939
* Only one of max_tokens or max_completion_tokens will be set.
4040
*/
4141
function buildMaxTokensParam(request: InferenceRequest): {max_tokens?: number; max_completion_tokens?: number} {
@@ -177,7 +177,9 @@ export async function mcpInference(
177177
}
178178

179179
/**
180-
* Wrapper around OpenAI chat.completions.create with response validation.
180+
* Wrapper around OpenAI chat.completions.create with defensive handling for cases where
181+
* the SDK returns a raw string (e.g., unexpected content-type or streaming body) instead of
182+
* a parsed object. Ensures an object with a 'choices' array is returned or throws a descriptive error.
181183
*/
182184
async function chatCompletion(
183185
client: OpenAI,

0 commit comments

Comments
 (0)