Skip to content

Commit e608d2b

Browse files
authored
update dist
1 parent 27965bc commit e608d2b

File tree

4 files changed

+33
-35
lines changed

4 files changed

+33
-35
lines changed

README.md

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -140,12 +140,12 @@ modelParameters:
140140
temperature: 0.7
141141
```
142142

143-
| Key | Type | Description |
144-
| --------------------- | ------ | -------------------------------------------------------------- |
145-
| `maxCompletionTokens` | number | The maximum number of tokens to generate |
146-
| `maxTokens` | number | The maximum number of tokens to generate (deprecated) |
147-
| `temperature` | number | The sampling temperature to use (0-1) |
148-
| `topP` | number | The nucleus sampling parameter to use (0-1) |
143+
| Key | Type | Description |
144+
| --------------------- | ------ | ----------------------------------------------------- |
145+
| `maxCompletionTokens` | number | The maximum number of tokens to generate |
146+
| `maxTokens` | number | The maximum number of tokens to generate (deprecated) |
147+
| `temperature` | number | The sampling temperature to use (0-1) |
148+
| `topP` | number | The nucleus sampling parameter to use (0-1) |
149149

150150
> [!NOTE]
151151
> Parameters set in `modelParameters` take precedence over the corresponding action inputs.
@@ -303,24 +303,24 @@ perform actions like searching issues and PRs.
303303
Various inputs are defined in [`action.yml`](action.yml) to let you configure
304304
the action:
305305

306-
| Name | Description | Default |
307-
| -------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------ |
308-
| `token` | Token to use for inference. Typically the GITHUB_TOKEN secret | `github.token` |
309-
| `prompt` | The prompt to send to the model | N/A |
310-
| `prompt-file` | Path to a file containing the prompt (supports .txt and .prompt.yml formats). If both `prompt` and `prompt-file` are provided, `prompt-file` takes precedence | `""` |
311-
| `input` | Template variables in YAML format for .prompt.yml files (e.g., `var1: value1` on separate lines) | `""` |
312-
| `file_input` | Template variables in YAML where values are file paths. The file contents are read and used for templating | `""` |
313-
| `system-prompt` | The system prompt to send to the model | `"You are a helpful assistant"` |
314-
| `system-prompt-file` | Path to a file containing the system prompt. If both `system-prompt` and `system-prompt-file` are provided, `system-prompt-file` takes precedence | `""` |
315-
| `model` | The model to use for inference. Must be available in the [GitHub Models](https://github.com/marketplace?type=models) catalog | `openai/gpt-4o` |
316-
| `endpoint` | The endpoint to use for inference. If you're running this as part of an org, you should probably use the org-specific Models endpoint | `https://models.github.ai/inference` |
317-
| `max-tokens` | The maximum number of tokens to generate (deprecated, use `max-completion-tokens` instead) | 200 |
318-
| `max-completion-tokens` | The maximum number of tokens to generate | `""` |
319-
| `temperature` | The sampling temperature to use (0-1) | `""` |
320-
| `top-p` | The nucleus sampling parameter to use (0-1) | `""` |
321-
| `enable-github-mcp` | Enable Model Context Protocol integration with GitHub tools | `false` |
322-
| `github-mcp-token` | Token to use for GitHub MCP server (defaults to the main token if not specified). | `""` |
323-
| `custom-headers` | Custom HTTP headers to include in API requests. Supports both YAML format (`header1: value1`) and JSON format (`{"header1": "value1"}`). Useful for API Management platforms, rate limiting, and request tracking. | `""` |
306+
| Name | Description | Default |
307+
| ----------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------ |
308+
| `token` | Token to use for inference. Typically the GITHUB_TOKEN secret | `github.token` |
309+
| `prompt` | The prompt to send to the model | N/A |
310+
| `prompt-file` | Path to a file containing the prompt (supports .txt and .prompt.yml formats). If both `prompt` and `prompt-file` are provided, `prompt-file` takes precedence | `""` |
311+
| `input` | Template variables in YAML format for .prompt.yml files (e.g., `var1: value1` on separate lines) | `""` |
312+
| `file_input` | Template variables in YAML where values are file paths. The file contents are read and used for templating | `""` |
313+
| `system-prompt` | The system prompt to send to the model | `"You are a helpful assistant"` |
314+
| `system-prompt-file` | Path to a file containing the system prompt. If both `system-prompt` and `system-prompt-file` are provided, `system-prompt-file` takes precedence | `""` |
315+
| `model` | The model to use for inference. Must be available in the [GitHub Models](https://github.com/marketplace?type=models) catalog | `openai/gpt-4o` |
316+
| `endpoint` | The endpoint to use for inference. If you're running this as part of an org, you should probably use the org-specific Models endpoint | `https://models.github.ai/inference` |
317+
| `max-tokens` | The maximum number of tokens to generate (deprecated, use `max-completion-tokens` instead) | 200 |
318+
| `max-completion-tokens` | The maximum number of tokens to generate | `""` |
319+
| `temperature` | The sampling temperature to use (0-1) | `""` |
320+
| `top-p` | The nucleus sampling parameter to use (0-1) | `""` |
321+
| `enable-github-mcp` | Enable Model Context Protocol integration with GitHub tools | `false` |
322+
| `github-mcp-token` | Token to use for GitHub MCP server (defaults to the main token if not specified). | `""` |
323+
| `custom-headers` | Custom HTTP headers to include in API requests. Supports both YAML format (`header1: value1`) and JSON format (`{"header1": "value1"}`). Useful for API Management platforms, rate limiting, and request tracking. | `""` |
324324

325325
## Outputs
326326

dist/index.js

Lines changed: 1 addition & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

dist/index.js.map

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/inference.ts

Lines changed: 7 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import * as core from '@actions/core'
22
import OpenAI from 'openai'
3-
import { GitHubMCPClient, executeToolCalls, ToolCall } from './mcp.js'
3+
import {GitHubMCPClient, executeToolCalls, ToolCall} from './mcp.js'
44

55
interface ChatMessage {
66
role: 'system' | 'user' | 'assistant' | 'tool'
@@ -10,15 +10,15 @@ interface ChatMessage {
1010
}
1111

1212
export interface InferenceRequest {
13-
messages: Array<{ role: 'system' | 'user' | 'assistant' | 'tool'; content: string }>
13+
messages: Array<{role: 'system' | 'user' | 'assistant' | 'tool'; content: string}>
1414
modelName: string
1515
maxTokens?: number // Deprecated
1616
maxCompletionTokens?: number
1717
endpoint: string
1818
token: string
1919
temperature?: number
2020
topP?: number
21-
responseFormat?: { type: 'json_schema'; json_schema: unknown } // Processed response format for the API
21+
responseFormat?: {type: 'json_schema'; json_schema: unknown} // Processed response format for the API
2222
customHeaders?: Record<string, string> // Custom HTTP headers to include in API requests
2323
}
2424

@@ -34,17 +34,16 @@ export interface InferenceResponse {
3434
}>
3535
}
3636

37-
3837
/**
3938
* Build according to what input was passed, default to max_tokens.
4039
* Only one of max_tokens or max_completion_tokens will be set.
4140
*/
42-
function buildMaxTokensParam(request: InferenceRequest): { max_tokens?: number; max_completion_tokens?: number } {
41+
function buildMaxTokensParam(request: InferenceRequest): {max_tokens?: number; max_completion_tokens?: number} {
4342
if (request.maxCompletionTokens != null) {
44-
return { max_completion_tokens: request.maxCompletionTokens }
43+
return {max_completion_tokens: request.maxCompletionTokens}
4544
}
4645
if (request.maxTokens != null) {
47-
return { max_tokens: request.maxTokens }
46+
return {max_tokens: request.maxTokens}
4847
}
4948
return {}
5049
}
@@ -137,7 +136,7 @@ export async function mcpInference(
137136
messages.push({
138137
role: 'assistant',
139138
content: modelResponse || '',
140-
...(toolCalls && { tool_calls: toolCalls as ToolCall[] }),
139+
...(toolCalls && {tool_calls: toolCalls as ToolCall[]}),
141140
})
142141

143142
if (!toolCalls || toolCalls.length === 0) {

0 commit comments

Comments (0)