Skip to content

Commit 6ea0f67

Browse files
author
PR Bot
committed
feat: add MiniMax as LLM provider
Add MiniMax (M2.7, M2.5, M2.5-highspeed) as a first-class LLM provider using the OpenAI-compatible API at api.minimax.io/v1. - New MiniMaxEngine extending OpenAiEngine with think-tag stripping - Full setup wizard integration (provider selection, model list, API key URL) - Dynamic model fetching via /v1/models endpoint with 7-day cache - Error handling with billing URL and model suggestions - README updated with MiniMax provider config example - 32 unit tests + 3 integration tests
1 parent 40182f2 commit 6ea0f67

File tree

9 files changed

+440
-7
lines changed

9 files changed

+440
-7
lines changed

README.md

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ Create a `.env` file and add OpenCommit config variables there like this:
122122

123123
```env
124124
...
125-
OCO_AI_PROVIDER=<openai (default), anthropic, azure, ollama, gemini, flowise, deepseek, aimlapi>
125+
OCO_AI_PROVIDER=<openai (default), anthropic, azure, ollama, gemini, flowise, deepseek, aimlapi, minimax>
126126
OCO_API_KEY=<your OpenAI API token> // or other LLM provider API token
127127
OCO_API_URL=<may be used to set proxy path to OpenAI api>
128128
OCO_API_CUSTOM_HEADERS=<JSON string of custom HTTP headers to include in API requests>
@@ -235,6 +235,8 @@ oco config set OCO_AI_PROVIDER=azure OCO_API_KEY=<your_azure_api_key> OCO_API_UR
235235
oco config set OCO_AI_PROVIDER=flowise OCO_API_KEY=<your_flowise_api_key> OCO_API_URL=<your_flowise_endpoint>
236236

237237
oco config set OCO_AI_PROVIDER=ollama OCO_API_KEY=<your_ollama_api_key> OCO_API_URL=<your_ollama_endpoint>
238+
239+
oco config set OCO_AI_PROVIDER=minimax OCO_API_KEY=<your_minimax_api_key>
238240
```
239241

240242
### Locale configuration

src/commands/config.ts

Lines changed: 12 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -136,6 +136,8 @@ export const MODEL_LIST = {
136136
],
137137
deepseek: ['deepseek-chat', 'deepseek-reasoner'],
138138

139+
minimax: ['MiniMax-M2.7', 'MiniMax-M2.5', 'MiniMax-M2.5-highspeed'],
140+
139141
// AI/ML API available chat-completion models
140142
// https://api.aimlapi.com/v1/models
141143
aimlapi: [
@@ -593,6 +595,8 @@ const getDefaultModel = (provider: string | undefined): string => {
593595
return MODEL_LIST.aimlapi[0];
594596
case 'openrouter':
595597
return MODEL_LIST.openrouter[0];
598+
case 'minimax':
599+
return MODEL_LIST.minimax[0];
596600
default:
597601
return MODEL_LIST.openai[0];
598602
}
@@ -784,9 +788,10 @@ export const configValidators = {
784788
'groq',
785789
'deepseek',
786790
'aimlapi',
787-
'openrouter'
791+
'openrouter',
792+
'minimax'
788793
].includes(value) || value.startsWith('ollama'),
789-
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise', 'mistral', 'deepseek', 'aimlapi' or 'openai' (default)`
794+
`${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise', 'mistral', 'deepseek', 'aimlapi', 'minimax' or 'openai' (default)`
790795
);
791796

792797
return value;
@@ -844,7 +849,8 @@ export enum OCO_AI_PROVIDER_ENUM {
844849
MLX = 'mlx',
845850
DEEPSEEK = 'deepseek',
846851
AIMLAPI = 'aimlapi',
847-
OPENROUTER = 'openrouter'
852+
OPENROUTER = 'openrouter',
853+
MINIMAX = 'minimax'
848854
}
849855

850856
export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
@@ -857,6 +863,7 @@ export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
857863
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/keys',
858864
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/keys',
859865
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/',
866+
[OCO_AI_PROVIDER_ENUM.MINIMAX]: 'https://platform.minimaxi.com/user-center/basic-information/interface-key',
860867
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
861868
[OCO_AI_PROVIDER_ENUM.MLX]: null,
862869
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
@@ -871,7 +878,8 @@ export const RECOMMENDED_MODELS: Record<string, string> = {
871878
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'mistral-small-latest',
872879
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'deepseek-chat',
873880
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'openai/gpt-4o-mini',
874-
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini'
881+
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini',
882+
[OCO_AI_PROVIDER_ENUM.MINIMAX]: 'MiniMax-M2.7'
875883
}
876884

877885
export type ConfigType = {

src/commands/setup.ts

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,8 @@ const PROVIDER_DISPLAY_NAMES: Record<string, string> = {
3131
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'OpenRouter (Multiple providers)',
3232
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'AI/ML API',
3333
[OCO_AI_PROVIDER_ENUM.AZURE]: 'Azure OpenAI',
34-
[OCO_AI_PROVIDER_ENUM.MLX]: 'MLX (Apple Silicon, local)'
34+
[OCO_AI_PROVIDER_ENUM.MLX]: 'MLX (Apple Silicon, local)',
35+
[OCO_AI_PROVIDER_ENUM.MINIMAX]: 'MiniMax (M2.7, M2.5, fast inference)'
3536
};
3637

3738
const PRIMARY_PROVIDERS = [
@@ -48,7 +49,8 @@ const OTHER_PROVIDERS = [
4849
OCO_AI_PROVIDER_ENUM.OPENROUTER,
4950
OCO_AI_PROVIDER_ENUM.AIMLAPI,
5051
OCO_AI_PROVIDER_ENUM.AZURE,
51-
OCO_AI_PROVIDER_ENUM.MLX
52+
OCO_AI_PROVIDER_ENUM.MLX,
53+
OCO_AI_PROVIDER_ENUM.MINIMAX
5254
];
5355

5456
const NO_API_KEY_PROVIDERS = [

src/engine/minimax.ts

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
import { OpenAI } from 'openai';
2+
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
3+
import { normalizeEngineError } from '../utils/engineErrorHandler';
4+
import { removeContentTags } from '../utils/removeContentTags';
5+
import { tokenCount } from '../utils/tokenCount';
6+
import { OpenAiEngine, OpenAiConfig } from './openAi';
7+
8+
export interface MiniMaxConfig extends OpenAiConfig {}
9+
10+
export class MiniMaxEngine extends OpenAiEngine {
11+
constructor(config: MiniMaxConfig) {
12+
super({
13+
baseURL: 'https://api.minimax.io/v1',
14+
...config
15+
});
16+
}
17+
18+
public generateCommitMessage = async (
19+
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
20+
): Promise<string | null> => {
21+
const params = {
22+
model: this.config.model,
23+
messages,
24+
temperature: 0.01,
25+
top_p: 0.1,
26+
max_tokens: this.config.maxTokensOutput
27+
};
28+
29+
try {
30+
const REQUEST_TOKENS = messages
31+
.map((msg) => tokenCount(msg.content as string) + 4)
32+
.reduce((a, b) => a + b, 0);
33+
34+
if (
35+
REQUEST_TOKENS >
36+
this.config.maxTokensInput - this.config.maxTokensOutput
37+
)
38+
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
39+
40+
const completion = await this.client.chat.completions.create(params);
41+
42+
const message = completion.choices[0].message;
43+
let content = message?.content;
44+
return removeContentTags(content, 'think');
45+
} catch (error) {
46+
throw normalizeEngineError(error, 'minimax', this.config.model);
47+
}
48+
};
49+
}

src/utils/engine.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@ import { MLXEngine } from '../engine/mlx';
1313
import { DeepseekEngine } from '../engine/deepseek';
1414
import { AimlApiEngine } from '../engine/aimlapi';
1515
import { OpenRouterEngine } from '../engine/openrouter';
16+
import { MiniMaxEngine } from '../engine/minimax';
1617

1718
export function parseCustomHeaders(headers: any): Record<string, string> {
1819
let parsedHeaders = {};
@@ -88,6 +89,9 @@ export function getEngine(): AiEngine {
8889
case OCO_AI_PROVIDER_ENUM.OPENROUTER:
8990
return new OpenRouterEngine(DEFAULT_CONFIG);
9091

92+
case OCO_AI_PROVIDER_ENUM.MINIMAX:
93+
return new MiniMaxEngine(DEFAULT_CONFIG);
94+
9195
default:
9296
return new OpenAiEngine(DEFAULT_CONFIG);
9397
}

src/utils/errors.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
1212
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/credits',
1313
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/billing',
1414
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
15+
[OCO_AI_PROVIDER_ENUM.MINIMAX]: 'https://platform.minimaxi.com/user-center/basic-information',
1516
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
1617
[OCO_AI_PROVIDER_ENUM.MLX]: null,
1718
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
@@ -202,6 +203,8 @@ export function getRecommendedModel(provider: string): string | null {
202203
return 'openai/gpt-4o-mini';
203204
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
204205
return 'gpt-4o-mini';
206+
case OCO_AI_PROVIDER_ENUM.MINIMAX:
207+
return 'MiniMax-M2.7';
205208
default:
206209
return null;
207210
}

src/utils/modelCache.ts

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -185,6 +185,30 @@ export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
185185
}
186186
}
187187

188+
export async function fetchMiniMaxModels(apiKey: string): Promise<string[]> {
189+
try {
190+
const response = await fetch('https://api.minimax.io/v1/models', {
191+
headers: {
192+
Authorization: `Bearer ${apiKey}`
193+
}
194+
});
195+
196+
if (!response.ok) {
197+
return MODEL_LIST.minimax;
198+
}
199+
200+
const data = await response.json();
201+
const models = data.data
202+
?.map((m: { id: string }) => m.id)
203+
.filter((id: string) => id.startsWith('MiniMax-'))
204+
.sort();
205+
206+
return models && models.length > 0 ? models : MODEL_LIST.minimax;
207+
} catch {
208+
return MODEL_LIST.minimax;
209+
}
210+
}
211+
188212
export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
189213
try {
190214
const response = await fetch('https://api.deepseek.com/v1/models', {
@@ -273,6 +297,14 @@ export async function fetchModelsForProvider(
273297
}
274298
break;
275299

300+
case OCO_AI_PROVIDER_ENUM.MINIMAX:
301+
if (apiKey) {
302+
models = await fetchMiniMaxModels(apiKey);
303+
} else {
304+
models = MODEL_LIST.minimax;
305+
}
306+
break;
307+
276308
case OCO_AI_PROVIDER_ENUM.AIMLAPI:
277309
models = MODEL_LIST.aimlapi;
278310
break;
Lines changed: 112 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,112 @@
1+
import { OpenAI } from 'openai';

// Mock @clack/prompts to prevent process.exit calls
jest.mock('@clack/prompts', () => ({
  intro: jest.fn(),
  outro: jest.fn()
}));

/**
 * Integration tests for MiniMax engine.
 * These tests verify the MiniMax API works correctly via OpenAI-compatible SDK.
 * This mirrors the exact behavior of MiniMaxEngine which extends OpenAiEngine.
 *
 * Run with: MINIMAX_API_KEY=<key> npm run test -- test/unit/minimax-integration.test.ts
 */
const MINIMAX_API_KEY = process.env.MINIMAX_API_KEY;
// Live-API suite: the whole describe is skipped when no key is set, so
// regular CI runs stay green without network access.
const describeIntegration = MINIMAX_API_KEY ? describe : describe.skip;

describeIntegration('MiniMax Integration (requires MINIMAX_API_KEY)', () => {
  let client: OpenAI;

  beforeAll(() => {
    // Plain OpenAI SDK client pointed at MiniMax — the same configuration
    // MiniMaxEngine's constructor forwards to OpenAiEngine.
    client = new OpenAI({
      apiKey: MINIMAX_API_KEY!,
      baseURL: 'https://api.minimax.io/v1'
    });
  });

  // Happy path against the flagship model: a realistic diff prompt should
  // yield a non-empty string. Only presence/type is asserted — the message
  // text itself is model output and not stable.
  it('should generate a commit message with M2.7', async () => {
    const completion = await client.chat.completions.create({
      model: 'MiniMax-M2.7',
      messages: [
        {
          role: 'system',
          content:
            'You are an expert at writing concise, meaningful git commit messages. Generate a conventional commit message for the provided code diff. Output only the commit message, nothing else.'
        },
        {
          role: 'user',
          content: `diff --git a/src/utils.ts b/src/utils.ts
--- a/src/utils.ts
+++ b/src/utils.ts
@@ -10,6 +10,10 @@ export function formatDate(date: Date): string {
return date.toISOString();
}

+export function formatCurrency(amount: number, currency: string = 'USD'): string {
+  return new Intl.NumberFormat('en-US', { style: 'currency', currency }).format(amount);
+}
+
export function capitalize(str: string): string {`
        }
      ],
      // Same near-deterministic sampling parameters MiniMaxEngine uses.
      temperature: 0.01,
      top_p: 0.1,
      max_tokens: 500
    });

    const content = completion.choices[0].message?.content;
    expect(content).toBeDefined();
    expect(typeof content).toBe('string');
    expect(content!.length).toBeGreaterThan(0);
  }, 30000);

  // Same happy path against the high-throughput model variant.
  it('should generate commit message with M2.5-highspeed', async () => {
    const completion = await client.chat.completions.create({
      model: 'MiniMax-M2.5-highspeed',
      messages: [
        {
          role: 'system',
          content:
            'You are an expert at writing concise git commit messages. Generate a conventional commit message. Output only the commit message.'
        },
        {
          role: 'user',
          content: `diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -1,3 +1,5 @@
# My Project

A simple project.
+
+## Installation`
        }
      ],
      temperature: 0.01,
      top_p: 0.1,
      max_tokens: 500
    });

    const content = completion.choices[0].message?.content;
    expect(content).toBeDefined();
    expect(typeof content).toBe('string');
    expect(content!.length).toBeGreaterThan(0);
  }, 30000);

  // A bogus key must surface as a rejected promise (the SDK throws on
  // non-2xx auth responses) rather than resolving with empty content.
  it('should handle authentication error with invalid API key', async () => {
    const badClient = new OpenAI({
      apiKey: 'invalid-api-key',
      baseURL: 'https://api.minimax.io/v1'
    });

    await expect(
      badClient.chat.completions.create({
        model: 'MiniMax-M2.7',
        messages: [{ role: 'user', content: 'test' }],
        max_tokens: 10
      })
    ).rejects.toThrow();
  }, 30000);
});

0 commit comments

Comments
 (0)