Skip to content

Commit 0b5adf1

Browse files
committed
build
1 parent ec699c4 commit 0b5adf1

2 files changed

Lines changed: 110 additions & 30 deletions

File tree

out/cli.cjs

Lines changed: 55 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -18650,7 +18650,6 @@ function getI18nLocal(value) {
1865018650

1865118651
// src/commands/config.ts
1865218652
dotenv.config();
18653-
var DEFAULT_MODEL_TOKEN_LIMIT = 4096;
1865418653
var validateConfig = (key, condition, validationMessage) => {
1865518654
if (!condition) {
1865618655
ce(
@@ -18682,17 +18681,33 @@ var configValidators = {
1868218681
);
1868318682
return value;
1868418683
},
18685-
["OCO_OPENAI_MAX_TOKENS" /* OCO_OPENAI_MAX_TOKENS */](value) {
18684+
["OCO_TOKENS_MAX_INPUT" /* OCO_TOKENS_MAX_INPUT */](value) {
1868618685
if (typeof value === "string") {
1868718686
value = parseInt(value);
1868818687
validateConfig(
18689-
"OCO_OPENAI_MAX_TOKENS" /* OCO_OPENAI_MAX_TOKENS */,
18688+
"OCO_TOKENS_MAX_INPUT" /* OCO_TOKENS_MAX_INPUT */,
1869018689
!isNaN(value),
1869118690
"Must be a number"
1869218691
);
1869318692
}
1869418693
validateConfig(
18695-
"OCO_OPENAI_MAX_TOKENS" /* OCO_OPENAI_MAX_TOKENS */,
18694+
"OCO_TOKENS_MAX_INPUT" /* OCO_TOKENS_MAX_INPUT */,
18695+
value ? typeof value === "number" : void 0,
18696+
"Must be a number"
18697+
);
18698+
return value;
18699+
},
18700+
["OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */](value) {
18701+
if (typeof value === "string") {
18702+
value = parseInt(value);
18703+
validateConfig(
18704+
"OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */,
18705+
!isNaN(value),
18706+
"Must be a number"
18707+
);
18708+
}
18709+
validateConfig(
18710+
"OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */,
1869618711
value ? typeof value === "number" : void 0,
1869718712
"Must be a number"
1869818713
);
@@ -18729,9 +18744,10 @@ var configValidators = {
1872918744
"gpt-3.5-turbo",
1873018745
"gpt-4",
1873118746
"gpt-3.5-turbo-16k",
18732-
"gpt-3.5-turbo-0613"
18747+
"gpt-3.5-turbo-0613",
18748+
"gpt-4-1106-preview"
1873318749
].includes(value),
18734-
`${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo-16k' (default), 'gpt-3.5-turbo-0613' or 'gpt-3.5-turbo'`
18750+
`${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo-16k' (default), 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo' or 'gpt-4-1106-preview'`
1873518751
);
1873618752
return value;
1873718753
},
@@ -18768,7 +18784,8 @@ var configPath = (0, import_path.join)((0, import_os.homedir)(), ".opencommit");
1876818784
var getConfig = () => {
1876918785
const configFromEnv = {
1877018786
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
18771-
OCO_OPENAI_MAX_TOKENS: process.env.OCO_OPENAI_MAX_TOKENS ? Number(process.env.OCO_OPENAI_MAX_TOKENS) : void 0,
18787+
OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT ? Number(process.env.OCO_TOKENS_MAX_INPUT) : void 0,
18788+
OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT ? Number(process.env.OCO_TOKENS_MAX_OUTPUT) : void 0,
1877218789
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
1877318790
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === "true" ? true : false,
1877418791
OCO_EMOJI: process.env.OCO_EMOJI === "true" ? true : false,
@@ -19036,6 +19053,15 @@ var removeDoubleNewlines = (input) => {
1903619053
}
1903719054
return input;
1903819055
};
19056+
// Extracts the contents of the first ```json fenced code block from an LLM
// response; returns the input unchanged when no such fence is present.
var getJSONBlock = (input) => {
  const jsonIndex = input.search("```json");
  if (jsonIndex > -1) {
    // Skip past the opening fence plus its trailing newline ("```json\n" is 8 chars).
    input = input.slice(jsonIndex + 8);
    // BUG FIX: the original read `consistency.search("```")`, referencing an
    // identifier that does not exist in this scope (the caller's variable was
    // renamed to `consistency2`), so any input containing a ```json fence
    // threw a ReferenceError. The closing fence must be searched in `input`.
    const endJsonIndex = input.search("```");
    // NOTE(review): if no closing fence exists, search() returns -1 and
    // slice(0, -1) drops the last character — preserved from the intended
    // original behavior.
    input = input.slice(0, endJsonIndex);
  }
  return input;
};
1903919065
var commitlintLLMConfigExists = async () => {
1904019066
let exists;
1904119067
try {
@@ -21899,7 +21925,8 @@ function tokenCount(content) {
2189921925

2190021926
// src/engine/openAi.ts
2190121927
var config3 = getConfig();
21902-
var maxTokens = config3?.OCO_OPENAI_MAX_TOKENS;
21928+
var MAX_TOKENS_OUTPUT = config3?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */;
21929+
var MAX_TOKENS_INPUT = config3?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */;
2190321930
var basePath = config3?.OCO_OPENAI_BASE_PATH;
2190421931
var apiKey = config3?.OCO_OPENAI_API_KEY;
2190521932
var [command, mode] = process.argv.slice(2);
@@ -21932,11 +21959,11 @@ var OpenAi = class {
2193221959
messages,
2193321960
temperature: 0,
2193421961
top_p: 0.1,
21935-
max_tokens: maxTokens || 500
21962+
max_tokens: MAX_TOKENS_OUTPUT
2193621963
};
2193721964
try {
2193821965
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a2, b6) => a2 + b6, 0);
21939-
if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
21966+
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) {
2194021967
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
2194121968
}
2194221969
const { data } = await this.openAI.createChatCompletion(params);
@@ -22020,15 +22047,16 @@ var configureCommitlintIntegration = async (force = false) => {
2202022047
const prompts = inferPromptsFromCommitlintConfig(commitLintConfig);
2202122048
const consistencyPrompts = commitlintPrompts.GEN_COMMITLINT_CONSISTENCY_PROMPT(prompts);
2202222049
const engine = getEngine();
22023-
let consistency = await engine.generateCommitMessage(consistencyPrompts) || "{}";
22024-
prompts.forEach((prompt) => consistency = consistency.replace(prompt, ""));
22025-
consistency = removeDoubleNewlines(consistency);
22050+
let consistency2 = await engine.generateCommitMessage(consistencyPrompts) || "{}";
22051+
prompts.forEach((prompt) => consistency2 = consistency2.replace(prompt, ""));
22052+
consistency2 = getJSONBlock(consistency2);
22053+
consistency2 = removeDoubleNewlines(consistency2);
2202622054
const commitlintLLMConfig = {
2202722055
hash,
2202822056
prompts,
2202922057
consistency: {
2203022058
[translation2.localLanguage]: {
22031-
...JSON.parse(consistency)
22059+
...JSON.parse(consistency2)
2203222060
}
2203322061
}
2203422062
};
@@ -22127,6 +22155,8 @@ function mergeDiffs(arr, maxStringLength) {
2212722155

2212822156
// src/generateCommitMessageFromGitDiff.ts
2212922157
var config6 = getConfig();
22158+
var MAX_TOKENS_INPUT2 = config6?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */;
22159+
var MAX_TOKENS_OUTPUT2 = config6?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */;
2213022160
var generateCommitMessageChatCompletionPrompt = async (diff) => {
2213122161
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt();
2213222162
const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT];
@@ -22136,14 +22166,21 @@ var generateCommitMessageChatCompletionPrompt = async (diff) => {
2213622166
});
2213722167
return chatContextAsCompletionRequest;
2213822168
};
22169+
// Error codes raised while generating a commit message from a git diff.
// Transpiled-enum shape: string values for each member, plus a reverse
// mapping for the `outputTokensTooHigh` message (as in the original output).
var GenerateCommitMessageErrorEnum = ((members) => {
  members["tooMuchTokens"] = "TOO_MUCH_TOKENS";
  members["internalError"] = "INTERNAL_ERROR";
  members["emptyMessage"] = "EMPTY_MESSAGE";
  const outputTokensTooHighMessage = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${500 /* DEFAULT_MAX_TOKENS_OUTPUT */} tokens.`;
  members["outputTokensTooHigh"] = outputTokensTooHighMessage;
  members[outputTokensTooHighMessage] = "outputTokensTooHigh";
  return members;
})(GenerateCommitMessageErrorEnum || {});
2213922176
var ADJUSTMENT_FACTOR = 20;
2214022177
var generateCommitMessageByDiff = async (diff) => {
2214122178
try {
2214222179
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt();
2214322180
const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
2214422181
(msg) => tokenCount(msg.content) + 4
2214522182
).reduce((a2, b6) => a2 + b6, 0);
22146-
const MAX_REQUEST_TOKENS = DEFAULT_MODEL_TOKEN_LIMIT - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config6?.OCO_OPENAI_MAX_TOKENS;
22183+
const MAX_REQUEST_TOKENS = MAX_TOKENS_INPUT2 - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - MAX_TOKENS_OUTPUT2;
2214722184
if (tokenCount(diff) >= MAX_REQUEST_TOKENS) {
2214822185
const commitMessagePromises = await getCommitMsgsPromisesFromFileDiffs(
2214922186
diff,
@@ -22198,6 +22235,9 @@ function splitDiff(diff, maxChangeLength) {
2219822235
const lines = diff.split("\n");
2219922236
const splitDiffs = [];
2220022237
let currentDiff = "";
22238+
if (maxChangeLength <= 0) {
22239+
throw new Error(GenerateCommitMessageErrorEnum.outputTokensTooHigh);
22240+
}
2220122241
for (let line of lines) {
2220222242
while (tokenCount(line) > maxChangeLength) {
2220322243
const subLine = line.substring(0, maxChangeLength);

out/github-action.cjs

Lines changed: 55 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -24145,7 +24145,6 @@ function getI18nLocal(value) {
2414524145

2414624146
// src/commands/config.ts
2414724147
dotenv.config();
24148-
var DEFAULT_MODEL_TOKEN_LIMIT = 4096;
2414924148
var validateConfig = (key, condition, validationMessage) => {
2415024149
if (!condition) {
2415124150
ce(
@@ -24177,17 +24176,33 @@ var configValidators = {
2417724176
);
2417824177
return value;
2417924178
},
24180-
["OCO_OPENAI_MAX_TOKENS" /* OCO_OPENAI_MAX_TOKENS */](value) {
24179+
["OCO_TOKENS_MAX_INPUT" /* OCO_TOKENS_MAX_INPUT */](value) {
2418124180
if (typeof value === "string") {
2418224181
value = parseInt(value);
2418324182
validateConfig(
24184-
"OCO_OPENAI_MAX_TOKENS" /* OCO_OPENAI_MAX_TOKENS */,
24183+
"OCO_TOKENS_MAX_INPUT" /* OCO_TOKENS_MAX_INPUT */,
2418524184
!isNaN(value),
2418624185
"Must be a number"
2418724186
);
2418824187
}
2418924188
validateConfig(
24190-
"OCO_OPENAI_MAX_TOKENS" /* OCO_OPENAI_MAX_TOKENS */,
24189+
"OCO_TOKENS_MAX_INPUT" /* OCO_TOKENS_MAX_INPUT */,
24190+
value ? typeof value === "number" : void 0,
24191+
"Must be a number"
24192+
);
24193+
return value;
24194+
},
24195+
["OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */](value) {
24196+
if (typeof value === "string") {
24197+
value = parseInt(value);
24198+
validateConfig(
24199+
"OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */,
24200+
!isNaN(value),
24201+
"Must be a number"
24202+
);
24203+
}
24204+
validateConfig(
24205+
"OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */,
2419124206
value ? typeof value === "number" : void 0,
2419224207
"Must be a number"
2419324208
);
@@ -24224,9 +24239,10 @@ var configValidators = {
2422424239
"gpt-3.5-turbo",
2422524240
"gpt-4",
2422624241
"gpt-3.5-turbo-16k",
24227-
"gpt-3.5-turbo-0613"
24242+
"gpt-3.5-turbo-0613",
24243+
"gpt-4-1106-preview"
2422824244
].includes(value),
24229-
`${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo-16k' (default), 'gpt-3.5-turbo-0613' or 'gpt-3.5-turbo'`
24245+
`${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo-16k' (default), 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo' or 'gpt-4-1106-preview'`
2423024246
);
2423124247
return value;
2423224248
},
@@ -24263,7 +24279,8 @@ var configPath = (0, import_path.join)((0, import_os.homedir)(), ".opencommit");
2426324279
var getConfig = () => {
2426424280
const configFromEnv = {
2426524281
OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
24266-
OCO_OPENAI_MAX_TOKENS: process.env.OCO_OPENAI_MAX_TOKENS ? Number(process.env.OCO_OPENAI_MAX_TOKENS) : void 0,
24282+
OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT ? Number(process.env.OCO_TOKENS_MAX_INPUT) : void 0,
24283+
OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT ? Number(process.env.OCO_TOKENS_MAX_OUTPUT) : void 0,
2426724284
OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
2426824285
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === "true" ? true : false,
2426924286
OCO_EMOJI: process.env.OCO_EMOJI === "true" ? true : false,
@@ -24531,6 +24548,15 @@ var removeDoubleNewlines = (input) => {
2453124548
}
2453224549
return input;
2453324550
};
24551+
// Extracts the contents of the first ```json fenced code block from an LLM
// response; returns the input unchanged when no such fence is present.
var getJSONBlock = (input) => {
  const jsonIndex = input.search("```json");
  if (jsonIndex > -1) {
    // Skip past the opening fence plus its trailing newline ("```json\n" is 8 chars).
    input = input.slice(jsonIndex + 8);
    // BUG FIX: the original read `consistency.search("```")`, referencing an
    // identifier that does not exist in this scope (the caller's variable was
    // renamed to `consistency2`), so any input containing a ```json fence
    // threw a ReferenceError. The closing fence must be searched in `input`.
    const endJsonIndex = input.search("```");
    // NOTE(review): if no closing fence exists, search() returns -1 and
    // slice(0, -1) drops the last character — preserved from the intended
    // original behavior.
    input = input.slice(0, endJsonIndex);
  }
  return input;
};
2453424560
var commitlintLLMConfigExists = async () => {
2453524561
let exists;
2453624562
try {
@@ -27394,7 +27420,8 @@ function tokenCount(content) {
2739427420

2739527421
// src/engine/openAi.ts
2739627422
var config3 = getConfig();
27397-
var maxTokens = config3?.OCO_OPENAI_MAX_TOKENS;
27423+
var MAX_TOKENS_OUTPUT = config3?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */;
27424+
var MAX_TOKENS_INPUT = config3?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */;
2739827425
var basePath = config3?.OCO_OPENAI_BASE_PATH;
2739927426
var apiKey = config3?.OCO_OPENAI_API_KEY;
2740027427
var [command, mode] = process.argv.slice(2);
@@ -27427,11 +27454,11 @@ var OpenAi = class {
2742727454
messages,
2742827455
temperature: 0,
2742927456
top_p: 0.1,
27430-
max_tokens: maxTokens || 500
27457+
max_tokens: MAX_TOKENS_OUTPUT
2743127458
};
2743227459
try {
2743327460
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a2, b2) => a2 + b2, 0);
27434-
if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
27461+
if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT) {
2743527462
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
2743627463
}
2743727464
const { data } = await this.openAI.createChatCompletion(params);
@@ -27515,15 +27542,16 @@ var configureCommitlintIntegration = async (force = false) => {
2751527542
const prompts = inferPromptsFromCommitlintConfig(commitLintConfig);
2751627543
const consistencyPrompts = commitlintPrompts.GEN_COMMITLINT_CONSISTENCY_PROMPT(prompts);
2751727544
const engine = getEngine();
27518-
let consistency = await engine.generateCommitMessage(consistencyPrompts) || "{}";
27519-
prompts.forEach((prompt) => consistency = consistency.replace(prompt, ""));
27520-
consistency = removeDoubleNewlines(consistency);
27545+
let consistency2 = await engine.generateCommitMessage(consistencyPrompts) || "{}";
27546+
prompts.forEach((prompt) => consistency2 = consistency2.replace(prompt, ""));
27547+
consistency2 = getJSONBlock(consistency2);
27548+
consistency2 = removeDoubleNewlines(consistency2);
2752127549
const commitlintLLMConfig = {
2752227550
hash,
2752327551
prompts,
2752427552
consistency: {
2752527553
[translation2.localLanguage]: {
27526-
...JSON.parse(consistency)
27554+
...JSON.parse(consistency2)
2752727555
}
2752827556
}
2752927557
};
@@ -27622,6 +27650,8 @@ function mergeDiffs(arr, maxStringLength) {
2762227650

2762327651
// src/generateCommitMessageFromGitDiff.ts
2762427652
var config6 = getConfig();
27653+
var MAX_TOKENS_INPUT2 = config6?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */;
27654+
var MAX_TOKENS_OUTPUT2 = config6?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */;
2762527655
var generateCommitMessageChatCompletionPrompt = async (diff) => {
2762627656
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt();
2762727657
const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT];
@@ -27631,14 +27661,21 @@ var generateCommitMessageChatCompletionPrompt = async (diff) => {
2763127661
});
2763227662
return chatContextAsCompletionRequest;
2763327663
};
27664+
// Error codes raised while generating a commit message from a git diff.
// Transpiled-enum shape: string values for each member, plus a reverse
// mapping for the `outputTokensTooHigh` message (as in the original output).
var GenerateCommitMessageErrorEnum = ((members) => {
  members["tooMuchTokens"] = "TOO_MUCH_TOKENS";
  members["internalError"] = "INTERNAL_ERROR";
  members["emptyMessage"] = "EMPTY_MESSAGE";
  const outputTokensTooHighMessage = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${500 /* DEFAULT_MAX_TOKENS_OUTPUT */} tokens.`;
  members["outputTokensTooHigh"] = outputTokensTooHighMessage;
  members[outputTokensTooHighMessage] = "outputTokensTooHigh";
  return members;
})(GenerateCommitMessageErrorEnum || {});
2763427671
var ADJUSTMENT_FACTOR = 20;
2763527672
var generateCommitMessageByDiff = async (diff) => {
2763627673
try {
2763727674
const INIT_MESSAGES_PROMPT = await getMainCommitPrompt();
2763827675
const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
2763927676
(msg) => tokenCount(msg.content) + 4
2764027677
).reduce((a2, b2) => a2 + b2, 0);
27641-
const MAX_REQUEST_TOKENS = DEFAULT_MODEL_TOKEN_LIMIT - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config6?.OCO_OPENAI_MAX_TOKENS;
27678+
const MAX_REQUEST_TOKENS = MAX_TOKENS_INPUT2 - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - MAX_TOKENS_OUTPUT2;
2764227679
if (tokenCount(diff) >= MAX_REQUEST_TOKENS) {
2764327680
const commitMessagePromises = await getCommitMsgsPromisesFromFileDiffs(
2764427681
diff,
@@ -27693,6 +27730,9 @@ function splitDiff(diff, maxChangeLength) {
2769327730
const lines = diff.split("\n");
2769427731
const splitDiffs = [];
2769527732
let currentDiff = "";
27733+
if (maxChangeLength <= 0) {
27734+
throw new Error(GenerateCommitMessageErrorEnum.outputTokensTooHigh);
27735+
}
2769627736
for (let line of lines) {
2769727737
while (tokenCount(line) > maxChangeLength) {
2769827738
const subLine = line.substring(0, maxChangeLength);

0 commit comments

Comments
 (0)