@@ -18650,7 +18650,6 @@ function getI18nLocal(value) {
1865018650
1865118651// src/commands/config.ts
1865218652dotenv.config();
18653- var DEFAULT_MODEL_TOKEN_LIMIT = 4096;
1865418653var validateConfig = (key, condition, validationMessage) => {
1865518654 if (!condition) {
1865618655 ce(
@@ -18682,17 +18681,33 @@ var configValidators = {
1868218681 );
1868318682 return value;
1868418683 },
18685- ["OCO_OPENAI_MAX_TOKENS " /* OCO_OPENAI_MAX_TOKENS */](value) {
18684+ ["OCO_TOKENS_MAX_INPUT " /* OCO_TOKENS_MAX_INPUT */](value) {
1868618685 if (typeof value === "string") {
1868718686 value = parseInt(value);
1868818687 validateConfig(
18689- "OCO_OPENAI_MAX_TOKENS " /* OCO_OPENAI_MAX_TOKENS */,
18688+ "OCO_TOKENS_MAX_INPUT " /* OCO_TOKENS_MAX_INPUT */,
1869018689 !isNaN(value),
1869118690 "Must be a number"
1869218691 );
1869318692 }
1869418693 validateConfig(
18695- "OCO_OPENAI_MAX_TOKENS" /* OCO_OPENAI_MAX_TOKENS */,
18694+ "OCO_TOKENS_MAX_INPUT" /* OCO_TOKENS_MAX_INPUT */,
18695+ value ? typeof value === "number" : void 0,
18696+ "Must be a number"
18697+ );
18698+ return value;
18699+ },
18700+ ["OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */](value) {
18701+ if (typeof value === "string") {
18702+ value = parseInt(value);
18703+ validateConfig(
18704+ "OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */,
18705+ !isNaN(value),
18706+ "Must be a number"
18707+ );
18708+ }
18709+ validateConfig(
18710+ "OCO_TOKENS_MAX_OUTPUT" /* OCO_TOKENS_MAX_OUTPUT */,
1869618711 value ? typeof value === "number" : void 0,
1869718712 "Must be a number"
1869818713 );
@@ -18729,9 +18744,10 @@ var configValidators = {
1872918744 "gpt-3.5-turbo",
1873018745 "gpt-4",
1873118746 "gpt-3.5-turbo-16k",
18732- "gpt-3.5-turbo-0613"
18747+ "gpt-3.5-turbo-0613",
18748+ "gpt-4-1106-preview"
1873318749 ].includes(value),
18734- `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo-16k' (default), 'gpt-3.5-turbo-0613' or 'gpt-3.5-turbo'`
18750+ `${value} is not supported yet, use 'gpt-4', 'gpt-3.5-turbo-16k' (default), 'gpt-3.5-turbo-0613', 'gpt-3.5-turbo' or 'gpt-4-1106-preview '`
1873518751 );
1873618752 return value;
1873718753 },
@@ -18768,7 +18784,8 @@ var configPath = (0, import_path.join)((0, import_os.homedir)(), ".opencommit");
1876818784var getConfig = () => {
1876918785 const configFromEnv = {
1877018786 OCO_OPENAI_API_KEY: process.env.OCO_OPENAI_API_KEY,
18771- OCO_OPENAI_MAX_TOKENS: process.env.OCO_OPENAI_MAX_TOKENS ? Number(process.env.OCO_OPENAI_MAX_TOKENS) : void 0,
18787+ OCO_TOKENS_MAX_INPUT: process.env.OCO_TOKENS_MAX_INPUT ? Number(process.env.OCO_TOKENS_MAX_INPUT) : void 0,
18788+ OCO_TOKENS_MAX_OUTPUT: process.env.OCO_TOKENS_MAX_OUTPUT ? Number(process.env.OCO_TOKENS_MAX_OUTPUT) : void 0,
1877218789 OCO_OPENAI_BASE_PATH: process.env.OCO_OPENAI_BASE_PATH,
1877318790 OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === "true" ? true : false,
1877418791 OCO_EMOJI: process.env.OCO_EMOJI === "true" ? true : false,
@@ -19036,6 +19053,15 @@ var removeDoubleNewlines = (input) => {
1903619053 }
1903719054 return input;
1903819055};
// Extracts the contents of a ```json ... ``` fenced block from an LLM
// response. If no opening fence is present the input is returned unchanged.
// Fixes a ReferenceError in the original: the closing-fence search used the
// undefined identifier `consistency` instead of `input`. Also guards the
// missing-closing-fence case (previously slice(0, -1) silently dropped the
// last character).
var getJSONBlock = (input) => {
  const jsonIndex = input.search("```json");
  if (jsonIndex > -1) {
    // Skip the 7-char "```json" fence plus the newline that follows it.
    input = input.slice(jsonIndex + 8);
    const endJsonIndex = input.search("```");
    if (endJsonIndex > -1) {
      input = input.slice(0, endJsonIndex);
    }
  }
  return input;
};
1903919065var commitlintLLMConfigExists = async () => {
1904019066 let exists;
1904119067 try {
@@ -21899,7 +21925,8 @@ function tokenCount(content) {
2189921925
2190021926// src/engine/openAi.ts
2190121927var config3 = getConfig();
21902- var maxTokens = config3?.OCO_OPENAI_MAX_TOKENS;
21928+ var MAX_TOKENS_OUTPUT = config3?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */;
21929+ var MAX_TOKENS_INPUT = config3?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */;
2190321930var basePath = config3?.OCO_OPENAI_BASE_PATH;
2190421931var apiKey = config3?.OCO_OPENAI_API_KEY;
2190521932var [command, mode] = process.argv.slice(2);
@@ -21932,11 +21959,11 @@ var OpenAi = class {
2193221959 messages,
2193321960 temperature: 0,
2193421961 top_p: 0.1,
21935- max_tokens: maxTokens || 500
21962+ max_tokens: MAX_TOKENS_OUTPUT
2193621963 };
2193721964 try {
2193821965 const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content) + 4).reduce((a2, b6) => a2 + b6, 0);
21939- if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens ) {
21966+ if (REQUEST_TOKENS > MAX_TOKENS_INPUT - MAX_TOKENS_OUTPUT ) {
2194021967 throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
2194121968 }
2194221969 const { data } = await this.openAI.createChatCompletion(params);
@@ -22020,15 +22047,16 @@ var configureCommitlintIntegration = async (force = false) => {
2202022047 const prompts = inferPromptsFromCommitlintConfig(commitLintConfig);
2202122048 const consistencyPrompts = commitlintPrompts.GEN_COMMITLINT_CONSISTENCY_PROMPT(prompts);
2202222049 const engine = getEngine();
22023- let consistency = await engine.generateCommitMessage(consistencyPrompts) || "{}";
22024- prompts.forEach((prompt) => consistency = consistency.replace(prompt, ""));
22025- consistency = removeDoubleNewlines(consistency);
22050+ let consistency2 = await engine.generateCommitMessage(consistencyPrompts) || "{}";
22051+ prompts.forEach((prompt) => consistency2 = consistency2.replace(prompt, ""));
22052+ consistency2 = getJSONBlock(consistency2);
22053+ consistency2 = removeDoubleNewlines(consistency2);
2202622054 const commitlintLLMConfig = {
2202722055 hash,
2202822056 prompts,
2202922057 consistency: {
2203022058 [translation2.localLanguage]: {
22031- ...JSON.parse(consistency )
22059+ ...JSON.parse(consistency2 )
2203222060 }
2203322061 }
2203422062 };
@@ -22127,6 +22155,8 @@ function mergeDiffs(arr, maxStringLength) {
2212722155
2212822156// src/generateCommitMessageFromGitDiff.ts
2212922157var config6 = getConfig();
22158+ var MAX_TOKENS_INPUT2 = config6?.OCO_TOKENS_MAX_INPUT || 4096 /* DEFAULT_MAX_TOKENS_INPUT */;
22159+ var MAX_TOKENS_OUTPUT2 = config6?.OCO_TOKENS_MAX_OUTPUT || 500 /* DEFAULT_MAX_TOKENS_OUTPUT */;
2213022160var generateCommitMessageChatCompletionPrompt = async (diff) => {
2213122161 const INIT_MESSAGES_PROMPT = await getMainCommitPrompt();
2213222162 const chatContextAsCompletionRequest = [...INIT_MESSAGES_PROMPT];
@@ -22136,14 +22166,21 @@ var generateCommitMessageChatCompletionPrompt = async (diff) => {
2213622166 });
2213722167 return chatContextAsCompletionRequest;
2213822168};
// Error identifiers raised while generating commit messages. This mirrors a
// transpiled TypeScript enum: three members are plain string constants, and
// `outputTokensTooHigh` additionally receives a reverse mapping entry
// (message string -> member name), exactly as the original emitted.
var GenerateCommitMessageErrorEnum = ((members) => {
  members.tooMuchTokens = "TOO_MUCH_TOKENS";
  members.internalError = "INTERNAL_ERROR";
  members.emptyMessage = "EMPTY_MESSAGE";
  const tooHighMessage = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${500 /* DEFAULT_MAX_TOKENS_OUTPUT */} tokens.`;
  members.outputTokensTooHigh = tooHighMessage;
  members[tooHighMessage] = "outputTokensTooHigh";
  return members;
})(GenerateCommitMessageErrorEnum || {});
2213922176var ADJUSTMENT_FACTOR = 20;
2214022177var generateCommitMessageByDiff = async (diff) => {
2214122178 try {
2214222179 const INIT_MESSAGES_PROMPT = await getMainCommitPrompt();
2214322180 const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
2214422181 (msg) => tokenCount(msg.content) + 4
2214522182 ).reduce((a2, b6) => a2 + b6, 0);
22146- const MAX_REQUEST_TOKENS = DEFAULT_MODEL_TOKEN_LIMIT - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config6?.OCO_OPENAI_MAX_TOKENS ;
22183+ const MAX_REQUEST_TOKENS = MAX_TOKENS_INPUT2 - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - MAX_TOKENS_OUTPUT2 ;
2214722184 if (tokenCount(diff) >= MAX_REQUEST_TOKENS) {
2214822185 const commitMessagePromises = await getCommitMsgsPromisesFromFileDiffs(
2214922186 diff,
@@ -22198,6 +22235,9 @@ function splitDiff(diff, maxChangeLength) {
2219822235 const lines = diff.split("\n");
2219922236 const splitDiffs = [];
2220022237 let currentDiff = "";
22238+ if (maxChangeLength <= 0) {
22239+ throw new Error(GenerateCommitMessageErrorEnum.outputTokensTooHigh);
22240+ }
2220122241 for (let line of lines) {
2220222242 while (tokenCount(line) > maxChangeLength) {
2220322243 const subLine = line.substring(0, maxChangeLength);
0 commit comments