From 433f267f60fbe104ae1b5b3a1199ded6d17abd9e Mon Sep 17 00:00:00 2001
From: Alejandro Ramirez
Date: Mon, 24 Feb 2025 08:57:08 -0500
Subject: [PATCH] Add token limit

---
 src/main.ts | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)

diff --git a/src/main.ts b/src/main.ts
index dcbb9ee..0ab388f 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -9,6 +9,7 @@ const GITHUB_TOKEN: string = core.getInput("GITHUB_TOKEN");
 const OPENAI_API_KEY: string = core.getInput("OPENAI_API_KEY");
 const OPENAI_API_MODEL: string = core.getInput("OPENAI_API_MODEL");
 const MAX_FILES: number = 25;
+const MAX_TOKENS: number = 4096;
 
 const octokit = new Octokit({ auth: GITHUB_TOKEN });
 
@@ -145,14 +146,36 @@ ${chunk.changes
 `;
 }
 
+// Rough estimation of tokens (4 chars ~= 1 token)
+function estimateTokens(text: string): number {
+  return Math.ceil(text.length / 4);
+}
+
 async function getAIResponse(prompt: string): Promise | null> {
+  // Estimate prompt tokens and ensure we don't exceed model limits
+  const estimatedPromptTokens = estimateTokens(prompt);
+  const maxResponseTokens = 700;
+
+  // If prompt is too long, truncate it while keeping essential parts
+  if (estimatedPromptTokens + maxResponseTokens > MAX_TOKENS) {
+    const allowedPromptTokens = MAX_TOKENS - maxResponseTokens;
+    const truncateAt = allowedPromptTokens * 4; // Convert back to characters
+
+    // Keep the beginning instructions and truncate the diff part
+    const parts = prompt.split("Git diff to review:");
+    if (parts.length === 2) {
+      const truncatedDiff = parts[1].slice(-truncateAt);
+      prompt = parts[0] + "Git diff to review:" + truncatedDiff;
+    }
+  }
+
   const queryConfig = {
     model: OPENAI_API_MODEL,
     temperature: 0.2,
-    max_tokens: 700,
+    max_tokens: maxResponseTokens,
    top_p: 1,
    frequency_penalty: 0,
    presence_penalty: 0,