Merge branch 'dev' into update-readme-and-workflow

This commit is contained in:
Luke Hollenback 2024-04-03 08:47:10 -06:00 committed by GitHub
commit 74d6b7b9e5
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
5 changed files with 1338 additions and 1313 deletions

View file

@@ -9,18 +9,19 @@ permissions: write-all
jobs:
if: '! github.event.pull_request.draft'
code_review:
if: '! github.event.pull_request.draft'
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v3
- name: Code Review
uses: freeedcom/ai-codereviewer@main
uses: lukehollenback/ai-codereviewer@main-luke
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
OPENAI_API_MODEL: "gpt-4-turbo-preview"
OPENAI_API_MODEL: "gpt-3.5-turbo"
exclude: "yarn.lock, dist/**, **/*.json, **/*.md, **/*.yaml, **/*.xml"
custom_prompts: |
Do not worry about the verbosity of variable names, as long as they are somewhat descriptive.
Be sure to call out potential null pointer exceptions.
Be sure to call out concurrency issues and potential race conditions.
Do not worry about things a static analyzer would catch in real-time during development.

View file

@@ -11,10 +11,18 @@ inputs:
description: "OpenAI API model."
required: false
default: "gpt-4"
max_tokens:
description: "Maximum number of tokens that can be generated per analysis."
required: false
default: "700"
exclude:
description: "Glob patterns to exclude files from the diff analysis"
required: false
default: ""
custom_prompts:
description: "Custom commands to augment the agent's prompts with. Each line is an individual command."
required: false
default: ""
runs:
using: "node16"
main: "dist/index.js"

18
dist/index.js vendored
View file

@@ -51,6 +51,7 @@ const minimatch_1 = __importDefault(__nccwpck_require__(2002));
const GITHUB_TOKEN = core.getInput("GITHUB_TOKEN");
const OPENAI_API_KEY = core.getInput("OPENAI_API_KEY");
const OPENAI_API_MODEL = core.getInput("OPENAI_API_MODEL");
const MAX_TOKENS = Number(core.getInput("max_tokens"));
const octokit = new rest_1.Octokit({ auth: GITHUB_TOKEN });
const openai = new openai_1.default({
apiKey: OPENAI_API_KEY,
@@ -85,14 +86,14 @@ function getDiff(owner, repo, pull_number) {
return response.data;
});
}
function analyzeCode(parsedDiff, prDetails) {
function analyzeCode(parsedDiff, prDetails, customPrompts) {
return __awaiter(this, void 0, void 0, function* () {
const comments = [];
for (const file of parsedDiff) {
if (file.to === "/dev/null")
continue; // Ignore deleted files
for (const chunk of file.chunks) {
const prompt = createPrompt(file, chunk, prDetails);
const prompt = createPrompt(file, chunk, prDetails, customPrompts);
const aiResponse = yield getAIResponse(prompt);
if (aiResponse) {
const newComments = createComment(file, chunk, aiResponse);
@@ -105,7 +106,7 @@ function analyzeCode(parsedDiff, prDetails) {
return comments;
});
}
function createPrompt(file, chunk, prDetails) {
function createPrompt(file, chunk, prDetails, customPrompts) {
return `Your task is to review pull requests. Instructions:
- Provide the response in following JSON format: {"reviews": [{"lineNumber": <line_number>, "reviewComment": "<review comment>"}]}
- Do not give positive comments or compliments.
@@ -113,6 +114,7 @@ function createPrompt(file, chunk, prDetails) {
- Write the comment in GitHub Markdown format.
- Use the given description only for the overall context and only comment the code.
- IMPORTANT: NEVER suggest adding comments to the code.
${customPrompts}
Review the following code diff in the file "${file.to}" and take the pull request title and description into account when writing the response.
@@ -140,13 +142,13 @@ function getAIResponse(prompt) {
const queryConfig = {
model: OPENAI_API_MODEL,
temperature: 0.2,
max_tokens: 700,
max_tokens: MAX_TOKENS,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,
};
try {
const response = yield openai.chat.completions.create(Object.assign(Object.assign(Object.assign({}, queryConfig), (OPENAI_API_MODEL === "gpt-4-1106-preview"
const response = yield openai.chat.completions.create(Object.assign(Object.assign(Object.assign({}, queryConfig), (OPENAI_API_MODEL === "gpt-4-turbo-preview" || OPENAI_API_MODEL === "gpt-4-turbo" || OPENAI_API_MODEL === "gpt-3.5-turbo" || OPENAI_API_MODEL === "gpt-4-0125-preview" || OPENAI_API_MODEL === "gpt-4-1106-preview" || OPENAI_API_MODEL === "gpt-3.5-turbo-0125" || OPENAI_API_MODEL === "gpt-3.5-turbo-1106"
? { response_format: { type: "json_object" } }
: {})), { messages: [
{
@@ -155,6 +157,7 @@ function getAIResponse(prompt) {
},
] }));
const res = ((_b = (_a = response.choices[0].message) === null || _a === void 0 ? void 0 : _a.content) === null || _b === void 0 ? void 0 : _b.trim()) || "{}";
console.log(`Trimmed Response: ${res}`);
return JSON.parse(res).reviews;
}
catch (error) {
@@ -225,7 +228,10 @@ function main() {
const filteredDiff = parsedDiff.filter((file) => {
return !excludePatterns.some((pattern) => { var _a; return (0, minimatch_1.default)((_a = file.to) !== null && _a !== void 0 ? _a : "", pattern); });
});
const comments = yield analyzeCode(filteredDiff, prDetails);
const customPrompts = core.getMultilineInput("custom_prompts")
.map(customPrompt => `- ${customPrompt}`)
.join("\n");
const comments = yield analyzeCode(filteredDiff, prDetails, customPrompts);
if (comments.length > 0) {
yield createReviewComment(prDetails.owner, prDetails.repo, prDetails.pull_number, comments);
}

2
dist/index.js.map vendored

File diff suppressed because one or more lines are too long

View file

@@ -8,6 +8,7 @@ import minimatch from "minimatch";
const GITHUB_TOKEN: string = core.getInput("GITHUB_TOKEN");
const OPENAI_API_KEY: string = core.getInput("OPENAI_API_KEY");
const OPENAI_API_MODEL: string = core.getInput("OPENAI_API_MODEL");
const MAX_TOKENS: number = Number(core.getInput("max_tokens"));
const octokit = new Octokit({ auth: GITHUB_TOKEN });
@@ -58,14 +59,15 @@ async function getDiff(
async function analyzeCode(
parsedDiff: File[],
prDetails: PRDetails
prDetails: PRDetails,
customPrompts: string
): Promise<Array<{ body: string; path: string; line: number }>> {
const comments: Array<{ body: string; path: string; line: number }> = [];
for (const file of parsedDiff) {
if (file.to === "/dev/null") continue; // Ignore deleted files
for (const chunk of file.chunks) {
const prompt = createPrompt(file, chunk, prDetails);
const prompt = createPrompt(file, chunk, prDetails, customPrompts);
const aiResponse = await getAIResponse(prompt);
if (aiResponse) {
const newComments = createComment(file, chunk, aiResponse);
@@ -78,7 +80,7 @@ async function analyzeCode(
return comments;
}
function createPrompt(file: File, chunk: Chunk, prDetails: PRDetails): string {
function createPrompt(file: File, chunk: Chunk, prDetails: PRDetails, customPrompts: string): string {
return `Your task is to review pull requests. Instructions:
- Provide the response in following JSON format: {"reviews": [{"lineNumber": <line_number>, "reviewComment": "<review comment>"}]}
- Do not give positive comments or compliments.
@@ -86,6 +88,7 @@ function createPrompt(file: File, chunk: Chunk, prDetails: PRDetails): string {
- Write the comment in GitHub Markdown format.
- Use the given description only for the overall context and only comment the code.
- IMPORTANT: NEVER suggest adding comments to the code.
${customPrompts}
Review the following code diff in the file "${
file.to
@@ -117,7 +120,7 @@ async function getAIResponse(prompt: string): Promise<Array<{
const queryConfig = {
model: OPENAI_API_MODEL,
temperature: 0.2,
max_tokens: 700,
max_tokens: MAX_TOKENS,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,
@@ -127,7 +130,7 @@ async function getAIResponse(prompt: string): Promise<Array<{
const response = await openai.chat.completions.create({
...queryConfig,
// return JSON if the model supports it:
...(OPENAI_API_MODEL === "gpt-4-1106-preview"
...(OPENAI_API_MODEL === "gpt-4-turbo-preview" || OPENAI_API_MODEL === "gpt-4-turbo" || OPENAI_API_MODEL === "gpt-3.5-turbo" || OPENAI_API_MODEL === "gpt-4-0125-preview" || OPENAI_API_MODEL === "gpt-4-1106-preview" || OPENAI_API_MODEL === "gpt-3.5-turbo-0125" || OPENAI_API_MODEL === "gpt-3.5-turbo-1106"
? { response_format: { type: "json_object" } }
: {}),
messages: [
@@ -139,6 +142,9 @@ async function getAIResponse(prompt: string): Promise<Array<{
});
const res = response.choices[0].message?.content?.trim() || "{}";
console.log(`Trimmed Response: ${res}`);
return JSON.parse(res).reviews;
} catch (error) {
console.error("Error:", error);
@@ -232,7 +238,11 @@ async function main() {
);
});
const comments = await analyzeCode(filteredDiff, prDetails);
const customPrompts = core.getMultilineInput("custom_prompts")
.map(customPrompt => `- ${customPrompt}`)
.join("\n")
const comments = await analyzeCode(filteredDiff, prDetails, customPrompts);
if (comments.length > 0) {
await createReviewComment(
prDetails.owner,