Use max_completion_tokens instead of max_tokens

This commit is contained in:
revastanislav 2025-08-22 12:37:50 +03:00
commit 4d5c91f15d
2 changed files with 2 additions and 2 deletions

2
dist/index.js vendored
View file

@@ -151,7 +151,7 @@ function getAIResponse(prompt) {
return {
model: OPENAI_API_MODEL,
temperature: 0.2,
max_tokens: 1400,
max_completion_tokens: 1400,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0

View file

@@ -129,7 +129,7 @@ async function getAIResponse(prompt: string): Promise<Array<{
return {
model: OPENAI_API_MODEL,
temperature: 0.2,
max_tokens: 1400,
max_completion_tokens: 1400,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0