Use max_completion_tokens instead of max_tokens, since max_tokens doesn't work for the o3 model

This commit is contained in:
Alejandro Ramirez 2025-02-25 10:55:59 -05:00
parent 6f7b3de062
commit 1ddb1c2953

View file

@ -176,7 +176,7 @@ async function getAIResponse(prompt: string): Promise<Array<{
const queryConfig = {
model: OPENAI_API_MODEL,
temperature: 0.2,
max_tokens: maxResponseTokens,
max_completion_tokens: maxResponseTokens,
top_p: 1,
frequency_penalty: 0,
presence_penalty: 0,