feat: Add OpenAI Responses API (#282)

* feat: Add OpenAI Responses API

* fix: Make temperature optional

* chore: Increase default value of max tokens
Petr Mironychev authored 2025-12-01 12:14:55 +01:00; committed by GitHub
parent e1fa01d123
commit a466332822
26 changed files with 3261 additions and 4 deletions

@@ -151,7 +151,7 @@ CodeCompletionSettings::CodeCompletionSettings()
     maxTokens.setSettingsKey(Constants::CC_MAX_TOKENS);
     maxTokens.setLabelText(Tr::tr("Max Tokens:"));
     maxTokens.setRange(-1, 900000);
-    maxTokens.setDefaultValue(100);
+    maxTokens.setDefaultValue(500);

     // Advanced Parameters
     useTopP.setSettingsKey(Constants::CC_USE_TOP_P);
@@ -313,6 +313,25 @@ CodeCompletionSettings::CodeCompletionSettings()
     contextWindow.setRange(-1, 10000);
     contextWindow.setDefaultValue(2048);

+    // OpenAI Responses API Settings
+    openAIResponsesReasoningEffort.setSettingsKey(Constants::CC_OPENAI_RESPONSES_REASONING_EFFORT);
+    openAIResponsesReasoningEffort.setLabelText(Tr::tr("Reasoning effort:"));
+    openAIResponsesReasoningEffort.setDisplayStyle(Utils::SelectionAspect::DisplayStyle::ComboBox);
+    openAIResponsesReasoningEffort.addOption("None");
+    openAIResponsesReasoningEffort.addOption("Minimal");
+    openAIResponsesReasoningEffort.addOption("Low");
+    openAIResponsesReasoningEffort.addOption("Medium");
+    openAIResponsesReasoningEffort.addOption("High");
+    openAIResponsesReasoningEffort.setDefaultValue("Medium");
+    openAIResponsesReasoningEffort.setToolTip(
+        Tr::tr("Constrains effort on reasoning for OpenAI gpt-5 and o-series models:\n\n"
+               "None: No reasoning (gpt-5.1 only)\n"
+               "Minimal: Minimal reasoning effort (o-series only)\n"
+               "Low: Low reasoning effort\n"
+               "Medium: Balanced reasoning (default for most models)\n"
+               "High: Maximum reasoning effort (gpt-5-pro only supports this)\n\n"
+               "Note: Reducing effort = faster responses + fewer tokens"));
+
     resetToDefaults.m_buttonText = Tr::tr("Reset Page to Defaults");

     readSettings();
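
For context on how this selection reaches the API: the OpenAI Responses API takes the reasoning effort as a nested "reasoning.effort" field of the request body, alongside "max_output_tokens". The sketch below is illustrative only and not part of this commit; the function name buildResponsesRequest and its parameters are hypothetical, and only the JSON field names come from the public API.

    #include <QJsonObject>
    #include <QString>

    // Illustrative sketch, not from this commit: map the combo-box selection
    // onto the "reasoning" field of an OpenAI Responses API request.
    QJsonObject buildResponsesRequest(const QString &model,
                                      const QString &reasoningEffort, // combo text, e.g. "Medium"
                                      int maxOutputTokens)
    {
        QJsonObject request;
        request["model"] = model;
        request["max_output_tokens"] = maxOutputTokens;

        if (!reasoningEffort.isEmpty()) {
            // The API expects lower-case values: "none", "minimal", "low", "medium", "high".
            QJsonObject reasoning;
            reasoning["effort"] = reasoningEffort.toLower();
            request["reasoning"] = reasoning;
        }
        return request;
    }
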
@@ -338,6 +357,9 @@ CodeCompletionSettings::CodeCompletionSettings()
         ollamaGrid.addRow({ollamaLivetime});
         ollamaGrid.addRow({contextWindow});

+        auto openAIResponsesGrid = Grid{};
+        openAIResponsesGrid.addRow({openAIResponsesReasoningEffort});
+
         auto contextGrid = Grid{};
         contextGrid.addRow({Row{readFullFile}});
         contextGrid.addRow({Row{readFileParts, readStringsBeforeCursor, readStringsAfterCursor}});
@@ -398,6 +420,8 @@ CodeCompletionSettings::CodeCompletionSettings()
             Group{title(Tr::tr("Quick Refactor Settings")),
                   Column{useOpenFilesInQuickRefactor, quickRefactorSystemPrompt}},
             Space{8},
+            Group{title(Tr::tr("OpenAI Responses API")), Column{Row{openAIResponsesGrid, Stretch{1}}}},
+            Space{8},
             Group{title(Tr::tr("Ollama Settings")), Column{Row{ollamaGrid, Stretch{1}}}},
             Stretch{1}};
     });
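
Elsewhere in the plugin a provider can read the configured effort back as plain text before building a request. A minimal sketch, assuming Qt Creator's Utils::SelectionAspect::stringValue() and a hypothetical Settings::codeCompletionSettings() accessor:

    // Minimal sketch (the settings accessor is an assumption, not from this
    // commit): read the combo-box selection as its display text.
    QString configuredReasoningEffort()
    {
        const auto &settings = Settings::codeCompletionSettings();
        return settings.openAIResponsesReasoningEffort.stringValue(); // "None", "Minimal", ...
    }
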
@@ -458,6 +482,7 @@ void CodeCompletionSettings::resetSettingsToDefaults()
     resetAspect(maxChangesCacheSize);
     resetAspect(ollamaLivetime);
     resetAspect(contextWindow);
+    resetAspect(openAIResponsesReasoningEffort);
     resetAspect(useUserMessageTemplateForCC);
     resetAspect(userMessageTemplateForCC);
     resetAspect(systemPromptForNonFimModels);
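
The resetAspect helper used here is existing project code that does not appear in this diff. A plausible sketch of such a helper, shown only as an assumption about its behavior (the real implementation may differ):

    // Plausible sketch only: restore a Utils aspect to its default value.
    template<typename AspectType>
    void resetAspect(AspectType &aspect)
    {
        aspect.setValue(aspect.defaultValue());
    }
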