refactor: Fully rework the quick-refactor feature (#257)

This commit is contained in:
Petr Mironychev
2025-11-15 14:51:47 +01:00
committed by GitHub
parent 9ecd285d1d
commit 953774aaa8
45 changed files with 2002 additions and 125 deletions

View File

@ -30,6 +30,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
@ -76,7 +77,8 @@ void ClaudeProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -93,20 +95,33 @@ void ClaudeProvider::prepareRequest(
request["stream"] = true;
};
auto applyThinkingMode = [&request](const auto &settings) {
QJsonObject thinkingObj;
thinkingObj["type"] = "enabled";
thinkingObj["budget_tokens"] = settings.thinkingBudgetTokens();
request["thinking"] = thinkingObj;
request["max_tokens"] = settings.thinkingMaxTokens();
request["temperature"] = 1.0;
};
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
request["temperature"] = Settings::codeCompletionSettings().temperature();
} else if (type == LLMCore::RequestType::QuickRefactoring) {
const auto &qrSettings = Settings::quickRefactorSettings();
applyModelParams(qrSettings);
if (isThinkingEnabled) {
applyThinkingMode(qrSettings);
} else {
request["temperature"] = qrSettings.temperature();
}
} else {
const auto &chatSettings = Settings::chatAssistantSettings();
applyModelParams(chatSettings);
if (chatSettings.enableThinkingMode()) {
QJsonObject thinkingObj;
thinkingObj["type"] = "enabled";
thinkingObj["budget_tokens"] = chatSettings.thinkingBudgetTokens();
request["thinking"] = thinkingObj;
request["max_tokens"] = chatSettings.thinkingMaxTokens();
request["temperature"] = 1.0;
if (isThinkingEnabled) {
applyThinkingMode(chatSettings);
} else {
request["temperature"] = chatSettings.temperature();
}

View File

@ -42,7 +42,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;

View File

@ -30,6 +30,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
@ -76,7 +77,8 @@ void GoogleAIProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -97,49 +99,43 @@ void GoogleAIProvider::prepareRequest(
request["generationConfig"] = generationConfig;
};
auto applyThinkingMode = [&request](const auto &settings) {
QJsonObject generationConfig;
generationConfig["maxOutputTokens"] = settings.thinkingMaxTokens();
if (settings.useTopP())
generationConfig["topP"] = settings.topP();
if (settings.useTopK())
generationConfig["topK"] = settings.topK();
generationConfig["temperature"] = 1.0;
QJsonObject thinkingConfig;
thinkingConfig["includeThoughts"] = true;
int budgetTokens = settings.thinkingBudgetTokens();
if (budgetTokens != -1) {
thinkingConfig["thinkingBudget"] = budgetTokens;
}
generationConfig["thinkingConfig"] = thinkingConfig;
request["generationConfig"] = generationConfig;
};
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
const auto &qrSettings = Settings::quickRefactorSettings();
if (isThinkingEnabled) {
applyThinkingMode(qrSettings);
} else {
applyModelParams(qrSettings);
}
} else {
const auto &chatSettings = Settings::chatAssistantSettings();
if (chatSettings.enableThinkingMode()) {
QJsonObject generationConfig;
generationConfig["maxOutputTokens"] = chatSettings.thinkingMaxTokens();
if (chatSettings.useTopP())
generationConfig["topP"] = chatSettings.topP();
if (chatSettings.useTopK())
generationConfig["topK"] = chatSettings.topK();
// Set temperature to 1.0 for thinking mode
generationConfig["temperature"] = 1.0;
// Add thinkingConfig
QJsonObject thinkingConfig;
int budgetTokens = chatSettings.thinkingBudgetTokens();
// Dynamic thinking: -1 (let model decide)
// Disabled: 0 (no thinking)
// Custom budget: positive integer
if (budgetTokens == -1) {
// Dynamic thinking - omit budget to let model decide
thinkingConfig["includeThoughts"] = true;
} else if (budgetTokens == 0) {
// Disabled thinking
thinkingConfig["thinkingBudget"] = 0;
thinkingConfig["includeThoughts"] = false;
} else {
// Custom budget
thinkingConfig["thinkingBudget"] = budgetTokens;
thinkingConfig["includeThoughts"] = true;
}
generationConfig["thinkingConfig"] = thinkingConfig;
request["generationConfig"] = generationConfig;
LOG_MESSAGE(QString("Google AI thinking mode enabled: budget=%1 tokens, maxTokens=%2")
.arg(budgetTokens)
.arg(chatSettings.thinkingMaxTokens()));
if (isThinkingEnabled) {
applyThinkingMode(chatSettings);
} else {
applyModelParams(chatSettings);
}

View File

@ -41,7 +41,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;

View File

@ -23,6 +23,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
@ -223,7 +224,8 @@ void LMStudioProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -247,6 +249,8 @@ void LMStudioProvider::prepareRequest(
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}

View File

@ -41,7 +41,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;

View File

@ -23,6 +23,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include <QEventLoop>
@ -74,7 +75,8 @@ void LlamaCppProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -98,6 +100,8 @@ void LlamaCppProvider::prepareRequest(
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}

View File

@ -41,7 +41,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;

View File

@ -23,6 +23,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
@ -244,7 +245,8 @@ void MistralAIProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -268,6 +270,8 @@ void MistralAIProvider::prepareRequest(
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}

View File

@ -41,7 +41,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;

View File

@ -29,6 +29,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
@ -75,7 +76,8 @@ void OllamaProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -104,6 +106,8 @@ void OllamaProvider::prepareRequest(
if (type == LLMCore::RequestType::CodeCompletion) {
applySettings(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
applySettings(Settings::quickRefactorSettings());
} else {
applySettings(Settings::chatAssistantSettings());
}

View File

@ -42,7 +42,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;

View File

@ -23,6 +23,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
@ -74,7 +75,8 @@ void OpenAICompatProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -98,6 +100,8 @@ void OpenAICompatProvider::prepareRequest(
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}

View File

@ -41,7 +41,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;

View File

@ -23,6 +23,7 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
@ -75,7 +76,8 @@ void OpenAIProvider::prepareRequest(
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled)
bool isToolsEnabled,
bool isThinkingEnabled)
{
if (!prompt->isSupportProvider(providerID())) {
LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
@ -118,6 +120,8 @@ void OpenAIProvider::prepareRequest(
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}

View File

@ -41,7 +41,8 @@ public:
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
bool isToolsEnabled) override;
bool isToolsEnabled,
bool isThinkingEnabled) override;
QList<QString> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
QString apiKey() const override;