mirror of https://github.com/Palm1r/QodeAssist.git
Adapt new settings
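The commit touches three provider implementations: LMStudioProvider.cpp, OllamaProvider.cpp, and OpenAICompatProvider.cpp. Each drops the shared Settings::presetPromptsSettings() lookup in favor of per-feature settings: FIM (inline completion) requests now read CodeCompletionSettings, while everything else reads ChatAssistantSettings, with the parameter wiring factored into a local generic lambda.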
LMStudioProvider.cpp:

@@ -26,7 +26,8 @@
 #include <QNetworkReply>
 
 #include "logger/Logger.hpp"
-#include "settings/PresetPromptsSettings.hpp"
+#include "settings/ChatAssistantSettings.hpp"
+#include "settings/CodeCompletionSettings.hpp"
 
 namespace QodeAssist::Providers {
 
@@ -54,36 +55,43 @@ QString LMStudioProvider::chatEndpoint() const
 
 void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
 {
-    auto &promptSettings = Settings::presetPromptsSettings();
-    auto settings = promptSettings.getSettings(type);
-
-    QJsonArray messages;
-
-    if (request.contains("system")) {
-        QJsonObject systemMessage{{"role", "system"},
-                                  {"content", request.take("system").toString()}};
-        messages.append(systemMessage);
-    }
-
-    if (request.contains("prompt")) {
-        QJsonObject userMessage{{"role", "user"}, {"content", request.take("prompt").toString()}};
-        messages.append(userMessage);
-    }
-
-    if (!messages.isEmpty()) {
-        request["messages"] = std::move(messages);
-    }
-
-    request["max_tokens"] = settings.maxTokens;
-    request["temperature"] = settings.temperature;
-    if (settings.useTopP)
-        request["top_p"] = settings.topP;
-    if (settings.useTopK)
-        request["top_k"] = settings.topK;
-    if (settings.useFrequencyPenalty)
-        request["frequency_penalty"] = settings.frequencyPenalty;
-    if (settings.usePresencePenalty)
-        request["presence_penalty"] = settings.presencePenalty;
+    auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
+        QJsonArray messages;
+        if (req.contains("system")) {
+            messages.append(
+                QJsonObject{{"role", "system"}, {"content", req.take("system").toString()}});
+        }
+        if (req.contains("prompt")) {
+            messages.append(
+                QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
+        }
+        return messages;
+    };
+
+    auto applyModelParams = [&request](const auto &settings) {
+        request["max_tokens"] = settings.maxTokens();
+        request["temperature"] = settings.temperature();
+
+        if (settings.useTopP())
+            request["top_p"] = settings.topP();
+        if (settings.useTopK())
+            request["top_k"] = settings.topK();
+        if (settings.useFrequencyPenalty())
+            request["frequency_penalty"] = settings.frequencyPenalty();
+        if (settings.usePresencePenalty())
+            request["presence_penalty"] = settings.presencePenalty();
+    };
+
+    QJsonArray messages = prepareMessages(request);
+    if (!messages.isEmpty()) {
+        request["messages"] = std::move(messages);
+    }
+
+    if (type == LLMCore::RequestType::Fim) {
+        applyModelParams(Settings::codeCompletionSettings());
+    } else {
+        applyModelParams(Settings::chatAssistantSettings());
+    }
 }
 
 bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
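The key trick above is the `const auto &` generic lambda: CodeCompletionSettings and ChatAssistantSettings need no common base class, only matching accessor names, because a separate lambda body is instantiated per argument type. A minimal standalone sketch of the pattern, where the two settings structs and the enum are illustrative stand-ins rather than the plugin's real types:

#include <QJsonObject>
#include <iostream>

// Illustrative stand-ins for the plugin's settings classes: no common
// base class, just matching accessor names, which is all the generic
// lambda below needs.
struct CodeCompletionSettingsStub
{
    int maxTokens() const { return 50; }
    double temperature() const { return 0.2; }
};

struct ChatAssistantSettingsStub
{
    int maxTokens() const { return 2000; }
    double temperature() const { return 0.5; }
};

enum class RequestType { Fim, Chat };

int main()
{
    QJsonObject request;

    // One body, instantiated once per settings type at compile time.
    auto applyModelParams = [&request](const auto &settings) {
        request["max_tokens"] = settings.maxTokens();
        request["temperature"] = settings.temperature();
    };

    RequestType type = RequestType::Fim;
    if (type == RequestType::Fim)
        applyModelParams(CodeCompletionSettingsStub{});
    else
        applyModelParams(ChatAssistantSettingsStub{});

    std::cout << request["max_tokens"].toInt() << '\n'; // prints 50
    return 0;
}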
OllamaProvider.cpp:

@@ -26,7 +26,8 @@
 #include <QtCore/qeventloop.h>
 
 #include "logger/Logger.hpp"
-#include "settings/PresetPromptsSettings.hpp"
+#include "settings/ChatAssistantSettings.hpp"
+#include "settings/CodeCompletionSettings.hpp"
 
 namespace QodeAssist::Providers {
 
@@ -54,22 +55,29 @@ QString OllamaProvider::chatEndpoint() const
 
 void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
 {
-    auto &promptSettings = Settings::presetPromptsSettings();
-    auto settings = promptSettings.getSettings(type);
-
-    QJsonObject options;
-    options["num_predict"] = settings.maxTokens;
-    options["temperature"] = settings.temperature;
-    if (settings.useTopP)
-        options["top_p"] = settings.topP;
-    if (settings.useTopK)
-        options["top_k"] = settings.topK;
-    if (settings.useFrequencyPenalty)
-        options["frequency_penalty"] = settings.frequencyPenalty;
-    if (settings.usePresencePenalty)
-        options["presence_penalty"] = settings.presencePenalty;
-    request["options"] = options;
-    request["keep_alive"] = settings.ollamaLivetime;
+    auto applySettings = [&request](const auto &settings) {
+        QJsonObject options;
+        options["num_predict"] = settings.maxTokens();
+        options["temperature"] = settings.temperature();
+
+        if (settings.useTopP())
+            options["top_p"] = settings.topP();
+        if (settings.useTopK())
+            options["top_k"] = settings.topK();
+        if (settings.useFrequencyPenalty())
+            options["frequency_penalty"] = settings.frequencyPenalty();
+        if (settings.usePresencePenalty())
+            options["presence_penalty"] = settings.presencePenalty();
+
+        request["options"] = options;
+        request["keep_alive"] = settings.ollamaLivetime();
+    };
+
+    if (type == LLMCore::RequestType::Fim) {
+        applySettings(Settings::codeCompletionSettings());
+    } else {
+        applySettings(Settings::chatAssistantSettings());
+    }
 }
 
 bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
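Note that the Ollama provider shapes the request differently from the OpenAI-style providers: sampling parameters are nested under an `options` object and a `keep_alive` field controls how long the model stays loaded. Assuming illustrative values (the `model` and `prompt` fields are set elsewhere in the request pipeline, and the exact `keep_alive` format depends on what ollamaLivetime() returns), a prepared FIM request might serialize to roughly:

{
    "model": "codellama",
    "prompt": "...",
    "options": {
        "num_predict": 512,
        "temperature": 0.2,
        "top_p": 0.9
    },
    "keep_alive": "5m"
}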
OpenAICompatProvider.cpp:

@@ -18,14 +18,14 @@
  */
 
 #include "OpenAICompatProvider.hpp"
+#include "settings/ChatAssistantSettings.hpp"
+#include "settings/CodeCompletionSettings.hpp"
 
 #include <QJsonArray>
 #include <QJsonDocument>
 #include <QJsonObject>
 #include <QNetworkReply>
 
-#include "settings/PresetPromptsSettings.hpp"
-
 namespace QodeAssist::Providers {
 
 OpenAICompatProvider::OpenAICompatProvider() {}
@@ -52,39 +52,42 @@ QString OpenAICompatProvider::chatEndpoint() const
 
 void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
 {
-    auto &promptSettings = Settings::presetPromptsSettings();
-    auto settings = promptSettings.getSettings(type);
-    QJsonArray messages;
-
-    if (request.contains("system")) {
-        QJsonObject systemMessage{{"role", "system"},
-                                  {"content", request.take("system").toString()}};
-        messages.append(systemMessage);
-    }
-
-    if (request.contains("prompt")) {
-        QJsonObject userMessage{{"role", "user"}, {"content", request.take("prompt").toString()}};
-        messages.append(userMessage);
-    }
-
-    if (!messages.isEmpty()) {
-        request["messages"] = std::move(messages);
-    }
-
-    request["max_tokens"] = settings.maxTokens;
-    request["temperature"] = settings.temperature;
-    if (settings.useTopP)
-        request["top_p"] = settings.topP;
-    if (settings.useTopK)
-        request["top_k"] = settings.topK;
-    if (settings.useFrequencyPenalty)
-        request["frequency_penalty"] = settings.frequencyPenalty;
-    if (settings.usePresencePenalty)
-        request["presence_penalty"] = settings.presencePenalty;
-
-    const QString &apiKey = settings.apiKey;
-    if (!apiKey.isEmpty()) {
-        request["api_key"] = apiKey;
-    }
+    auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
+        QJsonArray messages;
+        if (req.contains("system")) {
+            messages.append(
+                QJsonObject{{"role", "system"}, {"content", req.take("system").toString()}});
+        }
+        if (req.contains("prompt")) {
+            messages.append(
+                QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
+        }
+        return messages;
+    };
+
+    auto applyModelParams = [&request](const auto &settings) {
+        request["max_tokens"] = settings.maxTokens();
+        request["temperature"] = settings.temperature();
+
+        if (settings.useTopP())
+            request["top_p"] = settings.topP();
+        if (settings.useTopK())
+            request["top_k"] = settings.topK();
+        if (settings.useFrequencyPenalty())
+            request["frequency_penalty"] = settings.frequencyPenalty();
+        if (settings.usePresencePenalty())
+            request["presence_penalty"] = settings.presencePenalty();
+    };
+
+    QJsonArray messages = prepareMessages(request);
+    if (!messages.isEmpty()) {
+        request["messages"] = std::move(messages);
+    }
+
+    if (type == LLMCore::RequestType::Fim) {
+        applyModelParams(Settings::codeCompletionSettings());
+    } else {
+        applyModelParams(Settings::chatAssistantSettings());
+    }
 }
 
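As in LMStudioProvider, the flat `system`/`prompt` fields are folded into an OpenAI-style `messages` array; note that the removed `api_key` block does not reappear in this hunk. A self-contained sketch of the transformation with invented field values (same logic as prepareMessages, inlined here):

#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <iostream>

int main()
{
    // Invented field values; in the plugin these are presumably filled
    // in by the prompt templates before prepareRequest() runs.
    QJsonObject request{{"system", "You are a coding assistant."},
                        {"prompt", "Complete this function."}};

    // Same logic as prepareMessages, inlined: take() removes the flat
    // fields so they are not sent alongside the messages array.
    QJsonArray messages;
    if (request.contains("system"))
        messages.append(
            QJsonObject{{"role", "system"}, {"content", request.take("system").toString()}});
    if (request.contains("prompt"))
        messages.append(
            QJsonObject{{"role", "user"}, {"content", request.take("prompt").toString()}});
    request["messages"] = messages;

    // Only "messages" remains in the serialized request.
    std::cout << QJsonDocument(request).toJson().toStdString();
    return 0;
}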