refactor: Rework providers and templates logic

This commit is contained in:
Petr Mironychev
2025-02-22 19:39:28 +01:00
committed by GitHub
parent bd25736a55
commit d96f44d42c
44 changed files with 701 additions and 524 deletions

View File

@ -30,13 +30,10 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
namespace QodeAssist::Providers {
// Default constructor: intentionally empty — the provider is stateless here
// and all configuration is read from Settings at request-preparation time.
ClaudeProvider::ClaudeProvider() {}
QString ClaudeProvider::name() const
{
return "Claude";
@ -62,31 +59,17 @@ bool ClaudeProvider::supportsModelListing() const
return true;
}
void ClaudeProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
void ClaudeProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type)
{
auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
QJsonArray messages;
if (req.contains("messages")) {
QJsonArray origMessages = req["messages"].toArray();
for (const auto &msg : origMessages) {
QJsonObject message = msg.toObject();
if (message["role"].toString() == "system") {
req["system"] = message["content"];
} else {
messages.append(message);
}
}
} else {
if (req.contains("system")) {
req["system"] = req["system"].toString();
}
if (req.contains("prompt")) {
messages.append(
QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
}
}
return messages;
};
// if (!isSupportedTemplate(prompt->name())) {
// LOG_MESSAGE(QString("Provider doesn't support %1 template").arg(prompt->name()));
// }
prompt->prepareRequest(request, context);
auto applyModelParams = [&request](const auto &settings) {
request["max_tokens"] = settings.maxTokens();
@ -98,11 +81,6 @@ void ClaudeProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
request["stream"] = true;
};
QJsonArray messages = prepareMessages(request);
if (!messages.isEmpty()) {
request["messages"] = std::move(messages);
}
if (type == LLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else {