refactor: Rework providers and templates logic
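Reviewer note on the gist of the rework: providers no longer assemble the prompt/messages payload themselves. Each prepareRequest now receives the active PromptTemplate and the ContextData, delegates payload assembly to prompt->prepareRequest(request, context), and only layers provider-specific sampling parameters on top. A minimal mock of that flow, with stand-in types (the real interfaces live in llmcore/ and are not shown in this diff):

#include <QDebug>
#include <QJsonObject>

// Stand-ins for the LLMCore types referenced throughout the diff.
struct ContextData
{
    QString systemPrompt;
    QString userPrompt;
};

struct PromptTemplate
{
    virtual ~PromptTemplate() = default;
    // The template, not the provider, decides how context becomes payload.
    virtual void prepareRequest(QJsonObject &request, const ContextData &context) const = 0;
};

struct SimpleTemplate : PromptTemplate
{
    void prepareRequest(QJsonObject &request, const ContextData &context) const override
    {
        request["system"] = context.systemPrompt;
        request["prompt"] = context.userPrompt;
    }
};

// After the rework a provider only adds its own knobs on top.
void providerPrepareRequest(QJsonObject &request, PromptTemplate *prompt, const ContextData &context)
{
    prompt->prepareRequest(request, context); // payload assembly delegated
    request["max_tokens"] = 64;               // provider-specific sampling params
    request["stream"] = true;
}

int main()
{
    QJsonObject request;
    SimpleTemplate tmpl;
    providerPrepareRequest(request, &tmpl, {"You are a coding assistant.", "Write hello world."});
    qDebug() << request;
}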
providers/ClaudeProvider.cpp

@@ -30,13 +30,10 @@
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"

namespace QodeAssist::Providers {

-ClaudeProvider::ClaudeProvider() {}
-
QString ClaudeProvider::name() const
{
    return "Claude";

@@ -62,31 +59,17 @@ bool ClaudeProvider::supportsModelListing() const
    return true;
}

-void ClaudeProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
+void ClaudeProvider::prepareRequest(
+    QJsonObject &request,
+    LLMCore::PromptTemplate *prompt,
+    LLMCore::ContextData context,
+    LLMCore::RequestType type)
{
-    auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
-        QJsonArray messages;
-        if (req.contains("messages")) {
-            QJsonArray origMessages = req["messages"].toArray();
-            for (const auto &msg : origMessages) {
-                QJsonObject message = msg.toObject();
-                if (message["role"].toString() == "system") {
-                    req["system"] = message["content"];
-                } else {
-                    messages.append(message);
-                }
-            }
-        } else {
-            if (req.contains("system")) {
-                req["system"] = req["system"].toString();
-            }
-            if (req.contains("prompt")) {
-                messages.append(
-                    QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
-            }
-        }
-        return messages;
-    };
+    // if (!isSupportedTemplate(prompt->name())) {
+    //     LOG_MESSAGE(QString("Provider doesn't support %1 template").arg(prompt->name()));
+    // }
+
+    prompt->prepareRequest(request, context);

    auto applyModelParams = [&request](const auto &settings) {
        request["max_tokens"] = settings.maxTokens();

@@ -98,11 +81,6 @@ void ClaudeProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
        request["stream"] = true;
    };

-    QJsonArray messages = prepareMessages(request);
-    if (!messages.isEmpty()) {
-        request["messages"] = std::move(messages);
-    }
-
    if (type == LLMCore::RequestType::CodeCompletion) {
        applyModelParams(Settings::codeCompletionSettings());
    } else {
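The deleted lambda existed because Anthropic's Messages API takes the system prompt as a top-level "system" field rather than as a messages entry with role "system"; under the rework that normalization presumably moves into the Claude-specific template. For reference, the request shape Claude expects, with illustrative values (model id is just an example, not from this commit):

#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Illustrative Anthropic Messages API body: "system" is a top-level
    // field, and "messages" holds only user/assistant turns.
    const QJsonObject request{
        {"model", "claude-3-5-sonnet-20241022"}, // example id, assumed
        {"system", "You are a coding assistant."},
        {"messages", QJsonArray{QJsonObject{{"role", "user"}, {"content", "Hi"}}}},
        {"max_tokens", 64},
        {"stream", true}};
    qDebug().noquote() << QJsonDocument(request).toJson();
}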
providers/ClaudeProvider.hpp

@@ -26,14 +26,16 @@ namespace QodeAssist::Providers {
class ClaudeProvider : public LLMCore::Provider
{
public:
-    ClaudeProvider();
-
    QString name() const override;
    QString url() const override;
    QString completionEndpoint() const override;
    QString chatEndpoint() const override;
    bool supportsModelListing() const override;
-    void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
+    void prepareRequest(
+        QJsonObject &request,
+        LLMCore::PromptTemplate *prompt,
+        LLMCore::ContextData context,
+        LLMCore::RequestType type) override;
    bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
    QList<QString> getInstalledModels(const QString &url) override;
    QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
providers/LMStudioProvider.cpp

@@ -33,8 +33,6 @@

namespace QodeAssist::Providers {

-LMStudioProvider::LMStudioProvider() {}
-
QString LMStudioProvider::name() const
{
    return "LM Studio";

@@ -60,47 +58,6 @@ bool LMStudioProvider::supportsModelListing() const
    return true;
}

-void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
-{
-    auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
-        QJsonArray messages;
-        if (req.contains("system")) {
-            messages.append(
-                QJsonObject{{"role", "system"}, {"content", req.take("system").toString()}});
-        }
-        if (req.contains("prompt")) {
-            messages.append(
-                QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
-        }
-        return messages;
-    };
-
-    auto applyModelParams = [&request](const auto &settings) {
-        request["max_tokens"] = settings.maxTokens();
-        request["temperature"] = settings.temperature();
-
-        if (settings.useTopP())
-            request["top_p"] = settings.topP();
-        if (settings.useTopK())
-            request["top_k"] = settings.topK();
-        if (settings.useFrequencyPenalty())
-            request["frequency_penalty"] = settings.frequencyPenalty();
-        if (settings.usePresencePenalty())
-            request["presence_penalty"] = settings.presencePenalty();
-    };
-
-    QJsonArray messages = prepareMessages(request);
-    if (!messages.isEmpty()) {
-        request["messages"] = std::move(messages);
-    }
-
-    if (type == LLMCore::RequestType::CodeCompletion) {
-        applyModelParams(Settings::codeCompletionSettings());
-    } else {
-        applyModelParams(Settings::chatAssistantSettings());
-    }
-}
-
bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
    QByteArray data = reply->readAll();

@@ -211,4 +168,37 @@ void LMStudioProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) co
    networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
}

+void QodeAssist::Providers::LMStudioProvider::prepareRequest(
+    QJsonObject &request,
+    LLMCore::PromptTemplate *prompt,
+    LLMCore::ContextData context,
+    LLMCore::RequestType type)
+{
+    // if (!isSupportedTemplate(prompt->name())) {
+    //     LOG_MESSAGE(QString("Provider doesn't support %1 template").arg(prompt->name()));
+    // }
+
+    prompt->prepareRequest(request, context);
+
+    auto applyModelParams = [&request](const auto &settings) {
+        request["max_tokens"] = settings.maxTokens();
+        request["temperature"] = settings.temperature();
+
+        if (settings.useTopP())
+            request["top_p"] = settings.topP();
+        if (settings.useTopK())
+            request["top_k"] = settings.topK();
+        if (settings.useFrequencyPenalty())
+            request["frequency_penalty"] = settings.frequencyPenalty();
+        if (settings.usePresencePenalty())
+            request["presence_penalty"] = settings.presencePenalty();
+    };
+
+    if (type == LLMCore::RequestType::CodeCompletion) {
+        applyModelParams(Settings::codeCompletionSettings());
+    } else {
+        applyModelParams(Settings::chatAssistantSettings());
+    }
+}

} // namespace QodeAssist::Providers
providers/LMStudioProvider.hpp

@@ -26,14 +26,16 @@ namespace QodeAssist::Providers {
class LMStudioProvider : public LLMCore::Provider
{
public:
-    LMStudioProvider();
-
    QString name() const override;
    QString url() const override;
    QString completionEndpoint() const override;
    QString chatEndpoint() const override;
    bool supportsModelListing() const override;
-    void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
+    void prepareRequest(
+        QJsonObject &request,
+        LLMCore::PromptTemplate *prompt,
+        LLMCore::ContextData context,
+        LLMCore::RequestType type) override;
    bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
    QList<QString> getInstalledModels(const QString &url) override;
    QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
providers/MistralAIProvider.cpp (new file, 215 lines)

@@ -0,0 +1,215 @@
#include "MistralAIProvider.hpp"

#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/ProviderSettings.hpp"

#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QNetworkReply>
#include <QtCore/qeventloop.h>

#include "llmcore/OpenAIMessage.hpp"
#include "llmcore/ValidationUtils.hpp"
#include "logger/Logger.hpp"

namespace QodeAssist::Providers {

QString MistralAIProvider::name() const
{
    return "Mistral AI";
}

QString MistralAIProvider::url() const
{
    return "https://api.mistral.ai";
}

QString MistralAIProvider::completionEndpoint() const
{
    return "/v1/fim/completions";
}

QString MistralAIProvider::chatEndpoint() const
{
    return "/v1/chat/completions";
}

bool MistralAIProvider::supportsModelListing() const
{
    return true;
}

bool MistralAIProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
    QByteArray data = reply->readAll();
    if (data.isEmpty()) {
        return false;
    }

    bool isDone = false;
    QByteArrayList lines = data.split('\n');

    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }

        if (line == "data: [DONE]") {
            isDone = true;
            continue;
        }

        QByteArray jsonData = line;
        if (line.startsWith("data: ")) {
            jsonData = line.mid(6);
        }

        QJsonParseError error;
        QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);

        if (doc.isNull()) {
            continue;
        }

        auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
        if (message.hasError()) {
            LOG_MESSAGE("Error in OpenAI response: " + message.error);
            continue;
        }

        QString content = message.getContent();
        if (!content.isEmpty()) {
            accumulatedResponse += content;
        }

        if (message.isDone()) {
            isDone = true;
        }
    }

    return isDone;
}
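For readers unfamiliar with the framing handleResponse expects: Mistral streams server-sent events, one "data: " line per JSON delta, terminated by "data: [DONE]". A standalone illustration of that parse; the sample payload below is invented (the real delta shape is decoded by LLMCore::OpenAIMessage, not shown in this diff):

#include <QByteArray>
#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Invented sample of an SSE body in the OpenAI-style format that
    // handleResponse() above consumes; real payloads carry more fields.
    const QByteArray chunk =
        "data: {\"choices\":[{\"delta\":{\"content\":\"Hello\"}}]}\n"
        "data: {\"choices\":[{\"delta\":{\"content\":\" world\"}}]}\n"
        "data: [DONE]\n";

    QString accumulated;
    bool done = false;
    for (const QByteArray &line : chunk.split('\n')) {
        if (line.trimmed().isEmpty())
            continue;
        if (line == "data: [DONE]") { // end-of-stream sentinel
            done = true;
            continue;
        }
        const QByteArray json = line.startsWith("data: ") ? line.mid(6) : line;
        const QJsonDocument doc = QJsonDocument::fromJson(json);
        if (doc.isNull())
            continue;
        // Roughly the path OpenAIMessage::getContent() walks for stream deltas.
        accumulated += doc.object()["choices"].toArray().first()
                           .toObject()["delta"].toObject()["content"].toString();
    }
    qDebug() << accumulated << done; // "Hello world" true
}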
QList<QString> MistralAIProvider::getInstalledModels(const QString &url)
{
    QList<QString> models;
    QNetworkAccessManager manager;
    QNetworkRequest request(QString("%1/v1/models").arg(url));

    request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
    if (!apiKey().isEmpty()) {
        request.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
    }

    QNetworkReply *reply = manager.get(request);
    QEventLoop loop;
    QObject::connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
    loop.exec();

    if (reply->error() == QNetworkReply::NoError) {
        QByteArray responseData = reply->readAll();
        QJsonDocument jsonResponse = QJsonDocument::fromJson(responseData);
        QJsonObject jsonObject = jsonResponse.object();

        if (jsonObject.contains("data") && jsonObject["object"].toString() == "list") {
            QJsonArray modelArray = jsonObject["data"].toArray();
            for (const QJsonValue &value : modelArray) {
                QJsonObject modelObject = value.toObject();
                if (modelObject.contains("id")) {
                    QString modelId = modelObject["id"].toString();
                    models.append(modelId);
                }
            }
        }
    } else {
        LOG_MESSAGE(QString("Error fetching Mistral AI models: %1").arg(reply->errorString()));
    }

    reply->deleteLater();
    return models;
}

QList<QString> MistralAIProvider::validateRequest(
    const QJsonObject &request, LLMCore::TemplateType type)
{
    const auto fimReq = QJsonObject{
        {"model", {}},
        {"max_tokens", {}},
        {"stream", {}},
        {"temperature", {}},
        {"prompt", {}},
        {"suffix", {}}};

    const auto templateReq = QJsonObject{
        {"model", {}},
        {"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
        {"temperature", {}},
        {"max_tokens", {}},
        {"top_p", {}},
        {"frequency_penalty", {}},
        {"presence_penalty", {}},
        {"stop", QJsonArray{}},
        {"stream", {}}};

    return LLMCore::ValidationUtils::validateRequestFields(
        request, type == LLMCore::TemplateType::FIM ? fimReq : templateReq);
}
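The fimReq whitelist above corresponds to a fill-in-the-middle payload shaped like the following. The key set mirrors the diff exactly; the values, including the model id, are illustrative assumptions:

#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Illustrative FIM request matching the fimReq whitelist above.
    const QJsonObject fim{
        {"model", "codestral-latest"},         // assumed example model id
        {"prompt", "int add(int a, int b) {"}, // code before the cursor
        {"suffix", "}"},                       // code after the cursor
        {"max_tokens", 64},
        {"temperature", 0.2},
        {"stream", true}};
    qDebug().noquote() << QJsonDocument(fim).toJson(QJsonDocument::Indented);
}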
QString MistralAIProvider::apiKey() const
{
    return Settings::providerSettings().mistralAiApiKey();
}

void MistralAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
{
    networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");

    if (!apiKey().isEmpty()) {
        networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
    }
}

void MistralAIProvider::prepareRequest(
    QJsonObject &request,
    LLMCore::PromptTemplate *prompt,
    LLMCore::ContextData context,
    LLMCore::RequestType type)
{
    // if (!isSupportedTemplate(prompt->name())) {
    //     LOG_MESSAGE(QString("Provider doesn't support %1 template").arg(prompt->name()));
    // }

    prompt->prepareRequest(request, context);

    if (type == LLMCore::RequestType::Chat) {
        auto &settings = Settings::chatAssistantSettings();

        request["max_tokens"] = settings.maxTokens();
        request["temperature"] = settings.temperature();

        if (settings.useTopP())
            request["top_p"] = settings.topP();

        // request["random_seed"] = "";

        if (settings.useFrequencyPenalty())
            request["frequency_penalty"] = settings.frequencyPenalty();
        if (settings.usePresencePenalty())
            request["presence_penalty"] = settings.presencePenalty();

    } else {
        auto &settings = Settings::codeCompletionSettings();

        request["max_tokens"] = settings.maxTokens();
        request["temperature"] = settings.temperature();

        if (settings.useTopP())
            request["top_p"] = settings.topP();

        // request["random_seed"] = "";
    }
}

} // namespace QodeAssist::Providers
providers/MistralAIProvider.hpp (new file, 46 lines)

@@ -0,0 +1,46 @@
/*
 * Copyright (C) 2024 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#pragma once

#include "llmcore/Provider.hpp"

namespace QodeAssist::Providers {

class MistralAIProvider : public LLMCore::Provider
{
public:
    QString name() const override;
    QString url() const override;
    QString completionEndpoint() const override;
    QString chatEndpoint() const override;
    bool supportsModelListing() const override;
    void prepareRequest(
        QJsonObject &request,
        LLMCore::PromptTemplate *prompt,
        LLMCore::ContextData context,
        LLMCore::RequestType type) override;
    bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
    QList<QString> getInstalledModels(const QString &url) override;
    QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
    QString apiKey() const override;
    void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
};

} // namespace QodeAssist::Providers
providers/OllamaProvider.cpp

@@ -33,8 +33,6 @@

namespace QodeAssist::Providers {

-OllamaProvider::OllamaProvider() {}
-
QString OllamaProvider::name() const
{
    return "Ollama";

@@ -60,8 +58,18 @@ bool OllamaProvider::supportsModelListing() const
    return true;
}

-void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
+void OllamaProvider::prepareRequest(
+    QJsonObject &request,
+    LLMCore::PromptTemplate *prompt,
+    LLMCore::ContextData context,
+    LLMCore::RequestType type)
{
+    // if (!isSupportedTemplate(prompt->name())) {
+    //     LOG_MESSAGE(QString("Provider doesn't support %1 template").arg(prompt->name()));
+    // }
+
+    prompt->prepareRequest(request, context);
+
    auto applySettings = [&request](const auto &settings) {
        QJsonObject options;
        options["num_predict"] = settings.maxTokens();

@@ -192,7 +200,7 @@ QList<QString> OllamaProvider::validateRequest(const QJsonObject &request, LLMCo
        {"presence_penalty", {}}}}};

    return LLMCore::ValidationUtils::validateRequestFields(
-        request, type == LLMCore::TemplateType::Fim ? fimReq : messageReq);
+        request, type == LLMCore::TemplateType::FIM ? fimReq : messageReq);
}

@@ -203,6 +211,6 @@ QString OllamaProvider::apiKey() const
void OllamaProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
{
    networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
-};
+}

} // namespace QodeAssist::Providers
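Unlike the OpenAI-style providers in this diff, which write sampling parameters at the top level of the request, Ollama expects them nested under an "options" object, with the token limit named "num_predict" rather than "max_tokens"; hence applySettings building a QJsonObject options above. A minimal sketch of the two payload shapes (field values and model ids invented):

#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // OpenAI-style body: sampling parameters sit at the top level.
    const QJsonObject openAiStyle{
        {"model", "gpt-4o-mini"}, // invented model id
        {"max_tokens", 64},
        {"temperature", 0.2}};

    // Ollama body: the same knobs live under "options", and the token
    // limit is called "num_predict" (matches applySettings in the diff).
    const QJsonObject ollamaStyle{
        {"model", "codellama"}, // invented model id
        {"options", QJsonObject{{"num_predict", 64}, {"temperature", 0.2}}}};

    qDebug().noquote() << QJsonDocument(openAiStyle).toJson();
    qDebug().noquote() << QJsonDocument(ollamaStyle).toJson();
}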
providers/OllamaProvider.hpp

@@ -26,14 +26,16 @@ namespace QodeAssist::Providers {
class OllamaProvider : public LLMCore::Provider
{
public:
-    OllamaProvider();
-
    QString name() const override;
    QString url() const override;
    QString completionEndpoint() const override;
    QString chatEndpoint() const override;
    bool supportsModelListing() const override;
-    void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
+    void prepareRequest(
+        QJsonObject &request,
+        LLMCore::PromptTemplate *prompt,
+        LLMCore::ContextData context,
+        LLMCore::RequestType type) override;
    bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
    QList<QString> getInstalledModels(const QString &url) override;
    QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
providers/OpenAICompatProvider.cpp

@@ -34,8 +34,6 @@

namespace QodeAssist::Providers {

-OpenAICompatProvider::OpenAICompatProvider() {}
-
QString OpenAICompatProvider::name() const
{
    return "OpenAI Compatible";

@@ -61,20 +59,17 @@ bool OpenAICompatProvider::supportsModelListing() const
    return false;
}

-void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
+void OpenAICompatProvider::prepareRequest(
+    QJsonObject &request,
+    LLMCore::PromptTemplate *prompt,
+    LLMCore::ContextData context,
+    LLMCore::RequestType type)
{
-    auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
-        QJsonArray messages;
-        if (req.contains("system")) {
-            messages.append(
-                QJsonObject{{"role", "system"}, {"content", req.take("system").toString()}});
-        }
-        if (req.contains("prompt")) {
-            messages.append(
-                QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
-        }
-        return messages;
-    };
+    // if (!isSupportedTemplate(prompt->name())) {
+    //     LOG_MESSAGE(QString("Provider doesn't support %1 template").arg(prompt->name()));
+    // }
+
+    prompt->prepareRequest(request, context);

    auto applyModelParams = [&request](const auto &settings) {
        request["max_tokens"] = settings.maxTokens();

@@ -90,11 +85,6 @@ void OpenAICompatProvider::prepareRequest(QJsonObject &request, LLMCore::Request
        request["presence_penalty"] = settings.presencePenalty();
    };

-    QJsonArray messages = prepareMessages(request);
-    if (!messages.isEmpty()) {
-        request["messages"] = std::move(messages);
-    }
-
    if (type == LLMCore::RequestType::CodeCompletion) {
        applyModelParams(Settings::codeCompletionSettings());
    } else {
providers/OpenAICompatProvider.hpp

@@ -26,14 +26,16 @@ namespace QodeAssist::Providers {
class OpenAICompatProvider : public LLMCore::Provider
{
public:
-    OpenAICompatProvider();
-
    QString name() const override;
    QString url() const override;
    QString completionEndpoint() const override;
    QString chatEndpoint() const override;
    bool supportsModelListing() const override;
-    void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
+    void prepareRequest(
+        QJsonObject &request,
+        LLMCore::PromptTemplate *prompt,
+        LLMCore::ContextData context,
+        LLMCore::RequestType type) override;
    bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
    QList<QString> getInstalledModels(const QString &url) override;
    QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
providers/OpenAIProvider.cpp

@@ -35,8 +35,6 @@

namespace QodeAssist::Providers {

-OpenAIProvider::OpenAIProvider() {}
-
QString OpenAIProvider::name() const
{
    return "OpenAI";

@@ -62,20 +60,17 @@ bool OpenAIProvider::supportsModelListing() const
    return true;
}

-void OpenAIProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
+void OpenAIProvider::prepareRequest(
+    QJsonObject &request,
+    LLMCore::PromptTemplate *prompt,
+    LLMCore::ContextData context,
+    LLMCore::RequestType type)
{
-    auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
-        QJsonArray messages;
-        if (req.contains("system")) {
-            messages.append(
-                QJsonObject{{"role", "system"}, {"content", req.take("system").toString()}});
-        }
-        if (req.contains("prompt")) {
-            messages.append(
-                QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
-        }
-        return messages;
-    };
+    // if (!isSupportedTemplate(prompt->name())) {
+    //     LOG_MESSAGE(QString("Provider doesn't support %1 template").arg(prompt->name()));
+    // }
+
+    prompt->prepareRequest(request, context);

    auto applyModelParams = [&request](const auto &settings) {
        request["max_tokens"] = settings.maxTokens();

@@ -91,11 +86,6 @@ void OpenAIProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
        request["presence_penalty"] = settings.presencePenalty();
    };

-    QJsonArray messages = prepareMessages(request);
-    if (!messages.isEmpty()) {
-        request["messages"] = std::move(messages);
-    }
-
    if (type == LLMCore::RequestType::CodeCompletion) {
        applyModelParams(Settings::codeCompletionSettings());
    } else {
providers/OpenAIProvider.hpp

@@ -26,14 +26,16 @@ namespace QodeAssist::Providers {
class OpenAIProvider : public LLMCore::Provider
{
public:
-    OpenAIProvider();
-
    QString name() const override;
    QString url() const override;
    QString completionEndpoint() const override;
    QString chatEndpoint() const override;
    bool supportsModelListing() const override;
-    void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
+    void prepareRequest(
+        QJsonObject &request,
+        LLMCore::PromptTemplate *prompt,
+        LLMCore::ContextData context,
+        LLMCore::RequestType type) override;
    bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
    QList<QString> getInstalledModels(const QString &url) override;
    QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
providers/OpenRouterProvider.cpp

@@ -33,8 +33,6 @@

namespace QodeAssist::Providers {

-OpenRouterProvider::OpenRouterProvider() {}
-
QString OpenRouterProvider::name() const
{
    return "OpenRouter";

@@ -45,47 +43,6 @@ QString OpenRouterProvider::url() const
    return "https://openrouter.ai/api";
}

-void OpenRouterProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType type)
-{
-    auto prepareMessages = [](QJsonObject &req) -> QJsonArray {
-        QJsonArray messages;
-        if (req.contains("system")) {
-            messages.append(
-                QJsonObject{{"role", "system"}, {"content", req.take("system").toString()}});
-        }
-        if (req.contains("prompt")) {
-            messages.append(
-                QJsonObject{{"role", "user"}, {"content", req.take("prompt").toString()}});
-        }
-        return messages;
-    };
-
-    auto applyModelParams = [&request](const auto &settings) {
-        request["max_tokens"] = settings.maxTokens();
-        request["temperature"] = settings.temperature();
-
-        if (settings.useTopP())
-            request["top_p"] = settings.topP();
-        if (settings.useTopK())
-            request["top_k"] = settings.topK();
-        if (settings.useFrequencyPenalty())
-            request["frequency_penalty"] = settings.frequencyPenalty();
-        if (settings.usePresencePenalty())
-            request["presence_penalty"] = settings.presencePenalty();
-    };
-
-    QJsonArray messages = prepareMessages(request);
-    if (!messages.isEmpty()) {
-        request["messages"] = std::move(messages);
-    }
-
-    if (type == LLMCore::RequestType::CodeCompletion) {
-        applyModelParams(Settings::codeCompletionSettings());
-    } else {
-        applyModelParams(Settings::chatAssistantSettings());
-    }
-}
-
bool OpenRouterProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
    QByteArray data = reply->readAll();
providers/OpenRouterProvider.hpp

@@ -27,11 +27,8 @@ namespace QodeAssist::Providers {
class OpenRouterProvider : public OpenAICompatProvider
{
public:
-    OpenRouterProvider();
-
    QString name() const override;
    QString url() const override;
-    void prepareRequest(QJsonObject &request, LLMCore::RequestType type) override;
    bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
    QString apiKey() const override;
};
providers/Providers.hpp

@@ -22,6 +22,7 @@
#include "llmcore/ProvidersManager.hpp"
#include "providers/ClaudeProvider.hpp"
#include "providers/LMStudioProvider.hpp"
+#include "providers/MistralAIProvider.hpp"
#include "providers/OllamaProvider.hpp"
#include "providers/OpenAICompatProvider.hpp"
#include "providers/OpenAIProvider.hpp"

@@ -33,11 +34,12 @@ inline void registerProviders()
{
    auto &providerManager = LLMCore::ProvidersManager::instance();
    providerManager.registerProvider<OllamaProvider>();
-    providerManager.registerProvider<LMStudioProvider>();
-    providerManager.registerProvider<OpenAICompatProvider>();
-    providerManager.registerProvider<OpenRouterProvider>();
    providerManager.registerProvider<ClaudeProvider>();
    providerManager.registerProvider<OpenAIProvider>();
+    providerManager.registerProvider<OpenAICompatProvider>();
+    providerManager.registerProvider<LMStudioProvider>();
+    providerManager.registerProvider<OpenRouterProvider>();
+    providerManager.registerProvider<MistralAIProvider>();
}

} // namespace QodeAssist::Providers
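Registration relies on ProvidersManager::registerProvider<T>(), whose implementation is not part of this diff. A plausible shape, assuming the manager default-constructs each provider and keys it by name() (everything below is a hypothetical stand-in, not the real llmcore code):

#include <QDebug>
#include <QString>
#include <map>
#include <memory>

// Hypothetical stand-ins for LLMCore::Provider and ProvidersManager.
struct Provider
{
    virtual ~Provider() = default;
    virtual QString name() const = 0;
};

class ProvidersManager
{
public:
    static ProvidersManager &instance()
    {
        static ProvidersManager manager;
        return manager;
    }

    // Assumed shape: construct the provider, key it by name(), take ownership.
    template <typename ProviderT>
    void registerProvider()
    {
        auto provider = std::make_unique<ProviderT>();
        const QString key = provider->name();
        m_providers.emplace(key, std::move(provider));
    }

private:
    std::map<QString, std::unique_ptr<Provider>> m_providers;
};

struct DemoProvider : Provider
{
    QString name() const override { return "Demo"; }
};

int main()
{
    ProvidersManager::instance().registerProvider<DemoProvider>();
    qDebug() << "registered";
}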