refactor: Add external LLMCore lib (#334)

* feat: Add LLMCore submodule
Author: Petr Mironychev
Date: 2026-04-03 12:30:40 +02:00
Committed by: GitHub
Parent: 15d714588f
Commit: 6c05f0d594
137 changed files with 1340 additions and 4905 deletions
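
Every file in this diff follows the same pattern: the plugin-side core keeps its code under the renamed PluginLLMCore namespace, while the new LLMCore submodule owns the top-level ::LLMCore namespace and supplies the transport clients (ClaudeClient, GoogleAIClient, OpenAIClient). The sketch below is illustrative only; the ExampleProvider class does not exist in the commit, and the external headers are assumed to provide the constructor and accessors used in the diff hunks that follow.

// Sketch only: assumes the LLMCore submodule ships <LLMCore/ClaudeClient.hpp>
// with the constructor and accessors used in this diff; the class name is illustrative.
#include <LLMCore/ClaudeClient.hpp>       // external submodule client
#include <pluginllmcore/Provider.hpp>     // plugin-side base, renamed namespace
#include "tools/ToolsRegistration.hpp"    // plugin tool registration helper

namespace QodeAssist::Providers {

class ExampleProvider : public PluginLLMCore::Provider
{
    Q_OBJECT
public:
    explicit ExampleProvider(QObject *parent = nullptr)
        : PluginLLMCore::Provider(parent)
        // url / api key / model are configured later, so the client starts empty
        , m_client(new ::LLMCore::ClaudeClient(QString(), QString(), QString(), this))
    {
        // Tool implementations are registered on the client instead of a
        // per-provider ToolsManager.
        Tools::registerQodeAssistTools(m_client->tools());
    }

    // Streaming, buffering and tool continuations now live in the client,
    // so the provider only has to expose it. The real providers also override
    // name(), url(), apiKey(), prepareRequest(), capabilities() and so on.
    ::LLMCore::BaseClient *client() const override { return m_client; }

private:
    ::LLMCore::ClaudeClient *m_client;
};

} // namespace QodeAssist::Providers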

View File

@ -37,32 +37,32 @@ void ClaudeMessage::handleContentBlockStart(
.arg(blockType));
if (blockType == "text") {
addCurrentContent<LLMCore::TextContent>();
addCurrentContent<PluginLLMCore::TextContent>();
} else if (blockType == "image") {
QJsonObject source = data["source"].toObject();
QString sourceType = source["type"].toString();
QString imageData;
QString mediaType;
LLMCore::ImageContent::ImageSourceType imgSourceType = LLMCore::ImageContent::ImageSourceType::Base64;
PluginLLMCore::ImageContent::ImageSourceType imgSourceType = PluginLLMCore::ImageContent::ImageSourceType::Base64;
if (sourceType == "base64") {
imageData = source["data"].toString();
mediaType = source["media_type"].toString();
imgSourceType = LLMCore::ImageContent::ImageSourceType::Base64;
imgSourceType = PluginLLMCore::ImageContent::ImageSourceType::Base64;
} else if (sourceType == "url") {
imageData = source["url"].toString();
imgSourceType = LLMCore::ImageContent::ImageSourceType::Url;
imgSourceType = PluginLLMCore::ImageContent::ImageSourceType::Url;
}
addCurrentContent<LLMCore::ImageContent>(imageData, mediaType, imgSourceType);
addCurrentContent<PluginLLMCore::ImageContent>(imageData, mediaType, imgSourceType);
} else if (blockType == "tool_use") {
QString toolId = data["id"].toString();
QString toolName = data["name"].toString();
QJsonObject toolInput = data["input"].toObject();
addCurrentContent<LLMCore::ToolUseContent>(toolId, toolName, toolInput);
addCurrentContent<PluginLLMCore::ToolUseContent>(toolId, toolName, toolInput);
m_pendingToolInputs[index] = "";
} else if (blockType == "thinking") {
@ -70,13 +70,13 @@ void ClaudeMessage::handleContentBlockStart(
QString signature = data["signature"].toString();
LOG_MESSAGE(QString("ClaudeMessage: Creating thinking block with signature length=%1")
.arg(signature.length()));
addCurrentContent<LLMCore::ThinkingContent>(thinking, signature);
addCurrentContent<PluginLLMCore::ThinkingContent>(thinking, signature);
} else if (blockType == "redacted_thinking") {
QString signature = data["signature"].toString();
LOG_MESSAGE(QString("ClaudeMessage: Creating redacted_thinking block with signature length=%1")
.arg(signature.length()));
addCurrentContent<LLMCore::RedactedThinkingContent>(signature);
addCurrentContent<PluginLLMCore::RedactedThinkingContent>(signature);
}
}
@ -88,7 +88,7 @@ void ClaudeMessage::handleContentBlockDelta(
}
if (deltaType == "text_delta") {
if (auto textContent = qobject_cast<LLMCore::TextContent *>(m_currentBlocks[index])) {
if (auto textContent = qobject_cast<PluginLLMCore::TextContent *>(m_currentBlocks[index])) {
textContent->appendText(delta["text"].toString());
}
@ -99,17 +99,17 @@ void ClaudeMessage::handleContentBlockDelta(
}
} else if (deltaType == "thinking_delta") {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(m_currentBlocks[index])) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(m_currentBlocks[index])) {
thinkingContent->appendThinking(delta["thinking"].toString());
}
} else if (deltaType == "signature_delta") {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(m_currentBlocks[index])) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(m_currentBlocks[index])) {
QString signature = delta["signature"].toString();
thinkingContent->setSignature(signature);
LOG_MESSAGE(QString("Set signature for thinking block %1: length=%2")
.arg(index).arg(signature.length()));
} else if (auto redactedContent = qobject_cast<LLMCore::RedactedThinkingContent *>(m_currentBlocks[index])) {
} else if (auto redactedContent = qobject_cast<PluginLLMCore::RedactedThinkingContent *>(m_currentBlocks[index])) {
QString signature = delta["signature"].toString();
redactedContent->setSignature(signature);
LOG_MESSAGE(QString("Set signature for redacted_thinking block %1: length=%2")
@ -132,7 +132,7 @@ void ClaudeMessage::handleContentBlockStop(int index)
}
if (index < m_currentBlocks.size()) {
if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(m_currentBlocks[index])) {
if (auto toolContent = qobject_cast<PluginLLMCore::ToolUseContent *>(m_currentBlocks[index])) {
toolContent->setInput(inputObject);
}
}
@ -155,7 +155,7 @@ QJsonObject ClaudeMessage::toProviderFormat() const
QJsonArray content;
for (auto block : m_currentBlocks) {
QJsonValue blockJson = block->toJson(LLMCore::ProviderFormat::Claude);
QJsonValue blockJson = block->toJson(PluginLLMCore::ProviderFormat::Claude);
content.append(blockJson);
}
@ -173,42 +173,42 @@ QJsonArray ClaudeMessage::createToolResultsContent(const QHash<QString, QString>
for (auto toolContent : getCurrentToolUseContent()) {
if (toolResults.contains(toolContent->id())) {
auto toolResult = std::make_unique<LLMCore::ToolResultContent>(
auto toolResult = std::make_unique<PluginLLMCore::ToolResultContent>(
toolContent->id(), toolResults[toolContent->id()]);
results.append(toolResult->toJson(LLMCore::ProviderFormat::Claude));
results.append(toolResult->toJson(PluginLLMCore::ProviderFormat::Claude));
}
}
return results;
}
QList<LLMCore::ToolUseContent *> ClaudeMessage::getCurrentToolUseContent() const
QList<PluginLLMCore::ToolUseContent *> ClaudeMessage::getCurrentToolUseContent() const
{
QList<LLMCore::ToolUseContent *> toolBlocks;
QList<PluginLLMCore::ToolUseContent *> toolBlocks;
for (auto block : m_currentBlocks) {
if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(block)) {
if (auto toolContent = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
toolBlocks.append(toolContent);
}
}
return toolBlocks;
}
QList<LLMCore::ThinkingContent *> ClaudeMessage::getCurrentThinkingContent() const
QList<PluginLLMCore::ThinkingContent *> ClaudeMessage::getCurrentThinkingContent() const
{
QList<LLMCore::ThinkingContent *> thinkingBlocks;
QList<PluginLLMCore::ThinkingContent *> thinkingBlocks;
for (auto block : m_currentBlocks) {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(block)) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(block)) {
thinkingBlocks.append(thinkingContent);
}
}
return thinkingBlocks;
}
QList<LLMCore::RedactedThinkingContent *> ClaudeMessage::getCurrentRedactedThinkingContent() const
QList<PluginLLMCore::RedactedThinkingContent *> ClaudeMessage::getCurrentRedactedThinkingContent() const
{
QList<LLMCore::RedactedThinkingContent *> redactedBlocks;
QList<PluginLLMCore::RedactedThinkingContent *> redactedBlocks;
for (auto block : m_currentBlocks) {
if (auto redactedContent = qobject_cast<LLMCore::RedactedThinkingContent *>(block)) {
if (auto redactedContent = qobject_cast<PluginLLMCore::RedactedThinkingContent *>(block)) {
redactedBlocks.append(redactedContent);
}
}
@ -222,17 +222,17 @@ void ClaudeMessage::startNewContinuation()
m_currentBlocks.clear();
m_pendingToolInputs.clear();
m_stopReason.clear();
m_state = LLMCore::MessageState::Building;
m_state = PluginLLMCore::MessageState::Building;
}
void ClaudeMessage::updateStateFromStopReason()
{
if (m_stopReason == "tool_use" && !getCurrentToolUseContent().empty()) {
m_state = LLMCore::MessageState::RequiresToolExecution;
m_state = PluginLLMCore::MessageState::RequiresToolExecution;
} else if (m_stopReason == "end_turn") {
m_state = LLMCore::MessageState::Final;
m_state = PluginLLMCore::MessageState::Final;
} else {
m_state = LLMCore::MessageState::Complete;
m_state = PluginLLMCore::MessageState::Complete;
}
}

View File

@ -19,7 +19,7 @@
#pragma once
#include <llmcore/ContentBlocks.hpp>
#include <pluginllmcore/ContentBlocks.hpp>
namespace QodeAssist {
@ -37,18 +37,18 @@ public:
QJsonObject toProviderFormat() const;
QJsonArray createToolResultsContent(const QHash<QString, QString> &toolResults) const;
LLMCore::MessageState state() const { return m_state; }
QList<LLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<LLMCore::ThinkingContent *> getCurrentThinkingContent() const;
QList<LLMCore::RedactedThinkingContent *> getCurrentRedactedThinkingContent() const;
const QList<LLMCore::ContentBlock *> &getCurrentBlocks() const { return m_currentBlocks; }
PluginLLMCore::MessageState state() const { return m_state; }
QList<PluginLLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<PluginLLMCore::ThinkingContent *> getCurrentThinkingContent() const;
QList<PluginLLMCore::RedactedThinkingContent *> getCurrentRedactedThinkingContent() const;
const QList<PluginLLMCore::ContentBlock *> &getCurrentBlocks() const { return m_currentBlocks; }
void startNewContinuation();
private:
QString m_stopReason;
LLMCore::MessageState m_state = LLMCore::MessageState::Building;
QList<LLMCore::ContentBlock *> m_currentBlocks;
PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building;
QList<PluginLLMCore::ContentBlock *> m_currentBlocks;
QHash<int, QString> m_pendingToolInputs;
void updateStateFromStopReason();

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -22,27 +22,24 @@
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QUrlQuery>
#include "llmcore/ValidationUtils.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
#include "tools/ToolsRegistration.hpp"
namespace QodeAssist::Providers {
ClaudeProvider::ClaudeProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::ClaudeClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&ClaudeProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString ClaudeProvider::name() const
@ -50,6 +47,11 @@ QString ClaudeProvider::name() const
return "Claude";
}
QString ClaudeProvider::apiKey() const
{
return Settings::providerSettings().claudeApiKey();
}
QString ClaudeProvider::url() const
{
return "https://api.anthropic.com";
@ -65,16 +67,11 @@ QString ClaudeProvider::chatEndpoint() const
return "/v1/messages";
}
bool ClaudeProvider::supportsModelListing() const
{
return true;
}
void ClaudeProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -102,10 +99,10 @@ void ClaudeProvider::prepareRequest(
request["temperature"] = 1.0;
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
request["temperature"] = Settings::codeCompletionSettings().temperature();
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
const auto &qrSettings = Settings::quickRefactorSettings();
applyModelParams(qrSettings);
@ -126,13 +123,8 @@ void ClaudeProvider::prepareRequest(
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
auto toolsDefinitions = m_toolsManager->getToolsDefinitions(
LLMCore::ToolSchemaFormat::Claude, filter);
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(QString("Added %1 tools to Claude request").arg(toolsDefinitions.size()));
@ -142,407 +134,26 @@ void ClaudeProvider::prepareRequest(
QFuture<QList<QString>> ClaudeProvider::getInstalledModels(const QString &baseUrl)
{
QUrl url(baseUrl + "/v1/models");
QUrlQuery query;
query.addQueryItem("limit", "1000");
url.setQuery(query);
QNetworkRequest request(url);
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
request.setRawHeader("anthropic-version", "2023-06-01");
if (!apiKey().isEmpty()) {
request.setRawHeader("x-api-key", apiKey().toUtf8());
}
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
if (jsonObject.contains("data")) {
QJsonArray modelArray = jsonObject["data"].toArray();
for (const QJsonValue &value : modelArray) {
QJsonObject modelObject = value.toObject();
if (modelObject.contains("id")) {
models.append(modelObject["id"].toString());
}
}
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching Claude models: %1").arg(e.what()));
return QList<QString>{};
});
m_client->setUrl(baseUrl);
m_client->setApiKey(apiKey());
return m_client->listModels();
}
QList<QString> ClaudeProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID ClaudeProvider::providerID() const
{
const auto templateReq = QJsonObject{
{"model", {}},
{"system", {}},
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
{"temperature", {}},
{"max_tokens", {}},
{"anthropic-version", {}},
{"top_p", {}},
{"top_k", {}},
{"stop", QJsonArray{}},
{"stream", {}},
{"tools", {}},
{"thinking", QJsonObject{{"type", {}}, {"budget_tokens", {}}}}};
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
return PluginLLMCore::ProviderID::Claude;
}
QString ClaudeProvider::apiKey() const
PluginLLMCore::ProviderCapabilities ClaudeProvider::capabilities() const
{
return Settings::providerSettings().claudeApiKey();
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Thinking
| PluginLLMCore::ProviderCapability::Image
| PluginLLMCore::ProviderCapability::ModelListing;
}
void ClaudeProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
::LLMCore::BaseClient *ClaudeProvider::client() const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
networkRequest.setRawHeader("anthropic-version", "2023-06-01");
if (!apiKey().isEmpty()) {
networkRequest.setRawHeader("x-api-key", apiKey().toUtf8());
}
}
LLMCore::ProviderID ClaudeProvider::providerID() const
{
return LLMCore::ProviderID::Claude;
}
void ClaudeProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(QString("ClaudeProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool ClaudeProvider::supportsTools() const
{
return true;
}
bool ClaudeProvider::supportThinking() const {
return true;
};
bool ClaudeProvider::supportImage() const {
return true;
};
void ClaudeProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("ClaudeProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
LLMCore::IToolsManager *ClaudeProvider::toolsManager() const
{
return m_toolsManager;
}
void ClaudeProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
for (const QString &line : lines) {
QJsonObject responseObj = parseEventLine(line);
if (responseObj.isEmpty())
continue;
processStreamEvent(requestId, responseObj);
}
}
void ClaudeProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("ClaudeProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
ClaudeMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
m_dataBuffers.remove(requestId);
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId));
emit fullResponseReceived(requestId, buffers.responseContent);
}
}
cleanupRequest(requestId);
}
void ClaudeProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
LOG_MESSAGE(QString("Tool execution complete for Claude request %1").arg(requestId));
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
ClaudeMessage *message = m_messages[requestId];
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
ClaudeMessage *message = m_messages[requestId];
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray messages = continuationRequest["messages"].toArray();
messages.append(message->toProviderFormat());
QJsonObject userMessage;
userMessage["role"] = "user";
userMessage["content"] = message->createToolResultsContent(toolResults);
messages.append(userMessage);
continuationRequest["messages"] = messages;
if (continuationRequest.contains("thinking")) {
QJsonObject thinkingObj = continuationRequest["thinking"].toObject();
LOG_MESSAGE(QString("Thinking mode preserved for continuation: type=%1, budget=%2 tokens")
.arg(thinkingObj["type"].toString())
.arg(thinkingObj["budget_tokens"].toInt()));
}
LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
.arg(requestId)
.arg(toolResults.size()));
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void ClaudeProvider::processStreamEvent(const QString &requestId, const QJsonObject &event)
{
QString eventType = event["type"].toString();
if (eventType == "message_stop") {
return;
}
ClaudeMessage *message = m_messages.value(requestId);
if (!message) {
if (eventType == "message_start") {
message = new ClaudeMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW ClaudeMessage for request %1").arg(requestId));
} else {
return;
}
}
if (eventType == "message_start") {
message->startNewContinuation();
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting NEW continuation for request %1").arg(requestId));
} else if (eventType == "content_block_start") {
int index = event["index"].toInt();
QJsonObject contentBlock = event["content_block"].toObject();
QString blockType = contentBlock["type"].toString();
LOG_MESSAGE(
QString("Adding new content block: type=%1, index=%2").arg(blockType).arg(index));
if (blockType == "thinking" || blockType == "redacted_thinking") {
QJsonDocument eventDoc(event);
LOG_MESSAGE(QString("content_block_start event for %1: %2")
.arg(blockType)
.arg(QString::fromUtf8(eventDoc.toJson(QJsonDocument::Compact))));
}
message->handleContentBlockStart(index, blockType, contentBlock);
} else if (eventType == "content_block_delta") {
int index = event["index"].toInt();
QJsonObject delta = event["delta"].toObject();
QString deltaType = delta["type"].toString();
message->handleContentBlockDelta(index, deltaType, delta);
if (deltaType == "text_delta") {
QString text = delta["text"].toString();
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += text;
emit partialResponseReceived(requestId, text);
} else if (deltaType == "signature_delta") {
QString signature = delta["signature"].toString();
}
} else if (eventType == "content_block_stop") {
int index = event["index"].toInt();
auto allBlocks = message->getCurrentBlocks();
if (index < allBlocks.size()) {
QString blockType = allBlocks[index]->type();
if (blockType == "thinking" || blockType == "redacted_thinking") {
QJsonDocument eventDoc(event);
LOG_MESSAGE(QString("content_block_stop event for %1 at index %2: %3")
.arg(blockType)
.arg(index)
.arg(QString::fromUtf8(eventDoc.toJson(QJsonDocument::Compact))));
}
}
if (event.contains("content_block")) {
QJsonObject contentBlock = event["content_block"].toObject();
QString blockType = contentBlock["type"].toString();
if (blockType == "thinking") {
QString signature = contentBlock["signature"].toString();
if (!signature.isEmpty()) {
auto allBlocks = message->getCurrentBlocks();
if (index < allBlocks.size()) {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(allBlocks[index])) {
thinkingContent->setSignature(signature);
LOG_MESSAGE(
QString("Updated thinking block signature from content_block_stop, "
"signature length=%1")
.arg(signature.length()));
}
}
}
} else if (blockType == "redacted_thinking") {
QString signature = contentBlock["signature"].toString();
if (!signature.isEmpty()) {
auto allBlocks = message->getCurrentBlocks();
if (index < allBlocks.size()) {
if (auto redactedContent = qobject_cast<LLMCore::RedactedThinkingContent *>(allBlocks[index])) {
redactedContent->setSignature(signature);
LOG_MESSAGE(
QString("Updated redacted_thinking block signature from content_block_stop, "
"signature length=%1")
.arg(signature.length()));
}
}
}
}
}
message->handleContentBlockStop(index);
auto thinkingBlocks = message->getCurrentThinkingContent();
for (auto thinkingContent : thinkingBlocks) {
auto allBlocks = message->getCurrentBlocks();
if (index < allBlocks.size() && allBlocks[index] == thinkingContent) {
emit thinkingBlockReceived(
requestId, thinkingContent->thinking(), thinkingContent->signature());
LOG_MESSAGE(
QString("Emitted thinking block for request %1, thinking length=%2, signature length=%3")
.arg(requestId)
.arg(thinkingContent->thinking().length())
.arg(thinkingContent->signature().length()));
break;
}
}
auto redactedBlocks = message->getCurrentRedactedThinkingContent();
for (auto redactedContent : redactedBlocks) {
auto allBlocks = message->getCurrentBlocks();
if (index < allBlocks.size() && allBlocks[index] == redactedContent) {
emit redactedThinkingBlockReceived(requestId, redactedContent->signature());
LOG_MESSAGE(
QString("Emitted redacted thinking block for request %1, signature length=%2")
.arg(requestId)
.arg(redactedContent->signature().length()));
break;
}
}
} else if (eventType == "message_delta") {
QJsonObject delta = event["delta"].toObject();
if (delta.contains("stop_reason")) {
QString stopReason = delta["stop_reason"].toString();
message->handleStopReason(stopReason);
handleMessageComplete(requestId);
}
}
}
void ClaudeProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
ClaudeMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Claude message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("Claude message marked as complete for %1").arg(requestId));
}
}
void ClaudeProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up Claude request %1").arg(requestId));
if (m_messages.contains(requestId)) {
ClaudeMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
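
In the hunks above, the boolean feature getters (supportsTools(), supportThinking(), supportImage(), supportsModelListing()) are folded into a single capabilities() bitmask. A hedged call-site sketch, assuming ProviderCapabilities is a QFlags-style bitmask that can be combined with | (as in capabilities() above) and tested with &; the helper functions below are hypothetical and not part of this diff.

// Hypothetical call site, not part of this diff: check feature support through
// the capability bitmask rather than the removed boolean getters.
#include <pluginllmcore/Provider.hpp>   // assumed to define ProviderCapability / ProviderCapabilities

bool canUseTools(const PluginLLMCore::Provider &provider)
{
    // Assumption: ProviderCapabilities behaves like QFlags, so the & result
    // converts to bool.
    return provider.capabilities() & PluginLLMCore::ProviderCapability::Tools;
}

bool canStreamThinking(const PluginLLMCore::Provider &provider)
{
    return provider.capabilities() & PluginLLMCore::ProviderCapability::Thinking;
}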

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,14 +19,13 @@
#pragma once
#include <llmcore/Provider.hpp>
#include <pluginllmcore/Provider.hpp>
#include "ClaudeMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <LLMCore/ClaudeClient.hpp>
namespace QodeAssist::Providers {
class ClaudeProvider : public LLMCore::Provider
class ClaudeProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -36,50 +35,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportThinking() const override;
bool supportImage() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
LLMCore::IToolsManager *toolsManager() const override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamEvent(const QString &requestId, const QJsonObject &event);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<QodeAssist::LLMCore::RequestID, ClaudeMessage *> m_messages;
QHash<QodeAssist::LLMCore::RequestID, QUrl> m_requestUrls;
QHash<QodeAssist::LLMCore::RequestID, QJsonObject> m_originalRequests;
Tools::ToolsManager *m_toolsManager;
::LLMCore::ClaudeClient *m_client;
};
} // namespace QodeAssist::Providers

View File

@ -23,24 +23,28 @@
namespace QodeAssist::Providers {
CodestralProvider::CodestralProvider(QObject *parent)
: MistralAIProvider(parent)
{}
QString CodestralProvider::name() const
{
return "Codestral";
}
QString CodestralProvider::url() const
{
return "https://codestral.mistral.ai";
}
bool CodestralProvider::supportsModelListing() const
{
return false;
}
QString CodestralProvider::apiKey() const
{
return Settings::providerSettings().codestralApiKey();
}
QString CodestralProvider::url() const
{
return "https://codestral.mistral.ai";
}
PluginLLMCore::ProviderCapabilities CodestralProvider::capabilities() const
{
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Image;
}
} // namespace QodeAssist::Providers

View File

@ -26,10 +26,12 @@ namespace QodeAssist::Providers {
class CodestralProvider : public MistralAIProvider
{
public:
explicit CodestralProvider(QObject *parent = nullptr);
QString name() const override;
QString url() const override;
bool supportsModelListing() const override;
QString apiKey() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
};
} // namespace QodeAssist::Providers

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,12 +19,14 @@
#include "GoogleAIProvider.hpp"
#include <LLMCore/ToolsManager.hpp>
#include <QJsonArray>
#include "tools/ToolsRegistration.hpp"
#include <QJsonDocument>
#include <QJsonObject>
#include <QtCore/qurlquery.h>
#include "llmcore/ValidationUtils.hpp"
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
@ -35,14 +37,10 @@
namespace QodeAssist::Providers {
GoogleAIProvider::GoogleAIProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::GoogleAIClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&GoogleAIProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString GoogleAIProvider::name() const
@ -50,6 +48,11 @@ QString GoogleAIProvider::name() const
return "Google AI";
}
QString GoogleAIProvider::apiKey() const
{
return Settings::providerSettings().googleAiApiKey();
}
QString GoogleAIProvider::url() const
{
return "https://generativelanguage.googleapis.com/v1beta";
@ -65,16 +68,11 @@ QString GoogleAIProvider::chatEndpoint() const
return {};
}
bool GoogleAIProvider::supportsModelListing() const
{
return true;
}
void GoogleAIProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -119,9 +117,9 @@ void GoogleAIProvider::prepareRequest(
request["generationConfig"] = generationConfig;
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
const auto &qrSettings = Settings::quickRefactorSettings();
if (isThinkingEnabled) {
@ -140,13 +138,7 @@ void GoogleAIProvider::prepareRequest(
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_toolsManager->getToolsDefinitions(
LLMCore::ToolSchemaFormat::Google, filter);
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(QString("Added %1 tools to Google AI request").arg(toolsDefinitions.size()));
@ -154,422 +146,28 @@ void GoogleAIProvider::prepareRequest(
}
}
QFuture<QList<QString>> GoogleAIProvider::getInstalledModels(const QString &url)
QFuture<QList<QString>> GoogleAIProvider::getInstalledModels(const QString &baseUrl)
{
QNetworkRequest request(QString("%1/models?key=%2").arg(url, apiKey()));
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
if (jsonObject.contains("models")) {
QJsonArray modelArray = jsonObject["models"].toArray();
for (const QJsonValue &value : modelArray) {
QJsonObject modelObject = value.toObject();
if (modelObject.contains("name")) {
QString modelName = modelObject["name"].toString();
if (modelName.contains("/")) {
modelName = modelName.split("/").last();
}
models.append(modelName);
}
}
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching Google AI models: %1").arg(e.what()));
return QList<QString>{};
});
m_client->setUrl(baseUrl);
m_client->setApiKey(apiKey());
return m_client->listModels();
}
QList<QString> GoogleAIProvider::validateRequest(
const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID GoogleAIProvider::providerID() const
{
QJsonObject templateReq;
templateReq = QJsonObject{
{"contents", QJsonArray{}},
{"system_instruction", QJsonArray{}},
{"generationConfig",
QJsonObject{
{"temperature", {}},
{"maxOutputTokens", {}},
{"topP", {}},
{"topK", {}},
{"thinkingConfig",
QJsonObject{{"thinkingBudget", {}}, {"includeThoughts", {}}}}}},
{"safetySettings", QJsonArray{}},
{"tools", QJsonArray{}}};
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
return PluginLLMCore::ProviderID::GoogleAI;
}
QString GoogleAIProvider::apiKey() const
PluginLLMCore::ProviderCapabilities GoogleAIProvider::capabilities() const
{
return Settings::providerSettings().googleAiApiKey();
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Thinking
| PluginLLMCore::ProviderCapability::Image
| PluginLLMCore::ProviderCapability::ModelListing;
}
void GoogleAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
::LLMCore::BaseClient *GoogleAIProvider::client() const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
QUrl url = networkRequest.url();
QUrlQuery query(url.query());
query.addQueryItem("key", apiKey());
url.setQuery(query);
networkRequest.setUrl(url);
}
LLMCore::ProviderID GoogleAIProvider::providerID() const
{
return LLMCore::ProviderID::GoogleAI;
}
void GoogleAIProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(
QString("GoogleAIProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool GoogleAIProvider::supportsTools() const
{
return true;
}
bool GoogleAIProvider::supportThinking() const
{
return true;
}
bool GoogleAIProvider::supportImage() const
{
return true;
}
void GoogleAIProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("GoogleAIProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void GoogleAIProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
if (data.isEmpty()) {
return;
}
QJsonParseError parseError;
QJsonDocument doc = QJsonDocument::fromJson(data, &parseError);
if (!doc.isNull() && doc.isObject()) {
QJsonObject obj = doc.object();
if (obj.contains("error")) {
QJsonObject error = obj["error"].toObject();
QString errorMessage = error["message"].toString();
int errorCode = error["code"].toInt();
QString fullError
= QString("Google AI API Error %1: %2").arg(errorCode).arg(errorMessage);
LOG_MESSAGE(fullError);
emit requestFailed(requestId, fullError);
cleanupRequest(requestId);
return;
}
}
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
for (const QString &line : lines) {
if (line.trimmed().isEmpty()) {
continue;
}
QJsonObject chunk = parseEventLine(line);
if (chunk.isEmpty())
continue;
processStreamChunk(requestId, chunk);
}
}
void GoogleAIProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("GoogleAIProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_failedRequests.contains(requestId)) {
cleanupRequest(requestId);
return;
}
emitPendingThinkingBlocks(requestId);
if (m_messages.contains(requestId)) {
GoogleMessage *message = m_messages[requestId];
handleMessageComplete(requestId);
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
m_dataBuffers.remove(requestId);
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
emit fullResponseReceived(requestId, buffers.responseContent);
} else {
emit fullResponseReceived(requestId, QString());
}
} else {
emit fullResponseReceived(requestId, QString());
}
cleanupRequest(requestId);
}
void GoogleAIProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
GoogleMessage *message = m_messages[requestId];
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
GoogleMessage *message = m_messages[requestId];
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray contents = continuationRequest["contents"].toArray();
contents.append(message->toProviderFormat());
QJsonObject userMessage;
userMessage["role"] = "user";
userMessage["parts"] = message->createToolResultParts(toolResults);
contents.append(userMessage);
continuationRequest["contents"] = contents;
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void GoogleAIProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk)
{
if (!chunk.contains("candidates")) {
return;
}
GoogleMessage *message = m_messages.value(requestId);
if (!message) {
message = new GoogleMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW GoogleMessage for request %1").arg(requestId));
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting continuation for request %1").arg(requestId));
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
m_emittedThinkingBlocksCount[requestId] = 0;
LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId));
}
QJsonArray candidates = chunk["candidates"].toArray();
for (const QJsonValue &candidate : candidates) {
QJsonObject candidateObj = candidate.toObject();
if (candidateObj.contains("content")) {
QJsonObject content = candidateObj["content"].toObject();
if (content.contains("parts")) {
QJsonArray parts = content["parts"].toArray();
for (const QJsonValue &part : parts) {
QJsonObject partObj = part.toObject();
if (partObj.contains("text")) {
QString text = partObj["text"].toString();
bool isThought = partObj.value("thought").toBool(false);
if (isThought) {
message->handleThoughtDelta(text);
if (partObj.contains("signature")) {
QString signature = partObj["signature"].toString();
message->handleThoughtSignature(signature);
}
} else {
emitPendingThinkingBlocks(requestId);
message->handleContentDelta(text);
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += text;
emit partialResponseReceived(requestId, text);
}
}
if (partObj.contains("thoughtSignature")) {
QString signature = partObj["thoughtSignature"].toString();
message->handleThoughtSignature(signature);
}
if (partObj.contains("functionCall")) {
emitPendingThinkingBlocks(requestId);
QJsonObject functionCall = partObj["functionCall"].toObject();
QString name = functionCall["name"].toString();
QJsonObject args = functionCall["args"].toObject();
message->handleFunctionCallStart(name);
message->handleFunctionCallArgsDelta(
QString::fromUtf8(QJsonDocument(args).toJson(QJsonDocument::Compact)));
message->handleFunctionCallComplete();
}
}
}
}
if (candidateObj.contains("finishReason")) {
QString finishReason = candidateObj["finishReason"].toString();
message->handleFinishReason(finishReason);
if (message->isErrorFinishReason()) {
QString errorMessage = message->getErrorMessage();
LOG_MESSAGE(QString("Google AI error: %1").arg(errorMessage));
m_failedRequests.insert(requestId);
emit requestFailed(requestId, errorMessage);
return;
}
}
}
if (chunk.contains("usageMetadata")) {
QJsonObject usageMetadata = chunk["usageMetadata"].toObject();
int thoughtsTokenCount = usageMetadata.value("thoughtsTokenCount").toInt(0);
int candidatesTokenCount = usageMetadata.value("candidatesTokenCount").toInt(0);
int totalTokenCount = usageMetadata.value("totalTokenCount").toInt(0);
if (totalTokenCount > 0) {
LOG_MESSAGE(QString("Google AI tokens: %1 (thoughts: %2, output: %3)")
.arg(totalTokenCount)
.arg(thoughtsTokenCount)
.arg(candidatesTokenCount));
}
}
}
void GoogleAIProvider::emitPendingThinkingBlocks(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
GoogleMessage *message = m_messages[requestId];
auto thinkingBlocks = message->getCurrentThinkingContent();
if (thinkingBlocks.isEmpty())
return;
int alreadyEmitted = m_emittedThinkingBlocksCount.value(requestId, 0);
int totalBlocks = thinkingBlocks.size();
for (int i = alreadyEmitted; i < totalBlocks; ++i) {
auto thinkingContent = thinkingBlocks[i];
if (thinkingContent->thinking().trimmed().isEmpty()) {
continue;
}
emit thinkingBlockReceived(
requestId,
thinkingContent->thinking(),
thinkingContent->signature());
}
m_emittedThinkingBlocksCount[requestId] = totalBlocks;
}
void GoogleAIProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
GoogleMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Google AI message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("Google AI message marked as complete for %1").arg(requestId));
}
}
void GoogleAIProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up Google AI request %1").arg(requestId));
if (m_messages.contains(requestId)) {
GoogleMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_emittedThinkingBlocksCount.remove(requestId);
m_failedRequests.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
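
As the hunk above shows, getInstalledModels() no longer builds the HTTP request or parses JSON itself; it configures the client (setUrl, setApiKey) and forwards the QFuture from listModels(). A hedged sketch of how a caller might consume that future with a Qt 6 continuation; the function and its logging are illustrative and not part of this diff.

// Hypothetical caller, not part of this diff: consume the QFuture returned by
// getInstalledModels() once the external client has fetched the model list.
#include <QDebug>
#include <QFuture>
#include <pluginllmcore/Provider.hpp>

void refreshModelList(PluginLLMCore::Provider *provider, const QString &baseUrl)
{
    provider->getInstalledModels(baseUrl).then([](const QList<QString> &models) {
        for (const QString &model : models)
            qDebug() << "available model:" << model;   // e.g. feed a model selector
    });
}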

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,13 +19,13 @@
#pragma once
#include "GoogleMessage.hpp"
#include "llmcore/Provider.hpp"
#include "tools/ToolsManager.hpp"
#include <pluginllmcore/Provider.hpp>
#include <LLMCore/GoogleAIClient.hpp>
namespace QodeAssist::Providers {
class GoogleAIProvider : public LLMCore::Provider
class GoogleAIProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -35,51 +35,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportThinking() const override;
bool supportImage() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamChunk(const QString &requestId, const QJsonObject &chunk);
void handleMessageComplete(const QString &requestId);
void emitPendingThinkingBlocks(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<LLMCore::RequestID, GoogleMessage *> m_messages;
QHash<LLMCore::RequestID, QUrl> m_requestUrls;
QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
QHash<LLMCore::RequestID, int> m_emittedThinkingBlocksCount;
QSet<LLMCore::RequestID> m_failedRequests;
Tools::ToolsManager *m_toolsManager;
::LLMCore::GoogleAIClient *m_client;
};
} // namespace QodeAssist::Providers

View File

@ -32,26 +32,26 @@ GoogleMessage::GoogleMessage(QObject *parent)
void GoogleMessage::handleContentDelta(const QString &text)
{
if (m_currentBlocks.isEmpty() || !qobject_cast<LLMCore::TextContent *>(m_currentBlocks.last())) {
auto textContent = new LLMCore::TextContent();
if (m_currentBlocks.isEmpty() || !qobject_cast<PluginLLMCore::TextContent *>(m_currentBlocks.last())) {
auto textContent = new PluginLLMCore::TextContent();
textContent->setParent(this);
m_currentBlocks.append(textContent);
}
if (auto textContent = qobject_cast<LLMCore::TextContent *>(m_currentBlocks.last())) {
if (auto textContent = qobject_cast<PluginLLMCore::TextContent *>(m_currentBlocks.last())) {
textContent->appendText(text);
}
}
void GoogleMessage::handleThoughtDelta(const QString &text)
{
if (m_currentBlocks.isEmpty() || !qobject_cast<LLMCore::ThinkingContent *>(m_currentBlocks.last())) {
auto thinkingContent = new LLMCore::ThinkingContent();
if (m_currentBlocks.isEmpty() || !qobject_cast<PluginLLMCore::ThinkingContent *>(m_currentBlocks.last())) {
auto thinkingContent = new PluginLLMCore::ThinkingContent();
thinkingContent->setParent(this);
m_currentBlocks.append(thinkingContent);
}
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(m_currentBlocks.last())) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(m_currentBlocks.last())) {
thinkingContent->appendThinking(text);
}
}
@ -59,13 +59,13 @@ void GoogleMessage::handleThoughtDelta(const QString &text)
void GoogleMessage::handleThoughtSignature(const QString &signature)
{
for (int i = m_currentBlocks.size() - 1; i >= 0; --i) {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(m_currentBlocks[i])) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(m_currentBlocks[i])) {
thinkingContent->setSignature(signature);
return;
}
}
auto thinkingContent = new LLMCore::ThinkingContent();
auto thinkingContent = new PluginLLMCore::ThinkingContent();
thinkingContent->setParent(this);
thinkingContent->setSignature(signature);
m_currentBlocks.append(thinkingContent);
@ -97,7 +97,7 @@ void GoogleMessage::handleFunctionCallComplete()
}
QString id = QUuid::createUuid().toString(QUuid::WithoutBraces);
auto toolContent = new LLMCore::ToolUseContent(id, m_currentFunctionName, args);
auto toolContent = new PluginLLMCore::ToolUseContent(id, m_currentFunctionName, args);
toolContent->setParent(this);
m_currentBlocks.append(toolContent);
@ -122,14 +122,14 @@ QJsonObject GoogleMessage::toProviderFormat() const
if (!block)
continue;
if (auto text = qobject_cast<LLMCore::TextContent *>(block)) {
if (auto text = qobject_cast<PluginLLMCore::TextContent *>(block)) {
parts.append(QJsonObject{{"text", text->text()}});
} else if (auto tool = qobject_cast<LLMCore::ToolUseContent *>(block)) {
} else if (auto tool = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
QJsonObject functionCall;
functionCall["name"] = tool->name();
functionCall["args"] = tool->input();
parts.append(QJsonObject{{"functionCall", functionCall}});
} else if (auto thinking = qobject_cast<LLMCore::ThinkingContent *>(block)) {
} else if (auto thinking = qobject_cast<PluginLLMCore::ThinkingContent *>(block)) {
// Include thinking blocks with their text
QJsonObject thinkingPart;
thinkingPart["text"] = thinking->thinking();
@ -169,22 +169,22 @@ QJsonArray GoogleMessage::createToolResultParts(const QHash<QString, QString> &t
return parts;
}
QList<LLMCore::ToolUseContent *> GoogleMessage::getCurrentToolUseContent() const
QList<PluginLLMCore::ToolUseContent *> GoogleMessage::getCurrentToolUseContent() const
{
QList<LLMCore::ToolUseContent *> toolBlocks;
QList<PluginLLMCore::ToolUseContent *> toolBlocks;
for (auto block : m_currentBlocks) {
if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(block)) {
if (auto toolContent = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
toolBlocks.append(toolContent);
}
}
return toolBlocks;
}
QList<LLMCore::ThinkingContent *> GoogleMessage::getCurrentThinkingContent() const
QList<PluginLLMCore::ThinkingContent *> GoogleMessage::getCurrentThinkingContent() const
{
QList<LLMCore::ThinkingContent *> thinkingBlocks;
QList<PluginLLMCore::ThinkingContent *> thinkingBlocks;
for (auto block : m_currentBlocks) {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(block)) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(block)) {
thinkingBlocks.append(thinkingContent);
}
}
@ -199,7 +199,7 @@ void GoogleMessage::startNewContinuation()
m_pendingFunctionArgs.clear();
m_currentFunctionName.clear();
m_finishReason.clear();
m_state = LLMCore::MessageState::Building;
m_state = PluginLLMCore::MessageState::Building;
}
bool GoogleMessage::isErrorFinishReason() const
@ -234,10 +234,10 @@ void GoogleMessage::updateStateFromFinishReason()
{
if (m_finishReason == "STOP" || m_finishReason == "MAX_TOKENS") {
m_state = getCurrentToolUseContent().isEmpty()
? LLMCore::MessageState::Complete
: LLMCore::MessageState::RequiresToolExecution;
? PluginLLMCore::MessageState::Complete
: PluginLLMCore::MessageState::RequiresToolExecution;
} else {
m_state = LLMCore::MessageState::Complete;
m_state = PluginLLMCore::MessageState::Complete;
}
}

View File

@ -24,7 +24,7 @@
#include <QJsonObject>
#include <QObject>
#include <llmcore/ContentBlocks.hpp>
#include <pluginllmcore/ContentBlocks.hpp>
namespace QodeAssist::Providers {
@ -45,11 +45,11 @@ public:
QJsonObject toProviderFormat() const;
QJsonArray createToolResultParts(const QHash<QString, QString> &toolResults) const;
QList<LLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<LLMCore::ThinkingContent *> getCurrentThinkingContent() const;
QList<LLMCore::ContentBlock *> currentBlocks() const { return m_currentBlocks; }
QList<PluginLLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<PluginLLMCore::ThinkingContent *> getCurrentThinkingContent() const;
QList<PluginLLMCore::ContentBlock *> currentBlocks() const { return m_currentBlocks; }
LLMCore::MessageState state() const { return m_state; }
PluginLLMCore::MessageState state() const { return m_state; }
QString finishReason() const { return m_finishReason; }
bool isErrorFinishReason() const;
QString getErrorMessage() const;
@ -58,11 +58,11 @@ public:
private:
void updateStateFromFinishReason();
QList<LLMCore::ContentBlock *> m_currentBlocks;
QList<PluginLLMCore::ContentBlock *> m_currentBlocks;
QString m_pendingFunctionArgs;
QString m_currentFunctionName;
QString m_finishReason;
LLMCore::MessageState m_state = LLMCore::MessageState::Building;
PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building;
};
} // namespace QodeAssist::Providers

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,7 +19,9 @@
#include "LMStudioProvider.hpp"
#include "llmcore/ValidationUtils.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "tools/ToolsRegistration.hpp"
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
@ -34,14 +36,10 @@
namespace QodeAssist::Providers {
LMStudioProvider::LMStudioProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::OpenAIClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&LMStudioProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString LMStudioProvider::name() const
@ -49,6 +47,11 @@ QString LMStudioProvider::name() const
return "LM Studio";
}
QString LMStudioProvider::apiKey() const
{
return {};
}
QString LMStudioProvider::url() const
{
return "http://localhost:1234";
@ -64,155 +67,29 @@ QString LMStudioProvider::chatEndpoint() const
return "/v1/chat/completions";
}
bool LMStudioProvider::supportsModelListing() const
{
return true;
}
QFuture<QList<QString>> LMStudioProvider::getInstalledModels(const QString &url)
{
QNetworkRequest request(QString("%1%2").arg(url, "/v1/models"));
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
QJsonArray modelArray = jsonObject["data"].toArray();
for (const QJsonValue &value : modelArray) {
QJsonObject modelObject = value.toObject();
models.append(modelObject["id"].toString());
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching LMStudio models: %1").arg(e.what()));
return QList<QString>{};
});
m_client->setUrl(url);
m_client->setApiKey(apiKey());
return m_client->listModels();
}
QList<QString> LMStudioProvider::validateRequest(
const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID LMStudioProvider::providerID() const
{
const auto templateReq = QJsonObject{
{"model", {}},
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
{"temperature", {}},
{"max_tokens", {}},
{"top_p", {}},
{"top_k", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}},
{"stop", QJsonArray{}},
{"stream", {}},
{"tools", {}}};
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
return PluginLLMCore::ProviderID::LMStudio;
}
QString LMStudioProvider::apiKey() const
PluginLLMCore::ProviderCapabilities LMStudioProvider::capabilities() const
{
return {};
}
void LMStudioProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
}
LLMCore::ProviderID LMStudioProvider::providerID() const
{
return LLMCore::ProviderID::LMStudio;
}
void LMStudioProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(
QString("LMStudioProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool LMStudioProvider::supportsTools() const
{
return true;
}
bool LMStudioProvider::supportImage() const
{
return true;
}
void LMStudioProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("LMStudioProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void LMStudioProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
for (const QString &line : lines) {
if (line.trimmed().isEmpty() || line == "data: [DONE]") {
continue;
}
QJsonObject chunk = parseEventLine(line);
if (chunk.isEmpty())
continue;
processStreamChunk(requestId, chunk);
}
}
void LMStudioProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("LMStudioProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
m_dataBuffers.remove(requestId);
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId));
emit fullResponseReceived(requestId, buffers.responseContent);
}
}
cleanupRequest(requestId);
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Image
| PluginLLMCore::ProviderCapability::ModelListing;
}
void LMStudioProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -236,22 +113,16 @@ void LMStudioProvider::prepareRequest(
request["presence_penalty"] = settings.presencePenalty();
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_toolsManager->getToolsDefinitions(
LLMCore::ToolSchemaFormat::OpenAI, filter);
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(QString("Added %1 tools to LMStudio request").arg(toolsDefinitions.size()));
@ -259,163 +130,9 @@ void LMStudioProvider::prepareRequest(
}
}
void LMStudioProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
::LLMCore::BaseClient *LMStudioProvider::client() const
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
LOG_MESSAGE(QString("Tool execution complete for LMStudio request %1").arg(requestId));
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
OpenAIMessage *message = m_messages[requestId];
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
OpenAIMessage *message = m_messages[requestId];
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray messages = continuationRequest["messages"].toArray();
messages.append(message->toProviderFormat());
QJsonArray toolResultMessages = message->createToolResultMessages(toolResults);
for (const auto &toolMsg : toolResultMessages) {
messages.append(toolMsg);
}
continuationRequest["messages"] = messages;
LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
.arg(requestId)
.arg(toolResults.size()));
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void LMStudioProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk)
{
QJsonArray choices = chunk["choices"].toArray();
if (choices.isEmpty()) {
return;
}
QJsonObject choice = choices[0].toObject();
QJsonObject delta = choice["delta"].toObject();
QString finishReason = choice["finish_reason"].toString();
OpenAIMessage *message = m_messages.value(requestId);
if (!message) {
message = new OpenAIMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW OpenAIMessage for request %1").arg(requestId));
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting continuation for request %1").arg(requestId));
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId));
}
if (delta.contains("content") && !delta["content"].isNull()) {
QString content = delta["content"].toString();
message->handleContentDelta(content);
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
if (delta.contains("tool_calls")) {
QJsonArray toolCalls = delta["tool_calls"].toArray();
for (const auto &toolCallValue : toolCalls) {
QJsonObject toolCall = toolCallValue.toObject();
int index = toolCall["index"].toInt();
if (toolCall.contains("id")) {
QString id = toolCall["id"].toString();
QJsonObject function = toolCall["function"].toObject();
QString name = function["name"].toString();
message->handleToolCallStart(index, id, name);
}
if (toolCall.contains("function")) {
QJsonObject function = toolCall["function"].toObject();
if (function.contains("arguments")) {
QString args = function["arguments"].toString();
message->handleToolCallDelta(index, args);
}
}
}
}
if (!finishReason.isEmpty() && finishReason != "null") {
for (int i = 0; i < 10; ++i) {
message->handleToolCallComplete(i);
}
message->handleFinishReason(finishReason);
handleMessageComplete(requestId);
}
}
void LMStudioProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("LMStudio message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("LMStudio message marked as complete for %1").arg(requestId));
}
}
void LMStudioProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up LMStudio request %1").arg(requestId));
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
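
A sketch, not part of this commit: the boolean queries supportsTools(), supportImage(), and supportsModelListing() that this provider used to override are folded into the single capabilities() bitmask returned above. Assuming ProviderCapabilities is a QFlags-style bitmask type, as the operator| usage in capabilities() suggests, a call site could test a flag roughly as below; providerSupportsTools() is an illustrative helper, written as if it lived inside namespace QodeAssist::Providers.

// Illustrative only; assumes a QFlags-style ProviderCapabilities.
static bool providerSupportsTools(const PluginLLMCore::Provider *provider)
{
    return provider
           && (provider->capabilities() & PluginLLMCore::ProviderCapability::Tools);
}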


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,13 +19,12 @@
#pragma once
#include "OpenAIMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <llmcore/Provider.hpp>
#include <LLMCore/OpenAIClient.hpp>
#include <pluginllmcore/Provider.hpp>
namespace QodeAssist::Providers {
class LMStudioProvider : public LLMCore::Provider
class LMStudioProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -35,47 +34,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportImage() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamChunk(const QString &requestId, const QJsonObject &chunk);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<LLMCore::RequestID, OpenAIMessage *> m_messages;
QHash<LLMCore::RequestID, QUrl> m_requestUrls;
QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
Tools::ToolsManager *m_toolsManager;
::LLMCore::OpenAIClient *m_client;
};
} // namespace QodeAssist::Providers


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,12 +19,13 @@
#include "LlamaCppProvider.hpp"
#include "llmcore/ValidationUtils.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "tools/ToolsRegistration.hpp"
#include <QJsonArray>
#include <QJsonDocument>
@ -33,14 +34,10 @@
namespace QodeAssist::Providers {
LlamaCppProvider::LlamaCppProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::LlamaCppClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&LlamaCppProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString LlamaCppProvider::name() const
@ -48,6 +45,11 @@ QString LlamaCppProvider::name() const
return "llama.cpp";
}
QString LlamaCppProvider::apiKey() const
{
return {};
}
QString LlamaCppProvider::url() const
{
return "http://localhost:8080";
@ -63,16 +65,11 @@ QString LlamaCppProvider::chatEndpoint() const
return "/v1/chat/completions";
}
bool LlamaCppProvider::supportsModelListing() const
{
return false;
}
void LlamaCppProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -96,22 +93,16 @@ void LlamaCppProvider::prepareRequest(
request["presence_penalty"] = settings.presencePenalty();
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_toolsManager->getToolsDefinitions(
LLMCore::ToolSchemaFormat::OpenAI, filter);
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(QString("Added %1 tools to llama.cpp request").arg(toolsDefinitions.size()));
@ -124,313 +115,19 @@ QFuture<QList<QString>> LlamaCppProvider::getInstalledModels(const QString &)
return QtFuture::makeReadyFuture(QList<QString>{});
}
QList<QString> LlamaCppProvider::validateRequest(
const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID LlamaCppProvider::providerID() const
{
if (type == LLMCore::TemplateType::FIM) {
const auto infillReq = QJsonObject{
{"model", {}},
{"input_prefix", {}},
{"input_suffix", {}},
{"input_extra", {}},
{"prompt", {}},
{"temperature", {}},
{"top_p", {}},
{"top_k", {}},
{"max_tokens", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}},
{"stop", QJsonArray{}},
{"stream", {}}};
return LLMCore::ValidationUtils::validateRequestFields(request, infillReq);
} else {
const auto chatReq = QJsonObject{
{"model", {}},
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
{"temperature", {}},
{"max_tokens", {}},
{"top_p", {}},
{"top_k", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}},
{"stop", QJsonArray{}},
{"stream", {}},
{"tools", {}}};
return LLMCore::ValidationUtils::validateRequestFields(request, chatReq);
}
return PluginLLMCore::ProviderID::LlamaCpp;
}
QString LlamaCppProvider::apiKey() const
PluginLLMCore::ProviderCapabilities LlamaCppProvider::capabilities() const
{
return {};
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Image;
}
void LlamaCppProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
::LLMCore::BaseClient *LlamaCppProvider::client() const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
}
LLMCore::ProviderID LlamaCppProvider::providerID() const
{
return LLMCore::ProviderID::LlamaCpp;
}
void LlamaCppProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(
QString("LlamaCppProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool LlamaCppProvider::supportsTools() const
{
return true;
}
bool LlamaCppProvider::supportImage() const
{
return true;
}
void LlamaCppProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("LlamaCppProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void LlamaCppProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
for (const QString &line : lines) {
if (line.trimmed().isEmpty() || line == "data: [DONE]") {
continue;
}
QJsonObject chunk = parseEventLine(line);
if (chunk.isEmpty())
continue;
if (chunk.contains("content")) {
QString content = chunk["content"].toString();
if (!content.isEmpty()) {
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
if (chunk["stop"].toBool()) {
emit fullResponseReceived(requestId, buffers.responseContent);
m_dataBuffers.remove(requestId);
}
} else if (chunk.contains("choices")) {
processStreamChunk(requestId, chunk);
}
}
}
void LlamaCppProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("LlamaCppProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
m_dataBuffers.remove(requestId);
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId));
emit fullResponseReceived(requestId, buffers.responseContent);
}
}
cleanupRequest(requestId);
}
void LlamaCppProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
LOG_MESSAGE(QString("Tool execution complete for llama.cpp request %1").arg(requestId));
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
OpenAIMessage *message = m_messages[requestId];
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
OpenAIMessage *message = m_messages[requestId];
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray messages = continuationRequest["messages"].toArray();
messages.append(message->toProviderFormat());
QJsonArray toolResultMessages = message->createToolResultMessages(toolResults);
for (const auto &toolMsg : toolResultMessages) {
messages.append(toolMsg);
}
continuationRequest["messages"] = messages;
LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
.arg(requestId)
.arg(toolResults.size()));
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void LlamaCppProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk)
{
QJsonArray choices = chunk["choices"].toArray();
if (choices.isEmpty()) {
return;
}
QJsonObject choice = choices[0].toObject();
QJsonObject delta = choice["delta"].toObject();
QString finishReason = choice["finish_reason"].toString();
OpenAIMessage *message = m_messages.value(requestId);
if (!message) {
message = new OpenAIMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW OpenAIMessage for llama.cpp request %1").arg(requestId));
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting continuation for request %1").arg(requestId));
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId));
}
if (delta.contains("content") && !delta["content"].isNull()) {
QString content = delta["content"].toString();
message->handleContentDelta(content);
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
if (delta.contains("tool_calls")) {
QJsonArray toolCalls = delta["tool_calls"].toArray();
for (const auto &toolCallValue : toolCalls) {
QJsonObject toolCall = toolCallValue.toObject();
int index = toolCall["index"].toInt();
if (toolCall.contains("id")) {
QString id = toolCall["id"].toString();
QJsonObject function = toolCall["function"].toObject();
QString name = function["name"].toString();
message->handleToolCallStart(index, id, name);
}
if (toolCall.contains("function")) {
QJsonObject function = toolCall["function"].toObject();
if (function.contains("arguments")) {
QString args = function["arguments"].toString();
message->handleToolCallDelta(index, args);
}
}
}
}
if (!finishReason.isEmpty() && finishReason != "null") {
for (int i = 0; i < 10; ++i) {
message->handleToolCallComplete(i);
}
message->handleFinishReason(finishReason);
handleMessageComplete(requestId);
}
}
void LlamaCppProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("llama.cpp message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("llama.cpp message marked as complete for %1").arg(requestId));
}
}
void LlamaCppProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up llama.cpp request %1").arg(requestId));
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
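
A sketch, not part of this commit, condensing the tool plumbing that now repeats across these providers: the per-provider Tools::ToolsManager, its toolExecutionComplete signal wiring, and the RunToolsFilter/ToolSchemaFormat arguments are replaced by a registry owned by the ::LLMCore client. ExampleProvider and attachTools() are illustrative names; registerQodeAssistTools(), tools(), and getToolsDefinitions() are taken from the diff, and the concrete type returned by tools() is not shown in this excerpt.

// Illustrative condensation, written as if inside namespace QodeAssist::Providers.
ExampleProvider::ExampleProvider(QObject *parent)
    : PluginLLMCore::Provider(parent)
    , m_client(new ::LLMCore::OpenAIClient(QString(), QString(), QString(), this))
{
    // One-time registration of the plugin's tools into the client-owned registry.
    Tools::registerQodeAssistTools(m_client->tools());
}

void ExampleProvider::attachTools(QJsonObject &request) const
{
    // The registry hands back ready-to-send JSON tool definitions; no schema
    // format or read-only filter argument is needed any more.
    const auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
    if (!toolsDefinitions.isEmpty())
        request["tools"] = toolsDefinitions;
}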


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,13 +19,13 @@
#pragma once
#include "OpenAIMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <llmcore/Provider.hpp>
#include <pluginllmcore/Provider.hpp>
#include <LLMCore/LlamaCppClient.hpp>
namespace QodeAssist::Providers {
class LlamaCppProvider : public LLMCore::Provider
class LlamaCppProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -35,47 +35,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportImage() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamChunk(const QString &requestId, const QJsonObject &chunk);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<LLMCore::RequestID, OpenAIMessage *> m_messages;
QHash<LLMCore::RequestID, QUrl> m_requestUrls;
QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
Tools::ToolsManager *m_toolsManager;
::LLMCore::LlamaCppClient *m_client;
};
} // namespace QodeAssist::Providers


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,13 +19,14 @@
#include "MistralAIProvider.hpp"
#include "llmcore/ValidationUtils.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
#include "tools/ToolsRegistration.hpp"
#include <QJsonArray>
#include <QJsonDocument>
@ -34,14 +35,10 @@
namespace QodeAssist::Providers {
MistralAIProvider::MistralAIProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::OpenAIClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&MistralAIProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString MistralAIProvider::name() const
@ -49,6 +46,11 @@ QString MistralAIProvider::name() const
return "Mistral AI";
}
QString MistralAIProvider::apiKey() const
{
return Settings::providerSettings().mistralAiApiKey();
}
QString MistralAIProvider::url() const
{
return "https://api.mistral.ai";
@ -64,176 +66,29 @@ QString MistralAIProvider::chatEndpoint() const
return "/v1/chat/completions";
}
bool MistralAIProvider::supportsModelListing() const
{
return true;
}
QFuture<QList<QString>> MistralAIProvider::getInstalledModels(const QString &url)
{
QNetworkRequest request(QString("%1/v1/models").arg(url));
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
request.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
if (jsonObject.contains("data") && jsonObject["object"].toString() == "list") {
QJsonArray modelArray = jsonObject["data"].toArray();
for (const QJsonValue &value : modelArray) {
QJsonObject modelObject = value.toObject();
if (modelObject.contains("id")) {
models.append(modelObject["id"].toString());
}
}
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching Mistral AI models: %1").arg(e.what()));
return QList<QString>{};
});
m_client->setUrl(url);
m_client->setApiKey(apiKey());
return m_client->listModels();
}
QList<QString> MistralAIProvider::validateRequest(
const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID MistralAIProvider::providerID() const
{
const auto fimReq = QJsonObject{
{"model", {}},
{"max_tokens", {}},
{"stream", {}},
{"temperature", {}},
{"prompt", {}},
{"suffix", {}}};
const auto templateReq = QJsonObject{
{"model", {}},
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
{"temperature", {}},
{"max_tokens", {}},
{"top_p", {}},
{"top_k", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}},
{"stop", QJsonArray{}},
{"stream", {}},
{"tools", {}}};
return LLMCore::ValidationUtils::validateRequestFields(
request, type == LLMCore::TemplateType::FIM ? fimReq : templateReq);
return PluginLLMCore::ProviderID::MistralAI;
}
QString MistralAIProvider::apiKey() const
PluginLLMCore::ProviderCapabilities MistralAIProvider::capabilities() const
{
return Settings::providerSettings().mistralAiApiKey();
}
void MistralAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
}
LLMCore::ProviderID MistralAIProvider::providerID() const
{
return LLMCore::ProviderID::MistralAI;
}
void MistralAIProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(
QString("MistralAIProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool MistralAIProvider::supportsTools() const
{
return true;
}
bool MistralAIProvider::supportImage() const
{
return true;
}
void MistralAIProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("MistralAIProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void MistralAIProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
for (const QString &line : lines) {
if (line.trimmed().isEmpty() || line == "data: [DONE]") {
continue;
}
QJsonObject chunk = parseEventLine(line);
if (chunk.isEmpty())
continue;
processStreamChunk(requestId, chunk);
}
}
void MistralAIProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("MistralAIProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
m_dataBuffers.remove(requestId);
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId));
emit fullResponseReceived(requestId, buffers.responseContent);
}
}
cleanupRequest(requestId);
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Image
| PluginLLMCore::ProviderCapability::ModelListing;
}
void MistralAIProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -257,22 +112,16 @@ void MistralAIProvider::prepareRequest(
request["presence_penalty"] = settings.presencePenalty();
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_toolsManager->getToolsDefinitions(
LLMCore::ToolSchemaFormat::OpenAI, filter);
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(QString("Added %1 tools to Mistral request").arg(toolsDefinitions.size()));
@ -280,163 +129,9 @@ void MistralAIProvider::prepareRequest(
}
}
void MistralAIProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
::LLMCore::BaseClient *MistralAIProvider::client() const
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
LOG_MESSAGE(QString("Tool execution complete for Mistral request %1").arg(requestId));
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
OpenAIMessage *message = m_messages[requestId];
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
OpenAIMessage *message = m_messages[requestId];
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray messages = continuationRequest["messages"].toArray();
messages.append(message->toProviderFormat());
QJsonArray toolResultMessages = message->createToolResultMessages(toolResults);
for (const auto &toolMsg : toolResultMessages) {
messages.append(toolMsg);
}
continuationRequest["messages"] = messages;
LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
.arg(requestId)
.arg(toolResults.size()));
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void MistralAIProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk)
{
QJsonArray choices = chunk["choices"].toArray();
if (choices.isEmpty()) {
return;
}
QJsonObject choice = choices[0].toObject();
QJsonObject delta = choice["delta"].toObject();
QString finishReason = choice["finish_reason"].toString();
OpenAIMessage *message = m_messages.value(requestId);
if (!message) {
message = new OpenAIMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW OpenAIMessage for Mistral request %1").arg(requestId));
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting continuation for request %1").arg(requestId));
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId));
}
if (delta.contains("content") && !delta["content"].isNull()) {
QString content = delta["content"].toString();
message->handleContentDelta(content);
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
if (delta.contains("tool_calls")) {
QJsonArray toolCalls = delta["tool_calls"].toArray();
for (const auto &toolCallValue : toolCalls) {
QJsonObject toolCall = toolCallValue.toObject();
int index = toolCall["index"].toInt();
if (toolCall.contains("id")) {
QString id = toolCall["id"].toString();
QJsonObject function = toolCall["function"].toObject();
QString name = function["name"].toString();
message->handleToolCallStart(index, id, name);
}
if (toolCall.contains("function")) {
QJsonObject function = toolCall["function"].toObject();
if (function.contains("arguments")) {
QString args = function["arguments"].toString();
message->handleToolCallDelta(index, args);
}
}
}
}
if (!finishReason.isEmpty() && finishReason != "null") {
for (int i = 0; i < 10; ++i) {
message->handleToolCallComplete(i);
}
message->handleFinishReason(finishReason);
handleMessageComplete(requestId);
}
}
void MistralAIProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Mistral message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("Mistral message marked as complete for %1").arg(requestId));
}
}
void MistralAIProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up Mistral request %1").arg(requestId));
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
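
A sketch, not part of this commit: model listing now reduces to pointing the shared client at the provider's URL and API key and returning its future. The caller below is illustrative; the QFuture<QList<QString>> signature of getInstalledModels(), LOG_MESSAGE, and the QFuture::then continuation style all appear elsewhere in the diff.

// Illustrative caller, e.g. a settings page populating a model picker.
provider->getInstalledModels(provider->url())
    .then([](const QList<QString> &models) {
        for (const QString &modelId : models)
            LOG_MESSAGE(QString("Available model: %1").arg(modelId));
    });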


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,13 +19,12 @@
#pragma once
#include "OpenAIMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <llmcore/Provider.hpp>
#include <LLMCore/OpenAIClient.hpp>
#include <pluginllmcore/Provider.hpp>
namespace QodeAssist::Providers {
class MistralAIProvider : public LLMCore::Provider
class MistralAIProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -35,47 +34,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportImage() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamChunk(const QString &requestId, const QJsonObject &chunk);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<LLMCore::RequestID, OpenAIMessage *> m_messages;
QHash<LLMCore::RequestID, QUrl> m_requestUrls;
QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
Tools::ToolsManager *m_toolsManager;
::LLMCore::OpenAIClient *m_client;
};
} // namespace QodeAssist::Providers


@ -39,13 +39,13 @@ void OllamaMessage::handleContentDelta(const QString &content)
}
if (!m_contentAddedToTextBlock) {
LLMCore::TextContent *textContent = getOrCreateTextContent();
PluginLLMCore::TextContent *textContent = getOrCreateTextContent();
textContent->setText(m_accumulatedContent);
m_contentAddedToTextBlock = true;
LOG_MESSAGE(QString("OllamaMessage: Added accumulated content to TextContent, length=%1")
.arg(m_accumulatedContent.length()));
} else {
LLMCore::TextContent *textContent = getOrCreateTextContent();
PluginLLMCore::TextContent *textContent = getOrCreateTextContent();
textContent->appendText(content);
}
}
@ -65,7 +65,7 @@ void OllamaMessage::handleToolCall(const QJsonObject &toolCall)
m_accumulatedContent.clear();
}
addCurrentContent<LLMCore::ToolUseContent>(toolId, name, arguments);
addCurrentContent<PluginLLMCore::ToolUseContent>(toolId, name, arguments);
LOG_MESSAGE(
QString("OllamaMessage: Structured tool call detected - name=%1, id=%2").arg(name, toolId));
@ -73,7 +73,7 @@ void OllamaMessage::handleToolCall(const QJsonObject &toolCall)
void OllamaMessage::handleThinkingDelta(const QString &thinking)
{
LLMCore::ThinkingContent *thinkingContent = getOrCreateThinkingContent();
PluginLLMCore::ThinkingContent *thinkingContent = getOrCreateThinkingContent();
thinkingContent->appendThinking(thinking);
}
@ -102,7 +102,7 @@ void OllamaMessage::handleDone(bool done)
.arg(trimmed.length()));
for (auto it = m_currentBlocks.begin(); it != m_currentBlocks.end();) {
if (qobject_cast<LLMCore::TextContent *>(*it)) {
if (qobject_cast<PluginLLMCore::TextContent *>(*it)) {
LOG_MESSAGE(QString(
"OllamaMessage: Removing TextContent block (incomplete tool call)"));
(*it)->deleteLater();
@ -114,7 +114,7 @@ void OllamaMessage::handleDone(bool done)
m_accumulatedContent.clear();
} else {
LLMCore::TextContent *textContent = getOrCreateTextContent();
PluginLLMCore::TextContent *textContent = getOrCreateTextContent();
textContent->setText(m_accumulatedContent);
m_contentAddedToTextBlock = true;
LOG_MESSAGE(
@ -184,13 +184,13 @@ bool OllamaMessage::tryParseToolCall()
QString toolId = QString("call_%1_%2").arg(name).arg(QDateTime::currentMSecsSinceEpoch());
for (auto block : m_currentBlocks) {
if (qobject_cast<LLMCore::TextContent *>(block)) {
if (qobject_cast<PluginLLMCore::TextContent *>(block)) {
LOG_MESSAGE(QString("OllamaMessage: Removing TextContent block (tool call detected)"));
}
}
m_currentBlocks.clear();
addCurrentContent<LLMCore::ToolUseContent>(toolId, name, arguments);
addCurrentContent<PluginLLMCore::ToolUseContent>(toolId, name, arguments);
LOG_MESSAGE(
QString(
@ -238,14 +238,14 @@ QJsonObject OllamaMessage::toProviderFormat() const
if (!block)
continue;
if (auto text = qobject_cast<LLMCore::TextContent *>(block)) {
if (auto text = qobject_cast<PluginLLMCore::TextContent *>(block)) {
textContent += text->text();
} else if (auto tool = qobject_cast<LLMCore::ToolUseContent *>(block)) {
} else if (auto tool = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
QJsonObject toolCall;
toolCall["type"] = "function";
toolCall["function"] = QJsonObject{{"name", tool->name()}, {"arguments", tool->input()}};
toolCalls.append(toolCall);
} else if (auto thinking = qobject_cast<LLMCore::ThinkingContent *>(block)) {
} else if (auto thinking = qobject_cast<PluginLLMCore::ThinkingContent *>(block)) {
thinkingContent += thinking->thinking();
}
}
@ -287,22 +287,22 @@ QJsonArray OllamaMessage::createToolResultMessages(const QHash<QString, QString>
return messages;
}
QList<LLMCore::ToolUseContent *> OllamaMessage::getCurrentToolUseContent() const
QList<PluginLLMCore::ToolUseContent *> OllamaMessage::getCurrentToolUseContent() const
{
QList<LLMCore::ToolUseContent *> toolBlocks;
QList<PluginLLMCore::ToolUseContent *> toolBlocks;
for (auto block : m_currentBlocks) {
if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(block)) {
if (auto toolContent = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
toolBlocks.append(toolContent);
}
}
return toolBlocks;
}
QList<LLMCore::ThinkingContent *> OllamaMessage::getCurrentThinkingContent() const
QList<PluginLLMCore::ThinkingContent *> OllamaMessage::getCurrentThinkingContent() const
{
QList<LLMCore::ThinkingContent *> thinkingBlocks;
QList<PluginLLMCore::ThinkingContent *> thinkingBlocks;
for (auto block : m_currentBlocks) {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(block)) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(block)) {
thinkingBlocks.append(thinkingContent);
}
}
@ -316,7 +316,7 @@ void OllamaMessage::startNewContinuation()
m_currentBlocks.clear();
m_accumulatedContent.clear();
m_done = false;
m_state = LLMCore::MessageState::Building;
m_state = PluginLLMCore::MessageState::Building;
m_contentAddedToTextBlock = false;
m_currentThinkingContent = nullptr;
}
@ -324,40 +324,40 @@ void OllamaMessage::startNewContinuation()
void OllamaMessage::updateStateFromDone()
{
if (!getCurrentToolUseContent().empty()) {
m_state = LLMCore::MessageState::RequiresToolExecution;
m_state = PluginLLMCore::MessageState::RequiresToolExecution;
LOG_MESSAGE(QString("OllamaMessage: State set to RequiresToolExecution, tools count=%1")
.arg(getCurrentToolUseContent().size()));
} else {
m_state = LLMCore::MessageState::Final;
m_state = PluginLLMCore::MessageState::Final;
LOG_MESSAGE(QString("OllamaMessage: State set to Final"));
}
}
LLMCore::TextContent *OllamaMessage::getOrCreateTextContent()
PluginLLMCore::TextContent *OllamaMessage::getOrCreateTextContent()
{
for (auto block : m_currentBlocks) {
if (auto textContent = qobject_cast<LLMCore::TextContent *>(block)) {
if (auto textContent = qobject_cast<PluginLLMCore::TextContent *>(block)) {
return textContent;
}
}
return addCurrentContent<LLMCore::TextContent>();
return addCurrentContent<PluginLLMCore::TextContent>();
}
LLMCore::ThinkingContent *OllamaMessage::getOrCreateThinkingContent()
PluginLLMCore::ThinkingContent *OllamaMessage::getOrCreateThinkingContent()
{
if (m_currentThinkingContent) {
return m_currentThinkingContent;
}
for (auto block : m_currentBlocks) {
if (auto thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(block)) {
if (auto thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(block)) {
m_currentThinkingContent = thinkingContent;
return m_currentThinkingContent;
}
}
m_currentThinkingContent = addCurrentContent<LLMCore::ThinkingContent>();
m_currentThinkingContent = addCurrentContent<PluginLLMCore::ThinkingContent>();
LOG_MESSAGE(QString("OllamaMessage: Created new ThinkingContent block"));
return m_currentThinkingContent;
}


@ -19,7 +19,7 @@
#pragma once
#include <llmcore/ContentBlocks.hpp>
#include <pluginllmcore/ContentBlocks.hpp>
namespace QodeAssist::Providers {
@ -38,26 +38,26 @@ public:
QJsonObject toProviderFormat() const;
QJsonArray createToolResultMessages(const QHash<QString, QString> &toolResults) const;
LLMCore::MessageState state() const { return m_state; }
QList<LLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<LLMCore::ThinkingContent *> getCurrentThinkingContent() const;
QList<LLMCore::ContentBlock *> currentBlocks() const { return m_currentBlocks; }
PluginLLMCore::MessageState state() const { return m_state; }
QList<PluginLLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<PluginLLMCore::ThinkingContent *> getCurrentThinkingContent() const;
QList<PluginLLMCore::ContentBlock *> currentBlocks() const { return m_currentBlocks; }
void startNewContinuation();
private:
bool m_done = false;
LLMCore::MessageState m_state = LLMCore::MessageState::Building;
QList<LLMCore::ContentBlock *> m_currentBlocks;
PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building;
QList<PluginLLMCore::ContentBlock *> m_currentBlocks;
QString m_accumulatedContent;
bool m_contentAddedToTextBlock = false;
LLMCore::ThinkingContent *m_currentThinkingContent = nullptr;
PluginLLMCore::ThinkingContent *m_currentThinkingContent = nullptr;
void updateStateFromDone();
bool tryParseToolCall();
bool isLikelyToolCallJson(const QString &content) const;
LLMCore::TextContent *getOrCreateTextContent();
LLMCore::ThinkingContent *getOrCreateThinkingContent();
PluginLLMCore::TextContent *getOrCreateTextContent();
PluginLLMCore::ThinkingContent *getOrCreateThinkingContent();
template<typename T, typename... Args>
T *addCurrentContent(Args &&...args)


@ -19,29 +19,27 @@
#include "OllamaProvider.hpp"
#include <LLMCore/ToolsManager.hpp>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include "llmcore/ValidationUtils.hpp"
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/QuickRefactorSettings.hpp"
#include "settings/GeneralSettings.hpp"
#include "settings/ProviderSettings.hpp"
#include "tools/ToolsRegistration.hpp"
namespace QodeAssist::Providers {
OllamaProvider::OllamaProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::OllamaClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&OllamaProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString OllamaProvider::name() const
@ -49,6 +47,11 @@ QString OllamaProvider::name() const
return "Ollama";
}
QString OllamaProvider::apiKey() const
{
return Settings::providerSettings().ollamaBasicAuthApiKey();
}
QString OllamaProvider::url() const
{
return "http://localhost:11434";
@ -64,16 +67,11 @@ QString OllamaProvider::chatEndpoint() const
return "/api/chat";
}
bool OllamaProvider::supportsModelListing() const
{
return true;
}
void OllamaProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -109,12 +107,12 @@ void OllamaProvider::prepareRequest(
request["options"] = options;
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applySettings(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
const auto &qrSettings = Settings::quickRefactorSettings();
applySettings(qrSettings);
if (isThinkingEnabled) {
applyThinkingMode();
LOG_MESSAGE(QString("OllamaProvider: Thinking mode enabled for QuickRefactoring"));
@ -130,13 +128,7 @@ void OllamaProvider::prepareRequest(
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_toolsManager->toolsFactory()->getToolsDefinitions(
LLMCore::ToolSchemaFormat::Ollama, filter);
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(
@ -145,453 +137,28 @@ void OllamaProvider::prepareRequest(
}
}
QFuture<QList<QString>> OllamaProvider::getInstalledModels(const QString &url)
QFuture<QList<QString>> OllamaProvider::getInstalledModels(const QString &baseUrl)
{
QNetworkRequest request(QString("%1%2").arg(url, "/api/tags"));
prepareNetworkRequest(request);
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
QJsonArray modelArray = jsonObject["models"].toArray();
for (const QJsonValue &value : modelArray) {
QJsonObject modelObject = value.toObject();
models.append(modelObject["name"].toString());
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching models: %1").arg(e.what()));
return QList<QString>{};
});
m_client->setUrl(baseUrl);
m_client->setApiKey(Settings::providerSettings().ollamaBasicAuthApiKey());
return m_client->listModels();
}
QList<QString> OllamaProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID OllamaProvider::providerID() const
{
const auto fimReq = QJsonObject{
{"keep_alive", {}},
{"model", {}},
{"stream", {}},
{"prompt", {}},
{"suffix", {}},
{"system", {}},
{"images", QJsonArray{}},
{"options",
QJsonObject{
{"temperature", {}},
{"stop", {}},
{"top_p", {}},
{"top_k", {}},
{"num_predict", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}}}}};
const auto messageReq = QJsonObject{
{"keep_alive", {}},
{"model", {}},
{"stream", {}},
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}, {"images", QJsonArray{}}}}}},
{"tools", QJsonArray{}},
{"options",
QJsonObject{
{"temperature", {}},
{"stop", {}},
{"top_p", {}},
{"top_k", {}},
{"num_predict", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}}}}};
return LLMCore::ValidationUtils::validateRequestFields(
request, type == LLMCore::TemplateType::FIM ? fimReq : messageReq);
return PluginLLMCore::ProviderID::Ollama;
}
QString OllamaProvider::apiKey() const
PluginLLMCore::ProviderCapabilities OllamaProvider::capabilities() const
{
return {};
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Thinking
| PluginLLMCore::ProviderCapability::Image
| PluginLLMCore::ProviderCapability::ModelListing;
}
void OllamaProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
::LLMCore::BaseClient *OllamaProvider::client() const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
const auto key = Settings::providerSettings().ollamaBasicAuthApiKey();
if (!key.isEmpty()) {
networkRequest.setRawHeader("Authorization", "Basic " + key.toLatin1());
}
}
LLMCore::ProviderID OllamaProvider::providerID() const
{
return LLMCore::ProviderID::Ollama;
}
void OllamaProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
m_dataBuffers[requestId].clear();
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(QString("OllamaProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool OllamaProvider::supportsTools() const
{
return true;
}
bool OllamaProvider::supportImage() const
{
return true;
}
bool OllamaProvider::supportThinking() const
{
return true;
}
void OllamaProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("OllamaProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void OllamaProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
if (data.isEmpty()) {
return;
}
for (const QString &line : lines) {
if (line.trimmed().isEmpty()) {
continue;
}
QJsonParseError error;
QJsonDocument doc = QJsonDocument::fromJson(line.toUtf8(), &error);
if (doc.isNull()) {
LOG_MESSAGE(QString("Failed to parse JSON: %1").arg(error.errorString()));
continue;
}
QJsonObject obj = doc.object();
if (obj.contains("error") && !obj["error"].toString().isEmpty()) {
LOG_MESSAGE("Error in Ollama response: " + obj["error"].toString());
continue;
}
processStreamData(requestId, obj);
}
}
void OllamaProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("OllamaProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OllamaMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
return;
}
}
QString finalText;
if (m_messages.contains(requestId)) {
OllamaMessage *message = m_messages[requestId];
for (auto block : message->currentBlocks()) {
if (auto textContent = qobject_cast<LLMCore::TextContent *>(block)) {
finalText += textContent->text();
}
}
if (!finalText.isEmpty()) {
LOG_MESSAGE(QString("Emitting full response for %1, length=%2")
.arg(requestId)
.arg(finalText.length()));
emit fullResponseReceived(requestId, finalText);
}
}
cleanupRequest(requestId);
}
void OllamaProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: No message found for request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
if (!m_requestUrls.contains(requestId) || !m_originalRequests.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
LOG_MESSAGE(QString("Tool execution complete for Ollama request %1").arg(requestId));
OllamaMessage *message = m_messages[requestId];
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(requestId, tool->id(), toolStringName, it.value());
break;
}
}
}
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray messages = continuationRequest["messages"].toArray();
QJsonObject assistantMessage = message->toProviderFormat();
messages.append(assistantMessage);
LOG_MESSAGE(QString("Assistant message with tool_calls:\n%1")
.arg(
QString::fromUtf8(
QJsonDocument(assistantMessage).toJson(QJsonDocument::Indented))));
QJsonArray toolResultMessages = message->createToolResultMessages(toolResults);
for (const auto &toolMsg : toolResultMessages) {
messages.append(toolMsg);
LOG_MESSAGE(QString("Tool result message:\n%1")
.arg(
QString::fromUtf8(
QJsonDocument(toolMsg.toObject()).toJson(QJsonDocument::Indented))));
}
continuationRequest["messages"] = messages;
LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
.arg(requestId)
.arg(toolResults.size()));
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void OllamaProvider::processStreamData(const QString &requestId, const QJsonObject &data)
{
OllamaMessage *message = m_messages.value(requestId);
if (!message) {
message = new OllamaMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW OllamaMessage for request %1").arg(requestId));
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting continuation for request %1").arg(requestId));
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId));
}
if (data.contains("thinking")) {
QString thinkingDelta = data["thinking"].toString();
if (!thinkingDelta.isEmpty()) {
message->handleThinkingDelta(thinkingDelta);
LOG_MESSAGE(QString("OllamaProvider: Received thinking delta, length=%1")
.arg(thinkingDelta.length()));
}
}
if (data.contains("message")) {
QJsonObject messageObj = data["message"].toObject();
if (messageObj.contains("thinking")) {
QString thinkingDelta = messageObj["thinking"].toString();
if (!thinkingDelta.isEmpty()) {
message->handleThinkingDelta(thinkingDelta);
if (!m_thinkingStarted.contains(requestId)) {
auto thinkingBlocks = message->getCurrentThinkingContent();
if (!thinkingBlocks.isEmpty() && thinkingBlocks.first()) {
QString currentThinking = thinkingBlocks.first()->thinking();
QString displayThinking = currentThinking.length() > 50
? QString("%1...").arg(currentThinking.left(50))
: currentThinking;
emit thinkingBlockReceived(requestId, displayThinking, "");
m_thinkingStarted.insert(requestId);
}
}
}
}
if (messageObj.contains("content")) {
QString content = messageObj["content"].toString();
if (!content.isEmpty()) {
emitThinkingBlocks(requestId, message);
message->handleContentDelta(content);
bool hasTextContent = false;
for (auto block : message->currentBlocks()) {
if (qobject_cast<LLMCore::TextContent *>(block)) {
hasTextContent = true;
break;
}
}
if (hasTextContent) {
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
}
}
if (messageObj.contains("tool_calls")) {
QJsonArray toolCalls = messageObj["tool_calls"].toArray();
LOG_MESSAGE(
QString("OllamaProvider: Found %1 structured tool calls").arg(toolCalls.size()));
for (const auto &toolCallValue : toolCalls) {
message->handleToolCall(toolCallValue.toObject());
}
}
}
else if (data.contains("response")) {
QString content = data["response"].toString();
if (!content.isEmpty()) {
message->handleContentDelta(content);
bool hasTextContent = false;
for (auto block : message->currentBlocks()) {
if (qobject_cast<LLMCore::TextContent *>(block)) {
hasTextContent = true;
break;
}
}
if (hasTextContent) {
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
}
}
if (data["done"].toBool()) {
if (data.contains("signature")) {
QString signature = data["signature"].toString();
message->handleThinkingComplete(signature);
LOG_MESSAGE(QString("OllamaProvider: Set thinking signature, length=%1")
.arg(signature.length()));
}
message->handleDone(true);
handleMessageComplete(requestId);
}
}
void OllamaProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
OllamaMessage *message = m_messages[requestId];
emitThinkingBlocks(requestId, message);
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Ollama message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(
QString("WARNING: No tools to execute for %1 despite RequiresToolExecution state")
.arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
LOG_MESSAGE(
QString("Executing tool: name=%1, id=%2, input=%3")
.arg(toolContent->name())
.arg(toolContent->id())
.arg(
QString::fromUtf8(
QJsonDocument(toolContent->input()).toJson(QJsonDocument::Compact))));
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("Ollama message marked as complete for %1").arg(requestId));
}
}
void OllamaProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up Ollama request %1").arg(requestId));
if (m_messages.contains(requestId)) {
auto msg = m_messages.take(requestId);
msg->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_thinkingEmitted.remove(requestId);
m_thinkingStarted.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
}
void OllamaProvider::emitThinkingBlocks(const QString &requestId, OllamaMessage *message)
{
if (!message || m_thinkingEmitted.contains(requestId)) {
return;
}
auto thinkingBlocks = message->getCurrentThinkingContent();
if (thinkingBlocks.isEmpty()) {
return;
}
for (auto thinkingContent : thinkingBlocks) {
emit thinkingBlockReceived(
requestId, thinkingContent->thinking(), thinkingContent->signature());
LOG_MESSAGE(QString("Emitted thinking block for request %1, thinking length=%2, signature "
"length=%3")
.arg(requestId)
.arg(thinkingContent->thinking().length())
.arg(thinkingContent->signature().length()));
}
m_thinkingEmitted.insert(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
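The Ollama streaming path above treats the response body as newline-delimited JSON: onDataReceived() splits the buffered stream into lines and processStreamData() reads a "message"/"response" delta, an optional "thinking" delta, and the final "done" flag from each object. Below is a minimal standalone sketch of that per-line parsing using plain Qt types; the sample payload and the parseOllamaChunk name are illustrative only.

#include <QByteArray>
#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonParseError>

// Parse one buffered chunk of Ollama NDJSON and print the text deltas.
// Mirrors the shape handled by processStreamData(): "message.content" for
// chat responses, "response" for completions, and "done" for the final line.
void parseOllamaChunk(const QByteArray &chunk)
{
    const QList<QByteArray> lines = chunk.split('\n');
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty())
            continue;

        QJsonParseError error;
        const QJsonDocument doc = QJsonDocument::fromJson(line, &error);
        if (doc.isNull()) {
            qWarning() << "Skipping malformed line:" << error.errorString();
            continue;
        }

        const QJsonObject obj = doc.object();
        if (obj.contains("message"))
            qDebug() << "delta:" << obj["message"].toObject()["content"].toString();
        else if (obj.contains("response"))
            qDebug() << "delta:" << obj["response"].toString();

        if (obj["done"].toBool())
            qDebug() << "stream finished";
    }
}

int main()
{
    // Illustrative payload only; real lines arrive from the HTTP stream.
    parseOllamaChunk(QByteArray(
        "{\"message\":{\"role\":\"assistant\",\"content\":\"Hel\"},\"done\":false}\n"
        "{\"message\":{\"role\":\"assistant\",\"content\":\"lo\"},\"done\":false}\n"
        "{\"done\":true}\n"));
    return 0;
}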


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,14 +19,13 @@
#pragma once
#include <llmcore/Provider.hpp>
#include <pluginllmcore/Provider.hpp>
#include "OllamaMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <LLMCore/OllamaClient.hpp>
namespace QodeAssist::Providers {
class OllamaProvider : public LLMCore::Provider
class OllamaProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -36,51 +35,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportImage() const override;
bool supportThinking() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamData(const QString &requestId, const QJsonObject &data);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
void emitThinkingBlocks(const QString &requestId, OllamaMessage *message);
QHash<QodeAssist::LLMCore::RequestID, OllamaMessage *> m_messages;
QHash<QodeAssist::LLMCore::RequestID, QUrl> m_requestUrls;
QHash<QodeAssist::LLMCore::RequestID, QJsonObject> m_originalRequests;
QSet<QString> m_thinkingEmitted;
QSet<QString> m_thinkingStarted;
Tools::ToolsManager *m_toolsManager;
::LLMCore::OllamaClient *m_client;
};
} // namespace QodeAssist::Providers


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -18,8 +18,9 @@
*/
#include "OpenAICompatProvider.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "llmcore/ValidationUtils.hpp"
#include "tools/ToolsRegistration.hpp"
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
@ -30,19 +31,14 @@
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QNetworkReply>
namespace QodeAssist::Providers {
OpenAICompatProvider::OpenAICompatProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::OpenAIClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&OpenAICompatProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString OpenAICompatProvider::name() const
@ -50,6 +46,11 @@ QString OpenAICompatProvider::name() const
return "OpenAI Compatible";
}
QString OpenAICompatProvider::apiKey() const
{
return Settings::providerSettings().openAiCompatApiKey();
}
QString OpenAICompatProvider::url() const
{
return "http://localhost:1234";
@ -65,16 +66,11 @@ QString OpenAICompatProvider::chatEndpoint() const
return "/v1/chat/completions";
}
bool OpenAICompatProvider::supportsModelListing() const
{
return false;
}
void OpenAICompatProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -98,22 +94,16 @@ void OpenAICompatProvider::prepareRequest(
request["presence_penalty"] = settings.presencePenalty();
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_toolsManager->getToolsDefinitions(
LLMCore::ToolSchemaFormat::OpenAI, filter);
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(
@ -127,286 +117,19 @@ QFuture<QList<QString>> OpenAICompatProvider::getInstalledModels(const QString &
return QtFuture::makeReadyFuture(QList<QString>{});
}
QList<QString> OpenAICompatProvider::validateRequest(
const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID OpenAICompatProvider::providerID() const
{
const auto templateReq = QJsonObject{
{"model", {}},
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
{"temperature", {}},
{"max_tokens", {}},
{"top_p", {}},
{"top_k", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}},
{"stop", QJsonArray{}},
{"stream", {}},
{"tools", {}}};
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
return PluginLLMCore::ProviderID::OpenAICompatible;
}
QString OpenAICompatProvider::apiKey() const
PluginLLMCore::ProviderCapabilities OpenAICompatProvider::capabilities() const
{
return Settings::providerSettings().openAiCompatApiKey();
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Image;
}
void OpenAICompatProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
::LLMCore::BaseClient *OpenAICompatProvider::client() const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
}
LLMCore::ProviderID OpenAICompatProvider::providerID() const
{
return LLMCore::ProviderID::OpenAICompatible;
}
void OpenAICompatProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(
QString("OpenAICompatProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool OpenAICompatProvider::supportsTools() const
{
return true;
}
bool OpenAICompatProvider::supportImage() const
{
return true;
}
void OpenAICompatProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("OpenAICompatProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void OpenAICompatProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
for (const QString &line : lines) {
if (line.trimmed().isEmpty() || line == "data: [DONE]") {
continue;
}
QJsonObject chunk = parseEventLine(line);
if (chunk.isEmpty())
continue;
processStreamChunk(requestId, chunk);
}
}
void OpenAICompatProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("OpenAICompatProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
m_dataBuffers.remove(requestId);
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId));
emit fullResponseReceived(requestId, buffers.responseContent);
}
}
cleanupRequest(requestId);
}
void OpenAICompatProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
LOG_MESSAGE(QString("Tool execution complete for OpenAICompat request %1").arg(requestId));
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
OpenAIMessage *message = m_messages[requestId];
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
OpenAIMessage *message = m_messages[requestId];
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray messages = continuationRequest["messages"].toArray();
messages.append(message->toProviderFormat());
QJsonArray toolResultMessages = message->createToolResultMessages(toolResults);
for (const auto &toolMsg : toolResultMessages) {
messages.append(toolMsg);
}
continuationRequest["messages"] = messages;
LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
.arg(requestId)
.arg(toolResults.size()));
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void OpenAICompatProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk)
{
QJsonArray choices = chunk["choices"].toArray();
if (choices.isEmpty()) {
return;
}
QJsonObject choice = choices[0].toObject();
QJsonObject delta = choice["delta"].toObject();
QString finishReason = choice["finish_reason"].toString();
OpenAIMessage *message = m_messages.value(requestId);
if (!message) {
message = new OpenAIMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW OpenAIMessage for request %1").arg(requestId));
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting continuation for request %1").arg(requestId));
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId));
}
if (delta.contains("content") && !delta["content"].isNull()) {
QString content = delta["content"].toString();
message->handleContentDelta(content);
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
if (delta.contains("tool_calls")) {
QJsonArray toolCalls = delta["tool_calls"].toArray();
for (const auto &toolCallValue : toolCalls) {
QJsonObject toolCall = toolCallValue.toObject();
int index = toolCall["index"].toInt();
if (toolCall.contains("id")) {
QString id = toolCall["id"].toString();
QJsonObject function = toolCall["function"].toObject();
QString name = function["name"].toString();
message->handleToolCallStart(index, id, name);
}
if (toolCall.contains("function")) {
QJsonObject function = toolCall["function"].toObject();
if (function.contains("arguments")) {
QString args = function["arguments"].toString();
message->handleToolCallDelta(index, args);
}
}
}
}
if (!finishReason.isEmpty() && finishReason != "null") {
for (int i = 0; i < 10; ++i) {
message->handleToolCallComplete(i);
}
message->handleFinishReason(finishReason);
handleMessageComplete(requestId);
}
}
void OpenAICompatProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("OpenAICompat message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("OpenAICompat message marked as complete for %1").arg(requestId));
}
}
void OpenAICompatProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up OpenAICompat request %1").arg(requestId));
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
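processStreamChunk() above consumes OpenAI-style streaming chunks in which each data: line carries a choices[0].delta object and an optional finish_reason. A self-contained illustration of that chunk shape and the same field accesses follows; the JSON payload is invented for the example.

#include <QByteArray>
#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // One streamed chunk, already stripped of its "data: " prefix by
    // parseEventLine(); the payload here is invented for the example.
    const QByteArray chunk =
        "{\"choices\":[{\"delta\":{\"content\":\"Hi\"},\"finish_reason\":null}]}";

    const QJsonObject obj = QJsonDocument::fromJson(chunk).object();
    const QJsonArray choices = obj["choices"].toArray();
    if (choices.isEmpty())
        return 1;

    const QJsonObject choice = choices[0].toObject();
    const QJsonObject delta = choice["delta"].toObject();

    // Text deltas are appended to the response buffer; a non-empty
    // finish_reason ("stop" or "tool_calls") ends the message.
    qDebug() << "delta content:" << delta["content"].toString();
    qDebug() << "finish reason:" << choice["finish_reason"].toString();
    return 0;
}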


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,13 +19,12 @@
#pragma once
#include "OpenAIMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <llmcore/Provider.hpp>
#include <LLMCore/OpenAIClient.hpp>
#include <pluginllmcore/Provider.hpp>
namespace QodeAssist::Providers {
class OpenAICompatProvider : public LLMCore::Provider
class OpenAICompatProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -35,47 +34,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportImage() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamChunk(const QString &requestId, const QJsonObject &chunk);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<LLMCore::RequestID, OpenAIMessage *> m_messages;
QHash<LLMCore::RequestID, QUrl> m_requestUrls;
QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
Tools::ToolsManager *m_toolsManager;
::LLMCore::OpenAIClient *m_client;
};
} // namespace QodeAssist::Providers


@ -46,7 +46,7 @@ void OpenAIMessage::handleToolCallStart(int index, const QString &id, const QStr
m_currentBlocks.append(nullptr);
}
auto toolContent = new LLMCore::ToolUseContent(id, name);
auto toolContent = new PluginLLMCore::ToolUseContent(id, name);
toolContent->setParent(this);
m_currentBlocks[index] = toolContent;
m_pendingToolArguments[index] = "";
@ -73,7 +73,7 @@ void OpenAIMessage::handleToolCallComplete(int index)
}
if (index < m_currentBlocks.size()) {
if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(m_currentBlocks[index])) {
if (auto toolContent = qobject_cast<PluginLLMCore::ToolUseContent *>(m_currentBlocks[index])) {
toolContent->setInput(argsObject);
}
}
@ -100,10 +100,10 @@ QJsonObject OpenAIMessage::toProviderFormat() const
if (!block)
continue;
if (auto text = qobject_cast<LLMCore::TextContent *>(block)) {
if (auto text = qobject_cast<PluginLLMCore::TextContent *>(block)) {
textContent += text->text();
} else if (auto tool = qobject_cast<LLMCore::ToolUseContent *>(block)) {
toolCalls.append(tool->toJson(LLMCore::ProviderFormat::OpenAI));
} else if (auto tool = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
toolCalls.append(tool->toJson(PluginLLMCore::ProviderFormat::OpenAI));
}
}
@ -126,20 +126,20 @@ QJsonArray OpenAIMessage::createToolResultMessages(const QHash<QString, QString>
for (auto toolContent : getCurrentToolUseContent()) {
if (toolResults.contains(toolContent->id())) {
auto toolResult = std::make_unique<LLMCore::ToolResultContent>(
auto toolResult = std::make_unique<PluginLLMCore::ToolResultContent>(
toolContent->id(), toolResults[toolContent->id()]);
messages.append(toolResult->toJson(LLMCore::ProviderFormat::OpenAI));
messages.append(toolResult->toJson(PluginLLMCore::ProviderFormat::OpenAI));
}
}
return messages;
}
QList<LLMCore::ToolUseContent *> OpenAIMessage::getCurrentToolUseContent() const
QList<PluginLLMCore::ToolUseContent *> OpenAIMessage::getCurrentToolUseContent() const
{
QList<LLMCore::ToolUseContent *> toolBlocks;
QList<PluginLLMCore::ToolUseContent *> toolBlocks;
for (auto block : m_currentBlocks) {
if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(block)) {
if (auto toolContent = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
toolBlocks.append(toolContent);
}
}
@ -153,29 +153,29 @@ void OpenAIMessage::startNewContinuation()
m_currentBlocks.clear();
m_pendingToolArguments.clear();
m_finishReason.clear();
m_state = LLMCore::MessageState::Building;
m_state = PluginLLMCore::MessageState::Building;
}
void OpenAIMessage::updateStateFromFinishReason()
{
if (m_finishReason == "tool_calls" && !getCurrentToolUseContent().empty()) {
m_state = LLMCore::MessageState::RequiresToolExecution;
m_state = PluginLLMCore::MessageState::RequiresToolExecution;
} else if (m_finishReason == "stop") {
m_state = LLMCore::MessageState::Final;
m_state = PluginLLMCore::MessageState::Final;
} else {
m_state = LLMCore::MessageState::Complete;
m_state = PluginLLMCore::MessageState::Complete;
}
}
LLMCore::TextContent *OpenAIMessage::getOrCreateTextContent()
PluginLLMCore::TextContent *OpenAIMessage::getOrCreateTextContent()
{
for (auto block : m_currentBlocks) {
if (auto textContent = qobject_cast<LLMCore::TextContent *>(block)) {
if (auto textContent = qobject_cast<PluginLLMCore::TextContent *>(block)) {
return textContent;
}
}
return addCurrentContent<LLMCore::TextContent>();
return addCurrentContent<PluginLLMCore::TextContent>();
}
} // namespace QodeAssist::Providers
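toProviderFormat() and createToolResultMessages() serialize a tool round trip into the standard OpenAI chat message shapes: an assistant turn carrying tool_calls, followed by role "tool" turns with the results. The sketch below builds those two JSON objects by hand so the wire format is visible; the tool name, call id, and contents are invented, and the exact JSON emitted by ToolUseContent::toJson() is defined in the external library.

#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Assistant turn requesting one tool call (standard OpenAI chat shape);
    // the id, tool name, and arguments are invented for the example.
    const QJsonObject assistantMessage{
        {"role", "assistant"},
        {"content", ""},
        {"tool_calls",
         QJsonArray{QJsonObject{
             {"id", "call_123"},
             {"type", "function"},
             {"function",
              QJsonObject{{"name", "read_file"},
                          {"arguments", "{\"path\":\"main.cpp\"}"}}}}}}};

    // Matching tool-result turn, the shape appended by createToolResultMessages().
    const QJsonObject toolResult{
        {"role", "tool"},
        {"tool_call_id", "call_123"},
        {"content", "int main() { return 0; }"}};

    qDebug().noquote() << QJsonDocument(assistantMessage).toJson(QJsonDocument::Indented);
    qDebug().noquote() << QJsonDocument(toolResult).toJson(QJsonDocument::Indented);
    return 0;
}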


@ -19,7 +19,7 @@
#pragma once
#include <llmcore/ContentBlocks.hpp>
#include <pluginllmcore/ContentBlocks.hpp>
namespace QodeAssist::Providers {
@ -38,19 +38,19 @@ public:
QJsonObject toProviderFormat() const;
QJsonArray createToolResultMessages(const QHash<QString, QString> &toolResults) const;
LLMCore::MessageState state() const { return m_state; }
QList<LLMCore::ToolUseContent *> getCurrentToolUseContent() const;
PluginLLMCore::MessageState state() const { return m_state; }
QList<PluginLLMCore::ToolUseContent *> getCurrentToolUseContent() const;
void startNewContinuation();
private:
QString m_finishReason;
LLMCore::MessageState m_state = LLMCore::MessageState::Building;
QList<LLMCore::ContentBlock *> m_currentBlocks;
PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building;
QList<PluginLLMCore::ContentBlock *> m_currentBlocks;
QHash<int, QString> m_pendingToolArguments;
void updateStateFromFinishReason();
LLMCore::TextContent *getOrCreateTextContent();
PluginLLMCore::TextContent *getOrCreateTextContent();
template<typename T, typename... Args>
T *addCurrentContent(Args &&...args)


@ -19,7 +19,8 @@
#include "OpenAIProvider.hpp"
#include "llmcore/ValidationUtils.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "tools/ToolsRegistration.hpp"
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
@ -34,14 +35,10 @@
namespace QodeAssist::Providers {
OpenAIProvider::OpenAIProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::OpenAIClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&OpenAIProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString OpenAIProvider::name() const
@ -49,6 +46,11 @@ QString OpenAIProvider::name() const
return "OpenAI";
}
QString OpenAIProvider::apiKey() const
{
return Settings::providerSettings().openAiApiKey();
}
QString OpenAIProvider::url() const
{
return "https://api.openai.com";
@ -64,16 +66,11 @@ QString OpenAIProvider::chatEndpoint() const
return "/v1/chat/completions";
}
bool OpenAIProvider::supportsModelListing() const
{
return true;
}
void OpenAIProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -116,22 +113,16 @@ void OpenAIProvider::prepareRequest(
request["presence_penalty"] = settings.presencePenalty();
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
applyModelParams(Settings::quickRefactorSettings());
} else {
applyModelParams(Settings::chatAssistantSettings());
}
if (isToolsEnabled) {
LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL;
if (type == LLMCore::RequestType::QuickRefactoring) {
filter = LLMCore::RunToolsFilter::OnlyRead;
}
auto toolsDefinitions = m_toolsManager->getToolsDefinitions(
LLMCore::ToolSchemaFormat::OpenAI, filter);
auto toolsDefinitions = m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
request["tools"] = toolsDefinitions;
LOG_MESSAGE(QString("Added %1 tools to OpenAI request").arg(toolsDefinitions.size()));
@ -139,318 +130,37 @@ void OpenAIProvider::prepareRequest(
}
}
QFuture<QList<QString>> OpenAIProvider::getInstalledModels(const QString &url)
QFuture<QList<QString>> OpenAIProvider::getInstalledModels(const QString &baseUrl)
{
QNetworkRequest request(QString("%1/v1/models").arg(url));
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
request.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
if (jsonObject.contains("data")) {
QJsonArray modelArray = jsonObject["data"].toArray();
for (const QJsonValue &value : modelArray) {
QJsonObject modelObject = value.toObject();
if (modelObject.contains("id")) {
QString modelId = modelObject["id"].toString();
if (!modelId.contains("dall-e") && !modelId.contains("whisper")
&& !modelId.contains("tts") && !modelId.contains("davinci")
&& !modelId.contains("babbage") && !modelId.contains("omni")) {
models.append(modelId);
}
}
m_client->setUrl(baseUrl);
m_client->setApiKey(apiKey());
return m_client->listModels().then([](const QList<QString> &allModels) {
QList<QString> filtered;
for (const QString &modelId : allModels) {
if (!modelId.contains("dall-e") && !modelId.contains("whisper")
&& !modelId.contains("tts") && !modelId.contains("davinci")
&& !modelId.contains("babbage") && !modelId.contains("omni")) {
filtered.append(modelId);
}
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching OpenAI models: %1").arg(e.what()));
return QList<QString>{};
return filtered;
});
}
QList<QString> OpenAIProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID OpenAIProvider::providerID() const
{
const auto templateReq = QJsonObject{
{"model", {}},
{"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
{"temperature", {}},
{"max_tokens", {}},
{"max_completion_tokens", {}}, // New parameter for newer models
{"top_p", {}},
{"top_k", {}},
{"frequency_penalty", {}},
{"presence_penalty", {}},
{"stop", QJsonArray{}},
{"stream", {}},
{"tools", {}}};
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
return PluginLLMCore::ProviderID::OpenAI;
}
QString OpenAIProvider::apiKey() const
PluginLLMCore::ProviderCapabilities OpenAIProvider::capabilities() const
{
return Settings::providerSettings().openAiApiKey();
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Image
| PluginLLMCore::ProviderCapability::ModelListing;
}
void OpenAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
::LLMCore::BaseClient *OpenAIProvider::client() const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
}
LLMCore::ProviderID OpenAIProvider::providerID() const
{
return LLMCore::ProviderID::OpenAI;
}
void OpenAIProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
LOG_MESSAGE(QString("OpenAIProvider: Sending request %1 to %2").arg(requestId, url.toString()));
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool OpenAIProvider::supportsTools() const
{
return true;
}
bool OpenAIProvider::supportImage() const
{
return true;
}
void OpenAIProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("OpenAIProvider: Cancelling request %1").arg(requestId));
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void OpenAIProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
for (const QString &line : lines) {
if (line.trimmed().isEmpty() || line == "data: [DONE]") {
continue;
}
QJsonObject chunk = parseEventLine(line);
if (chunk.isEmpty())
continue;
processStreamChunk(requestId, chunk);
}
}
void OpenAIProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("OpenAIProvider request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
m_dataBuffers.remove(requestId);
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId));
emit fullResponseReceived(requestId, buffers.responseContent);
}
}
cleanupRequest(requestId);
}
void OpenAIProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
cleanupRequest(requestId);
return;
}
LOG_MESSAGE(QString("Tool execution complete for OpenAI request %1").arg(requestId));
for (auto it = toolResults.begin(); it != toolResults.end(); ++it) {
OpenAIMessage *message = m_messages[requestId];
auto toolContent = message->getCurrentToolUseContent();
for (auto tool : toolContent) {
if (tool->id() == it.key()) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
OpenAIMessage *message = m_messages[requestId];
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray messages = continuationRequest["messages"].toArray();
messages.append(message->toProviderFormat());
QJsonArray toolResultMessages = message->createToolResultMessages(toolResults);
for (const auto &toolMsg : toolResultMessages) {
messages.append(toolMsg);
}
continuationRequest["messages"] = messages;
LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
.arg(requestId)
.arg(toolResults.size()));
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void OpenAIProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk)
{
QJsonArray choices = chunk["choices"].toArray();
if (choices.isEmpty()) {
return;
}
QJsonObject choice = choices[0].toObject();
QJsonObject delta = choice["delta"].toObject();
QString finishReason = choice["finish_reason"].toString();
OpenAIMessage *message = m_messages.value(requestId);
if (!message) {
message = new OpenAIMessage(this);
m_messages[requestId] = message;
LOG_MESSAGE(QString("Created NEW OpenAIAPIMessage for request %1").arg(requestId));
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Starting continuation for request %1").arg(requestId));
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId));
}
if (delta.contains("content") && !delta["content"].isNull()) {
QString content = delta["content"].toString();
message->handleContentDelta(content);
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
buffers.responseContent += content;
emit partialResponseReceived(requestId, content);
}
if (delta.contains("tool_calls")) {
QJsonArray toolCalls = delta["tool_calls"].toArray();
for (const auto &toolCallValue : toolCalls) {
QJsonObject toolCall = toolCallValue.toObject();
int index = toolCall["index"].toInt();
if (toolCall.contains("id")) {
QString id = toolCall["id"].toString();
QJsonObject function = toolCall["function"].toObject();
QString name = function["name"].toString();
message->handleToolCallStart(index, id, name);
}
if (toolCall.contains("function")) {
QJsonObject function = toolCall["function"].toObject();
if (function.contains("arguments")) {
QString args = function["arguments"].toString();
message->handleToolCallDelta(index, args);
}
}
}
}
if (!finishReason.isEmpty() && finishReason != "null") {
for (int i = 0; i < 10; ++i) {
message->handleToolCallComplete(i);
}
message->handleFinishReason(finishReason);
handleMessageComplete(requestId);
}
}
void OpenAIProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId))
return;
OpenAIMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
LOG_MESSAGE(QString("OpenAI message requires tool execution for %1").arg(requestId));
auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
return;
}
for (auto toolContent : toolUseContent) {
auto toolStringName = m_toolsManager->toolsFactory()->getStringName(toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
} else {
LOG_MESSAGE(QString("OpenAI message marked as complete for %1").arg(requestId));
}
}
void OpenAIProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
LOG_MESSAGE(QString("Cleaning up OpenAI request %1").arg(requestId));
if (m_messages.contains(requestId)) {
OpenAIMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers
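getInstalledModels() now delegates listing to m_client->listModels() and keeps only the completion-capable ids by excluding dall-e, whisper, tts, davinci, babbage, and omni models. The same filtering is pulled out below into a standalone helper for illustration; the model ids in main() are examples, not a real listModels() response.

#include <QDebug>
#include <QList>
#include <QString>
#include <QStringList>

// The exclusion list used by getInstalledModels() above, as a free function
// so the surviving ids are easy to see.
QList<QString> filterChatModels(const QList<QString> &allModels)
{
    static const QStringList excluded
        = {"dall-e", "whisper", "tts", "davinci", "babbage", "omni"};

    QList<QString> filtered;
    for (const QString &modelId : allModels) {
        bool skip = false;
        for (const QString &needle : excluded) {
            if (modelId.contains(needle)) {
                skip = true;
                break;
            }
        }
        if (!skip)
            filtered.append(modelId);
    }
    return filtered;
}

int main()
{
    const auto models = filterChatModels({"gpt-4o-mini", "dall-e-3", "whisper-1", "gpt-4.1"});
    qDebug() << models; // ("gpt-4o-mini", "gpt-4.1")
    return 0;
}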


@ -19,13 +19,12 @@
#pragma once
#include "OpenAIMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <llmcore/Provider.hpp>
#include <LLMCore/OpenAIClient.hpp>
#include <pluginllmcore/Provider.hpp>
namespace QodeAssist::Providers {
class OpenAIProvider : public LLMCore::Provider
class OpenAIProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -35,47 +34,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportImage() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamChunk(const QString &requestId, const QJsonObject &chunk);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<LLMCore::RequestID, OpenAIMessage *> m_messages;
QHash<LLMCore::RequestID, QUrl> m_requestUrls;
QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
Tools::ToolsManager *m_toolsManager;
::LLMCore::OpenAIClient *m_client;
};
} // namespace QodeAssist::Providers


@ -52,7 +52,7 @@ void OpenAIResponsesMessage::handleItemDelta(const QJsonObject &item)
void OpenAIResponsesMessage::handleToolCallStart(const QString &callId, const QString &name)
{
auto toolContent = new LLMCore::ToolUseContent(callId, name);
auto toolContent = new PluginLLMCore::ToolUseContent(callId, name);
toolContent->setParent(this);
m_items.append(toolContent);
m_toolCalls[callId] = toolContent;
@ -86,7 +86,7 @@ void OpenAIResponsesMessage::handleToolCallComplete(const QString &callId)
void OpenAIResponsesMessage::handleReasoningStart(const QString &itemId)
{
auto thinkingContent = new LLMCore::ThinkingContent();
auto thinkingContent = new PluginLLMCore::ThinkingContent();
thinkingContent->setParent(this);
m_items.append(thinkingContent);
m_thinkingBlocks[itemId] = thinkingContent;
@ -115,13 +115,13 @@ QList<QJsonObject> OpenAIResponsesMessage::toItemsFormat() const
QList<QJsonObject> items;
QString textContent;
QList<LLMCore::ToolUseContent *> toolCalls;
QList<PluginLLMCore::ToolUseContent *> toolCalls;
for (const auto *block : m_items) {
if (const auto *text = qobject_cast<const LLMCore::TextContent *>(block)) {
if (const auto *text = qobject_cast<const PluginLLMCore::TextContent *>(block)) {
textContent += text->text();
} else if (auto *tool = qobject_cast<LLMCore::ToolUseContent *>(
const_cast<LLMCore::ContentBlock *>(block))) {
} else if (auto *tool = qobject_cast<PluginLLMCore::ToolUseContent *>(
const_cast<PluginLLMCore::ContentBlock *>(block))) {
toolCalls.append(tool);
}
}
@ -146,22 +146,22 @@ QList<QJsonObject> OpenAIResponsesMessage::toItemsFormat() const
return items;
}
QList<LLMCore::ToolUseContent *> OpenAIResponsesMessage::getCurrentToolUseContent() const
QList<PluginLLMCore::ToolUseContent *> OpenAIResponsesMessage::getCurrentToolUseContent() const
{
QList<LLMCore::ToolUseContent *> toolBlocks;
QList<PluginLLMCore::ToolUseContent *> toolBlocks;
for (auto *block : m_items) {
if (auto *toolContent = qobject_cast<LLMCore::ToolUseContent *>(block)) {
if (auto *toolContent = qobject_cast<PluginLLMCore::ToolUseContent *>(block)) {
toolBlocks.append(toolContent);
}
}
return toolBlocks;
}
QList<LLMCore::ThinkingContent *> OpenAIResponsesMessage::getCurrentThinkingContent() const
QList<PluginLLMCore::ThinkingContent *> OpenAIResponsesMessage::getCurrentThinkingContent() const
{
QList<LLMCore::ThinkingContent *> thinkingBlocks;
QList<PluginLLMCore::ThinkingContent *> thinkingBlocks;
for (auto *block : m_items) {
if (auto *thinkingContent = qobject_cast<LLMCore::ThinkingContent *>(block)) {
if (auto *thinkingContent = qobject_cast<PluginLLMCore::ThinkingContent *>(block)) {
thinkingBlocks.append(thinkingContent);
}
}
@ -189,7 +189,7 @@ QString OpenAIResponsesMessage::accumulatedText() const
{
QString text;
for (const auto *block : m_items) {
if (const auto *textContent = qobject_cast<const LLMCore::TextContent *>(block)) {
if (const auto *textContent = qobject_cast<const PluginLLMCore::TextContent *>(block)) {
text += textContent->text();
}
}
@ -202,28 +202,28 @@ void OpenAIResponsesMessage::updateStateFromStatus()
if (m_status == "completed") {
if (!getCurrentToolUseContent().isEmpty()) {
m_state = LLMCore::MessageState::RequiresToolExecution;
m_state = PluginLLMCore::MessageState::RequiresToolExecution;
} else {
m_state = LLMCore::MessageState::Complete;
m_state = PluginLLMCore::MessageState::Complete;
}
} else if (m_status == "in_progress") {
m_state = LLMCore::MessageState::Building;
m_state = PluginLLMCore::MessageState::Building;
} else if (m_status == "failed" || m_status == "cancelled" || m_status == "incomplete") {
m_state = LLMCore::MessageState::Final;
m_state = PluginLLMCore::MessageState::Final;
} else {
m_state = LLMCore::MessageState::Building;
m_state = PluginLLMCore::MessageState::Building;
}
}
LLMCore::TextContent *OpenAIResponsesMessage::getOrCreateTextItem()
PluginLLMCore::TextContent *OpenAIResponsesMessage::getOrCreateTextItem()
{
for (auto *block : m_items) {
if (auto *textContent = qobject_cast<LLMCore::TextContent *>(block)) {
if (auto *textContent = qobject_cast<PluginLLMCore::TextContent *>(block)) {
return textContent;
}
}
auto *textContent = new LLMCore::TextContent();
auto *textContent = new PluginLLMCore::TextContent();
textContent->setParent(this);
m_items.append(textContent);
return textContent;
@ -239,7 +239,7 @@ void OpenAIResponsesMessage::startNewContinuation()
m_pendingToolArguments.clear();
m_status.clear();
m_state = LLMCore::MessageState::Building;
m_state = PluginLLMCore::MessageState::Building;
}
} // namespace QodeAssist::Providers


@ -19,7 +19,7 @@
#pragma once
#include <llmcore/ContentBlocks.hpp>
#include <pluginllmcore/ContentBlocks.hpp>
namespace QodeAssist::Providers {
@ -41,10 +41,10 @@ public:
QList<QJsonObject> toItemsFormat() const;
QJsonArray createToolResultItems(const QHash<QString, QString> &toolResults) const;
LLMCore::MessageState state() const noexcept { return m_state; }
PluginLLMCore::MessageState state() const noexcept { return m_state; }
QString accumulatedText() const;
QList<LLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<LLMCore::ThinkingContent *> getCurrentThinkingContent() const;
QList<PluginLLMCore::ToolUseContent *> getCurrentToolUseContent() const;
QList<PluginLLMCore::ThinkingContent *> getCurrentThinkingContent() const;
bool hasToolCalls() const noexcept { return !m_toolCalls.isEmpty(); }
bool hasThinkingContent() const noexcept { return !m_thinkingBlocks.isEmpty(); }
@ -53,14 +53,14 @@ public:
private:
QString m_status;
LLMCore::MessageState m_state = LLMCore::MessageState::Building;
QList<LLMCore::ContentBlock *> m_items;
PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building;
QList<PluginLLMCore::ContentBlock *> m_items;
QHash<QString, QString> m_pendingToolArguments;
QHash<QString, LLMCore::ToolUseContent *> m_toolCalls;
QHash<QString, LLMCore::ThinkingContent *> m_thinkingBlocks;
QHash<QString, PluginLLMCore::ToolUseContent *> m_toolCalls;
QHash<QString, PluginLLMCore::ThinkingContent *> m_thinkingBlocks;
void updateStateFromStatus();
LLMCore::TextContent *getOrCreateTextItem();
PluginLLMCore::TextContent *getOrCreateTextItem();
};
} // namespace QodeAssist::Providers


@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -18,9 +18,9 @@
*/
#include "OpenAIResponsesProvider.hpp"
#include "OpenAIResponses/ResponseObject.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "tools/ToolsRegistration.hpp"
#include "llmcore/ValidationUtils.hpp"
#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
@ -35,14 +35,10 @@
namespace QodeAssist::Providers {
OpenAIResponsesProvider::OpenAIResponsesProvider(QObject *parent)
: LLMCore::Provider(parent)
, m_toolsManager(new Tools::ToolsManager(this))
: PluginLLMCore::Provider(parent)
, m_client(new ::LLMCore::OpenAIResponsesClient(QString(), QString(), QString(), this))
{
connect(
m_toolsManager,
&Tools::ToolsManager::toolExecutionComplete,
this,
&OpenAIResponsesProvider::onToolExecutionComplete);
Tools::registerQodeAssistTools(m_client->tools());
}
QString OpenAIResponsesProvider::name() const
@ -50,6 +46,11 @@ QString OpenAIResponsesProvider::name() const
return "OpenAI Responses";
}
QString OpenAIResponsesProvider::apiKey() const
{
return Settings::providerSettings().openAiApiKey();
}
QString OpenAIResponsesProvider::url() const
{
return "https://api.openai.com";
@ -65,16 +66,11 @@ QString OpenAIResponsesProvider::chatEndpoint() const
return "/v1/responses";
}
bool OpenAIResponsesProvider::supportsModelListing() const
{
return true;
}
void OpenAIResponsesProvider::prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled)
{
@ -97,7 +93,7 @@ void OpenAIResponsesProvider::prepareRequest(
if (effortStr.isEmpty()) {
effortStr = "medium";
}
QJsonObject reasoning;
reasoning["effort"] = effortStr;
request["reasoning"] = reasoning;
@ -109,9 +105,9 @@ void OpenAIResponsesProvider::prepareRequest(
request["include"] = include;
};
if (type == LLMCore::RequestType::CodeCompletion) {
if (type == PluginLLMCore::RequestType::CodeCompletion) {
applyModelParams(Settings::codeCompletionSettings());
} else if (type == LLMCore::RequestType::QuickRefactoring) {
} else if (type == PluginLLMCore::RequestType::QuickRefactoring) {
const auto &qrSettings = Settings::quickRefactorSettings();
applyModelParams(qrSettings);
@ -128,12 +124,8 @@ void OpenAIResponsesProvider::prepareRequest(
}
if (isToolsEnabled) {
const LLMCore::RunToolsFilter filter = (type == LLMCore::RequestType::QuickRefactoring)
? LLMCore::RunToolsFilter::OnlyRead
: LLMCore::RunToolsFilter::ALL;
const auto toolsDefinitions
= m_toolsManager->getToolsDefinitions(LLMCore::ToolSchemaFormat::OpenAI, filter);
= m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
QJsonArray responsesTools;
@ -156,496 +148,40 @@ void OpenAIResponsesProvider::prepareRequest(
request["stream"] = true;
}
QFuture<QList<QString>> OpenAIResponsesProvider::getInstalledModels(const QString &url)
QFuture<QList<QString>> OpenAIResponsesProvider::getInstalledModels(const QString &baseUrl)
{
QNetworkRequest request(QString("%1/v1/models").arg(url));
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
request.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
const QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
if (jsonObject.contains("data")) {
const QJsonArray modelArray = jsonObject["data"].toArray();
models.reserve(modelArray.size());
static const QStringList modelPrefixes = {"gpt-5", "o1", "o2", "o3", "o4"};
for (const QJsonValue &value : modelArray) {
const QJsonObject modelObject = value.toObject();
if (!modelObject.contains("id")) {
continue;
}
const QString modelId = modelObject["id"].toString();
for (const QString &prefix : modelPrefixes) {
if (modelId.contains(prefix)) {
models.append(modelId);
break;
}
m_client->setUrl(baseUrl);
m_client->setApiKey(apiKey());
return m_client->listModels().then([](const QList<QString> &models) {
QList<QString> filtered;
static const QStringList modelPrefixes = {"gpt-5", "o1", "o2", "o3", "o4"};
for (const QString &modelId : models) {
for (const QString &prefix : modelPrefixes) {
if (modelId.contains(prefix)) {
filtered.append(modelId);
break;
}
}
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching OpenAI models: %1").arg(e.what()));
return QList<QString>{};
return filtered;
});
}
QList<QString> OpenAIResponsesProvider::validateRequest(
const QJsonObject &request, LLMCore::TemplateType type)
PluginLLMCore::ProviderID OpenAIResponsesProvider::providerID() const
{
Q_UNUSED(type);
QList<QString> errors;
if (!request.contains("input")) {
errors.append("Missing required field: input");
return errors;
}
const QJsonValue inputValue = request["input"];
if (!inputValue.isString() && !inputValue.isArray()) {
errors.append("Field 'input' must be either a string or an array");
}
if (request.contains("max_output_tokens") && !request["max_output_tokens"].isDouble()) {
errors.append("Field 'max_output_tokens' must be a number");
}
if (request.contains("top_p") && !request["top_p"].isDouble()) {
errors.append("Field 'top_p' must be a number");
}
if (request.contains("reasoning") && !request["reasoning"].isObject()) {
errors.append("Field 'reasoning' must be an object");
}
if (request.contains("stream") && !request["stream"].isBool()) {
errors.append("Field 'stream' must be a boolean");
}
if (request.contains("tools") && !request["tools"].isArray()) {
errors.append("Field 'tools' must be an array");
}
return errors;
return PluginLLMCore::ProviderID::OpenAIResponses;
}
QString OpenAIResponsesProvider::apiKey() const
PluginLLMCore::ProviderCapabilities OpenAIResponsesProvider::capabilities() const
{
return Settings::providerSettings().openAiApiKey();
return PluginLLMCore::ProviderCapability::Tools | PluginLLMCore::ProviderCapability::Thinking
| PluginLLMCore::ProviderCapability::Image
| PluginLLMCore::ProviderCapability::ModelListing;
}
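A sketch of how calling code might query these capability flags, assuming `ProviderCapabilities` is a QFlags-style type, as the `|` composition above suggests; `provider` is a placeholder for any PluginLLMCore::Provider instance.
// Hypothetical capability check; testFlag() is the standard QFlags accessor.
const auto caps = provider->capabilities();
const bool supportsThinking
    = caps.testFlag(PluginLLMCore::ProviderCapability::Thinking);
if (caps.testFlag(PluginLLMCore::ProviderCapability::Tools)) {
    // attach tool definitions to the request
}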
void OpenAIResponsesProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
::LLMCore::BaseClient *OpenAIResponsesProvider::client() const
{
networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
}
LLMCore::ProviderID OpenAIResponsesProvider::providerID() const
{
return LLMCore::ProviderID::OpenAIResponses;
}
void OpenAIResponsesProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
httpClient()->postStreaming(requestId, networkRequest, payload);
}
bool OpenAIResponsesProvider::supportsTools() const
{
return true;
}
bool OpenAIResponsesProvider::supportImage() const
{
return true;
}
bool OpenAIResponsesProvider::supportThinking() const
{
return true;
}
void OpenAIResponsesProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
LLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
void OpenAIResponsesProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
const QStringList lines = buffers.rawStreamBuffer.processData(data);
QString currentEventType;
for (const QString &line : lines) {
const QString trimmedLine = line.trimmed();
if (trimmedLine.isEmpty()) {
continue;
}
if (line == "data: [DONE]") {
continue;
}
if (line.startsWith("event: ")) {
currentEventType = line.mid(7).trimmed();
continue;
}
QString dataLine = line;
if (line.startsWith("data: ")) {
dataLine = line.mid(6);
}
const QJsonDocument doc = QJsonDocument::fromJson(dataLine.toUtf8());
if (doc.isObject()) {
const QJsonObject obj = doc.object();
processStreamEvent(requestId, currentEventType, obj);
}
}
}
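For context, the kind of server-sent-events stream this parser expects might look like the sketch below; the event names are taken from the handlers in processStreamEvent() further down, but the payloads themselves are illustrative only.
// Illustrative SSE frames: "event:" selects the handler, "data:" carries JSON,
// and a final "data: [DONE]" terminates the stream.
const QByteArray sampleStream =
    "event: response.output_text.delta\n"
    "data: {\"delta\":\"Hello\"}\n"
    "\n"
    "event: response.completed\n"
    "data: {\"response\":{\"status\":\"completed\"}}\n"
    "\n"
    "data: [DONE]\n";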
void OpenAIResponsesProvider::onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("OpenAIResponses request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OpenAIResponsesMessage *message = m_messages[requestId];
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
emit fullResponseReceived(requestId, buffers.responseContent);
} else {
LOG_MESSAGE(QString("WARNING: OpenAIResponses - Response content is empty for %1, "
"emitting empty response")
.arg(requestId));
emit fullResponseReceived(requestId, "");
}
} else {
LOG_MESSAGE(
QString("WARNING: OpenAIResponses - No data buffer found for %1").arg(requestId));
}
cleanupRequest(requestId);
}
void OpenAIResponsesProvider::processStreamEvent(
const QString &requestId, const QString &eventType, const QJsonObject &data)
{
OpenAIResponsesMessage *message = m_messages.value(requestId);
if (!message) {
message = new OpenAIResponsesMessage(this);
m_messages[requestId] = message;
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == LLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
}
if (eventType == "response.content_part.added") {
} else if (eventType == "response.output_text.delta") {
const QString delta = data["delta"].toString();
if (!delta.isEmpty()) {
m_dataBuffers[requestId].responseContent += delta;
emit partialResponseReceived(requestId, delta);
}
} else if (eventType == "response.output_text.done") {
const QString fullText = data["text"].toString();
if (!fullText.isEmpty()) {
m_dataBuffers[requestId].responseContent = fullText;
}
} else if (eventType == "response.content_part.done") {
} else if (eventType == "response.output_item.added") {
using namespace QodeAssist::OpenAIResponses;
const QJsonObject item = data["item"].toObject();
OutputItem outputItem = OutputItem::fromJson(item);
if (const auto *functionCall = outputItem.asFunctionCall()) {
if (!functionCall->callId.isEmpty() && !functionCall->name.isEmpty()) {
if (!m_itemIdToCallId.contains(requestId)) {
m_itemIdToCallId[requestId] = QHash<QString, QString>();
}
m_itemIdToCallId[requestId][functionCall->id] = functionCall->callId;
message->handleToolCallStart(functionCall->callId, functionCall->name);
}
} else if (const auto *reasoning = outputItem.asReasoning()) {
if (!reasoning->id.isEmpty()) {
message->handleReasoningStart(reasoning->id);
}
}
} else if (eventType == "response.reasoning_content.delta") {
const QString itemId = data["item_id"].toString();
const QString delta = data["delta"].toString();
if (!itemId.isEmpty() && !delta.isEmpty()) {
message->handleReasoningDelta(itemId, delta);
}
} else if (eventType == "response.reasoning_content.done") {
const QString itemId = data["item_id"].toString();
if (!itemId.isEmpty()) {
message->handleReasoningComplete(itemId);
emitPendingThinkingBlocks(requestId);
}
} else if (eventType == "response.function_call_arguments.delta") {
const QString itemId = data["item_id"].toString();
const QString delta = data["delta"].toString();
if (!itemId.isEmpty() && !delta.isEmpty()) {
const QString callId = m_itemIdToCallId.value(requestId).value(itemId);
if (!callId.isEmpty()) {
message->handleToolCallDelta(callId, delta);
} else {
LOG_MESSAGE(QString("ERROR: No call_id mapping found for item_id: %1").arg(itemId));
}
}
} else if (
eventType == "response.function_call_arguments.done"
|| eventType == "response.output_item.done") {
const QString itemId = data["item_id"].toString();
const QJsonObject item = data["item"].toObject();
if (!item.isEmpty() && item["type"].toString() == "reasoning") {
using namespace QodeAssist::OpenAIResponses;
const QString finalItemId = itemId.isEmpty() ? item["id"].toString() : itemId;
ReasoningOutput reasoningOutput = ReasoningOutput::fromJson(item);
QString reasoningText;
if (!reasoningOutput.summaryText.isEmpty()) {
reasoningText = reasoningOutput.summaryText;
} else if (!reasoningOutput.contentTexts.isEmpty()) {
reasoningText = reasoningOutput.contentTexts.join("\n");
}
if (reasoningText.isEmpty()) {
reasoningText = QString(
"[Reasoning process completed, but detailed thinking is not available in "
"streaming mode. The model has processed your request with extended reasoning.]");
}
if (!finalItemId.isEmpty()) {
message->handleReasoningDelta(finalItemId, reasoningText);
message->handleReasoningComplete(finalItemId);
emitPendingThinkingBlocks(requestId);
}
} else if (item.isEmpty() && !itemId.isEmpty()) {
const QString callId = m_itemIdToCallId.value(requestId).value(itemId);
if (!callId.isEmpty()) {
message->handleToolCallComplete(callId);
} else {
LOG_MESSAGE(
QString("ERROR: OpenAIResponses - No call_id mapping found for item_id: %1")
.arg(itemId));
}
} else if (!item.isEmpty() && item["type"].toString() == "function_call") {
const QString callId = item["call_id"].toString();
if (!callId.isEmpty()) {
message->handleToolCallComplete(callId);
} else {
LOG_MESSAGE(
QString("ERROR: OpenAIResponses - Function call done but call_id is empty"));
}
}
} else if (eventType == "response.created") {
} else if (eventType == "response.in_progress") {
} else if (eventType == "response.completed") {
using namespace QodeAssist::OpenAIResponses;
const QJsonObject responseObj = data["response"].toObject();
Response response = Response::fromJson(responseObj);
const QString statusStr = responseObj["status"].toString();
if (m_dataBuffers[requestId].responseContent.isEmpty()) {
const QString aggregatedText = response.getAggregatedText();
if (!aggregatedText.isEmpty()) {
m_dataBuffers[requestId].responseContent = aggregatedText;
}
}
message->handleStatus(statusStr);
handleMessageComplete(requestId);
} else if (eventType == "response.incomplete") {
using namespace QodeAssist::OpenAIResponses;
const QJsonObject responseObj = data["response"].toObject();
if (!responseObj.isEmpty()) {
Response response = Response::fromJson(responseObj);
const QString statusStr = responseObj["status"].toString();
if (m_dataBuffers[requestId].responseContent.isEmpty()) {
const QString aggregatedText = response.getAggregatedText();
if (!aggregatedText.isEmpty()) {
m_dataBuffers[requestId].responseContent = aggregatedText;
}
}
message->handleStatus(statusStr);
} else {
message->handleStatus("incomplete");
}
handleMessageComplete(requestId);
} else if (!eventType.isEmpty()) {
LOG_MESSAGE(QString("WARNING: OpenAIResponses - Unhandled event type '%1' for request %2\nData: %3")
.arg(eventType)
.arg(requestId)
.arg(QString::fromUtf8(QJsonDocument(data).toJson(QJsonDocument::Compact))));
}
}
void OpenAIResponsesProvider::emitPendingThinkingBlocks(const QString &requestId)
{
if (!m_messages.contains(requestId)) {
return;
}
OpenAIResponsesMessage *message = m_messages[requestId];
const auto thinkingBlocks = message->getCurrentThinkingContent();
if (thinkingBlocks.isEmpty()) {
return;
}
const int alreadyEmitted = m_emittedThinkingBlocksCount.value(requestId, 0);
const int totalBlocks = thinkingBlocks.size();
for (int i = alreadyEmitted; i < totalBlocks; ++i) {
const auto *thinkingContent = thinkingBlocks[i];
if (thinkingContent->thinking().trimmed().isEmpty()) {
continue;
}
emit thinkingBlockReceived(
requestId, thinkingContent->thinking(), thinkingContent->signature());
}
m_emittedThinkingBlocksCount[requestId] = totalBlocks;
}
void OpenAIResponsesProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId)) {
return;
}
OpenAIResponsesMessage *message = m_messages[requestId];
emitPendingThinkingBlocks(requestId);
if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
const auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
return;
}
for (const auto *toolContent : toolUseContent) {
const auto toolStringName = m_toolsManager->toolsFactory()->getStringName(
toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_toolsManager->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
}
}
void OpenAIResponsesProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: OpenAIResponses - Missing data for continuation request %1")
.arg(requestId));
cleanupRequest(requestId);
return;
}
OpenAIResponsesMessage *message = m_messages[requestId];
const auto toolContent = message->getCurrentToolUseContent();
for (auto it = toolResults.constBegin(); it != toolResults.constEnd(); ++it) {
for (const auto *tool : toolContent) {
if (tool->id() == it.key()) {
const auto toolStringName = m_toolsManager->toolsFactory()->getStringName(
tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray input = continuationRequest["input"].toArray();
const QList<QJsonObject> assistantItems = message->toItemsFormat();
for (const QJsonObject &item : assistantItems) {
input.append(item);
}
const QJsonArray toolResultItems = message->createToolResultItems(toolResults);
for (const QJsonValue &item : toolResultItems) {
input.append(item);
}
continuationRequest["input"] = input;
m_dataBuffers[requestId].responseContent.clear();
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
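The continuation items appended to `input` above would plausibly take the shapes sketched below. This is a hedged illustration based on OpenAI Responses API conventions; the exact JSON produced by toItemsFormat() and createToolResultItems() is defined elsewhere and not shown in this diff, and the ids and tool name are hypothetical.
// Assumed item shapes: the assistant's function call and the matching output.
QJsonArray input;
QJsonObject functionCallItem{
    {"type", "function_call"},
    {"call_id", "call_abc123"},           // hypothetical call id
    {"name", "read_file"},                // hypothetical tool name
    {"arguments", "{\"path\":\"main.cpp\"}"}};
QJsonObject toolResultItem{
    {"type", "function_call_output"},
    {"call_id", "call_abc123"},
    {"output", "int main() { return 0; }"}};
input.append(functionCallItem);
input.append(toolResultItem);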
void OpenAIResponsesProvider::cleanupRequest(const LLMCore::RequestID &requestId)
{
if (m_messages.contains(requestId)) {
OpenAIResponsesMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_itemIdToCallId.remove(requestId);
m_emittedThinkingBlocksCount.remove(requestId);
m_toolsManager->cleanupRequest(requestId);
return m_client;
}
} // namespace QodeAssist::Providers

View File

@ -1,4 +1,4 @@
/*
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@ -19,13 +19,12 @@
#pragma once
#include "OpenAIResponsesMessage.hpp"
#include "tools/ToolsManager.hpp"
#include <llmcore/Provider.hpp>
#include <LLMCore/OpenAIResponsesClient.hpp>
#include <pluginllmcore/Provider.hpp>
namespace QodeAssist::Providers {
class OpenAIResponsesProvider : public LLMCore::Provider
class OpenAIResponsesProvider : public PluginLLMCore::Provider
{
Q_OBJECT
public:
@ -35,52 +34,22 @@ public:
QString url() const override;
QString completionEndpoint() const override;
QString chatEndpoint() const override;
bool supportsModelListing() const override;
void prepareRequest(
QJsonObject &request,
LLMCore::PromptTemplate *prompt,
LLMCore::ContextData context,
LLMCore::RequestType type,
PluginLLMCore::PromptTemplate *prompt,
PluginLLMCore::ContextData context,
PluginLLMCore::RequestType type,
bool isToolsEnabled,
bool isThinkingEnabled) override;
QFuture<QList<QString>> getInstalledModels(const QString &url) override;
QList<QString> validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
PluginLLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderCapabilities capabilities() const override;
::LLMCore::BaseClient *client() const override;
QString apiKey() const override;
void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
LLMCore::ProviderID providerID() const override;
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
bool supportsTools() const override;
bool supportImage() const override;
bool supportThinking() const override;
void cancelRequest(const LLMCore::RequestID &requestId) override;
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
void onRequestFinished(
const QodeAssist::LLMCore::RequestID &requestId,
std::optional<QString> error) override;
private slots:
void onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults);
private:
void processStreamEvent(const QString &requestId, const QString &eventType, const QJsonObject &data);
void emitPendingThinkingBlocks(const QString &requestId);
void handleMessageComplete(const QString &requestId);
void cleanupRequest(const LLMCore::RequestID &requestId);
QHash<LLMCore::RequestID, OpenAIResponsesMessage *> m_messages;
QHash<LLMCore::RequestID, QUrl> m_requestUrls;
QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
QHash<LLMCore::RequestID, QHash<QString, QString>> m_itemIdToCallId;
QHash<LLMCore::RequestID, int> m_emittedThinkingBlocksCount;
Tools::ToolsManager *m_toolsManager;
::LLMCore::OpenAIResponsesClient *m_client;
};
} // namespace QodeAssist::Providers

View File

@ -28,24 +28,28 @@
namespace QodeAssist::Providers {
OpenRouterProvider::OpenRouterProvider(QObject *parent)
: OpenAICompatProvider(parent)
{}
QString OpenRouterProvider::name() const
{
return "OpenRouter";
}
QString OpenRouterProvider::url() const
{
return "https://openrouter.ai/api";
}
QString OpenRouterProvider::apiKey() const
{
return Settings::providerSettings().openRouterApiKey();
}
LLMCore::ProviderID OpenRouterProvider::providerID() const
QString OpenRouterProvider::url() const
{
return LLMCore::ProviderID::OpenRouter;
return "https://openrouter.ai/api";
}
PluginLLMCore::ProviderID OpenRouterProvider::providerID() const
{
return PluginLLMCore::ProviderID::OpenRouter;
}
} // namespace QodeAssist::Providers

View File

@ -26,10 +26,12 @@ namespace QodeAssist::Providers {
class OpenRouterProvider : public OpenAICompatProvider
{
public:
explicit OpenRouterProvider(QObject *parent = nullptr);
QString name() const override;
QString url() const override;
QString apiKey() const override;
LLMCore::ProviderID providerID() const override;
PluginLLMCore::ProviderID providerID() const override;
};
} // namespace QodeAssist::Providers

View File

@ -19,7 +19,7 @@
#pragma once
#include "llmcore/ProvidersManager.hpp"
#include "pluginllmcore/ProvidersManager.hpp"
#include "providers/ClaudeProvider.hpp"
#include "providers/CodestralProvider.hpp"
#include "providers/GoogleAIProvider.hpp"
@ -36,7 +36,7 @@ namespace QodeAssist::Providers {
inline void registerProviders()
{
auto &providerManager = LLMCore::ProvidersManager::instance();
auto &providerManager = PluginLLMCore::ProvidersManager::instance();
providerManager.registerProvider<OllamaProvider>();
providerManager.registerProvider<ClaudeProvider>();
providerManager.registerProvider<OpenAIProvider>();