diff --git a/llmcore/CMakeLists.txt b/llmcore/CMakeLists.txt index 0a3d0f2..00ed401 100644 --- a/llmcore/CMakeLists.txt +++ b/llmcore/CMakeLists.txt @@ -10,8 +10,6 @@ add_library(LLMCore STATIC PromptTemplate.hpp PromptTemplateManager.hpp PromptTemplateManager.cpp RequestConfig.hpp - OllamaMessage.hpp OllamaMessage.cpp - OpenAIMessage.hpp OpenAIMessage.cpp ValidationUtils.hpp ValidationUtils.cpp ProviderID.hpp HttpClient.hpp HttpClient.cpp diff --git a/llmcore/OllamaMessage.cpp b/llmcore/OllamaMessage.cpp deleted file mode 100644 index a325922..0000000 --- a/llmcore/OllamaMessage.cpp +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright (C) 2024-2025 Petr Mironychev - * - * This file is part of QodeAssist. - * - * QodeAssist is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * QodeAssist is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with QodeAssist. If not, see . - */ - -#include "OllamaMessage.hpp" -#include -#include - -namespace QodeAssist::LLMCore { - -QJsonObject OllamaMessage::parseJsonFromData(const QByteArray &data) -{ - QByteArrayList lines = data.split('\n'); - for (const QByteArray &line : lines) { - if (line.trimmed().isEmpty()) { - continue; - } - - QJsonParseError error; - QJsonDocument doc = QJsonDocument::fromJson(line, &error); - if (!doc.isNull() && error.error == QJsonParseError::NoError) { - return doc.object(); - } - } - return QJsonObject(); -} - -OllamaMessage OllamaMessage::fromJson(const QByteArray &data, Type type) -{ - OllamaMessage msg; - QJsonObject obj = parseJsonFromData(data); - - if (obj.isEmpty()) { - msg.error = "Invalid JSON response"; - return msg; - } - - msg.model = obj["model"].toString(); - msg.createdAt = QDateTime::fromString(obj["created_at"].toString(), Qt::ISODate); - msg.done = obj["done"].toBool(); - msg.doneReason = obj["done_reason"].toString(); - msg.error = obj["error"].toString(); - - if (type == Type::Generate) { - auto &genResponse = msg.response.emplace(); - genResponse.response = obj["response"].toString(); - if (msg.done && obj.contains("context")) { - const auto array = obj["context"].toArray(); - genResponse.context.reserve(array.size()); - for (const auto &val : array) { - genResponse.context.append(val.toInt()); - } - } - } else { - auto &chatResponse = msg.response.emplace(); - const auto msgObj = obj["message"].toObject(); - chatResponse.role = msgObj["role"].toString(); - chatResponse.content = msgObj["content"].toString(); - } - - if (msg.done) { - msg.metrics - = {obj["total_duration"].toVariant().toLongLong(), - obj["load_duration"].toVariant().toLongLong(), - obj["prompt_eval_count"].toVariant().toLongLong(), - obj["prompt_eval_duration"].toVariant().toLongLong(), - obj["eval_count"].toVariant().toLongLong(), - obj["eval_duration"].toVariant().toLongLong()}; - } - - return msg; -} - -QString OllamaMessage::getContent() const -{ - if (std::holds_alternative(response)) { - return std::get(response).response; - } - return std::get(response).content; -} - -bool OllamaMessage::hasError() const -{ - return !error.isEmpty(); -} - -} // namespace QodeAssist::LLMCore diff --git 
a/llmcore/OllamaMessage.hpp b/llmcore/OllamaMessage.hpp deleted file mode 100644 index 6c7885c..0000000 --- a/llmcore/OllamaMessage.hpp +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright (C) 2024-2025 Petr Mironychev - * - * This file is part of QodeAssist. - * - * QodeAssist is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * QodeAssist is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with QodeAssist. If not, see . - */ - -#pragma once - -#include -#include -#include - -namespace QodeAssist::LLMCore { - -class OllamaMessage -{ -public: - enum class Type { Generate, Chat }; - - struct Metrics - { - qint64 totalDuration{0}; - qint64 loadDuration{0}; - qint64 promptEvalCount{0}; - qint64 promptEvalDuration{0}; - qint64 evalCount{0}; - qint64 evalDuration{0}; - }; - - struct GenerateResponse - { - QString response; - QVector context; - }; - - struct ChatResponse - { - QString role; - QString content; - }; - - QString model; - QDateTime createdAt; - std::variant response; - bool done{false}; - QString doneReason; - QString error; - Metrics metrics; - - static OllamaMessage fromJson(const QByteArray &data, Type type); - QString getContent() const; - bool hasError() const; - -private: - static QJsonObject parseJsonFromData(const QByteArray &data); -}; - -} // namespace QodeAssist::LLMCore diff --git a/llmcore/OpenAIMessage.cpp b/llmcore/OpenAIMessage.cpp deleted file mode 100644 index 4c9ee18..0000000 --- a/llmcore/OpenAIMessage.cpp +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2024-2025 Petr Mironychev - * - * This file is part of QodeAssist. - * - * QodeAssist is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * QodeAssist is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with QodeAssist. If not, see . 
- */ - -#include "OpenAIMessage.hpp" -#include -#include - -namespace QodeAssist::LLMCore { - -OpenAIMessage OpenAIMessage::fromJson(const QJsonObject &obj) -{ - OpenAIMessage msg; - - if (obj.contains("error")) { - msg.error = obj["error"].toObject()["message"].toString(); - return msg; - } - - if (obj.contains("choices")) { - auto choices = obj["choices"].toArray(); - if (!choices.isEmpty()) { - auto choiceObj = choices[0].toObject(); - - if (choiceObj.contains("delta")) { - QJsonObject delta = choiceObj["delta"].toObject(); - msg.choice.content = delta["content"].toString(); - } else if (choiceObj.contains("message")) { - QJsonObject message = choiceObj["message"].toObject(); - msg.choice.content = message["content"].toString(); - } - - msg.choice.finishReason = choiceObj["finish_reason"].toString(); - if (!msg.choice.finishReason.isEmpty()) { - msg.done = true; - } - } - } - - if (obj.contains("usage")) { - QJsonObject usage = obj["usage"].toObject(); - msg.usage.promptTokens = usage["prompt_tokens"].toInt(); - msg.usage.completionTokens = usage["completion_tokens"].toInt(); - msg.usage.totalTokens = usage["total_tokens"].toInt(); - } - - return msg; -} - -QString OpenAIMessage::getContent() const -{ - return choice.content; -} - -bool OpenAIMessage::hasError() const -{ - return !error.isEmpty(); -} - -bool OpenAIMessage::isDone() const -{ - return done - || (!choice.finishReason.isEmpty() - && (choice.finishReason == "stop" || choice.finishReason == "length")); -} - -} // namespace QodeAssist::LLMCore diff --git a/llmcore/OpenAIMessage.hpp b/llmcore/OpenAIMessage.hpp deleted file mode 100644 index 4d7d1b7..0000000 --- a/llmcore/OpenAIMessage.hpp +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2024-2025 Petr Mironychev - * - * This file is part of QodeAssist. - * - * QodeAssist is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * QodeAssist is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with QodeAssist. If not, see . 
- */ - -#pragma once - -#include -#include -#include - -namespace QodeAssist::LLMCore { - -class OpenAIMessage -{ -public: - struct Choice - { - QString content; - QString finishReason; - }; - - struct Usage - { - int promptTokens{0}; - int completionTokens{0}; - int totalTokens{0}; - }; - - Choice choice; - QString error; - bool done{false}; - Usage usage; - - QString getContent() const; - bool hasError() const; - bool isDone() const; - - static OpenAIMessage fromJson(const QJsonObject &obj); -}; - -} // namespace QodeAssist::LLMCore diff --git a/providers/OllamaProvider.cpp b/providers/OllamaProvider.cpp index 2b0d9f5..4794fb1 100644 --- a/providers/OllamaProvider.cpp +++ b/providers/OllamaProvider.cpp @@ -25,7 +25,6 @@ #include #include -#include "llmcore/OllamaMessage.hpp" #include "llmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" diff --git a/providers/OpenAICompatProvider.cpp b/providers/OpenAICompatProvider.cpp index 663ad23..a7bf5a9 100644 --- a/providers/OpenAICompatProvider.cpp +++ b/providers/OpenAICompatProvider.cpp @@ -19,6 +19,8 @@ #include "OpenAICompatProvider.hpp" +#include "llmcore/ValidationUtils.hpp" +#include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" #include "settings/ProviderSettings.hpp" @@ -28,12 +30,19 @@ #include #include -#include "llmcore/OpenAIMessage.hpp" -#include "llmcore/ValidationUtils.hpp" -#include "logger/Logger.hpp" - namespace QodeAssist::Providers { +OpenAICompatProvider::OpenAICompatProvider(QObject *parent) + : LLMCore::Provider(parent) + , m_toolsManager(new Tools::ToolsManager(this)) +{ + connect( + m_toolsManager, + &Tools::ToolsManager::toolExecutionComplete, + this, + &OpenAICompatProvider::onToolExecutionComplete); +} + QString OpenAICompatProvider::name() const { return "OpenAI Compatible"; @@ -90,6 +99,16 @@ void OpenAICompatProvider::prepareRequest( } else { applyModelParams(Settings::chatAssistantSettings()); } + + if (supportsTools() && type == LLMCore::RequestType::Chat + && Settings::chatAssistantSettings().useTools()) { + auto toolsDefinitions = m_toolsManager->getToolsDefinitions(Tools::ToolSchemaFormat::OpenAI); + if (!toolsDefinitions.isEmpty()) { + request["tools"] = toolsDefinitions; + LOG_MESSAGE( + QString("Added %1 tools to OpenAICompat request").arg(toolsDefinitions.size())); + } + } } QList OpenAICompatProvider::getInstalledModels(const QString &url) @@ -110,7 +129,8 @@ QList OpenAICompatProvider::validateRequest( {"frequency_penalty", {}}, {"presence_penalty", {}}, {"stop", QJsonArray{}}, - {"stream", {}}}; + {"stream", {}}, + {"tools", {}}}; return LLMCore::ValidationUtils::validateRequestFields(request, templateReq); } @@ -137,8 +157,12 @@ LLMCore::ProviderID OpenAICompatProvider::providerID() const void OpenAICompatProvider::sendRequest( const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { - m_dataBuffers[requestId].clear(); + if (!m_messages.contains(requestId)) { + m_dataBuffers[requestId].clear(); + } + m_requestUrls[requestId] = url; + m_originalRequests[requestId] = payload; QNetworkRequest networkRequest(url); prepareNetworkRequest(networkRequest); @@ -152,57 +176,34 @@ void OpenAICompatProvider::sendRequest( emit httpClient()->sendRequest(request); } +bool OpenAICompatProvider::supportsTools() const +{ + return true; +} + +void OpenAICompatProvider::cancelRequest(const LLMCore::RequestID &requestId) +{ + LOG_MESSAGE(QString("OpenAICompatProvider: Cancelling 
request %1").arg(requestId)); + LLMCore::Provider::cancelRequest(requestId); + cleanupRequest(requestId); +} + void OpenAICompatProvider::onDataReceived( const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) { LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); - if (data.isEmpty()) { - return; - } - - bool isDone = false; - QString tempResponse; - for (const QString &line : lines) { - if (line.trimmed().isEmpty()) { + if (line.trimmed().isEmpty() || line == "data: [DONE]") { continue; } - if (line == "data: [DONE]") { - isDone = true; - continue; - } - - QJsonObject responseObj = parseEventLine(line); - if (responseObj.isEmpty()) + QJsonObject chunk = parseEventLine(line); + if (chunk.isEmpty()) continue; - auto message = LLMCore::OpenAIMessage::fromJson(responseObj); - if (message.hasError()) { - LOG_MESSAGE("Error in OpenAI response: " + message.error); - continue; - } - - QString content = message.getContent(); - if (!content.isEmpty()) { - tempResponse += content; - } - - if (message.isDone()) { - isDone = true; - } - } - - if (!tempResponse.isEmpty()) { - buffers.responseContent += tempResponse; - emit partialResponseReceived(requestId, tempResponse); - } - - if (isDone) { - emit fullResponseReceived(requestId, buffers.responseContent); - m_dataBuffers.remove(requestId); + processStreamChunk(requestId, chunk); } } @@ -212,17 +213,161 @@ void OpenAICompatProvider::onRequestFinished( if (!success) { LOG_MESSAGE(QString("OpenAICompatProvider request %1 failed: %2").arg(requestId, error)); emit requestFailed(requestId, error); - } else { - if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; - if (!buffers.responseContent.isEmpty()) { - emit fullResponseReceived(requestId, buffers.responseContent); + cleanupRequest(requestId); + return; + } + + if (m_messages.contains(requestId)) { + OpenAIMessage *message = m_messages[requestId]; + if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); + m_dataBuffers.remove(requestId); + return; + } + } + + if (m_dataBuffers.contains(requestId)) { + const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + if (!buffers.responseContent.isEmpty()) { + LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId)); + emit fullResponseReceived(requestId, buffers.responseContent); + } + } + + cleanupRequest(requestId); +} + +void OpenAICompatProvider::onToolExecutionComplete( + const QString &requestId, const QHash &toolResults) +{ + if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) { + LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId)); + cleanupRequest(requestId); + return; + } + + LOG_MESSAGE(QString("Tool execution complete for OpenAICompat request %1").arg(requestId)); + + OpenAIMessage *message = m_messages[requestId]; + QJsonObject continuationRequest = m_originalRequests[requestId]; + QJsonArray messages = continuationRequest["messages"].toArray(); + + messages.append(message->toProviderFormat()); + + QJsonArray toolResultMessages = message->createToolResultMessages(toolResults); + for (const auto &toolMsg : toolResultMessages) { + messages.append(toolMsg); + } + + continuationRequest["messages"] = messages; + + LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results") + .arg(requestId) + .arg(toolResults.size())); + + 
sendRequest(requestId, m_requestUrls[requestId], continuationRequest); +} + +void OpenAICompatProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk) +{ + QJsonArray choices = chunk["choices"].toArray(); + if (choices.isEmpty()) { + return; + } + + QJsonObject choice = choices[0].toObject(); + QJsonObject delta = choice["delta"].toObject(); + QString finishReason = choice["finish_reason"].toString(); + + OpenAIMessage *message = m_messages.value(requestId); + if (!message) { + message = new OpenAIMessage(this); + m_messages[requestId] = message; + LOG_MESSAGE(QString("Created NEW OpenAIMessage for request %1").arg(requestId)); + } + + if (delta.contains("content") && !delta["content"].isNull()) { + QString content = delta["content"].toString(); + message->handleContentDelta(content); + + LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + buffers.responseContent += content; + emit partialResponseReceived(requestId, content); + } + + if (delta.contains("tool_calls")) { + QJsonArray toolCalls = delta["tool_calls"].toArray(); + for (const auto &toolCallValue : toolCalls) { + QJsonObject toolCall = toolCallValue.toObject(); + int index = toolCall["index"].toInt(); + + if (toolCall.contains("id")) { + QString id = toolCall["id"].toString(); + QJsonObject function = toolCall["function"].toObject(); + QString name = function["name"].toString(); + message->handleToolCallStart(index, id, name); + } + + if (toolCall.contains("function")) { + QJsonObject function = toolCall["function"].toObject(); + if (function.contains("arguments")) { + QString args = function["arguments"].toString(); + message->handleToolCallDelta(index, args); + } } } } + if (!finishReason.isEmpty() && finishReason != "null") { + for (int i = 0; i < 10; ++i) { + message->handleToolCallComplete(i); + } + + message->handleFinishReason(finishReason); + handleMessageComplete(requestId); + } +} + +void OpenAICompatProvider::handleMessageComplete(const QString &requestId) +{ + if (!m_messages.contains(requestId)) + return; + + OpenAIMessage *message = m_messages[requestId]; + + if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + LOG_MESSAGE(QString("OpenAICompat message requires tool execution for %1").arg(requestId)); + + auto toolUseContent = message->getCurrentToolUseContent(); + + if (toolUseContent.isEmpty()) { + LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId)); + return; + } + + for (auto toolContent : toolUseContent) { + m_toolsManager->executeToolCall( + requestId, toolContent->id(), toolContent->name(), toolContent->input()); + } + + } else { + LOG_MESSAGE(QString("OpenAICompat message marked as complete for %1").arg(requestId)); + } +} + +void OpenAICompatProvider::cleanupRequest(const LLMCore::RequestID &requestId) +{ + LOG_MESSAGE(QString("Cleaning up OpenAICompat request %1").arg(requestId)); + + if (m_messages.contains(requestId)) { + OpenAIMessage *message = m_messages.take(requestId); + message->deleteLater(); + } + m_dataBuffers.remove(requestId); m_requestUrls.remove(requestId); + m_originalRequests.remove(requestId); + m_toolsManager->cleanupRequest(requestId); } } // namespace QodeAssist::Providers diff --git a/providers/OpenAICompatProvider.hpp b/providers/OpenAICompatProvider.hpp index 8170771..679703b 100644 --- a/providers/OpenAICompatProvider.hpp +++ b/providers/OpenAICompatProvider.hpp @@ -19,13 +19,18 @@ #pragma once -#include "llmcore/Provider.hpp" +#include "OpenAIMessage.hpp" +#include "tools/ToolsManager.hpp" +#include namespace 
QodeAssist::Providers { class OpenAICompatProvider : public LLMCore::Provider { + Q_OBJECT public: + explicit OpenAICompatProvider(QObject *parent = nullptr); + QString name() const override; QString url() const override; QString completionEndpoint() const override; @@ -45,6 +50,9 @@ public: void sendRequest( const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + bool supportsTools() const override; + void cancelRequest(const LLMCore::RequestID &requestId) override; + public slots: void onDataReceived( const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; @@ -52,6 +60,20 @@ public slots: const QodeAssist::LLMCore::RequestID &requestId, bool success, const QString &error) override; + +private slots: + void onToolExecutionComplete( + const QString &requestId, const QHash &toolResults); + +private: + void processStreamChunk(const QString &requestId, const QJsonObject &chunk); + void handleMessageComplete(const QString &requestId); + void cleanupRequest(const LLMCore::RequestID &requestId); + + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; + Tools::ToolsManager *m_toolsManager; }; } // namespace QodeAssist::Providers diff --git a/providers/OpenRouterAIProvider.cpp b/providers/OpenRouterAIProvider.cpp index eac9297..e64afec 100644 --- a/providers/OpenRouterAIProvider.cpp +++ b/providers/OpenRouterAIProvider.cpp @@ -26,9 +26,6 @@ #include #include -#include "llmcore/OpenAIMessage.hpp" -#include "logger/Logger.hpp" - namespace QodeAssist::Providers { QString OpenRouterProvider::name() const @@ -51,77 +48,4 @@ LLMCore::ProviderID OpenRouterProvider::providerID() const return LLMCore::ProviderID::OpenRouter; } -void OpenRouterProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) -{ - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; - QStringList lines = buffers.rawStreamBuffer.processData(data); - - if (data.isEmpty()) { - return; - } - - bool isDone = false; - QString tempResponse; - - for (const QString &line : lines) { - if (line.trimmed().isEmpty() || line.contains("OPENROUTER PROCESSING")) { - continue; - } - - if (line == "data: [DONE]") { - isDone = true; - continue; - } - - QJsonObject responseObj = parseEventLine(line); - if (responseObj.isEmpty()) - continue; - - auto message = LLMCore::OpenAIMessage::fromJson(responseObj); - if (message.hasError()) { - LOG_MESSAGE("Error in OpenRouter response: " + message.error); - continue; - } - - QString content = message.getContent(); - if (!content.isEmpty()) { - tempResponse += content; - } - - if (message.isDone()) { - isDone = true; - } - } - - if (!tempResponse.isEmpty()) { - buffers.responseContent += tempResponse; - emit partialResponseReceived(requestId, tempResponse); - } - - if (isDone) { - emit fullResponseReceived(requestId, buffers.responseContent); - m_dataBuffers.remove(requestId); - } -} - -void OpenRouterProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, bool success, const QString &error) -{ - if (!success) { - LOG_MESSAGE(QString("OpenRouterProvider request %1 failed: %2").arg(requestId, error)); - emit requestFailed(requestId, error); - } else { - if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; - if (!buffers.responseContent.isEmpty()) { - emit fullResponseReceived(requestId, buffers.responseContent); - } - } - } - - m_dataBuffers.remove(requestId); - m_requestUrls.remove(requestId); -} - } // 
namespace QodeAssist::Providers diff --git a/providers/OpenRouterAIProvider.hpp b/providers/OpenRouterAIProvider.hpp index 3e7521c..012aca8 100644 --- a/providers/OpenRouterAIProvider.hpp +++ b/providers/OpenRouterAIProvider.hpp @@ -30,14 +30,6 @@ public: QString url() const override; QString apiKey() const override; LLMCore::ProviderID providerID() const override; - -public slots: - void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; - void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, - bool success, - const QString &error) override; }; } // namespace QodeAssist::Providers
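
The new `prepareRequest` branch attaches `m_toolsManager->getToolsDefinitions(Tools::ToolSchemaFormat::OpenAI)` to the payload under `"tools"`. For readers unfamiliar with that format, the sketch below shows what a single definition in the standard OpenAI tools schema looks like; the `read_file` tool and its parameters are invented for illustration and are not something `ToolsManager` is known to expose.

```cpp
// Hypothetical example of one entry in an OpenAI-format "tools" array.
// The type/function/parameters shape is the standard OpenAI schema; the
// concrete tool below is made up for this sketch.
#include <QJsonArray>
#include <QJsonObject>

QJsonArray exampleToolDefinitions()
{
    QJsonObject parameters{
        {"type", "object"},
        {"properties",
         QJsonObject{{"path", QJsonObject{{"type", "string"},
                                          {"description", "File to read"}}}}},
        {"required", QJsonArray{"path"}}};

    QJsonObject readFileTool{
        {"type", "function"},
        {"function", QJsonObject{{"name", "read_file"},
                                 {"description", "Read a file from the project"},
                                 {"parameters", parameters}}}};

    return QJsonArray{readFileTool};
}
```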
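
On the receiving side, `onDataReceived` now feeds every network chunk through `buffers.rawStreamBuffer.processData(data)` before parsing. The point of that buffer is that chunks can end mid-line, so only complete newline-terminated SSE lines should reach the parser. A minimal standalone version of the idea, assuming UTF-8 data and `\n` line endings (the real buffer lives in `LLMCore::DataBuffers`, and this class is only an illustration):

```cpp
// Sketch of a line buffer: keep the partial tail between chunks and only
// hand back complete lines.
#include <QByteArray>
#include <QString>
#include <QStringList>

class LineBuffer
{
public:
    QStringList processData(const QByteArray &data)
    {
        m_pending += QString::fromUtf8(data);
        QStringList complete;
        qsizetype newline;
        while ((newline = m_pending.indexOf('\n')) != -1) {
            complete << m_pending.left(newline);
            m_pending.remove(0, newline + 1);
        }
        return complete;
    }

private:
    QString m_pending; // partial line carried over to the next chunk
};
```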
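
Each complete line is then turned into a JSON chunk via `parseEventLine` and handed to `processStreamChunk`, which reads `choices[0].delta` for incremental text and `finish_reason` for termination. A self-contained sketch of that per-line handling, using hypothetical helper names (`parseSseLine`, `extractDelta`) rather than the provider's own methods:

```cpp
// Parse one SSE line from an OpenAI-compatible stream and pull out the delta.
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonParseError>
#include <QString>

// Strip the "data: " prefix and parse the JSON payload of a single SSE line.
QJsonObject parseSseLine(const QString &line)
{
    const QString trimmed = line.trimmed();
    if (!trimmed.startsWith("data: ") || trimmed == "data: [DONE]")
        return {};
    QJsonParseError error;
    const QJsonDocument doc = QJsonDocument::fromJson(trimmed.mid(6).toUtf8(), &error);
    return error.error == QJsonParseError::NoError ? doc.object() : QJsonObject{};
}

// Incremental text and finish reason carried by one streamed chunk.
struct DeltaInfo
{
    QString content;
    QString finishReason;
};

DeltaInfo extractDelta(const QJsonObject &chunk)
{
    const QJsonArray choices = chunk["choices"].toArray();
    if (choices.isEmpty())
        return {};
    const QJsonObject choice = choices[0].toObject();
    return {choice["delta"].toObject()["content"].toString(),
            choice["finish_reason"].toString()};
}
```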
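
Tool calls arrive interleaved with content deltas: the first fragment for a given `index` carries the call `id` and function `name`, later fragments only append to the `arguments` string, and that string is valid JSON only once the stream reports a finish reason. That is what `OpenAIMessage::handleToolCallStart/Delta/Complete` manage; a simplified accumulator, assuming the same index-based scheme, could look like this. The diff's loop over indices 0–9 at finish time plays the role of `finish()` here, closing out whichever slots were actually started.

```cpp
// Illustrative accumulator for streamed OpenAI tool calls; a stand-in for
// the bookkeeping inside OpenAIMessage, not the plugin's implementation.
#include <QHash>
#include <QJsonDocument>
#include <QJsonObject>
#include <QString>

struct PendingToolCall
{
    QString id;
    QString name;
    QString argumentsJson; // accumulated fragment by fragment
};

class ToolCallAccumulator
{
public:
    void start(int index, const QString &id, const QString &name)
    {
        m_calls[index].id = id;
        m_calls[index].name = name;
    }

    void appendArguments(int index, const QString &fragment)
    {
        m_calls[index].argumentsJson += fragment;
    }

    // Parse the accumulated argument text once the stream has finished.
    QJsonObject finish(int index) const
    {
        const PendingToolCall call = m_calls.value(index);
        return QJsonDocument::fromJson(call.argumentsJson.toUtf8()).object();
    }

private:
    QHash<int, PendingToolCall> m_calls;
};
```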
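
After `ToolsManager` reports results, `onToolExecutionComplete` replays the assistant message that requested the tools (`message->toProviderFormat()`), appends one `role: "tool"` message per result, and resends the request to the same URL. The payload shape follows the OpenAI chat-completions tool-calling convention; the helper below is an illustrative reconstruction, assuming tool results keyed by `tool_call_id`, and is not the plugin's actual `createToolResultMessages`. Note that `sendRequest` only clears the data buffers when no `OpenAIMessage` exists for the request, so content streamed before the tool round-trip stays in `buffers.responseContent` and is emitted once the continuation completes.

```cpp
// Assemble a continuation payload after local tool execution, in the
// OpenAI chat-completions convention.
#include <QHash>
#include <QJsonArray>
#include <QJsonObject>
#include <QString>

QJsonObject buildContinuationRequest(QJsonObject originalRequest,
                                     const QJsonObject &assistantToolCallMessage,
                                     const QHash<QString, QString> &toolResults)
{
    QJsonArray messages = originalRequest["messages"].toArray();

    // Replay the assistant message that carried the tool_calls.
    messages.append(assistantToolCallMessage);

    // One "tool" role message per executed tool, matched back via tool_call_id.
    for (auto it = toolResults.constBegin(); it != toolResults.constEnd(); ++it) {
        messages.append(QJsonObject{{"role", "tool"},
                                    {"tool_call_id", it.key()},
                                    {"content", it.value()}});
    }

    originalRequest["messages"] = messages;
    return originalRequest;
}
```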