From f58fad957873bf2e64d107200106c3141b943147 Mon Sep 17 00:00:00 2001 From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com> Date: Mon, 30 Mar 2026 00:49:45 +0200 Subject: [PATCH] feat: Rename old llmcore module to pluginllmcore --- CMakeLists.txt | 4 +- ChatView/ChatCompressor.cpp | 24 +++---- ChatView/ChatCompressor.hpp | 8 +-- ChatView/ChatRootView.cpp | 12 ++-- ChatView/ChatRootView.hpp | 4 +- ChatView/ClientInterface.cpp | 50 +++++++-------- ChatView/ClientInterface.hpp | 10 +-- ConfigurationManager.cpp | 8 +-- ConfigurationManager.hpp | 8 +-- LLMClientInterface.cpp | 42 ++++++------- LLMClientInterface.hpp | 20 +++--- QodeAssistClient.hpp | 4 +- QuickRefactorHandler.cpp | 42 ++++++------- QuickRefactorHandler.hpp | 8 +-- context/DocumentContextReader.cpp | 2 +- context/DocumentContextReader.hpp | 4 +- {llmcore => pluginllmcore}/BaseTool.cpp | 4 +- {llmcore => pluginllmcore}/BaseTool.hpp | 4 +- {llmcore => pluginllmcore}/CMakeLists.txt | 0 {llmcore => pluginllmcore}/ContentBlocks.hpp | 4 +- {llmcore => pluginllmcore}/ContextData.hpp | 4 +- {llmcore => pluginllmcore}/DataBuffers.hpp | 4 +- {llmcore => pluginllmcore}/HttpClient.cpp | 4 +- {llmcore => pluginllmcore}/HttpClient.hpp | 4 +- .../IPromptProvider.hpp | 4 +- .../IProviderRegistry.hpp | 4 +- {llmcore => pluginllmcore}/IToolsManager.hpp | 2 +- .../PromptProviderChat.hpp | 4 +- .../PromptProviderFim.hpp | 4 +- {llmcore => pluginllmcore}/PromptTemplate.hpp | 4 +- .../PromptTemplateManager.cpp | 4 +- .../PromptTemplateManager.hpp | 4 +- {llmcore => pluginllmcore}/Provider.cpp | 4 +- {llmcore => pluginllmcore}/Provider.hpp | 22 +++---- {llmcore => pluginllmcore}/ProviderID.hpp | 2 +- .../ProvidersManager.cpp | 4 +- .../ProvidersManager.hpp | 4 +- {llmcore => pluginllmcore}/RequestConfig.hpp | 4 +- .../RequestHandlerBase.cpp | 0 .../RequestHandlerBase.hpp | 0 {llmcore => pluginllmcore}/RequestType.hpp | 2 +- .../ResponseCleaner.hpp | 4 +- {llmcore => pluginllmcore}/RulesLoader.cpp | 4 +- {llmcore => 
pluginllmcore}/RulesLoader.hpp | 4 +- {llmcore => pluginllmcore}/SSEBuffer.cpp | 4 +- {llmcore => pluginllmcore}/SSEBuffer.hpp | 4 +- .../ValidationUtils.cpp | 4 +- .../ValidationUtils.hpp | 4 +- providers/ClaudeMessage.cpp | 58 ++++++++--------- providers/ClaudeMessage.hpp | 16 ++--- providers/ClaudeProvider.cpp | 58 ++++++++--------- providers/ClaudeProvider.hpp | 32 +++++----- providers/GoogleAIProvider.cpp | 54 ++++++++-------- providers/GoogleAIProvider.hpp | 34 +++++----- providers/GoogleMessage.cpp | 44 ++++++------- providers/GoogleMessage.hpp | 14 ++--- providers/LMStudioProvider.cpp | 54 ++++++++-------- providers/LMStudioProvider.hpp | 30 ++++----- providers/LlamaCppProvider.cpp | 58 ++++++++--------- providers/LlamaCppProvider.hpp | 30 ++++----- providers/MistralAIProvider.cpp | 56 ++++++++--------- providers/MistralAIProvider.hpp | 30 ++++----- providers/OllamaMessage.cpp | 52 ++++++++-------- providers/OllamaMessage.hpp | 20 +++--- providers/OllamaProvider.cpp | 62 +++++++++---------- providers/OllamaProvider.hpp | 30 ++++----- providers/OpenAICompatProvider.cpp | 54 ++++++++-------- providers/OpenAICompatProvider.hpp | 30 ++++----- providers/OpenAIMessage.cpp | 34 +++++----- providers/OpenAIMessage.hpp | 12 ++-- providers/OpenAIProvider.cpp | 54 ++++++++-------- providers/OpenAIProvider.hpp | 30 ++++----- providers/OpenAIResponsesMessage.cpp | 44 ++++++------- providers/OpenAIResponsesMessage.hpp | 18 +++--- providers/OpenAIResponsesProvider.cpp | 50 +++++++-------- providers/OpenAIResponsesProvider.hpp | 34 +++++----- providers/OpenRouterAIProvider.cpp | 4 +- providers/OpenRouterAIProvider.hpp | 2 +- providers/Providers.hpp | 4 +- qodeassist.cpp | 10 +-- settings/GeneralSettings.hpp | 2 +- templates/Alpaca.hpp | 20 +++--- templates/ChatML.hpp | 20 +++--- templates/Claude.hpp | 12 ++-- templates/CodeLlamaFim.hpp | 12 ++-- templates/CodeLlamaQMLFim.hpp | 12 ++-- templates/GoogleAI.hpp | 12 ++-- templates/Llama2.hpp | 20 +++--- templates/Llama3.hpp | 
20 +++--- templates/LlamaCppFim.hpp | 12 ++-- templates/MistralAI.hpp | 22 +++---- templates/Ollama.hpp | 22 +++---- templates/OpenAI.hpp | 12 ++-- templates/OpenAICompatible.hpp | 18 +++--- templates/OpenAIResponses.hpp | 14 ++--- templates/Qwen25CoderFIM.hpp | 12 ++-- templates/Qwen3CoderFIM.hpp | 20 +++--- templates/StarCoder2Fim.hpp | 12 ++-- templates/Templates.hpp | 4 +- tools/BuildProjectTool.cpp | 16 ++--- tools/BuildProjectTool.hpp | 8 +-- tools/CreateNewFileTool.cpp | 14 ++--- tools/CreateNewFileTool.hpp | 8 +-- tools/EditFileTool.cpp | 14 ++--- tools/EditFileTool.hpp | 8 +-- tools/ExecuteTerminalCommandTool.cpp | 18 +++--- tools/ExecuteTerminalCommandTool.hpp | 8 +-- tools/FindAndReadFileTool.cpp | 14 ++--- tools/FindAndReadFileTool.hpp | 8 +-- tools/GetIssuesListTool.cpp | 14 ++--- tools/GetIssuesListTool.hpp | 8 +-- tools/ListProjectFilesTool.cpp | 14 ++--- tools/ListProjectFilesTool.hpp | 8 +-- tools/ProjectSearchTool.cpp | 14 ++--- tools/ProjectSearchTool.hpp | 8 +-- tools/TodoTool.cpp | 14 ++--- tools/TodoTool.hpp | 8 +-- tools/ToolHandler.cpp | 2 +- tools/ToolHandler.hpp | 4 +- tools/ToolsFactory.cpp | 32 +++++----- tools/ToolsFactory.hpp | 14 ++--- tools/ToolsManager.cpp | 2 +- tools/ToolsManager.hpp | 10 +-- 123 files changed, 1018 insertions(+), 1018 deletions(-) rename {llmcore => pluginllmcore}/BaseTool.cpp (96%) rename {llmcore => pluginllmcore}/BaseTool.hpp (96%) rename {llmcore => pluginllmcore}/CMakeLists.txt (100%) rename {llmcore => pluginllmcore}/ContentBlocks.hpp (98%) rename {llmcore => pluginllmcore}/ContextData.hpp (95%) rename {llmcore => pluginllmcore}/DataBuffers.hpp (92%) rename {llmcore => pluginllmcore}/HttpClient.cpp (99%) rename {llmcore => pluginllmcore}/HttpClient.hpp (96%) rename {llmcore => pluginllmcore}/IPromptProvider.hpp (93%) rename {llmcore => pluginllmcore}/IProviderRegistry.hpp (92%) rename {llmcore => pluginllmcore}/IToolsManager.hpp (97%) rename {llmcore => pluginllmcore}/PromptProviderChat.hpp (95%) rename 
{llmcore => pluginllmcore}/PromptProviderFim.hpp (95%) rename {llmcore => pluginllmcore}/PromptTemplate.hpp (94%) rename {llmcore => pluginllmcore}/PromptTemplateManager.cpp (97%) rename {llmcore => pluginllmcore}/PromptTemplateManager.hpp (96%) rename {llmcore => pluginllmcore}/Provider.cpp (90%) rename {llmcore => pluginllmcore}/Provider.hpp (81%) rename {llmcore => pluginllmcore}/ProviderID.hpp (96%) rename {llmcore => pluginllmcore}/ProvidersManager.cpp (94%) rename {llmcore => pluginllmcore}/ProvidersManager.hpp (95%) rename {llmcore => pluginllmcore}/RequestConfig.hpp (93%) rename {llmcore => pluginllmcore}/RequestHandlerBase.cpp (100%) rename {llmcore => pluginllmcore}/RequestHandlerBase.hpp (100%) rename {llmcore => pluginllmcore}/RequestType.hpp (95%) rename {llmcore => pluginllmcore}/ResponseCleaner.hpp (97%) rename {llmcore => pluginllmcore}/RulesLoader.cpp (98%) rename {llmcore => pluginllmcore}/RulesLoader.hpp (95%) rename {llmcore => pluginllmcore}/SSEBuffer.cpp (93%) rename {llmcore => pluginllmcore}/SSEBuffer.hpp (92%) rename {llmcore => pluginllmcore}/ValidationUtils.cpp (96%) rename {llmcore => pluginllmcore}/ValidationUtils.hpp (93%) diff --git a/CMakeLists.txt b/CMakeLists.txt index b168521..6d05683 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -34,8 +34,8 @@ add_definitions( -DQODEASSIST_QT_CREATOR_VERSION_PATCH=${QODEASSIST_QT_CREATOR_VERSION_PATCH} ) -add_subdirectory(llmcore) -add_subdirectory(sources/external/llmcore) +add_subdirectory(pluginllmcore) +# add_subdirectory(sources/external/llmcore) add_subdirectory(settings) add_subdirectory(logger) add_subdirectory(UIControls) diff --git a/ChatView/ChatCompressor.cpp b/ChatView/ChatCompressor.cpp index 0743e86..c9b0a94 100644 --- a/ChatView/ChatCompressor.cpp +++ b/ChatView/ChatCompressor.cpp @@ -56,7 +56,7 @@ void ChatCompressor::startCompression(const QString &chatFilePath, ChatModel *ch } auto providerName = Settings::generalSettings().caProvider(); - m_provider = 
LLMCore::ProvidersManager::instance().getProviderByName(providerName); + m_provider = PluginLLMCore::ProvidersManager::instance().getProviderByName(providerName); if (!m_provider) { emit compressionFailed(tr("No provider available")); @@ -64,7 +64,7 @@ void ChatCompressor::startCompression(const QString &chatFilePath, ChatModel *ch } auto templateName = Settings::generalSettings().caTemplate(); - auto promptTemplate = LLMCore::PromptTemplateManager::instance().getChatTemplateByName( + auto promptTemplate = PluginLLMCore::PromptTemplateManager::instance().getChatTemplateByName( templateName); if (!promptTemplate) { @@ -85,7 +85,7 @@ void ChatCompressor::startCompression(const QString &chatFilePath, ChatModel *ch QUrl requestUrl; QJsonObject payload; - if (m_provider->providerID() == LLMCore::ProviderID::GoogleAI) { + if (m_provider->providerID() == PluginLLMCore::ProviderID::GoogleAI) { requestUrl = QUrl(QString("%1/models/%2:streamGenerateContent?alt=sse") .arg(Settings::generalSettings().caUrl(), Settings::generalSettings().caModel())); @@ -188,28 +188,28 @@ QString ChatCompressor::buildCompressionPrompt() const } void ChatCompressor::buildRequestPayload( - QJsonObject &payload, LLMCore::PromptTemplate *promptTemplate) + QJsonObject &payload, PluginLLMCore::PromptTemplate *promptTemplate) { - LLMCore::ContextData context; + PluginLLMCore::ContextData context; context.systemPrompt = QStringLiteral( "You are a helpful assistant that creates concise summaries of conversations. " "Your summaries preserve key information, technical details, and the flow of discussion."); - QVector messages; + QVector messages; for (const auto &msg : m_chatModel->getChatHistory()) { if (msg.role == ChatModel::ChatRole::Tool || msg.role == ChatModel::ChatRole::FileEdit || msg.role == ChatModel::ChatRole::Thinking) continue; - LLMCore::Message apiMessage; + PluginLLMCore::Message apiMessage; apiMessage.role = (msg.role == ChatModel::ChatRole::User) ? 
"user" : "assistant"; apiMessage.content = msg.content; messages.append(apiMessage); } - LLMCore::Message compressionRequest; + PluginLLMCore::Message compressionRequest; compressionRequest.role = "user"; compressionRequest.content = buildCompressionPrompt(); messages.append(compressionRequest); @@ -217,7 +217,7 @@ void ChatCompressor::buildRequestPayload( context.history = messages; m_provider->prepareRequest( - payload, promptTemplate, context, LLMCore::RequestType::Chat, false, false); + payload, promptTemplate, context, PluginLLMCore::RequestType::Chat, false, false); } bool ChatCompressor::createCompressedChatFile( @@ -268,21 +268,21 @@ void ChatCompressor::connectProviderSignals() { m_connections.append(connect( m_provider, - &LLMCore::Provider::partialResponseReceived, + &PluginLLMCore::Provider::partialResponseReceived, this, &ChatCompressor::onPartialResponseReceived, Qt::UniqueConnection)); m_connections.append(connect( m_provider, - &LLMCore::Provider::fullResponseReceived, + &PluginLLMCore::Provider::fullResponseReceived, this, &ChatCompressor::onFullResponseReceived, Qt::UniqueConnection)); m_connections.append(connect( m_provider, - &LLMCore::Provider::requestFailed, + &PluginLLMCore::Provider::requestFailed, this, &ChatCompressor::onRequestFailed, Qt::UniqueConnection)); diff --git a/ChatView/ChatCompressor.hpp b/ChatView/ChatCompressor.hpp index 54ef607..07df7aa 100644 --- a/ChatView/ChatCompressor.hpp +++ b/ChatView/ChatCompressor.hpp @@ -24,10 +24,10 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class Provider; class PromptTemplate; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore namespace QodeAssist::Chat { @@ -64,13 +64,13 @@ private: void disconnectAllSignals(); void cleanupState(); void handleCompressionError(const QString &error); - void buildRequestPayload(QJsonObject &payload, LLMCore::PromptTemplate *promptTemplate); + void buildRequestPayload(QJsonObject 
&payload, PluginLLMCore::PromptTemplate *promptTemplate); bool m_isCompressing = false; QString m_currentRequestId; QString m_originalChatPath; QString m_accumulatedSummary; - LLMCore::Provider *m_provider = nullptr; + PluginLLMCore::Provider *m_provider = nullptr; ChatModel *m_chatModel = nullptr; QList m_connections; diff --git a/ChatView/ChatRootView.cpp b/ChatView/ChatRootView.cpp index 158e8d1..9f8cab4 100644 --- a/ChatView/ChatRootView.cpp +++ b/ChatView/ChatRootView.cpp @@ -51,14 +51,14 @@ #include "context/ChangesManager.h" #include "context/ContextManager.hpp" #include "context/TokenUtils.hpp" -#include "llmcore/RulesLoader.hpp" +#include "pluginllmcore/RulesLoader.hpp" namespace QodeAssist::Chat { ChatRootView::ChatRootView(QQuickItem *parent) : QQuickItem(parent) , m_chatModel(new ChatModel(this)) - , m_promptProvider(LLMCore::PromptTemplateManager::instance()) + , m_promptProvider(PluginLLMCore::PromptTemplateManager::instance()) , m_clientInterface(new ClientInterface(m_chatModel, &m_promptProvider, this)) , m_fileManager(new ChatFileManager(this)) , m_isRequestInProgress(false) @@ -929,7 +929,7 @@ QString ChatRootView::getRuleContent(int index) if (index < 0 || index >= m_activeRules.size()) return QString(); - return LLMCore::RulesLoader::loadRuleFileContent( + return PluginLLMCore::RulesLoader::loadRuleFileContent( m_activeRules[index].toMap()["filePath"].toString()); } @@ -937,7 +937,7 @@ void ChatRootView::refreshRules() { m_activeRules.clear(); - auto project = LLMCore::RulesLoader::getActiveProject(); + auto project = PluginLLMCore::RulesLoader::getActiveProject(); if (!project) { emit activeRulesChanged(); emit activeRulesCountChanged(); @@ -945,7 +945,7 @@ void ChatRootView::refreshRules() } auto ruleFiles - = LLMCore::RulesLoader::getRuleFilesForProject(project, LLMCore::RulesContext::Chat); + = PluginLLMCore::RulesLoader::getRuleFilesForProject(project, PluginLLMCore::RulesContext::Chat); for (const auto &ruleFile : ruleFiles) { QVariantMap 
ruleMap; @@ -1296,7 +1296,7 @@ QString ChatRootView::lastInfoMessage() const bool ChatRootView::isThinkingSupport() const { auto providerName = Settings::generalSettings().caProvider(); - auto provider = LLMCore::ProvidersManager::instance().getProviderByName(providerName); + auto provider = PluginLLMCore::ProvidersManager::instance().getProviderByName(providerName); return provider && provider->supportThinking(); } diff --git a/ChatView/ChatRootView.hpp b/ChatView/ChatRootView.hpp index 6fbc12c..9003121 100644 --- a/ChatView/ChatRootView.hpp +++ b/ChatView/ChatRootView.hpp @@ -25,7 +25,7 @@ #include "ChatFileManager.hpp" #include "ChatModel.hpp" #include "ClientInterface.hpp" -#include "llmcore/PromptProviderChat.hpp" +#include "pluginllmcore/PromptProviderChat.hpp" #include namespace QodeAssist::Chat { @@ -235,7 +235,7 @@ private: bool hasImageAttachments(const QStringList &attachments) const; ChatModel *m_chatModel; - LLMCore::PromptProviderChat m_promptProvider; + PluginLLMCore::PromptProviderChat m_promptProvider; ClientInterface *m_clientInterface; ChatFileManager *m_fileManager; QString m_currentTemplate; diff --git a/ChatView/ClientInterface.cpp b/ChatView/ClientInterface.cpp index d9f59fc..b0037fb 100644 --- a/ChatView/ClientInterface.cpp +++ b/ChatView/ClientInterface.cpp @@ -53,7 +53,7 @@ namespace QodeAssist::Chat { ClientInterface::ClientInterface( - ChatModel *chatModel, LLMCore::IPromptProvider *promptProvider, QObject *parent) + ChatModel *chatModel, PluginLLMCore::IPromptProvider *promptProvider, QObject *parent) : QObject(parent) , m_chatModel(chatModel) , m_promptProvider(promptProvider) @@ -138,7 +138,7 @@ void ClientInterface::sendMessage( auto &chatAssistantSettings = Settings::chatAssistantSettings(); auto providerName = Settings::generalSettings().caProvider(); - auto provider = LLMCore::ProvidersManager::instance().getProviderByName(providerName); + auto provider = PluginLLMCore::ProvidersManager::instance().getProviderByName(providerName); 
if (!provider) { LOG_MESSAGE(QString("No provider found with name: %1").arg(providerName)); @@ -153,7 +153,7 @@ void ClientInterface::sendMessage( return; } - LLMCore::ContextData context; + PluginLLMCore::ContextData context; const bool isToolsEnabled = useTools; @@ -167,7 +167,7 @@ void ClientInterface::sendMessage( systemPrompt = systemPrompt + "\n\n" + role.systemPrompt; } - auto project = LLMCore::RulesLoader::getActiveProject(); + auto project = PluginLLMCore::RulesLoader::getActiveProject(); if (project) { systemPrompt += QString("\n# Active project name: %1").arg(project->displayName()); @@ -182,7 +182,7 @@ void ClientInterface::sendMessage( } QString projectRules - = LLMCore::RulesLoader::loadRulesForProject(project, LLMCore::RulesContext::Chat); + = PluginLLMCore::RulesLoader::loadRulesForProject(project, PluginLLMCore::RulesContext::Chat); if (!projectRules.isEmpty()) { systemPrompt += QString("\n# Project Rules\n\n") + projectRules; @@ -197,13 +197,13 @@ void ClientInterface::sendMessage( context.systemPrompt = systemPrompt; } - QVector messages; + QVector messages; for (const auto &msg : m_chatModel->getChatHistory()) { if (msg.role == ChatModel::ChatRole::Tool || msg.role == ChatModel::ChatRole::FileEdit) { continue; } - LLMCore::Message apiMessage; + PluginLLMCore::Message apiMessage; apiMessage.role = msg.role == ChatModel::ChatRole::User ? 
"user" : "assistant"; apiMessage.content = msg.content; @@ -240,11 +240,11 @@ void ClientInterface::sendMessage( context.history = messages; - LLMCore::LLMConfig config; - config.requestType = LLMCore::RequestType::Chat; + PluginLLMCore::LLMConfig config; + config.requestType = PluginLLMCore::RequestType::Chat; config.provider = provider; config.promptTemplate = promptTemplate; - if (provider->providerID() == LLMCore::ProviderID::GoogleAI) { + if (provider->providerID() == PluginLLMCore::ProviderID::GoogleAI) { QString stream = QString{"streamGenerateContent?alt=sse"}; config.url = QUrl(QString("%1/models/%2:%3") .arg( @@ -264,7 +264,7 @@ void ClientInterface::sendMessage( config.providerRequest, promptTemplate, context, - LLMCore::RequestType::Chat, + PluginLLMCore::RequestType::Chat, useTools, useThinking); @@ -277,49 +277,49 @@ void ClientInterface::sendMessage( connect( provider, - &LLMCore::Provider::partialResponseReceived, + &PluginLLMCore::Provider::partialResponseReceived, this, &ClientInterface::handlePartialResponse, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::fullResponseReceived, + &PluginLLMCore::Provider::fullResponseReceived, this, &ClientInterface::handleFullResponse, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::requestFailed, + &PluginLLMCore::Provider::requestFailed, this, &ClientInterface::handleRequestFailed, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::toolExecutionStarted, + &PluginLLMCore::Provider::toolExecutionStarted, this, &ClientInterface::handleToolExecutionStarted, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::toolExecutionCompleted, + &PluginLLMCore::Provider::toolExecutionCompleted, this, &ClientInterface::handleToolExecutionCompleted, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::continuationStarted, + &PluginLLMCore::Provider::continuationStarted, this, &ClientInterface::handleCleanAccumulatedData, Qt::UniqueConnection); connect( 
provider, - &LLMCore::Provider::thinkingBlockReceived, + &PluginLLMCore::Provider::thinkingBlockReceived, this, &ClientInterface::handleThinkingBlockReceived, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::redactedThinkingBlockReceived, + &PluginLLMCore::Provider::redactedThinkingBlockReceived, this, &ClientInterface::handleRedactedThinkingBlockReceived, Qt::UniqueConnection); @@ -334,7 +334,7 @@ void ClientInterface::sendMessage( void ClientInterface::clearMessages() { const auto providerName = Settings::generalSettings().caProvider(); - auto *provider = LLMCore::ProvidersManager::instance().getProviderByName(providerName); + auto *provider = PluginLLMCore::ProvidersManager::instance().getProviderByName(providerName); if (provider && !m_chatFilePath.isEmpty() && provider->supportsTools() && provider->toolsManager()) { @@ -346,7 +346,7 @@ void ClientInterface::clearMessages() void ClientInterface::cancelRequest() { - QSet providers; + QSet providers; for (auto it = m_activeRequests.begin(); it != m_activeRequests.end(); ++it) { if (it.value().provider) { providers.insert(it.value().provider); @@ -588,10 +588,10 @@ QString ClientInterface::encodeImageToBase64(const QString &filePath) const return imageData.toBase64(); } -QVector ClientInterface::loadImagesFromStorage( +QVector ClientInterface::loadImagesFromStorage( const QList &storedImages) const { - QVector apiImages; + QVector apiImages; for (const auto &storedImage : storedImages) { QString base64Data @@ -601,7 +601,7 @@ QVector ClientInterface::loadImagesFromStorage( continue; } - LLMCore::ImageAttachment apiImage; + PluginLLMCore::ImageAttachment apiImage; apiImage.data = base64Data; apiImage.mediaType = storedImage.mediaType; apiImage.isUrl = false; @@ -616,7 +616,7 @@ void ClientInterface::setChatFilePath(const QString &filePath) { if (!m_chatFilePath.isEmpty() && m_chatFilePath != filePath) { const auto providerName = Settings::generalSettings().caProvider(); - auto *provider = 
LLMCore::ProvidersManager::instance().getProviderByName(providerName); + auto *provider = PluginLLMCore::ProvidersManager::instance().getProviderByName(providerName); if (provider && provider->supportsTools() && provider->toolsManager()) { provider->toolsManager()->clearTodoSession(m_chatFilePath); diff --git a/ChatView/ClientInterface.hpp b/ChatView/ClientInterface.hpp index 5155b36..131f6e1 100644 --- a/ChatView/ClientInterface.hpp +++ b/ChatView/ClientInterface.hpp @@ -25,7 +25,7 @@ #include "ChatModel.hpp" #include "Provider.hpp" -#include "llmcore/IPromptProvider.hpp" +#include "pluginllmcore/IPromptProvider.hpp" #include namespace QodeAssist::Chat { @@ -36,7 +36,7 @@ class ClientInterface : public QObject public: explicit ClientInterface( - ChatModel *chatModel, LLMCore::IPromptProvider *promptProvider, QObject *parent = nullptr); + ChatModel *chatModel, PluginLLMCore::IPromptProvider *promptProvider, QObject *parent = nullptr); ~ClientInterface(); void sendMessage( @@ -82,15 +82,15 @@ private: bool isImageFile(const QString &filePath) const; QString getMediaTypeForImage(const QString &filePath) const; QString encodeImageToBase64(const QString &filePath) const; - QVector loadImagesFromStorage(const QList &storedImages) const; + QVector loadImagesFromStorage(const QList &storedImages) const; struct RequestContext { QJsonObject originalRequest; - LLMCore::Provider *provider; + PluginLLMCore::Provider *provider; }; - LLMCore::IPromptProvider *m_promptProvider = nullptr; + PluginLLMCore::IPromptProvider *m_promptProvider = nullptr; ChatModel *m_chatModel; Context::ContextManager *m_contextManager; QString m_chatFilePath; diff --git a/ConfigurationManager.cpp b/ConfigurationManager.cpp index ba8c98f..93e790b 100644 --- a/ConfigurationManager.cpp +++ b/ConfigurationManager.cpp @@ -41,7 +41,7 @@ void ConfigurationManager::init() void ConfigurationManager::updateTemplateDescription(const Utils::StringAspect &templateAspect) { - LLMCore::PromptTemplate *templ = 
m_templateManger.getFimTemplateByName(templateAspect.value()); + PluginLLMCore::PromptTemplate *templ = m_templateManger.getFimTemplateByName(templateAspect.value()); if (!templ) { return; @@ -65,7 +65,7 @@ void ConfigurationManager::updateAllTemplateDescriptions() void ConfigurationManager::checkTemplate(const Utils::StringAspect &templateAspect) { - LLMCore::PromptTemplate *templ = m_templateManger.getFimTemplateByName(templateAspect.value()); + PluginLLMCore::PromptTemplate *templ = m_templateManger.getFimTemplateByName(templateAspect.value()); if (templ->name() == templateAspect.value()) return; @@ -86,8 +86,8 @@ void ConfigurationManager::checkAllTemplate() ConfigurationManager::ConfigurationManager(QObject *parent) : QObject(parent) , m_generalSettings(Settings::generalSettings()) - , m_providersManager(LLMCore::ProvidersManager::instance()) - , m_templateManger(LLMCore::PromptTemplateManager::instance()) + , m_providersManager(PluginLLMCore::ProvidersManager::instance()) + , m_templateManger(PluginLLMCore::PromptTemplateManager::instance()) {} void ConfigurationManager::setupConnections() diff --git a/ConfigurationManager.hpp b/ConfigurationManager.hpp index b5be510..35e180f 100644 --- a/ConfigurationManager.hpp +++ b/ConfigurationManager.hpp @@ -21,8 +21,8 @@ #include -#include "llmcore/PromptTemplateManager.hpp" -#include "llmcore/ProvidersManager.hpp" +#include "pluginllmcore/PromptTemplateManager.hpp" +#include "pluginllmcore/ProvidersManager.hpp" #include "settings/GeneralSettings.hpp" namespace QodeAssist { @@ -54,8 +54,8 @@ private: ConfigurationManager &operator=(const ConfigurationManager &) = delete; Settings::GeneralSettings &m_generalSettings; - LLMCore::ProvidersManager &m_providersManager; - LLMCore::PromptTemplateManager &m_templateManger; + PluginLLMCore::ProvidersManager &m_providersManager; + PluginLLMCore::PromptTemplateManager &m_templateManger; void setupConnections(); }; diff --git a/LLMClientInterface.cpp b/LLMClientInterface.cpp index 
869fb0a..5914603 100644 --- a/LLMClientInterface.cpp +++ b/LLMClientInterface.cpp @@ -29,16 +29,16 @@ #include "logger/Logger.hpp" #include "settings/CodeCompletionSettings.hpp" #include "settings/GeneralSettings.hpp" -#include -#include +#include +#include namespace QodeAssist { LLMClientInterface::LLMClientInterface( const Settings::GeneralSettings &generalSettings, const Settings::CodeCompletionSettings &completeSettings, - LLMCore::IProviderRegistry &providerRegistry, - LLMCore::IPromptProvider *promptProvider, + PluginLLMCore::IProviderRegistry &providerRegistry, + PluginLLMCore::IPromptProvider *promptProvider, Context::IDocumentReader &documentReader, IRequestPerformanceLogger &performanceLogger) : m_generalSettings(generalSettings) @@ -136,7 +136,7 @@ void LLMClientInterface::sendData(const QByteArray &data) void LLMClientInterface::handleCancelRequest() { - QSet providers; + QSet providers; for (auto it = m_activeRequests.begin(); it != m_activeRequests.end(); ++it) { if (it.value().provider) { providers.insert(it.value().provider); @@ -271,12 +271,12 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) } // TODO refactor to dynamic presets system - LLMCore::LLMConfig config; - config.requestType = LLMCore::RequestType::CodeCompletion; + PluginLLMCore::LLMConfig config; + config.requestType = PluginLLMCore::RequestType::CodeCompletion; config.provider = provider; config.promptTemplate = promptTemplate; // TODO refactor networking - if (provider->providerID() == LLMCore::ProviderID::GoogleAI) { + if (provider->providerID() == PluginLLMCore::ProviderID::GoogleAI) { QString stream = QString{"streamGenerateContent?alt=sse"}; config.url = QUrl(QString("%1/models/%2:%3").arg(url, modelName, stream)); } else { @@ -295,14 +295,14 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) if (m_completeSettings.useSystemPrompt()) systemPrompt.append( m_completeSettings.useUserMessageTemplateForCC() - && promptTemplate->type() == 
LLMCore::TemplateType::Chat + && promptTemplate->type() == PluginLLMCore::TemplateType::Chat ? m_completeSettings.systemPromptForNonFimModels() : m_completeSettings.systemPrompt()); - auto project = LLMCore::RulesLoader::getActiveProject(); + auto project = PluginLLMCore::RulesLoader::getActiveProject(); if (project) { QString projectRules - = LLMCore::RulesLoader::loadRulesForProject(project, LLMCore::RulesContext::Completions); + = PluginLLMCore::RulesLoader::loadRulesForProject(project, PluginLLMCore::RulesContext::Completions); if (!projectRules.isEmpty()) { systemPrompt += "\n\n# Project Rules\n\n" + projectRules; @@ -314,10 +314,10 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) systemPrompt.append(updatedContext.fileContext.value()); if (m_completeSettings.useOpenFilesContext()) { - if (provider->providerID() == LLMCore::ProviderID::LlamaCpp) { + if (provider->providerID() == PluginLLMCore::ProviderID::LlamaCpp) { for (const auto openedFilePath : m_contextManager->openedFiles({filePath})) { if (!updatedContext.filesMetadata) { - updatedContext.filesMetadata = QList(); + updatedContext.filesMetadata = QList(); } updatedContext.filesMetadata->append({openedFilePath.first, openedFilePath.second}); } @@ -328,7 +328,7 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) updatedContext.systemPrompt = systemPrompt; - if (promptTemplate->type() == LLMCore::TemplateType::Chat) { + if (promptTemplate->type() == PluginLLMCore::TemplateType::Chat) { QString userMessage; if (m_completeSettings.useUserMessageTemplateForCC()) { userMessage = m_completeSettings.processMessageToFIM( @@ -338,7 +338,7 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) } // TODO refactor add message - QVector messages; + QVector messages; messages.append({"user", userMessage}); updatedContext.history = messages; } @@ -347,7 +347,7 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) config.providerRequest, 
promptTemplate, updatedContext, - LLMCore::RequestType::CodeCompletion, + PluginLLMCore::RequestType::CodeCompletion, false, false); @@ -367,13 +367,13 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) connect( provider, - &LLMCore::Provider::fullResponseReceived, + &PluginLLMCore::Provider::fullResponseReceived, this, &LLMClientInterface::handleFullResponse, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::requestFailed, + &PluginLLMCore::Provider::requestFailed, this, &LLMClientInterface::handleRequestFailed, Qt::UniqueConnection); @@ -381,7 +381,7 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request) provider->sendRequest(requestId, config.url, config.providerRequest); } -LLMCore::ContextData LLMClientInterface::prepareContext( +PluginLLMCore::ContextData LLMClientInterface::prepareContext( const QJsonObject &request, const Context::DocumentInfo &documentInfo) { QJsonObject params = request["params"].toObject(); @@ -396,13 +396,13 @@ LLMCore::ContextData LLMClientInterface::prepareContext( } QString LLMClientInterface::endpoint( - LLMCore::Provider *provider, LLMCore::TemplateType type, bool isLanguageSpecify) + PluginLLMCore::Provider *provider, PluginLLMCore::TemplateType type, bool isLanguageSpecify) { QString endpoint; auto endpointMode = isLanguageSpecify ? m_generalSettings.ccPreset1EndpointMode.stringValue() : m_generalSettings.ccEndpointMode.stringValue(); if (endpointMode == "Auto") { - endpoint = type == LLMCore::TemplateType::FIM ? provider->completionEndpoint() + endpoint = type == PluginLLMCore::TemplateType::FIM ? provider->completionEndpoint() : provider->chatEndpoint(); } else if (endpointMode == "Custom") { endpoint = isLanguageSpecify ? 
m_generalSettings.ccPreset1CustomEndpoint() diff --git a/LLMClientInterface.hpp b/LLMClientInterface.hpp index f1b7b24..7ecfb05 100644 --- a/LLMClientInterface.hpp +++ b/LLMClientInterface.hpp @@ -25,9 +25,9 @@ #include #include #include -#include -#include -#include +#include +#include +#include #include #include #include @@ -45,8 +45,8 @@ public: LLMClientInterface( const Settings::GeneralSettings &generalSettings, const Settings::CodeCompletionSettings &completeSettings, - LLMCore::IProviderRegistry &providerRegistry, - LLMCore::IPromptProvider *promptProvider, + PluginLLMCore::IProviderRegistry &providerRegistry, + PluginLLMCore::IPromptProvider *promptProvider, Context::IDocumentReader &documentReader, IRequestPerformanceLogger &performanceLogger); ~LLMClientInterface() override; @@ -82,17 +82,17 @@ private: struct RequestContext { QJsonObject originalRequest; - LLMCore::Provider *provider; + PluginLLMCore::Provider *provider; }; - LLMCore::ContextData prepareContext( + PluginLLMCore::ContextData prepareContext( const QJsonObject &request, const Context::DocumentInfo &documentInfo); - QString endpoint(LLMCore::Provider *provider, LLMCore::TemplateType type, bool isLanguageSpecify); + QString endpoint(PluginLLMCore::Provider *provider, PluginLLMCore::TemplateType type, bool isLanguageSpecify); const Settings::CodeCompletionSettings &m_completeSettings; const Settings::GeneralSettings &m_generalSettings; - LLMCore::IPromptProvider *m_promptProvider = nullptr; - LLMCore::IProviderRegistry &m_providerRegistry; + PluginLLMCore::IPromptProvider *m_promptProvider = nullptr; + PluginLLMCore::IProviderRegistry &m_providerRegistry; Context::IDocumentReader &m_documentReader; IRequestPerformanceLogger &m_performanceLogger; QElapsedTimer m_completionTimer; diff --git a/QodeAssistClient.hpp b/QodeAssistClient.hpp index b930f10..ae30fac 100644 --- a/QodeAssistClient.hpp +++ b/QodeAssistClient.hpp @@ -36,8 +36,8 @@ #include "widgets/EditorChatButtonHandler.hpp" #include 
"widgets/RefactorWidgetHandler.hpp" #include -#include -#include +#include +#include namespace QodeAssist { diff --git a/QuickRefactorHandler.cpp b/QuickRefactorHandler.cpp index 70a9398..67eb7e5 100644 --- a/QuickRefactorHandler.cpp +++ b/QuickRefactorHandler.cpp @@ -24,13 +24,13 @@ #include #include -#include +#include #include #include -#include -#include -#include -#include +#include +#include +#include +#include #include #include #include @@ -109,8 +109,8 @@ void QuickRefactorHandler::prepareAndSendRequest( { auto &settings = Settings::generalSettings(); - auto &providerRegistry = LLMCore::ProvidersManager::instance(); - auto &promptManager = LLMCore::PromptTemplateManager::instance(); + auto &providerRegistry = PluginLLMCore::ProvidersManager::instance(); + auto &promptManager = PluginLLMCore::PromptTemplateManager::instance(); const auto providerName = settings.qrProvider(); auto provider = providerRegistry.getProviderByName(providerName); @@ -140,14 +140,14 @@ void QuickRefactorHandler::prepareAndSendRequest( return; } - LLMCore::LLMConfig config; - config.requestType = LLMCore::RequestType::QuickRefactoring; + PluginLLMCore::LLMConfig config; + config.requestType = PluginLLMCore::RequestType::QuickRefactoring; config.provider = provider; config.promptTemplate = promptTemplate; config.url = QString("%1%2").arg(settings.qrUrl(), provider->chatEndpoint()); config.apiKey = provider->apiKey(); - if (provider->providerID() == LLMCore::ProviderID::GoogleAI) { + if (provider->providerID() == PluginLLMCore::ProviderID::GoogleAI) { QString stream = QString{"streamGenerateContent?alt=sse"}; config.url = QUrl(QString("%1/models/%2:%3") .arg( @@ -161,7 +161,7 @@ void QuickRefactorHandler::prepareAndSendRequest( = {{"model", Settings::generalSettings().qrModel()}, {"stream", true}}; } - LLMCore::ContextData context = prepareContext(editor, range, instructions); + PluginLLMCore::ContextData context = prepareContext(editor, range, instructions); bool enableTools = 
Settings::quickRefactorSettings().useTools(); bool enableThinking = Settings::quickRefactorSettings().useThinking(); @@ -169,7 +169,7 @@ void QuickRefactorHandler::prepareAndSendRequest( config.providerRequest, promptTemplate, context, - LLMCore::RequestType::QuickRefactoring, + PluginLLMCore::RequestType::QuickRefactoring, enableTools, enableThinking); @@ -183,14 +183,14 @@ void QuickRefactorHandler::prepareAndSendRequest( connect( provider, - &LLMCore::Provider::fullResponseReceived, + &PluginLLMCore::Provider::fullResponseReceived, this, &QuickRefactorHandler::handleFullResponse, Qt::UniqueConnection); connect( provider, - &LLMCore::Provider::requestFailed, + &PluginLLMCore::Provider::requestFailed, this, &QuickRefactorHandler::handleRequestFailed, Qt::UniqueConnection); @@ -198,12 +198,12 @@ void QuickRefactorHandler::prepareAndSendRequest( provider->sendRequest(requestId, config.url, config.providerRequest); } -LLMCore::ContextData QuickRefactorHandler::prepareContext( +PluginLLMCore::ContextData QuickRefactorHandler::prepareContext( TextEditor::TextEditorWidget *editor, const Utils::Text::Range &range, const QString &instructions) { - LLMCore::ContextData context; + PluginLLMCore::ContextData context; auto textDocument = editor->textDocument(); Context::DocumentReaderQtCreator documentReader; @@ -287,10 +287,10 @@ LLMCore::ContextData QuickRefactorHandler::prepareContext( QString systemPrompt = Settings::quickRefactorSettings().systemPrompt(); - auto project = LLMCore::RulesLoader::getActiveProject(); + auto project = PluginLLMCore::RulesLoader::getActiveProject(); if (project) { - QString projectRules = LLMCore::RulesLoader::loadRulesForProject( - project, LLMCore::RulesContext::QuickRefactor); + QString projectRules = PluginLLMCore::RulesLoader::loadRulesForProject( + project, PluginLLMCore::RulesContext::QuickRefactor); if (!projectRules.isEmpty()) { systemPrompt += "\n\n# Project Rules\n\n" + projectRules; @@ -368,7 +368,7 @@ LLMCore::ContextData 
QuickRefactorHandler::prepareContext( context.systemPrompt = systemPrompt; - QVector messages; + QVector messages; messages.append( {"user", instructions.isEmpty() ? "Refactor the code to improve its quality and maintainability." @@ -387,7 +387,7 @@ void QuickRefactorHandler::handleLLMResponse( if (isComplete) { m_isRefactoringInProgress = false; - QString cleanedResponse = LLMCore::ResponseCleaner::clean(response); + QString cleanedResponse = PluginLLMCore::ResponseCleaner::clean(response); RefactorResult result; result.newText = cleanedResponse; diff --git a/QuickRefactorHandler.hpp b/QuickRefactorHandler.hpp index 2029f70..b7b51df 100644 --- a/QuickRefactorHandler.hpp +++ b/QuickRefactorHandler.hpp @@ -27,8 +27,8 @@ #include #include -#include -#include +#include +#include namespace QodeAssist { @@ -68,7 +68,7 @@ private: const Utils::Text::Range &range); void handleLLMResponse(const QString &response, const QJsonObject &request, bool isComplete); - LLMCore::ContextData prepareContext( + PluginLLMCore::ContextData prepareContext( TextEditor::TextEditorWidget *editor, const Utils::Text::Range &range, const QString &instructions); @@ -76,7 +76,7 @@ private: struct RequestContext { QJsonObject originalRequest; - LLMCore::Provider *provider; + PluginLLMCore::Provider *provider; }; QHash m_activeRequests; diff --git a/context/DocumentContextReader.cpp b/context/DocumentContextReader.cpp index 69e59bd..07362af 100644 --- a/context/DocumentContextReader.cpp +++ b/context/DocumentContextReader.cpp @@ -269,7 +269,7 @@ CopyrightInfo DocumentContextReader::copyrightInfo() const return m_copyrightInfo; } -LLMCore::ContextData DocumentContextReader::prepareContext( +PluginLLMCore::ContextData DocumentContextReader::prepareContext( int lineNumber, int cursorPosition, const Settings::CodeCompletionSettings &settings) const { QString contextBefore; diff --git a/context/DocumentContextReader.hpp b/context/DocumentContextReader.hpp index 4f055a7..51c592e 100644 --- 
a/context/DocumentContextReader.hpp +++ b/context/DocumentContextReader.hpp @@ -22,7 +22,7 @@ #include #include -#include +#include #include namespace QodeAssist::Context { @@ -73,7 +73,7 @@ public: CopyrightInfo copyrightInfo() const; - LLMCore::ContextData prepareContext( + PluginLLMCore::ContextData prepareContext( int lineNumber, int cursorPosition, const Settings::CodeCompletionSettings &settings) const; private: diff --git a/llmcore/BaseTool.cpp b/pluginllmcore/BaseTool.cpp similarity index 96% rename from llmcore/BaseTool.cpp rename to pluginllmcore/BaseTool.cpp index e1594af..ce53d08 100644 --- a/llmcore/BaseTool.cpp +++ b/pluginllmcore/BaseTool.cpp @@ -19,7 +19,7 @@ #include "BaseTool.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { BaseTool::BaseTool(QObject *parent) : QObject(parent) @@ -70,4 +70,4 @@ QJsonObject BaseTool::customizeForGoogle(const QJsonObject &baseDefinition) cons return tool; } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/BaseTool.hpp b/pluginllmcore/BaseTool.hpp similarity index 96% rename from llmcore/BaseTool.hpp rename to pluginllmcore/BaseTool.hpp index ddb5684..7421bb7 100644 --- a/llmcore/BaseTool.hpp +++ b/pluginllmcore/BaseTool.hpp @@ -25,7 +25,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { enum class ToolSchemaFormat { OpenAI, Claude, Ollama, Google }; @@ -67,4 +67,4 @@ protected: virtual QJsonObject customizeForGoogle(const QJsonObject &baseDefinition) const; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/CMakeLists.txt b/pluginllmcore/CMakeLists.txt similarity index 100% rename from llmcore/CMakeLists.txt rename to pluginllmcore/CMakeLists.txt diff --git a/llmcore/ContentBlocks.hpp b/pluginllmcore/ContentBlocks.hpp similarity index 98% rename from llmcore/ContentBlocks.hpp rename to pluginllmcore/ContentBlocks.hpp index 9286eea..2eb9014 100644 
--- a/llmcore/ContentBlocks.hpp +++ b/pluginllmcore/ContentBlocks.hpp @@ -26,7 +26,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { enum class MessageState { Building, Complete, RequiresToolExecution, Final }; @@ -249,4 +249,4 @@ private: QString m_signature; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/ContextData.hpp b/pluginllmcore/ContextData.hpp similarity index 95% rename from llmcore/ContextData.hpp rename to pluginllmcore/ContextData.hpp index e95c135..b23cd94 100644 --- a/llmcore/ContextData.hpp +++ b/pluginllmcore/ContextData.hpp @@ -22,7 +22,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { struct ImageAttachment { @@ -66,4 +66,4 @@ struct ContextData bool operator==(const ContextData &) const = default; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/DataBuffers.hpp b/pluginllmcore/DataBuffers.hpp similarity index 92% rename from llmcore/DataBuffers.hpp rename to pluginllmcore/DataBuffers.hpp index 4ab5dee..4d1b496 100644 --- a/llmcore/DataBuffers.hpp +++ b/pluginllmcore/DataBuffers.hpp @@ -22,7 +22,7 @@ #include "SSEBuffer.hpp" #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { struct DataBuffers { @@ -36,4 +36,4 @@ struct DataBuffers } }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/HttpClient.cpp b/pluginllmcore/HttpClient.cpp similarity index 99% rename from llmcore/HttpClient.cpp rename to pluginllmcore/HttpClient.cpp index 9fbcb45..6799d29 100644 --- a/llmcore/HttpClient.cpp +++ b/pluginllmcore/HttpClient.cpp @@ -24,7 +24,7 @@ #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { HttpClient::HttpClient(QObject *parent) : QObject(parent) @@ -273,4 +273,4 @@ QString HttpClient::parseErrorFromResponse( return QString("HTTP %1: 
%2").arg(statusCode).arg(networkErrorString); } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/HttpClient.hpp b/pluginllmcore/HttpClient.hpp similarity index 96% rename from llmcore/HttpClient.hpp rename to pluginllmcore/HttpClient.hpp index 44a18c9..c39d6bf 100644 --- a/llmcore/HttpClient.hpp +++ b/pluginllmcore/HttpClient.hpp @@ -30,7 +30,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class HttpClient : public QObject { @@ -73,4 +73,4 @@ private: mutable QMutex m_mutex; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/IPromptProvider.hpp b/pluginllmcore/IPromptProvider.hpp similarity index 93% rename from llmcore/IPromptProvider.hpp rename to pluginllmcore/IPromptProvider.hpp index 0dde34a..c23bf04 100644 --- a/llmcore/IPromptProvider.hpp +++ b/pluginllmcore/IPromptProvider.hpp @@ -22,7 +22,7 @@ #include "PromptTemplate.hpp" #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class IPromptProvider { @@ -36,4 +36,4 @@ public: virtual QStringList getTemplatesForProvider(ProviderID id) const = 0; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/IProviderRegistry.hpp b/pluginllmcore/IProviderRegistry.hpp similarity index 92% rename from llmcore/IProviderRegistry.hpp rename to pluginllmcore/IProviderRegistry.hpp index 18b9dca..855cee1 100644 --- a/llmcore/IProviderRegistry.hpp +++ b/pluginllmcore/IProviderRegistry.hpp @@ -21,7 +21,7 @@ #include "Provider.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class IProviderRegistry { @@ -33,4 +33,4 @@ public: virtual QStringList providersNames() const = 0; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/IToolsManager.hpp b/pluginllmcore/IToolsManager.hpp similarity index 97% rename from llmcore/IToolsManager.hpp 
rename to pluginllmcore/IToolsManager.hpp index 0637773..92c74d1 100644 --- a/llmcore/IToolsManager.hpp +++ b/pluginllmcore/IToolsManager.hpp @@ -26,7 +26,7 @@ #include "BaseTool.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class IToolsManager { diff --git a/llmcore/PromptProviderChat.hpp b/pluginllmcore/PromptProviderChat.hpp similarity index 95% rename from llmcore/PromptProviderChat.hpp rename to pluginllmcore/PromptProviderChat.hpp index 19804f1..5de59d4 100644 --- a/llmcore/PromptProviderChat.hpp +++ b/pluginllmcore/PromptProviderChat.hpp @@ -23,7 +23,7 @@ #include "PromptTemplate.hpp" #include "PromptTemplateManager.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class PromptProviderChat : public IPromptProvider { @@ -50,4 +50,4 @@ private: PromptTemplateManager &m_templateManager; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/PromptProviderFim.hpp b/pluginllmcore/PromptProviderFim.hpp similarity index 95% rename from llmcore/PromptProviderFim.hpp rename to pluginllmcore/PromptProviderFim.hpp index 19529c0..6e9d006 100644 --- a/llmcore/PromptProviderFim.hpp +++ b/pluginllmcore/PromptProviderFim.hpp @@ -22,7 +22,7 @@ #include "IPromptProvider.hpp" #include "PromptTemplateManager.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class PromptProviderFim : public IPromptProvider { @@ -49,4 +49,4 @@ private: PromptTemplateManager &m_templateManager; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/PromptTemplate.hpp b/pluginllmcore/PromptTemplate.hpp similarity index 94% rename from llmcore/PromptTemplate.hpp rename to pluginllmcore/PromptTemplate.hpp index 07eee24..d320041 100644 --- a/llmcore/PromptTemplate.hpp +++ b/pluginllmcore/PromptTemplate.hpp @@ -26,7 +26,7 @@ #include "ContextData.hpp" #include "ProviderID.hpp" -namespace QodeAssist::LLMCore { +namespace 
QodeAssist::PluginLLMCore { enum class TemplateType { Chat, FIM, FIMOnChat }; @@ -41,4 +41,4 @@ public: virtual QString description() const = 0; virtual bool isSupportProvider(ProviderID id) const = 0; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/PromptTemplateManager.cpp b/pluginllmcore/PromptTemplateManager.cpp similarity index 97% rename from llmcore/PromptTemplateManager.cpp rename to pluginllmcore/PromptTemplateManager.cpp index 804b52c..1ba002f 100644 --- a/llmcore/PromptTemplateManager.cpp +++ b/pluginllmcore/PromptTemplateManager.cpp @@ -21,7 +21,7 @@ #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { PromptTemplateManager &PromptTemplateManager::instance() { @@ -96,4 +96,4 @@ PromptTemplate *PromptTemplateManager::getChatTemplateByName(const QString &temp return m_chatTemplates[templateName]; } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/PromptTemplateManager.hpp b/pluginllmcore/PromptTemplateManager.hpp similarity index 96% rename from llmcore/PromptTemplateManager.hpp rename to pluginllmcore/PromptTemplateManager.hpp index 94a9381..7b359c7 100644 --- a/llmcore/PromptTemplateManager.hpp +++ b/pluginllmcore/PromptTemplateManager.hpp @@ -24,7 +24,7 @@ #include "PromptTemplate.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class PromptTemplateManager { @@ -62,4 +62,4 @@ private: QMap m_chatTemplates; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/Provider.cpp b/pluginllmcore/Provider.cpp similarity index 90% rename from llmcore/Provider.cpp rename to pluginllmcore/Provider.cpp index dbae853..0a9da24 100644 --- a/llmcore/Provider.cpp +++ b/pluginllmcore/Provider.cpp @@ -2,7 +2,7 @@ #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { Provider::Provider(QObject *parent) : QObject(parent) @@ -33,4 +33,4 @@ 
QJsonObject Provider::parseEventLine(const QString &line) return doc.object(); } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/Provider.hpp b/pluginllmcore/Provider.hpp similarity index 81% rename from llmcore/Provider.hpp rename to pluginllmcore/Provider.hpp index dbc09c4..fd654a8 100644 --- a/llmcore/Provider.hpp +++ b/pluginllmcore/Provider.hpp @@ -37,7 +37,7 @@ class QNetworkReply; class QJsonObject; -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class Provider : public QObject { @@ -54,9 +54,9 @@ public: virtual bool supportsModelListing() const = 0; virtual void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) = 0; @@ -81,18 +81,18 @@ public: public slots: virtual void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) = 0; virtual void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) = 0; signals: void partialResponseReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QString &partialText); + const QodeAssist::PluginLLMCore::RequestID &requestId, const QString &partialText); void fullResponseReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QString &fullText); - void requestFailed(const QodeAssist::LLMCore::RequestID &requestId, const QString &error); + const QodeAssist::PluginLLMCore::RequestID &requestId, const QString &fullText); + void requestFailed(const QodeAssist::PluginLLMCore::RequestID &requestId, const QString &error); void toolExecutionStarted( const QString 
&requestId, const QString &toolId, const QString &toolName); void toolExecutionCompleted( @@ -100,7 +100,7 @@ signals: const QString &toolId, const QString &toolName, const QString &result); - void continuationStarted(const QodeAssist::LLMCore::RequestID &requestId); + void continuationStarted(const QodeAssist::PluginLLMCore::RequestID &requestId); void thinkingBlockReceived( const QString &requestId, const QString &thinking, const QString &signature); void redactedThinkingBlockReceived(const QString &requestId, const QString &signature); @@ -115,4 +115,4 @@ private: HttpClient *m_httpClient; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/ProviderID.hpp b/pluginllmcore/ProviderID.hpp similarity index 96% rename from llmcore/ProviderID.hpp rename to pluginllmcore/ProviderID.hpp index 19e7b5b..dc1ea6e 100644 --- a/llmcore/ProviderID.hpp +++ b/pluginllmcore/ProviderID.hpp @@ -17,7 +17,7 @@ * along with QodeAssist. If not, see . */ -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { enum class ProviderID { Any, diff --git a/llmcore/ProvidersManager.cpp b/pluginllmcore/ProvidersManager.cpp similarity index 94% rename from llmcore/ProvidersManager.cpp rename to pluginllmcore/ProvidersManager.cpp index 9a48866..a2abf15 100644 --- a/llmcore/ProvidersManager.cpp +++ b/pluginllmcore/ProvidersManager.cpp @@ -19,7 +19,7 @@ #include "ProvidersManager.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { ProvidersManager &ProvidersManager::instance() { @@ -44,4 +44,4 @@ Provider *ProvidersManager::getProviderByName(const QString &providerName) return m_providers[providerName]; } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/ProvidersManager.hpp b/pluginllmcore/ProvidersManager.hpp similarity index 95% rename from llmcore/ProvidersManager.hpp rename to pluginllmcore/ProvidersManager.hpp index a985233..789a946 100644 --- 
a/llmcore/ProvidersManager.hpp +++ b/pluginllmcore/ProvidersManager.hpp @@ -24,7 +24,7 @@ #include "IProviderRegistry.hpp" #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class ProvidersManager : public IProviderRegistry { @@ -53,4 +53,4 @@ private: QMap m_providers; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/RequestConfig.hpp b/pluginllmcore/RequestConfig.hpp similarity index 93% rename from llmcore/RequestConfig.hpp rename to pluginllmcore/RequestConfig.hpp index e6d5413..55b4bf3 100644 --- a/llmcore/RequestConfig.hpp +++ b/pluginllmcore/RequestConfig.hpp @@ -25,7 +25,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { struct LLMConfig { @@ -38,4 +38,4 @@ struct LLMConfig QString apiKey; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/RequestHandlerBase.cpp b/pluginllmcore/RequestHandlerBase.cpp similarity index 100% rename from llmcore/RequestHandlerBase.cpp rename to pluginllmcore/RequestHandlerBase.cpp diff --git a/llmcore/RequestHandlerBase.hpp b/pluginllmcore/RequestHandlerBase.hpp similarity index 100% rename from llmcore/RequestHandlerBase.hpp rename to pluginllmcore/RequestHandlerBase.hpp diff --git a/llmcore/RequestType.hpp b/pluginllmcore/RequestType.hpp similarity index 95% rename from llmcore/RequestType.hpp rename to pluginllmcore/RequestType.hpp index 1b9b5aa..3dd47e6 100644 --- a/llmcore/RequestType.hpp +++ b/pluginllmcore/RequestType.hpp @@ -21,7 +21,7 @@ #pragma once -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { enum RequestType { CodeCompletion, Chat, Embedding, QuickRefactoring }; diff --git a/llmcore/ResponseCleaner.hpp b/pluginllmcore/ResponseCleaner.hpp similarity index 97% rename from llmcore/ResponseCleaner.hpp rename to pluginllmcore/ResponseCleaner.hpp index e2dc683..6363045 100644 --- a/llmcore/ResponseCleaner.hpp +++ 
b/pluginllmcore/ResponseCleaner.hpp @@ -23,7 +23,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class ResponseCleaner { @@ -115,5 +115,5 @@ private: } }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/RulesLoader.cpp b/pluginllmcore/RulesLoader.cpp similarity index 98% rename from llmcore/RulesLoader.cpp rename to pluginllmcore/RulesLoader.cpp index abb2100..346a122 100644 --- a/llmcore/RulesLoader.cpp +++ b/pluginllmcore/RulesLoader.cpp @@ -26,7 +26,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { QString RulesLoader::loadRules(const QString &projectPath, RulesContext context) { @@ -178,4 +178,4 @@ QVector RulesLoader::collectMarkdownFiles( return result; } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/RulesLoader.hpp b/pluginllmcore/RulesLoader.hpp similarity index 95% rename from llmcore/RulesLoader.hpp rename to pluginllmcore/RulesLoader.hpp index 2fbe76e..437f75c 100644 --- a/llmcore/RulesLoader.hpp +++ b/pluginllmcore/RulesLoader.hpp @@ -25,7 +25,7 @@ namespace ProjectExplorer { class Project; } -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { enum class RulesContext { Completions, Chat, QuickRefactor }; @@ -54,4 +54,4 @@ private: static QString getProjectPath(ProjectExplorer::Project *project); }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/SSEBuffer.cpp b/pluginllmcore/SSEBuffer.cpp similarity index 93% rename from llmcore/SSEBuffer.cpp rename to pluginllmcore/SSEBuffer.cpp index bc53c29..c0a7948 100644 --- a/llmcore/SSEBuffer.cpp +++ b/pluginllmcore/SSEBuffer.cpp @@ -19,7 +19,7 @@ #include "SSEBuffer.hpp" -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { QStringList SSEBuffer::processData(const QByteArray &data) { @@ -48,4 +48,4 @@ bool 
SSEBuffer::hasIncompleteData() const return !m_buffer.isEmpty(); } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/SSEBuffer.hpp b/pluginllmcore/SSEBuffer.hpp similarity index 92% rename from llmcore/SSEBuffer.hpp rename to pluginllmcore/SSEBuffer.hpp index 1f05572..629444f 100644 --- a/llmcore/SSEBuffer.hpp +++ b/pluginllmcore/SSEBuffer.hpp @@ -22,7 +22,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class SSEBuffer { @@ -39,4 +39,4 @@ private: QString m_buffer; }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/ValidationUtils.cpp b/pluginllmcore/ValidationUtils.cpp similarity index 96% rename from llmcore/ValidationUtils.cpp rename to pluginllmcore/ValidationUtils.cpp index 8d4f72f..4b8dbac 100644 --- a/llmcore/ValidationUtils.cpp +++ b/pluginllmcore/ValidationUtils.cpp @@ -21,7 +21,7 @@ #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { QStringList ValidationUtils::validateRequestFields( const QJsonObject &request, const QJsonObject &templateObj) @@ -54,4 +54,4 @@ void ValidationUtils::validateNestedObjects( } } -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/llmcore/ValidationUtils.hpp b/pluginllmcore/ValidationUtils.hpp similarity index 93% rename from llmcore/ValidationUtils.hpp rename to pluginllmcore/ValidationUtils.hpp index b77ab97..de9835d 100644 --- a/llmcore/ValidationUtils.hpp +++ b/pluginllmcore/ValidationUtils.hpp @@ -22,7 +22,7 @@ #include #include -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class ValidationUtils { @@ -38,4 +38,4 @@ private: const QJsonObject &request, const QJsonObject &templateObj, QStringList &errors); }; -} // namespace QodeAssist::LLMCore +} // namespace QodeAssist::PluginLLMCore diff --git a/providers/ClaudeMessage.cpp b/providers/ClaudeMessage.cpp index 29397a4..13f48f5 100644 
--- a/providers/ClaudeMessage.cpp +++ b/providers/ClaudeMessage.cpp @@ -37,32 +37,32 @@ void ClaudeMessage::handleContentBlockStart( .arg(blockType)); if (blockType == "text") { - addCurrentContent(); + addCurrentContent(); } else if (blockType == "image") { QJsonObject source = data["source"].toObject(); QString sourceType = source["type"].toString(); QString imageData; QString mediaType; - LLMCore::ImageContent::ImageSourceType imgSourceType = LLMCore::ImageContent::ImageSourceType::Base64; + PluginLLMCore::ImageContent::ImageSourceType imgSourceType = PluginLLMCore::ImageContent::ImageSourceType::Base64; if (sourceType == "base64") { imageData = source["data"].toString(); mediaType = source["media_type"].toString(); - imgSourceType = LLMCore::ImageContent::ImageSourceType::Base64; + imgSourceType = PluginLLMCore::ImageContent::ImageSourceType::Base64; } else if (sourceType == "url") { imageData = source["url"].toString(); - imgSourceType = LLMCore::ImageContent::ImageSourceType::Url; + imgSourceType = PluginLLMCore::ImageContent::ImageSourceType::Url; } - addCurrentContent(imageData, mediaType, imgSourceType); + addCurrentContent(imageData, mediaType, imgSourceType); } else if (blockType == "tool_use") { QString toolId = data["id"].toString(); QString toolName = data["name"].toString(); QJsonObject toolInput = data["input"].toObject(); - addCurrentContent(toolId, toolName, toolInput); + addCurrentContent(toolId, toolName, toolInput); m_pendingToolInputs[index] = ""; } else if (blockType == "thinking") { @@ -70,13 +70,13 @@ void ClaudeMessage::handleContentBlockStart( QString signature = data["signature"].toString(); LOG_MESSAGE(QString("ClaudeMessage: Creating thinking block with signature length=%1") .arg(signature.length())); - addCurrentContent(thinking, signature); + addCurrentContent(thinking, signature); } else if (blockType == "redacted_thinking") { QString signature = data["signature"].toString(); LOG_MESSAGE(QString("ClaudeMessage: Creating 
redacted_thinking block with signature length=%1") .arg(signature.length())); - addCurrentContent(signature); + addCurrentContent(signature); } } @@ -88,7 +88,7 @@ void ClaudeMessage::handleContentBlockDelta( } if (deltaType == "text_delta") { - if (auto textContent = qobject_cast(m_currentBlocks[index])) { + if (auto textContent = qobject_cast(m_currentBlocks[index])) { textContent->appendText(delta["text"].toString()); } @@ -99,17 +99,17 @@ void ClaudeMessage::handleContentBlockDelta( } } else if (deltaType == "thinking_delta") { - if (auto thinkingContent = qobject_cast(m_currentBlocks[index])) { + if (auto thinkingContent = qobject_cast(m_currentBlocks[index])) { thinkingContent->appendThinking(delta["thinking"].toString()); } } else if (deltaType == "signature_delta") { - if (auto thinkingContent = qobject_cast(m_currentBlocks[index])) { + if (auto thinkingContent = qobject_cast(m_currentBlocks[index])) { QString signature = delta["signature"].toString(); thinkingContent->setSignature(signature); LOG_MESSAGE(QString("Set signature for thinking block %1: length=%2") .arg(index).arg(signature.length())); - } else if (auto redactedContent = qobject_cast(m_currentBlocks[index])) { + } else if (auto redactedContent = qobject_cast(m_currentBlocks[index])) { QString signature = delta["signature"].toString(); redactedContent->setSignature(signature); LOG_MESSAGE(QString("Set signature for redacted_thinking block %1: length=%2") @@ -132,7 +132,7 @@ void ClaudeMessage::handleContentBlockStop(int index) } if (index < m_currentBlocks.size()) { - if (auto toolContent = qobject_cast(m_currentBlocks[index])) { + if (auto toolContent = qobject_cast(m_currentBlocks[index])) { toolContent->setInput(inputObject); } } @@ -155,7 +155,7 @@ QJsonObject ClaudeMessage::toProviderFormat() const QJsonArray content; for (auto block : m_currentBlocks) { - QJsonValue blockJson = block->toJson(LLMCore::ProviderFormat::Claude); + QJsonValue blockJson = 
block->toJson(PluginLLMCore::ProviderFormat::Claude); content.append(blockJson); } @@ -173,42 +173,42 @@ QJsonArray ClaudeMessage::createToolResultsContent(const QHash for (auto toolContent : getCurrentToolUseContent()) { if (toolResults.contains(toolContent->id())) { - auto toolResult = std::make_unique( + auto toolResult = std::make_unique( toolContent->id(), toolResults[toolContent->id()]); - results.append(toolResult->toJson(LLMCore::ProviderFormat::Claude)); + results.append(toolResult->toJson(PluginLLMCore::ProviderFormat::Claude)); } } return results; } -QList ClaudeMessage::getCurrentToolUseContent() const +QList ClaudeMessage::getCurrentToolUseContent() const { - QList toolBlocks; + QList toolBlocks; for (auto block : m_currentBlocks) { - if (auto toolContent = qobject_cast(block)) { + if (auto toolContent = qobject_cast(block)) { toolBlocks.append(toolContent); } } return toolBlocks; } -QList ClaudeMessage::getCurrentThinkingContent() const +QList ClaudeMessage::getCurrentThinkingContent() const { - QList thinkingBlocks; + QList thinkingBlocks; for (auto block : m_currentBlocks) { - if (auto thinkingContent = qobject_cast(block)) { + if (auto thinkingContent = qobject_cast(block)) { thinkingBlocks.append(thinkingContent); } } return thinkingBlocks; } -QList ClaudeMessage::getCurrentRedactedThinkingContent() const +QList ClaudeMessage::getCurrentRedactedThinkingContent() const { - QList redactedBlocks; + QList redactedBlocks; for (auto block : m_currentBlocks) { - if (auto redactedContent = qobject_cast(block)) { + if (auto redactedContent = qobject_cast(block)) { redactedBlocks.append(redactedContent); } } @@ -222,17 +222,17 @@ void ClaudeMessage::startNewContinuation() m_currentBlocks.clear(); m_pendingToolInputs.clear(); m_stopReason.clear(); - m_state = LLMCore::MessageState::Building; + m_state = PluginLLMCore::MessageState::Building; } void ClaudeMessage::updateStateFromStopReason() { if (m_stopReason == "tool_use" && 
!getCurrentToolUseContent().empty()) { - m_state = LLMCore::MessageState::RequiresToolExecution; + m_state = PluginLLMCore::MessageState::RequiresToolExecution; } else if (m_stopReason == "end_turn") { - m_state = LLMCore::MessageState::Final; + m_state = PluginLLMCore::MessageState::Final; } else { - m_state = LLMCore::MessageState::Complete; + m_state = PluginLLMCore::MessageState::Complete; } } diff --git a/providers/ClaudeMessage.hpp b/providers/ClaudeMessage.hpp index 5c6b623..68efdc8 100644 --- a/providers/ClaudeMessage.hpp +++ b/providers/ClaudeMessage.hpp @@ -19,7 +19,7 @@ #pragma once -#include +#include namespace QodeAssist { @@ -37,18 +37,18 @@ public: QJsonObject toProviderFormat() const; QJsonArray createToolResultsContent(const QHash &toolResults) const; - LLMCore::MessageState state() const { return m_state; } - QList getCurrentToolUseContent() const; - QList getCurrentThinkingContent() const; - QList getCurrentRedactedThinkingContent() const; - const QList &getCurrentBlocks() const { return m_currentBlocks; } + PluginLLMCore::MessageState state() const { return m_state; } + QList getCurrentToolUseContent() const; + QList getCurrentThinkingContent() const; + QList getCurrentRedactedThinkingContent() const; + const QList &getCurrentBlocks() const { return m_currentBlocks; } void startNewContinuation(); private: QString m_stopReason; - LLMCore::MessageState m_state = LLMCore::MessageState::Building; - QList m_currentBlocks; + PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building; + QList m_currentBlocks; QHash m_pendingToolInputs; void updateStateFromStopReason(); diff --git a/providers/ClaudeProvider.cpp b/providers/ClaudeProvider.cpp index 8094deb..1aea058 100644 --- a/providers/ClaudeProvider.cpp +++ b/providers/ClaudeProvider.cpp @@ -24,7 +24,7 @@ #include #include -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" 
#include "settings/CodeCompletionSettings.hpp" @@ -35,7 +35,7 @@ namespace QodeAssist::Providers { ClaudeProvider::ClaudeProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -72,9 +72,9 @@ bool ClaudeProvider::supportsModelListing() const void ClaudeProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -102,10 +102,10 @@ void ClaudeProvider::prepareRequest( request["temperature"] = 1.0; }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); request["temperature"] = Settings::codeCompletionSettings().temperature(); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { const auto &qrSettings = Settings::quickRefactorSettings(); applyModelParams(qrSettings); @@ -126,13 +126,13 @@ void ClaudeProvider::prepareRequest( } if (isToolsEnabled) { - LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->getToolsDefinitions( - LLMCore::ToolSchemaFormat::Claude, filter); + PluginLLMCore::ToolSchemaFormat::Claude, filter); if (!toolsDefinitions.isEmpty()) { request["tools"] = toolsDefinitions; LOG_MESSAGE(QString("Added %1 tools to Claude request").arg(toolsDefinitions.size())); @@ -175,7 +175,7 @@ 
QFuture> ClaudeProvider::getInstalledModels(const QString &baseUr }); } -QList ClaudeProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type) +QList ClaudeProvider::validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) { const auto templateReq = QJsonObject{ {"model", {}}, @@ -191,7 +191,7 @@ QList ClaudeProvider::validateRequest(const QJsonObject &request, LLMCo {"tools", {}}, {"thinking", QJsonObject{{"type", {}}, {"budget_tokens", {}}}}}; - return LLMCore::ValidationUtils::validateRequestFields(request, templateReq); + return PluginLLMCore::ValidationUtils::validateRequestFields(request, templateReq); } QString ClaudeProvider::apiKey() const @@ -209,13 +209,13 @@ void ClaudeProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) cons } } -LLMCore::ProviderID ClaudeProvider::providerID() const +PluginLLMCore::ProviderID ClaudeProvider::providerID() const { - return LLMCore::ProviderID::Claude; + return PluginLLMCore::ProviderID::Claude; } void ClaudeProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -245,22 +245,22 @@ bool ClaudeProvider::supportImage() const { return true; }; -void ClaudeProvider::cancelRequest(const LLMCore::RequestID &requestId) +void ClaudeProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("ClaudeProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } -LLMCore::IToolsManager *ClaudeProvider::toolsManager() const +PluginLLMCore::IToolsManager *ClaudeProvider::toolsManager() const { return m_toolsManager; } void ClaudeProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const 
QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); for (const QString &line : lines) { @@ -273,7 +273,7 @@ void ClaudeProvider::onDataReceived( } void ClaudeProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("ClaudeProvider request %1 failed: %2").arg(requestId, *error)); @@ -284,7 +284,7 @@ void ClaudeProvider::onRequestFinished( if (m_messages.contains(requestId)) { ClaudeMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); m_dataBuffers.remove(requestId); return; @@ -292,7 +292,7 @@ void ClaudeProvider::onRequestFinished( } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId)); emit fullResponseReceived(requestId, buffers.responseContent); @@ -403,7 +403,7 @@ void ClaudeProvider::processStreamEvent(const QString &requestId, const QJsonObj if (deltaType == "text_delta") { QString text = delta["text"].toString(); - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += text; emit partialResponseReceived(requestId, text); } else if (deltaType == "signature_delta") { @@ -434,7 +434,7 @@ void 
ClaudeProvider::processStreamEvent(const QString &requestId, const QJsonObj if (!signature.isEmpty()) { auto allBlocks = message->getCurrentBlocks(); if (index < allBlocks.size()) { - if (auto thinkingContent = qobject_cast(allBlocks[index])) { + if (auto thinkingContent = qobject_cast(allBlocks[index])) { thinkingContent->setSignature(signature); LOG_MESSAGE( QString("Updated thinking block signature from content_block_stop, " @@ -448,7 +448,7 @@ void ClaudeProvider::processStreamEvent(const QString &requestId, const QJsonObj if (!signature.isEmpty()) { auto allBlocks = message->getCurrentBlocks(); if (index < allBlocks.size()) { - if (auto redactedContent = qobject_cast(allBlocks[index])) { + if (auto redactedContent = qobject_cast(allBlocks[index])) { redactedContent->setSignature(signature); LOG_MESSAGE( QString("Updated redacted_thinking block signature from content_block_stop, " @@ -507,7 +507,7 @@ void ClaudeProvider::handleMessageComplete(const QString &requestId) ClaudeMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Claude message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -530,7 +530,7 @@ void ClaudeProvider::handleMessageComplete(const QString &requestId) } } -void ClaudeProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void ClaudeProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("Cleaning up Claude request %1").arg(requestId)); diff --git a/providers/ClaudeProvider.hpp b/providers/ClaudeProvider.hpp index 335f6ac..c5d5c55 100644 --- a/providers/ClaudeProvider.hpp +++ b/providers/ClaudeProvider.hpp @@ -19,14 +19,14 @@ #pragma once -#include +#include #include "ClaudeMessage.hpp" #include "tools/ToolsManager.hpp" namespace QodeAssist::Providers { -class ClaudeProvider : 
public LLMCore::Provider +class ClaudeProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -39,32 +39,32 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportThinking() const override; bool supportImage() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; - LLMCore::IToolsManager *toolsManager() const override; + PluginLLMCore::IToolsManager *toolsManager() const override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -74,11 +74,11 @@ private slots: private: void processStreamEvent(const QString 
&requestId, const QJsonObject &event); void handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/GoogleAIProvider.cpp b/providers/GoogleAIProvider.cpp index 599780c..fe49ace 100644 --- a/providers/GoogleAIProvider.cpp +++ b/providers/GoogleAIProvider.cpp @@ -24,7 +24,7 @@ #include #include -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -35,7 +35,7 @@ namespace QodeAssist::Providers { GoogleAIProvider::GoogleAIProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -72,9 +72,9 @@ bool GoogleAIProvider::supportsModelListing() const void GoogleAIProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -119,9 +119,9 @@ void GoogleAIProvider::prepareRequest( request["generationConfig"] = generationConfig; }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { const auto &qrSettings = Settings::quickRefactorSettings(); if (isThinkingEnabled) { @@ -140,13 +140,13 @@ void GoogleAIProvider::prepareRequest( 
} if (isToolsEnabled) { - LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->getToolsDefinitions( - LLMCore::ToolSchemaFormat::Google, filter); + PluginLLMCore::ToolSchemaFormat::Google, filter); if (!toolsDefinitions.isEmpty()) { request["tools"] = toolsDefinitions; LOG_MESSAGE(QString("Added %1 tools to Google AI request").arg(toolsDefinitions.size())); @@ -184,7 +184,7 @@ QFuture> GoogleAIProvider::getInstalledModels(const QString &url) } QList GoogleAIProvider::validateRequest( - const QJsonObject &request, LLMCore::TemplateType type) + const QJsonObject &request, PluginLLMCore::TemplateType type) { QJsonObject templateReq; @@ -202,7 +202,7 @@ QList GoogleAIProvider::validateRequest( {"safetySettings", QJsonArray{}}, {"tools", QJsonArray{}}}; - return LLMCore::ValidationUtils::validateRequestFields(request, templateReq); + return PluginLLMCore::ValidationUtils::validateRequestFields(request, templateReq); } QString GoogleAIProvider::apiKey() const @@ -221,13 +221,13 @@ void GoogleAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) co networkRequest.setUrl(url); } -LLMCore::ProviderID GoogleAIProvider::providerID() const +PluginLLMCore::ProviderID GoogleAIProvider::providerID() const { - return LLMCore::ProviderID::GoogleAI; + return PluginLLMCore::ProviderID::GoogleAI; } void GoogleAIProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -260,15 +260,15 @@ bool 
GoogleAIProvider::supportImage() const return true; } -void GoogleAIProvider::cancelRequest(const LLMCore::RequestID &requestId) +void GoogleAIProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("GoogleAIProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void GoogleAIProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { if (data.isEmpty()) { return; @@ -292,7 +292,7 @@ void GoogleAIProvider::onDataReceived( } } - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); for (const QString &line : lines) { @@ -309,7 +309,7 @@ void GoogleAIProvider::onDataReceived( } void GoogleAIProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("GoogleAIProvider request %1 failed: %2").arg(requestId, *error)); @@ -330,7 +330,7 @@ void GoogleAIProvider::onRequestFinished( handleMessageComplete(requestId); - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); m_dataBuffers.remove(requestId); return; @@ -338,7 +338,7 @@ void GoogleAIProvider::onRequestFinished( } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { emit fullResponseReceived(requestId, 
buffers.responseContent); } else { @@ -407,7 +407,7 @@ void GoogleAIProvider::processStreamChunk(const QString &requestId, const QJsonO } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); m_emittedThinkingBlocksCount[requestId] = 0; LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId)); @@ -440,7 +440,7 @@ void GoogleAIProvider::processStreamChunk(const QString &requestId, const QJsonO message->handleContentDelta(text); - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += text; emit partialResponseReceived(requestId, text); } @@ -533,7 +533,7 @@ void GoogleAIProvider::handleMessageComplete(const QString &requestId) GoogleMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Google AI message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -555,7 +555,7 @@ void GoogleAIProvider::handleMessageComplete(const QString &requestId) } } -void GoogleAIProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void GoogleAIProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("Cleaning up Google AI request %1").arg(requestId)); diff --git a/providers/GoogleAIProvider.hpp b/providers/GoogleAIProvider.hpp index f228c99..26ffc7e 100644 --- a/providers/GoogleAIProvider.hpp +++ b/providers/GoogleAIProvider.hpp @@ -20,12 +20,12 @@ #pragma once #include "GoogleMessage.hpp" -#include "llmcore/Provider.hpp" +#include "pluginllmcore/Provider.hpp" #include "tools/ToolsManager.hpp" namespace QodeAssist::Providers { 
-class GoogleAIProvider : public LLMCore::Provider +class GoogleAIProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -38,30 +38,30 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportThinking() const override; bool supportImage() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -72,13 +72,13 @@ private: void processStreamChunk(const QString &requestId, const QJsonObject &chunk); void handleMessageComplete(const QString &requestId); void 
emitPendingThinkingBlocks(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; - QHash m_emittedThinkingBlocksCount; - QSet m_failedRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; + QHash m_emittedThinkingBlocksCount; + QSet m_failedRequests; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/GoogleMessage.cpp b/providers/GoogleMessage.cpp index c2bfeeb..33be078 100644 --- a/providers/GoogleMessage.cpp +++ b/providers/GoogleMessage.cpp @@ -32,26 +32,26 @@ GoogleMessage::GoogleMessage(QObject *parent) void GoogleMessage::handleContentDelta(const QString &text) { - if (m_currentBlocks.isEmpty() || !qobject_cast(m_currentBlocks.last())) { - auto textContent = new LLMCore::TextContent(); + if (m_currentBlocks.isEmpty() || !qobject_cast(m_currentBlocks.last())) { + auto textContent = new PluginLLMCore::TextContent(); textContent->setParent(this); m_currentBlocks.append(textContent); } - if (auto textContent = qobject_cast(m_currentBlocks.last())) { + if (auto textContent = qobject_cast(m_currentBlocks.last())) { textContent->appendText(text); } } void GoogleMessage::handleThoughtDelta(const QString &text) { - if (m_currentBlocks.isEmpty() || !qobject_cast(m_currentBlocks.last())) { - auto thinkingContent = new LLMCore::ThinkingContent(); + if (m_currentBlocks.isEmpty() || !qobject_cast(m_currentBlocks.last())) { + auto thinkingContent = new PluginLLMCore::ThinkingContent(); thinkingContent->setParent(this); m_currentBlocks.append(thinkingContent); } - if (auto thinkingContent = qobject_cast(m_currentBlocks.last())) { + if (auto thinkingContent = qobject_cast(m_currentBlocks.last())) { thinkingContent->appendThinking(text); } } @@ -59,13 +59,13 @@ void GoogleMessage::handleThoughtDelta(const QString &text) void GoogleMessage::handleThoughtSignature(const 
QString &signature) { for (int i = m_currentBlocks.size() - 1; i >= 0; --i) { - if (auto thinkingContent = qobject_cast(m_currentBlocks[i])) { + if (auto thinkingContent = qobject_cast(m_currentBlocks[i])) { thinkingContent->setSignature(signature); return; } } - auto thinkingContent = new LLMCore::ThinkingContent(); + auto thinkingContent = new PluginLLMCore::ThinkingContent(); thinkingContent->setParent(this); thinkingContent->setSignature(signature); m_currentBlocks.append(thinkingContent); @@ -97,7 +97,7 @@ void GoogleMessage::handleFunctionCallComplete() } QString id = QUuid::createUuid().toString(QUuid::WithoutBraces); - auto toolContent = new LLMCore::ToolUseContent(id, m_currentFunctionName, args); + auto toolContent = new PluginLLMCore::ToolUseContent(id, m_currentFunctionName, args); toolContent->setParent(this); m_currentBlocks.append(toolContent); @@ -122,14 +122,14 @@ QJsonObject GoogleMessage::toProviderFormat() const if (!block) continue; - if (auto text = qobject_cast(block)) { + if (auto text = qobject_cast(block)) { parts.append(QJsonObject{{"text", text->text()}}); - } else if (auto tool = qobject_cast(block)) { + } else if (auto tool = qobject_cast(block)) { QJsonObject functionCall; functionCall["name"] = tool->name(); functionCall["args"] = tool->input(); parts.append(QJsonObject{{"functionCall", functionCall}}); - } else if (auto thinking = qobject_cast(block)) { + } else if (auto thinking = qobject_cast(block)) { // Include thinking blocks with their text QJsonObject thinkingPart; thinkingPart["text"] = thinking->thinking(); @@ -169,22 +169,22 @@ QJsonArray GoogleMessage::createToolResultParts(const QHash &t return parts; } -QList GoogleMessage::getCurrentToolUseContent() const +QList GoogleMessage::getCurrentToolUseContent() const { - QList toolBlocks; + QList toolBlocks; for (auto block : m_currentBlocks) { - if (auto toolContent = qobject_cast(block)) { + if (auto toolContent = qobject_cast(block)) { toolBlocks.append(toolContent); } } 
return toolBlocks; } -QList GoogleMessage::getCurrentThinkingContent() const +QList GoogleMessage::getCurrentThinkingContent() const { - QList thinkingBlocks; + QList thinkingBlocks; for (auto block : m_currentBlocks) { - if (auto thinkingContent = qobject_cast(block)) { + if (auto thinkingContent = qobject_cast(block)) { thinkingBlocks.append(thinkingContent); } } @@ -199,7 +199,7 @@ void GoogleMessage::startNewContinuation() m_pendingFunctionArgs.clear(); m_currentFunctionName.clear(); m_finishReason.clear(); - m_state = LLMCore::MessageState::Building; + m_state = PluginLLMCore::MessageState::Building; } bool GoogleMessage::isErrorFinishReason() const @@ -234,10 +234,10 @@ void GoogleMessage::updateStateFromFinishReason() { if (m_finishReason == "STOP" || m_finishReason == "MAX_TOKENS") { m_state = getCurrentToolUseContent().isEmpty() - ? LLMCore::MessageState::Complete - : LLMCore::MessageState::RequiresToolExecution; + ? PluginLLMCore::MessageState::Complete + : PluginLLMCore::MessageState::RequiresToolExecution; } else { - m_state = LLMCore::MessageState::Complete; + m_state = PluginLLMCore::MessageState::Complete; } } diff --git a/providers/GoogleMessage.hpp b/providers/GoogleMessage.hpp index 036e8d1..39d9855 100644 --- a/providers/GoogleMessage.hpp +++ b/providers/GoogleMessage.hpp @@ -24,7 +24,7 @@ #include #include -#include +#include namespace QodeAssist::Providers { @@ -45,11 +45,11 @@ public: QJsonObject toProviderFormat() const; QJsonArray createToolResultParts(const QHash &toolResults) const; - QList getCurrentToolUseContent() const; - QList getCurrentThinkingContent() const; - QList currentBlocks() const { return m_currentBlocks; } + QList getCurrentToolUseContent() const; + QList getCurrentThinkingContent() const; + QList currentBlocks() const { return m_currentBlocks; } - LLMCore::MessageState state() const { return m_state; } + PluginLLMCore::MessageState state() const { return m_state; } QString finishReason() const { return m_finishReason; } 
bool isErrorFinishReason() const; QString getErrorMessage() const; @@ -58,11 +58,11 @@ public: private: void updateStateFromFinishReason(); - QList m_currentBlocks; + QList m_currentBlocks; QString m_pendingFunctionArgs; QString m_currentFunctionName; QString m_finishReason; - LLMCore::MessageState m_state = LLMCore::MessageState::Building; + PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building; }; } // namespace QodeAssist::Providers diff --git a/providers/LMStudioProvider.cpp b/providers/LMStudioProvider.cpp index 18b0dcf..e3a13cf 100644 --- a/providers/LMStudioProvider.cpp +++ b/providers/LMStudioProvider.cpp @@ -19,7 +19,7 @@ #include "LMStudioProvider.hpp" -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -34,7 +34,7 @@ namespace QodeAssist::Providers { LMStudioProvider::LMStudioProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -90,7 +90,7 @@ QFuture> LMStudioProvider::getInstalledModels(const QString &url) } QList LMStudioProvider::validateRequest( - const QJsonObject &request, LLMCore::TemplateType type) + const QJsonObject &request, PluginLLMCore::TemplateType type) { const auto templateReq = QJsonObject{ {"model", {}}, @@ -105,7 +105,7 @@ QList LMStudioProvider::validateRequest( {"stream", {}}, {"tools", {}}}; - return LLMCore::ValidationUtils::validateRequestFields(request, templateReq); + return PluginLLMCore::ValidationUtils::validateRequestFields(request, templateReq); } QString LMStudioProvider::apiKey() const @@ -118,13 +118,13 @@ void LMStudioProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) co networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json"); } -LLMCore::ProviderID LMStudioProvider::providerID() const 
+PluginLLMCore::ProviderID LMStudioProvider::providerID() const { - return LLMCore::ProviderID::LMStudio; + return PluginLLMCore::ProviderID::LMStudio; } void LMStudioProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -152,17 +152,17 @@ bool LMStudioProvider::supportImage() const return true; } -void LMStudioProvider::cancelRequest(const LLMCore::RequestID &requestId) +void LMStudioProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("LMStudioProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void LMStudioProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); for (const QString &line : lines) { @@ -179,7 +179,7 @@ void LMStudioProvider::onDataReceived( } void LMStudioProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("LMStudioProvider request %1 failed: %2").arg(requestId, *error)); @@ -190,7 +190,7 @@ void LMStudioProvider::onRequestFinished( if (m_messages.contains(requestId)) { OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for 
tools to complete for %1").arg(requestId)); m_dataBuffers.remove(requestId); return; @@ -198,7 +198,7 @@ void LMStudioProvider::onRequestFinished( } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId)); emit fullResponseReceived(requestId, buffers.responseContent); @@ -210,9 +210,9 @@ void LMStudioProvider::onRequestFinished( void LMStudioProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -236,22 +236,22 @@ void LMStudioProvider::prepareRequest( request["presence_penalty"] = settings.presencePenalty(); }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { applyModelParams(Settings::quickRefactorSettings()); } else { applyModelParams(Settings::chatAssistantSettings()); } if (isToolsEnabled) { - LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->getToolsDefinitions( - LLMCore::ToolSchemaFormat::OpenAI, filter); + PluginLLMCore::ToolSchemaFormat::OpenAI, filter); if (!toolsDefinitions.isEmpty()) { 
request["tools"] = toolsDefinitions; LOG_MESSAGE(QString("Added %1 tools to LMStudio request").arg(toolsDefinitions.size())); @@ -326,7 +326,7 @@ void LMStudioProvider::processStreamChunk(const QString &requestId, const QJsonO } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); emit continuationStarted(requestId); LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId)); @@ -336,7 +336,7 @@ void LMStudioProvider::processStreamChunk(const QString &requestId, const QJsonO QString content = delta["content"].toString(); message->handleContentDelta(content); - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += content; emit partialResponseReceived(requestId, content); } @@ -381,7 +381,7 @@ void LMStudioProvider::handleMessageComplete(const QString &requestId) OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("LMStudio message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -403,7 +403,7 @@ void LMStudioProvider::handleMessageComplete(const QString &requestId) } } -void LMStudioProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void LMStudioProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("Cleaning up LMStudio request %1").arg(requestId)); diff --git a/providers/LMStudioProvider.hpp b/providers/LMStudioProvider.hpp index 2aeb140..28cd0ba 100644 --- a/providers/LMStudioProvider.hpp +++ b/providers/LMStudioProvider.hpp @@ -21,11 +21,11 @@ #include "OpenAIMessage.hpp" #include 
"tools/ToolsManager.hpp" -#include +#include namespace QodeAssist::Providers { -class LMStudioProvider : public LLMCore::Provider +class LMStudioProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -38,29 +38,29 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportImage() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -70,11 +70,11 @@ private slots: private: void processStreamChunk(const QString &requestId, const QJsonObject &chunk); void 
handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/LlamaCppProvider.cpp b/providers/LlamaCppProvider.cpp index 563076b..b032ffc 100644 --- a/providers/LlamaCppProvider.cpp +++ b/providers/LlamaCppProvider.cpp @@ -19,7 +19,7 @@ #include "LlamaCppProvider.hpp" -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -33,7 +33,7 @@ namespace QodeAssist::Providers { LlamaCppProvider::LlamaCppProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -70,9 +70,9 @@ bool LlamaCppProvider::supportsModelListing() const void LlamaCppProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -96,22 +96,22 @@ void LlamaCppProvider::prepareRequest( request["presence_penalty"] = settings.presencePenalty(); }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { applyModelParams(Settings::quickRefactorSettings()); } else { applyModelParams(Settings::chatAssistantSettings()); } if (isToolsEnabled) { - LLMCore::RunToolsFilter 
filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->getToolsDefinitions( - LLMCore::ToolSchemaFormat::OpenAI, filter); + PluginLLMCore::ToolSchemaFormat::OpenAI, filter); if (!toolsDefinitions.isEmpty()) { request["tools"] = toolsDefinitions; LOG_MESSAGE(QString("Added %1 tools to llama.cpp request").arg(toolsDefinitions.size())); @@ -125,9 +125,9 @@ QFuture> LlamaCppProvider::getInstalledModels(const QString &) } QList LlamaCppProvider::validateRequest( - const QJsonObject &request, LLMCore::TemplateType type) + const QJsonObject &request, PluginLLMCore::TemplateType type) { - if (type == LLMCore::TemplateType::FIM) { + if (type == PluginLLMCore::TemplateType::FIM) { const auto infillReq = QJsonObject{ {"model", {}}, {"input_prefix", {}}, @@ -143,7 +143,7 @@ QList LlamaCppProvider::validateRequest( {"stop", QJsonArray{}}, {"stream", {}}}; - return LLMCore::ValidationUtils::validateRequestFields(request, infillReq); + return PluginLLMCore::ValidationUtils::validateRequestFields(request, infillReq); } else { const auto chatReq = QJsonObject{ {"model", {}}, @@ -158,7 +158,7 @@ QList LlamaCppProvider::validateRequest( {"stream", {}}, {"tools", {}}}; - return LLMCore::ValidationUtils::validateRequestFields(request, chatReq); + return PluginLLMCore::ValidationUtils::validateRequestFields(request, chatReq); } } @@ -172,13 +172,13 @@ void LlamaCppProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) co networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json"); } -LLMCore::ProviderID LlamaCppProvider::providerID() const +PluginLLMCore::ProviderID LlamaCppProvider::providerID() const { - return LLMCore::ProviderID::LlamaCpp; 
+ return PluginLLMCore::ProviderID::LlamaCpp; } void LlamaCppProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -206,17 +206,17 @@ bool LlamaCppProvider::supportImage() const return true; } -void LlamaCppProvider::cancelRequest(const LLMCore::RequestID &requestId) +void LlamaCppProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("LlamaCppProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void LlamaCppProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); for (const QString &line : lines) { @@ -245,7 +245,7 @@ void LlamaCppProvider::onDataReceived( } void LlamaCppProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("LlamaCppProvider request %1 failed: %2").arg(requestId, *error)); @@ -256,7 +256,7 @@ void LlamaCppProvider::onRequestFinished( if (m_messages.contains(requestId)) { OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); m_dataBuffers.remove(requestId); return; @@ -264,7 +264,7 @@ 
void LlamaCppProvider::onRequestFinished( } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId)); emit fullResponseReceived(requestId, buffers.responseContent); @@ -341,7 +341,7 @@ void LlamaCppProvider::processStreamChunk(const QString &requestId, const QJsonO } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); emit continuationStarted(requestId); LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId)); @@ -351,7 +351,7 @@ void LlamaCppProvider::processStreamChunk(const QString &requestId, const QJsonO QString content = delta["content"].toString(); message->handleContentDelta(content); - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += content; emit partialResponseReceived(requestId, content); } @@ -396,7 +396,7 @@ void LlamaCppProvider::handleMessageComplete(const QString &requestId) OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("llama.cpp message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -418,7 +418,7 @@ void LlamaCppProvider::handleMessageComplete(const QString &requestId) } } -void LlamaCppProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void LlamaCppProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("Cleaning up llama.cpp 
request %1").arg(requestId)); diff --git a/providers/LlamaCppProvider.hpp b/providers/LlamaCppProvider.hpp index b88216e..b1d0a07 100644 --- a/providers/LlamaCppProvider.hpp +++ b/providers/LlamaCppProvider.hpp @@ -21,11 +21,11 @@ #include "OpenAIMessage.hpp" #include "tools/ToolsManager.hpp" -#include +#include namespace QodeAssist::Providers { -class LlamaCppProvider : public LLMCore::Provider +class LlamaCppProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -38,29 +38,29 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportImage() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const 
QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -70,11 +70,11 @@ private slots: private: void processStreamChunk(const QString &requestId, const QJsonObject &chunk); void handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/MistralAIProvider.cpp b/providers/MistralAIProvider.cpp index 59baef2..0852b07 100644 --- a/providers/MistralAIProvider.cpp +++ b/providers/MistralAIProvider.cpp @@ -19,7 +19,7 @@ #include "MistralAIProvider.hpp" -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -34,7 +34,7 @@ namespace QodeAssist::Providers { MistralAIProvider::MistralAIProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -98,7 +98,7 @@ QFuture> MistralAIProvider::getInstalledModels(const QString &url } QList MistralAIProvider::validateRequest( - const QJsonObject &request, LLMCore::TemplateType type) + const QJsonObject &request, PluginLLMCore::TemplateType type) { const auto fimReq = QJsonObject{ {"model", {}}, @@ -121,8 +121,8 @@ QList MistralAIProvider::validateRequest( {"stream", {}}, {"tools", {}}}; - return LLMCore::ValidationUtils::validateRequestFields( - request, type == LLMCore::TemplateType::FIM ? fimReq : templateReq); + return PluginLLMCore::ValidationUtils::validateRequestFields( + request, type == PluginLLMCore::TemplateType::FIM ? 
fimReq : templateReq); } QString MistralAIProvider::apiKey() const @@ -139,13 +139,13 @@ void MistralAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) c } } -LLMCore::ProviderID MistralAIProvider::providerID() const +PluginLLMCore::ProviderID MistralAIProvider::providerID() const { - return LLMCore::ProviderID::MistralAI; + return PluginLLMCore::ProviderID::MistralAI; } void MistralAIProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -173,17 +173,17 @@ bool MistralAIProvider::supportImage() const return true; } -void MistralAIProvider::cancelRequest(const LLMCore::RequestID &requestId) +void MistralAIProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("MistralAIProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void MistralAIProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); for (const QString &line : lines) { @@ -200,7 +200,7 @@ void MistralAIProvider::onDataReceived( } void MistralAIProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("MistralAIProvider request %1 failed: %2").arg(requestId, *error)); @@ -211,7 +211,7 @@ void MistralAIProvider::onRequestFinished( if 
(m_messages.contains(requestId)) { OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); m_dataBuffers.remove(requestId); return; @@ -219,7 +219,7 @@ void MistralAIProvider::onRequestFinished( } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId)); emit fullResponseReceived(requestId, buffers.responseContent); @@ -231,9 +231,9 @@ void MistralAIProvider::onRequestFinished( void MistralAIProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -257,22 +257,22 @@ void MistralAIProvider::prepareRequest( request["presence_penalty"] = settings.presencePenalty(); }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { applyModelParams(Settings::quickRefactorSettings()); } else { applyModelParams(Settings::chatAssistantSettings()); } if (isToolsEnabled) { - LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == 
PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->getToolsDefinitions( - LLMCore::ToolSchemaFormat::OpenAI, filter); + PluginLLMCore::ToolSchemaFormat::OpenAI, filter); if (!toolsDefinitions.isEmpty()) { request["tools"] = toolsDefinitions; LOG_MESSAGE(QString("Added %1 tools to Mistral request").arg(toolsDefinitions.size())); @@ -347,7 +347,7 @@ void MistralAIProvider::processStreamChunk(const QString &requestId, const QJson } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); emit continuationStarted(requestId); LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId)); @@ -357,7 +357,7 @@ void MistralAIProvider::processStreamChunk(const QString &requestId, const QJson QString content = delta["content"].toString(); message->handleContentDelta(content); - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += content; emit partialResponseReceived(requestId, content); } @@ -402,7 +402,7 @@ void MistralAIProvider::handleMessageComplete(const QString &requestId) OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Mistral message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -424,7 +424,7 @@ void MistralAIProvider::handleMessageComplete(const QString &requestId) } } -void MistralAIProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void MistralAIProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { 
LOG_MESSAGE(QString("Cleaning up Mistral request %1").arg(requestId)); diff --git a/providers/MistralAIProvider.hpp b/providers/MistralAIProvider.hpp index 6a68c03..ac6bf63 100644 --- a/providers/MistralAIProvider.hpp +++ b/providers/MistralAIProvider.hpp @@ -21,11 +21,11 @@ #include "OpenAIMessage.hpp" #include "tools/ToolsManager.hpp" -#include +#include namespace QodeAssist::Providers { -class MistralAIProvider : public LLMCore::Provider +class MistralAIProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -38,29 +38,29 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportImage() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) 
override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -70,11 +70,11 @@ private slots: private: void processStreamChunk(const QString &requestId, const QJsonObject &chunk); void handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/OllamaMessage.cpp b/providers/OllamaMessage.cpp index c97576b..57f373d 100644 --- a/providers/OllamaMessage.cpp +++ b/providers/OllamaMessage.cpp @@ -39,13 +39,13 @@ void OllamaMessage::handleContentDelta(const QString &content) } if (!m_contentAddedToTextBlock) { - LLMCore::TextContent *textContent = getOrCreateTextContent(); + PluginLLMCore::TextContent *textContent = getOrCreateTextContent(); textContent->setText(m_accumulatedContent); m_contentAddedToTextBlock = true; LOG_MESSAGE(QString("OllamaMessage: Added accumulated content to TextContent, length=%1") .arg(m_accumulatedContent.length())); } else { - LLMCore::TextContent *textContent = getOrCreateTextContent(); + PluginLLMCore::TextContent *textContent = getOrCreateTextContent(); textContent->appendText(content); } } @@ -65,7 +65,7 @@ void OllamaMessage::handleToolCall(const QJsonObject &toolCall) m_accumulatedContent.clear(); } - addCurrentContent(toolId, name, arguments); + addCurrentContent(toolId, name, arguments); LOG_MESSAGE( QString("OllamaMessage: Structured tool call detected - name=%1, id=%2").arg(name, toolId)); @@ -73,7 +73,7 @@ void OllamaMessage::handleToolCall(const QJsonObject &toolCall) void OllamaMessage::handleThinkingDelta(const QString &thinking) { - LLMCore::ThinkingContent *thinkingContent = 
getOrCreateThinkingContent(); + PluginLLMCore::ThinkingContent *thinkingContent = getOrCreateThinkingContent(); thinkingContent->appendThinking(thinking); } @@ -102,7 +102,7 @@ void OllamaMessage::handleDone(bool done) .arg(trimmed.length())); for (auto it = m_currentBlocks.begin(); it != m_currentBlocks.end();) { - if (qobject_cast(*it)) { + if (qobject_cast(*it)) { LOG_MESSAGE(QString( "OllamaMessage: Removing TextContent block (incomplete tool call)")); (*it)->deleteLater(); @@ -114,7 +114,7 @@ void OllamaMessage::handleDone(bool done) m_accumulatedContent.clear(); } else { - LLMCore::TextContent *textContent = getOrCreateTextContent(); + PluginLLMCore::TextContent *textContent = getOrCreateTextContent(); textContent->setText(m_accumulatedContent); m_contentAddedToTextBlock = true; LOG_MESSAGE( @@ -184,13 +184,13 @@ bool OllamaMessage::tryParseToolCall() QString toolId = QString("call_%1_%2").arg(name).arg(QDateTime::currentMSecsSinceEpoch()); for (auto block : m_currentBlocks) { - if (qobject_cast(block)) { + if (qobject_cast(block)) { LOG_MESSAGE(QString("OllamaMessage: Removing TextContent block (tool call detected)")); } } m_currentBlocks.clear(); - addCurrentContent(toolId, name, arguments); + addCurrentContent(toolId, name, arguments); LOG_MESSAGE( QString( @@ -238,14 +238,14 @@ QJsonObject OllamaMessage::toProviderFormat() const if (!block) continue; - if (auto text = qobject_cast(block)) { + if (auto text = qobject_cast(block)) { textContent += text->text(); - } else if (auto tool = qobject_cast(block)) { + } else if (auto tool = qobject_cast(block)) { QJsonObject toolCall; toolCall["type"] = "function"; toolCall["function"] = QJsonObject{{"name", tool->name()}, {"arguments", tool->input()}}; toolCalls.append(toolCall); - } else if (auto thinking = qobject_cast(block)) { + } else if (auto thinking = qobject_cast(block)) { thinkingContent += thinking->thinking(); } } @@ -287,22 +287,22 @@ QJsonArray OllamaMessage::createToolResultMessages(const QHash 
return messages; } -QList OllamaMessage::getCurrentToolUseContent() const +QList OllamaMessage::getCurrentToolUseContent() const { - QList toolBlocks; + QList toolBlocks; for (auto block : m_currentBlocks) { - if (auto toolContent = qobject_cast(block)) { + if (auto toolContent = qobject_cast(block)) { toolBlocks.append(toolContent); } } return toolBlocks; } -QList OllamaMessage::getCurrentThinkingContent() const +QList OllamaMessage::getCurrentThinkingContent() const { - QList thinkingBlocks; + QList thinkingBlocks; for (auto block : m_currentBlocks) { - if (auto thinkingContent = qobject_cast(block)) { + if (auto thinkingContent = qobject_cast(block)) { thinkingBlocks.append(thinkingContent); } } @@ -316,7 +316,7 @@ void OllamaMessage::startNewContinuation() m_currentBlocks.clear(); m_accumulatedContent.clear(); m_done = false; - m_state = LLMCore::MessageState::Building; + m_state = PluginLLMCore::MessageState::Building; m_contentAddedToTextBlock = false; m_currentThinkingContent = nullptr; } @@ -324,40 +324,40 @@ void OllamaMessage::startNewContinuation() void OllamaMessage::updateStateFromDone() { if (!getCurrentToolUseContent().empty()) { - m_state = LLMCore::MessageState::RequiresToolExecution; + m_state = PluginLLMCore::MessageState::RequiresToolExecution; LOG_MESSAGE(QString("OllamaMessage: State set to RequiresToolExecution, tools count=%1") .arg(getCurrentToolUseContent().size())); } else { - m_state = LLMCore::MessageState::Final; + m_state = PluginLLMCore::MessageState::Final; LOG_MESSAGE(QString("OllamaMessage: State set to Final")); } } -LLMCore::TextContent *OllamaMessage::getOrCreateTextContent() +PluginLLMCore::TextContent *OllamaMessage::getOrCreateTextContent() { for (auto block : m_currentBlocks) { - if (auto textContent = qobject_cast(block)) { + if (auto textContent = qobject_cast(block)) { return textContent; } } - return addCurrentContent(); + return addCurrentContent(); } -LLMCore::ThinkingContent 
*OllamaMessage::getOrCreateThinkingContent() +PluginLLMCore::ThinkingContent *OllamaMessage::getOrCreateThinkingContent() { if (m_currentThinkingContent) { return m_currentThinkingContent; } for (auto block : m_currentBlocks) { - if (auto thinkingContent = qobject_cast(block)) { + if (auto thinkingContent = qobject_cast(block)) { m_currentThinkingContent = thinkingContent; return m_currentThinkingContent; } } - m_currentThinkingContent = addCurrentContent(); + m_currentThinkingContent = addCurrentContent(); LOG_MESSAGE(QString("OllamaMessage: Created new ThinkingContent block")); return m_currentThinkingContent; } diff --git a/providers/OllamaMessage.hpp b/providers/OllamaMessage.hpp index 123cfcc..b02546d 100644 --- a/providers/OllamaMessage.hpp +++ b/providers/OllamaMessage.hpp @@ -19,7 +19,7 @@ #pragma once -#include +#include namespace QodeAssist::Providers { @@ -38,26 +38,26 @@ public: QJsonObject toProviderFormat() const; QJsonArray createToolResultMessages(const QHash &toolResults) const; - LLMCore::MessageState state() const { return m_state; } - QList getCurrentToolUseContent() const; - QList getCurrentThinkingContent() const; - QList currentBlocks() const { return m_currentBlocks; } + PluginLLMCore::MessageState state() const { return m_state; } + QList getCurrentToolUseContent() const; + QList getCurrentThinkingContent() const; + QList currentBlocks() const { return m_currentBlocks; } void startNewContinuation(); private: bool m_done = false; - LLMCore::MessageState m_state = LLMCore::MessageState::Building; - QList m_currentBlocks; + PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building; + QList m_currentBlocks; QString m_accumulatedContent; bool m_contentAddedToTextBlock = false; - LLMCore::ThinkingContent *m_currentThinkingContent = nullptr; + PluginLLMCore::ThinkingContent *m_currentThinkingContent = nullptr; void updateStateFromDone(); bool tryParseToolCall(); bool isLikelyToolCallJson(const QString &content) const; - 
LLMCore::TextContent *getOrCreateTextContent(); - LLMCore::ThinkingContent *getOrCreateThinkingContent(); + PluginLLMCore::TextContent *getOrCreateTextContent(); + PluginLLMCore::ThinkingContent *getOrCreateThinkingContent(); template T *addCurrentContent(Args &&...args) diff --git a/providers/OllamaProvider.cpp b/providers/OllamaProvider.cpp index e29590f..27bb87c 100644 --- a/providers/OllamaProvider.cpp +++ b/providers/OllamaProvider.cpp @@ -23,7 +23,7 @@ #include #include -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -34,7 +34,7 @@ namespace QodeAssist::Providers { OllamaProvider::OllamaProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -71,9 +71,9 @@ bool OllamaProvider::supportsModelListing() const void OllamaProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -109,9 +109,9 @@ void OllamaProvider::prepareRequest( request["options"] = options; }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applySettings(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { const auto &qrSettings = Settings::quickRefactorSettings(); applySettings(qrSettings); @@ -130,13 +130,13 @@ void OllamaProvider::prepareRequest( } if (isToolsEnabled) { - LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = 
LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->toolsFactory()->getToolsDefinitions( - LLMCore::ToolSchemaFormat::Ollama, filter); + PluginLLMCore::ToolSchemaFormat::Ollama, filter); if (!toolsDefinitions.isEmpty()) { request["tools"] = toolsDefinitions; LOG_MESSAGE( @@ -166,7 +166,7 @@ QFuture> OllamaProvider::getInstalledModels(const QString &url) }); } -QList OllamaProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type) +QList OllamaProvider::validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) { const auto fimReq = QJsonObject{ {"keep_alive", {}}, @@ -202,8 +202,8 @@ QList OllamaProvider::validateRequest(const QJsonObject &request, LLMCo {"frequency_penalty", {}}, {"presence_penalty", {}}}}}; - return LLMCore::ValidationUtils::validateRequestFields( - request, type == LLMCore::TemplateType::FIM ? fimReq : messageReq); + return PluginLLMCore::ValidationUtils::validateRequestFields( + request, type == PluginLLMCore::TemplateType::FIM ? 
fimReq : messageReq); } QString OllamaProvider::apiKey() const @@ -220,13 +220,13 @@ void OllamaProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) cons } } -LLMCore::ProviderID OllamaProvider::providerID() const +PluginLLMCore::ProviderID OllamaProvider::providerID() const { - return LLMCore::ProviderID::Ollama; + return PluginLLMCore::ProviderID::Ollama; } void OllamaProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { m_dataBuffers[requestId].clear(); @@ -256,17 +256,17 @@ bool OllamaProvider::supportThinking() const return true; } -void OllamaProvider::cancelRequest(const LLMCore::RequestID &requestId) +void OllamaProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("OllamaProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void OllamaProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); if (data.isEmpty()) { @@ -297,7 +297,7 @@ void OllamaProvider::onDataReceived( } void OllamaProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("OllamaProvider request %1 failed: %2").arg(requestId, *error)); @@ -308,7 +308,7 @@ void OllamaProvider::onRequestFinished( if (m_messages.contains(requestId)) { OllamaMessage *message = m_messages[requestId]; - if (message->state() == 
LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); return; } @@ -319,7 +319,7 @@ void OllamaProvider::onRequestFinished( OllamaMessage *message = m_messages[requestId]; for (auto block : message->currentBlocks()) { - if (auto textContent = qobject_cast(block)) { + if (auto textContent = qobject_cast(block)) { finalText += textContent->text(); } } @@ -408,7 +408,7 @@ void OllamaProvider::processStreamData(const QString &requestId, const QJsonObje } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); emit continuationStarted(requestId); LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId)); @@ -455,14 +455,14 @@ void OllamaProvider::processStreamData(const QString &requestId, const QJsonObje bool hasTextContent = false; for (auto block : message->currentBlocks()) { - if (qobject_cast(block)) { + if (qobject_cast(block)) { hasTextContent = true; break; } } if (hasTextContent) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += content; emit partialResponseReceived(requestId, content); } @@ -485,14 +485,14 @@ void OllamaProvider::processStreamData(const QString &requestId, const QJsonObje bool hasTextContent = false; for (auto block : message->currentBlocks()) { - if (qobject_cast(block)) { + if (qobject_cast(block)) { hasTextContent = true; break; } } if (hasTextContent) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += content; emit partialResponseReceived(requestId, content); } @@ -521,7 +521,7 @@ 
void OllamaProvider::handleMessageComplete(const QString &requestId) emitThinkingBlocks(requestId, message); - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Ollama message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -554,7 +554,7 @@ void OllamaProvider::handleMessageComplete(const QString &requestId) } } -void OllamaProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void OllamaProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("Cleaning up Ollama request %1").arg(requestId)); diff --git a/providers/OllamaProvider.hpp b/providers/OllamaProvider.hpp index 34d2665..9e97e20 100644 --- a/providers/OllamaProvider.hpp +++ b/providers/OllamaProvider.hpp @@ -19,14 +19,14 @@ #pragma once -#include +#include #include "OllamaMessage.hpp" #include "tools/ToolsManager.hpp" namespace QodeAssist::Providers { -class OllamaProvider : public LLMCore::Provider +class OllamaProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -39,30 +39,30 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() 
const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportImage() const override; bool supportThinking() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -72,12 +72,12 @@ private slots: private: void processStreamData(const QString &requestId, const QJsonObject &data); void handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); void emitThinkingBlocks(const QString &requestId, OllamaMessage *message); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; QSet m_thinkingEmitted; QSet m_thinkingStarted; Tools::ToolsManager *m_toolsManager; diff --git a/providers/OpenAICompatProvider.cpp b/providers/OpenAICompatProvider.cpp index e71dc31..5cab3ef 100644 --- a/providers/OpenAICompatProvider.cpp +++ b/providers/OpenAICompatProvider.cpp @@ -19,7 +19,7 @@ #include "OpenAICompatProvider.hpp" -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -35,7 +35,7 @@ namespace QodeAssist::Providers { 
OpenAICompatProvider::OpenAICompatProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -72,9 +72,9 @@ bool OpenAICompatProvider::supportsModelListing() const void OpenAICompatProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -98,22 +98,22 @@ void OpenAICompatProvider::prepareRequest( request["presence_penalty"] = settings.presencePenalty(); }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { applyModelParams(Settings::quickRefactorSettings()); } else { applyModelParams(Settings::chatAssistantSettings()); } if (isToolsEnabled) { - LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->getToolsDefinitions( - LLMCore::ToolSchemaFormat::OpenAI, filter); + PluginLLMCore::ToolSchemaFormat::OpenAI, filter); if (!toolsDefinitions.isEmpty()) { request["tools"] = toolsDefinitions; LOG_MESSAGE( @@ -128,7 +128,7 @@ QFuture> OpenAICompatProvider::getInstalledModels(const QString & } QList OpenAICompatProvider::validateRequest( - const QJsonObject &request, LLMCore::TemplateType type) + const QJsonObject &request, PluginLLMCore::TemplateType 
type) { const auto templateReq = QJsonObject{ {"model", {}}, @@ -143,7 +143,7 @@ QList OpenAICompatProvider::validateRequest( {"stream", {}}, {"tools", {}}}; - return LLMCore::ValidationUtils::validateRequestFields(request, templateReq); + return PluginLLMCore::ValidationUtils::validateRequestFields(request, templateReq); } QString OpenAICompatProvider::apiKey() const @@ -160,13 +160,13 @@ void OpenAICompatProvider::prepareNetworkRequest(QNetworkRequest &networkRequest } } -LLMCore::ProviderID OpenAICompatProvider::providerID() const +PluginLLMCore::ProviderID OpenAICompatProvider::providerID() const { - return LLMCore::ProviderID::OpenAICompatible; + return PluginLLMCore::ProviderID::OpenAICompatible; } void OpenAICompatProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -194,17 +194,17 @@ bool OpenAICompatProvider::supportImage() const return true; } -void OpenAICompatProvider::cancelRequest(const LLMCore::RequestID &requestId) +void OpenAICompatProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("OpenAICompatProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void OpenAICompatProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); for (const QString &line : lines) { @@ -221,7 +221,7 @@ void OpenAICompatProvider::onDataReceived( } void 
OpenAICompatProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("OpenAICompatProvider request %1 failed: %2").arg(requestId, *error)); @@ -232,7 +232,7 @@ void OpenAICompatProvider::onRequestFinished( if (m_messages.contains(requestId)) { OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); m_dataBuffers.remove(requestId); return; @@ -240,7 +240,7 @@ void OpenAICompatProvider::onRequestFinished( } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId)); emit fullResponseReceived(requestId, buffers.responseContent); @@ -317,7 +317,7 @@ void OpenAICompatProvider::processStreamChunk(const QString &requestId, const QJ } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); emit continuationStarted(requestId); LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId)); @@ -327,7 +327,7 @@ void OpenAICompatProvider::processStreamChunk(const QString &requestId, const QJ QString content = delta["content"].toString(); message->handleContentDelta(content); - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += content; emit partialResponseReceived(requestId, 
content); } @@ -372,7 +372,7 @@ void OpenAICompatProvider::handleMessageComplete(const QString &requestId) OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("OpenAICompat message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -394,7 +394,7 @@ void OpenAICompatProvider::handleMessageComplete(const QString &requestId) } } -void OpenAICompatProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void OpenAICompatProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("Cleaning up OpenAICompat request %1").arg(requestId)); diff --git a/providers/OpenAICompatProvider.hpp b/providers/OpenAICompatProvider.hpp index e6b70f3..ebca2db 100644 --- a/providers/OpenAICompatProvider.hpp +++ b/providers/OpenAICompatProvider.hpp @@ -21,11 +21,11 @@ #include "OpenAIMessage.hpp" #include "tools/ToolsManager.hpp" -#include +#include namespace QodeAssist::Providers { -class OpenAICompatProvider : public LLMCore::Provider +class OpenAICompatProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -38,29 +38,29 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const 
override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportImage() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -70,11 +70,11 @@ private slots: private: void processStreamChunk(const QString &requestId, const QJsonObject &chunk); void handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/OpenAIMessage.cpp b/providers/OpenAIMessage.cpp index 9417cd5..06335e9 100644 --- a/providers/OpenAIMessage.cpp +++ b/providers/OpenAIMessage.cpp @@ -46,7 +46,7 @@ void OpenAIMessage::handleToolCallStart(int index, const QString &id, const QStr m_currentBlocks.append(nullptr); } - auto toolContent = new LLMCore::ToolUseContent(id, name); + auto toolContent = new PluginLLMCore::ToolUseContent(id, name); toolContent->setParent(this); m_currentBlocks[index] = toolContent; m_pendingToolArguments[index] = ""; @@ -73,7 +73,7 @@ void 
OpenAIMessage::handleToolCallComplete(int index) } if (index < m_currentBlocks.size()) { - if (auto toolContent = qobject_cast(m_currentBlocks[index])) { + if (auto toolContent = qobject_cast(m_currentBlocks[index])) { toolContent->setInput(argsObject); } } @@ -100,10 +100,10 @@ QJsonObject OpenAIMessage::toProviderFormat() const if (!block) continue; - if (auto text = qobject_cast(block)) { + if (auto text = qobject_cast(block)) { textContent += text->text(); - } else if (auto tool = qobject_cast(block)) { - toolCalls.append(tool->toJson(LLMCore::ProviderFormat::OpenAI)); + } else if (auto tool = qobject_cast(block)) { + toolCalls.append(tool->toJson(PluginLLMCore::ProviderFormat::OpenAI)); } } @@ -126,20 +126,20 @@ QJsonArray OpenAIMessage::createToolResultMessages(const QHash for (auto toolContent : getCurrentToolUseContent()) { if (toolResults.contains(toolContent->id())) { - auto toolResult = std::make_unique( + auto toolResult = std::make_unique( toolContent->id(), toolResults[toolContent->id()]); - messages.append(toolResult->toJson(LLMCore::ProviderFormat::OpenAI)); + messages.append(toolResult->toJson(PluginLLMCore::ProviderFormat::OpenAI)); } } return messages; } -QList OpenAIMessage::getCurrentToolUseContent() const +QList OpenAIMessage::getCurrentToolUseContent() const { - QList toolBlocks; + QList toolBlocks; for (auto block : m_currentBlocks) { - if (auto toolContent = qobject_cast(block)) { + if (auto toolContent = qobject_cast(block)) { toolBlocks.append(toolContent); } } @@ -153,29 +153,29 @@ void OpenAIMessage::startNewContinuation() m_currentBlocks.clear(); m_pendingToolArguments.clear(); m_finishReason.clear(); - m_state = LLMCore::MessageState::Building; + m_state = PluginLLMCore::MessageState::Building; } void OpenAIMessage::updateStateFromFinishReason() { if (m_finishReason == "tool_calls" && !getCurrentToolUseContent().empty()) { - m_state = LLMCore::MessageState::RequiresToolExecution; + m_state = 
PluginLLMCore::MessageState::RequiresToolExecution; } else if (m_finishReason == "stop") { - m_state = LLMCore::MessageState::Final; + m_state = PluginLLMCore::MessageState::Final; } else { - m_state = LLMCore::MessageState::Complete; + m_state = PluginLLMCore::MessageState::Complete; } } -LLMCore::TextContent *OpenAIMessage::getOrCreateTextContent() +PluginLLMCore::TextContent *OpenAIMessage::getOrCreateTextContent() { for (auto block : m_currentBlocks) { - if (auto textContent = qobject_cast(block)) { + if (auto textContent = qobject_cast(block)) { return textContent; } } - return addCurrentContent(); + return addCurrentContent(); } } // namespace QodeAssist::Providers diff --git a/providers/OpenAIMessage.hpp b/providers/OpenAIMessage.hpp index 8a1fe3e..ab00334 100644 --- a/providers/OpenAIMessage.hpp +++ b/providers/OpenAIMessage.hpp @@ -19,7 +19,7 @@ #pragma once -#include +#include namespace QodeAssist::Providers { @@ -38,19 +38,19 @@ public: QJsonObject toProviderFormat() const; QJsonArray createToolResultMessages(const QHash &toolResults) const; - LLMCore::MessageState state() const { return m_state; } - QList getCurrentToolUseContent() const; + PluginLLMCore::MessageState state() const { return m_state; } + QList getCurrentToolUseContent() const; void startNewContinuation(); private: QString m_finishReason; - LLMCore::MessageState m_state = LLMCore::MessageState::Building; - QList m_currentBlocks; + PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building; + QList m_currentBlocks; QHash m_pendingToolArguments; void updateStateFromFinishReason(); - LLMCore::TextContent *getOrCreateTextContent(); + PluginLLMCore::TextContent *getOrCreateTextContent(); template T *addCurrentContent(Args &&...args) diff --git a/providers/OpenAIProvider.cpp b/providers/OpenAIProvider.cpp index aa5df7f..daf2afc 100644 --- a/providers/OpenAIProvider.cpp +++ b/providers/OpenAIProvider.cpp @@ -19,7 +19,7 @@ #include "OpenAIProvider.hpp" -#include 
"llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -34,7 +34,7 @@ namespace QodeAssist::Providers { OpenAIProvider::OpenAIProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -71,9 +71,9 @@ bool OpenAIProvider::supportsModelListing() const void OpenAIProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -116,22 +116,22 @@ void OpenAIProvider::prepareRequest( request["presence_penalty"] = settings.presencePenalty(); }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { applyModelParams(Settings::quickRefactorSettings()); } else { applyModelParams(Settings::chatAssistantSettings()); } if (isToolsEnabled) { - LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL; - if (type == LLMCore::RequestType::QuickRefactoring) { - filter = LLMCore::RunToolsFilter::OnlyRead; + PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL; + if (type == PluginLLMCore::RequestType::QuickRefactoring) { + filter = PluginLLMCore::RunToolsFilter::OnlyRead; } auto toolsDefinitions = m_toolsManager->getToolsDefinitions( - LLMCore::ToolSchemaFormat::OpenAI, filter); + PluginLLMCore::ToolSchemaFormat::OpenAI, filter); if (!toolsDefinitions.isEmpty()) { request["tools"] = toolsDefinitions; LOG_MESSAGE(QString("Added %1 tools to 
OpenAI request").arg(toolsDefinitions.size())); @@ -172,7 +172,7 @@ QFuture> OpenAIProvider::getInstalledModels(const QString &url) }); } -QList OpenAIProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type) +QList OpenAIProvider::validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) { const auto templateReq = QJsonObject{ {"model", {}}, @@ -188,7 +188,7 @@ QList OpenAIProvider::validateRequest(const QJsonObject &request, LLMCo {"stream", {}}, {"tools", {}}}; - return LLMCore::ValidationUtils::validateRequestFields(request, templateReq); + return PluginLLMCore::ValidationUtils::validateRequestFields(request, templateReq); } QString OpenAIProvider::apiKey() const @@ -205,13 +205,13 @@ void OpenAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) cons } } -LLMCore::ProviderID OpenAIProvider::providerID() const +PluginLLMCore::ProviderID OpenAIProvider::providerID() const { - return LLMCore::ProviderID::OpenAI; + return PluginLLMCore::ProviderID::OpenAI; } void OpenAIProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -238,17 +238,17 @@ bool OpenAIProvider::supportImage() const return true; } -void OpenAIProvider::cancelRequest(const LLMCore::RequestID &requestId) +void OpenAIProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("OpenAIProvider: Cancelling request %1").arg(requestId)); - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void OpenAIProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = 
m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; QStringList lines = buffers.rawStreamBuffer.processData(data); for (const QString &line : lines) { @@ -265,7 +265,7 @@ void OpenAIProvider::onDataReceived( } void OpenAIProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("OpenAIProvider request %1 failed: %2").arg(requestId, *error)); @@ -276,7 +276,7 @@ void OpenAIProvider::onRequestFinished( if (m_messages.contains(requestId)) { OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId)); m_dataBuffers.remove(requestId); return; @@ -284,7 +284,7 @@ void OpenAIProvider::onRequestFinished( } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId)); emit fullResponseReceived(requestId, buffers.responseContent); @@ -361,7 +361,7 @@ void OpenAIProvider::processStreamChunk(const QString &requestId, const QJsonObj } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); emit continuationStarted(requestId); LOG_MESSAGE(QString("Cleared message state for continuation request %1").arg(requestId)); @@ -371,7 +371,7 @@ void OpenAIProvider::processStreamChunk(const QString &requestId, const QJsonObj QString content = delta["content"].toString(); 
message->handleContentDelta(content); - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; buffers.responseContent += content; emit partialResponseReceived(requestId, content); } @@ -416,7 +416,7 @@ void OpenAIProvider::handleMessageComplete(const QString &requestId) OpenAIMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { LOG_MESSAGE(QString("OpenAI message requires tool execution for %1").arg(requestId)); auto toolUseContent = message->getCurrentToolUseContent(); @@ -438,7 +438,7 @@ void OpenAIProvider::handleMessageComplete(const QString &requestId) } } -void OpenAIProvider::cleanupRequest(const LLMCore::RequestID &requestId) +void OpenAIProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { LOG_MESSAGE(QString("Cleaning up OpenAI request %1").arg(requestId)); diff --git a/providers/OpenAIProvider.hpp b/providers/OpenAIProvider.hpp index 52535a7..efe8c1c 100644 --- a/providers/OpenAIProvider.hpp +++ b/providers/OpenAIProvider.hpp @@ -21,11 +21,11 @@ #include "OpenAIMessage.hpp" #include "tools/ToolsManager.hpp" -#include +#include namespace QodeAssist::Providers { -class OpenAIProvider : public LLMCore::Provider +class OpenAIProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -38,29 +38,29 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const 
QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportImage() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -70,11 +70,11 @@ private slots: private: void processStreamChunk(const QString &requestId, const QJsonObject &chunk); void handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/OpenAIResponsesMessage.cpp b/providers/OpenAIResponsesMessage.cpp index 8806262..d356e70 100644 --- a/providers/OpenAIResponsesMessage.cpp +++ b/providers/OpenAIResponsesMessage.cpp @@ -52,7 +52,7 @@ void OpenAIResponsesMessage::handleItemDelta(const QJsonObject &item) void OpenAIResponsesMessage::handleToolCallStart(const QString &callId, const QString &name) { - auto toolContent = new 
LLMCore::ToolUseContent(callId, name); + auto toolContent = new PluginLLMCore::ToolUseContent(callId, name); toolContent->setParent(this); m_items.append(toolContent); m_toolCalls[callId] = toolContent; @@ -86,7 +86,7 @@ void OpenAIResponsesMessage::handleToolCallComplete(const QString &callId) void OpenAIResponsesMessage::handleReasoningStart(const QString &itemId) { - auto thinkingContent = new LLMCore::ThinkingContent(); + auto thinkingContent = new PluginLLMCore::ThinkingContent(); thinkingContent->setParent(this); m_items.append(thinkingContent); m_thinkingBlocks[itemId] = thinkingContent; @@ -115,13 +115,13 @@ QList OpenAIResponsesMessage::toItemsFormat() const QList items; QString textContent; - QList toolCalls; + QList toolCalls; for (const auto *block : m_items) { - if (const auto *text = qobject_cast(block)) { + if (const auto *text = qobject_cast(block)) { textContent += text->text(); - } else if (auto *tool = qobject_cast( - const_cast(block))) { + } else if (auto *tool = qobject_cast( + const_cast(block))) { toolCalls.append(tool); } } @@ -146,22 +146,22 @@ QList OpenAIResponsesMessage::toItemsFormat() const return items; } -QList OpenAIResponsesMessage::getCurrentToolUseContent() const +QList OpenAIResponsesMessage::getCurrentToolUseContent() const { - QList toolBlocks; + QList toolBlocks; for (auto *block : m_items) { - if (auto *toolContent = qobject_cast(block)) { + if (auto *toolContent = qobject_cast(block)) { toolBlocks.append(toolContent); } } return toolBlocks; } -QList OpenAIResponsesMessage::getCurrentThinkingContent() const +QList OpenAIResponsesMessage::getCurrentThinkingContent() const { - QList thinkingBlocks; + QList thinkingBlocks; for (auto *block : m_items) { - if (auto *thinkingContent = qobject_cast(block)) { + if (auto *thinkingContent = qobject_cast(block)) { thinkingBlocks.append(thinkingContent); } } @@ -189,7 +189,7 @@ QString OpenAIResponsesMessage::accumulatedText() const { QString text; for (const auto *block : m_items) { - 
if (const auto *textContent = qobject_cast(block)) { + if (const auto *textContent = qobject_cast(block)) { text += textContent->text(); } } @@ -202,28 +202,28 @@ void OpenAIResponsesMessage::updateStateFromStatus() if (m_status == "completed") { if (!getCurrentToolUseContent().isEmpty()) { - m_state = LLMCore::MessageState::RequiresToolExecution; + m_state = PluginLLMCore::MessageState::RequiresToolExecution; } else { - m_state = LLMCore::MessageState::Complete; + m_state = PluginLLMCore::MessageState::Complete; } } else if (m_status == "in_progress") { - m_state = LLMCore::MessageState::Building; + m_state = PluginLLMCore::MessageState::Building; } else if (m_status == "failed" || m_status == "cancelled" || m_status == "incomplete") { - m_state = LLMCore::MessageState::Final; + m_state = PluginLLMCore::MessageState::Final; } else { - m_state = LLMCore::MessageState::Building; + m_state = PluginLLMCore::MessageState::Building; } } -LLMCore::TextContent *OpenAIResponsesMessage::getOrCreateTextItem() +PluginLLMCore::TextContent *OpenAIResponsesMessage::getOrCreateTextItem() { for (auto *block : m_items) { - if (auto *textContent = qobject_cast(block)) { + if (auto *textContent = qobject_cast(block)) { return textContent; } } - auto *textContent = new LLMCore::TextContent(); + auto *textContent = new PluginLLMCore::TextContent(); textContent->setParent(this); m_items.append(textContent); return textContent; @@ -239,7 +239,7 @@ void OpenAIResponsesMessage::startNewContinuation() m_pendingToolArguments.clear(); m_status.clear(); - m_state = LLMCore::MessageState::Building; + m_state = PluginLLMCore::MessageState::Building; } } // namespace QodeAssist::Providers diff --git a/providers/OpenAIResponsesMessage.hpp b/providers/OpenAIResponsesMessage.hpp index 0b0a497..f4e0569 100644 --- a/providers/OpenAIResponsesMessage.hpp +++ b/providers/OpenAIResponsesMessage.hpp @@ -19,7 +19,7 @@ #pragma once -#include +#include namespace QodeAssist::Providers { @@ -41,10 +41,10 @@ 
public: QList toItemsFormat() const; QJsonArray createToolResultItems(const QHash &toolResults) const; - LLMCore::MessageState state() const noexcept { return m_state; } + PluginLLMCore::MessageState state() const noexcept { return m_state; } QString accumulatedText() const; - QList getCurrentToolUseContent() const; - QList getCurrentThinkingContent() const; + QList getCurrentToolUseContent() const; + QList getCurrentThinkingContent() const; bool hasToolCalls() const noexcept { return !m_toolCalls.isEmpty(); } bool hasThinkingContent() const noexcept { return !m_thinkingBlocks.isEmpty(); } @@ -53,14 +53,14 @@ public: private: QString m_status; - LLMCore::MessageState m_state = LLMCore::MessageState::Building; - QList m_items; + PluginLLMCore::MessageState m_state = PluginLLMCore::MessageState::Building; + QList m_items; QHash m_pendingToolArguments; - QHash m_toolCalls; - QHash m_thinkingBlocks; + QHash m_toolCalls; + QHash m_thinkingBlocks; void updateStateFromStatus(); - LLMCore::TextContent *getOrCreateTextItem(); + PluginLLMCore::TextContent *getOrCreateTextItem(); }; } // namespace QodeAssist::Providers diff --git a/providers/OpenAIResponsesProvider.cpp b/providers/OpenAIResponsesProvider.cpp index fa7c444..46be259 100644 --- a/providers/OpenAIResponsesProvider.cpp +++ b/providers/OpenAIResponsesProvider.cpp @@ -20,7 +20,7 @@ #include "OpenAIResponsesProvider.hpp" #include "OpenAIResponses/ResponseObject.hpp" -#include "llmcore/ValidationUtils.hpp" +#include "pluginllmcore/ValidationUtils.hpp" #include "logger/Logger.hpp" #include "settings/ChatAssistantSettings.hpp" #include "settings/CodeCompletionSettings.hpp" @@ -35,7 +35,7 @@ namespace QodeAssist::Providers { OpenAIResponsesProvider::OpenAIResponsesProvider(QObject *parent) - : LLMCore::Provider(parent) + : PluginLLMCore::Provider(parent) , m_toolsManager(new Tools::ToolsManager(this)) { connect( @@ -72,9 +72,9 @@ bool OpenAIResponsesProvider::supportsModelListing() const void 
OpenAIResponsesProvider::prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) { @@ -109,9 +109,9 @@ void OpenAIResponsesProvider::prepareRequest( request["include"] = include; }; - if (type == LLMCore::RequestType::CodeCompletion) { + if (type == PluginLLMCore::RequestType::CodeCompletion) { applyModelParams(Settings::codeCompletionSettings()); - } else if (type == LLMCore::RequestType::QuickRefactoring) { + } else if (type == PluginLLMCore::RequestType::QuickRefactoring) { const auto &qrSettings = Settings::quickRefactorSettings(); applyModelParams(qrSettings); @@ -128,12 +128,12 @@ void OpenAIResponsesProvider::prepareRequest( } if (isToolsEnabled) { - const LLMCore::RunToolsFilter filter = (type == LLMCore::RequestType::QuickRefactoring) - ? LLMCore::RunToolsFilter::OnlyRead - : LLMCore::RunToolsFilter::ALL; + const PluginLLMCore::RunToolsFilter filter = (type == PluginLLMCore::RequestType::QuickRefactoring) + ? 
PluginLLMCore::RunToolsFilter::OnlyRead + : PluginLLMCore::RunToolsFilter::ALL; const auto toolsDefinitions - = m_toolsManager->getToolsDefinitions(LLMCore::ToolSchemaFormat::OpenAI, filter); + = m_toolsManager->getToolsDefinitions(PluginLLMCore::ToolSchemaFormat::OpenAI, filter); if (!toolsDefinitions.isEmpty()) { QJsonArray responsesTools; @@ -197,7 +197,7 @@ QFuture> OpenAIResponsesProvider::getInstalledModels(const QStrin } QList OpenAIResponsesProvider::validateRequest( - const QJsonObject &request, LLMCore::TemplateType type) + const QJsonObject &request, PluginLLMCore::TemplateType type) { Q_UNUSED(type); @@ -250,13 +250,13 @@ void OpenAIResponsesProvider::prepareNetworkRequest(QNetworkRequest &networkRequ } } -LLMCore::ProviderID OpenAIResponsesProvider::providerID() const +PluginLLMCore::ProviderID OpenAIResponsesProvider::providerID() const { - return LLMCore::ProviderID::OpenAIResponses; + return PluginLLMCore::ProviderID::OpenAIResponses; } void OpenAIResponsesProvider::sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) { if (!m_messages.contains(requestId)) { m_dataBuffers[requestId].clear(); @@ -286,16 +286,16 @@ bool OpenAIResponsesProvider::supportThinking() const return true; } -void OpenAIResponsesProvider::cancelRequest(const LLMCore::RequestID &requestId) +void OpenAIResponsesProvider::cancelRequest(const PluginLLMCore::RequestID &requestId) { - LLMCore::Provider::cancelRequest(requestId); + PluginLLMCore::Provider::cancelRequest(requestId); cleanupRequest(requestId); } void OpenAIResponsesProvider::onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) { - LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; const QStringList 
lines = buffers.rawStreamBuffer.processData(data); QString currentEventType; @@ -329,7 +329,7 @@ void OpenAIResponsesProvider::onDataReceived( } void OpenAIResponsesProvider::onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, std::optional error) + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) { if (error) { LOG_MESSAGE(QString("OpenAIResponses request %1 failed: %2").arg(requestId, *error)); @@ -340,13 +340,13 @@ void OpenAIResponsesProvider::onRequestFinished( if (m_messages.contains(requestId)) { OpenAIResponsesMessage *message = m_messages[requestId]; - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { return; } } if (m_dataBuffers.contains(requestId)) { - const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; + const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId]; if (!buffers.responseContent.isEmpty()) { emit fullResponseReceived(requestId, buffers.responseContent); } else { @@ -376,7 +376,7 @@ void OpenAIResponsesProvider::processStreamEvent( } } else if ( m_dataBuffers.contains(requestId) - && message->state() == LLMCore::MessageState::RequiresToolExecution) { + && message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { message->startNewContinuation(); emit continuationStarted(requestId); } @@ -571,7 +571,7 @@ void OpenAIResponsesProvider::handleMessageComplete(const QString &requestId) emitPendingThinkingBlocks(requestId); - if (message->state() == LLMCore::MessageState::RequiresToolExecution) { + if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) { const auto toolUseContent = message->getCurrentToolUseContent(); if (toolUseContent.isEmpty()) { @@ -633,7 +633,7 @@ void OpenAIResponsesProvider::onToolExecutionComplete( sendRequest(requestId, m_requestUrls[requestId], continuationRequest); } -void OpenAIResponsesProvider::cleanupRequest(const 
LLMCore::RequestID &requestId) +void OpenAIResponsesProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId) { if (m_messages.contains(requestId)) { OpenAIResponsesMessage *message = m_messages.take(requestId); diff --git a/providers/OpenAIResponsesProvider.hpp b/providers/OpenAIResponsesProvider.hpp index 420985f..18313d3 100644 --- a/providers/OpenAIResponsesProvider.hpp +++ b/providers/OpenAIResponsesProvider.hpp @@ -21,11 +21,11 @@ #include "OpenAIResponsesMessage.hpp" #include "tools/ToolsManager.hpp" -#include +#include namespace QodeAssist::Providers { -class OpenAIResponsesProvider : public LLMCore::Provider +class OpenAIResponsesProvider : public PluginLLMCore::Provider { Q_OBJECT public: @@ -38,30 +38,30 @@ public: bool supportsModelListing() const override; void prepareRequest( QJsonObject &request, - LLMCore::PromptTemplate *prompt, - LLMCore::ContextData context, - LLMCore::RequestType type, + PluginLLMCore::PromptTemplate *prompt, + PluginLLMCore::ContextData context, + PluginLLMCore::RequestType type, bool isToolsEnabled, bool isThinkingEnabled) override; QFuture> getInstalledModels(const QString &url) override; - QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override; + QList validateRequest(const QJsonObject &request, PluginLLMCore::TemplateType type) override; QString apiKey() const override; void prepareNetworkRequest(QNetworkRequest &networkRequest) const override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; void sendRequest( - const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; + const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override; bool supportsTools() const override; bool supportImage() const override; bool supportThinking() const override; - void cancelRequest(const LLMCore::RequestID &requestId) override; + void cancelRequest(const 
PluginLLMCore::RequestID &requestId) override; public slots: void onDataReceived( - const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override; + const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data) override; void onRequestFinished( - const QodeAssist::LLMCore::RequestID &requestId, + const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional error) override; private slots: @@ -72,13 +72,13 @@ private: void processStreamEvent(const QString &requestId, const QString &eventType, const QJsonObject &data); void emitPendingThinkingBlocks(const QString &requestId); void handleMessageComplete(const QString &requestId); - void cleanupRequest(const LLMCore::RequestID &requestId); + void cleanupRequest(const PluginLLMCore::RequestID &requestId); - QHash m_messages; - QHash m_requestUrls; - QHash m_originalRequests; - QHash> m_itemIdToCallId; - QHash m_emittedThinkingBlocksCount; + QHash m_messages; + QHash m_requestUrls; + QHash m_originalRequests; + QHash> m_itemIdToCallId; + QHash m_emittedThinkingBlocksCount; Tools::ToolsManager *m_toolsManager; }; diff --git a/providers/OpenRouterAIProvider.cpp b/providers/OpenRouterAIProvider.cpp index e64afec..2aacfdc 100644 --- a/providers/OpenRouterAIProvider.cpp +++ b/providers/OpenRouterAIProvider.cpp @@ -43,9 +43,9 @@ QString OpenRouterProvider::apiKey() const return Settings::providerSettings().openRouterApiKey(); } -LLMCore::ProviderID OpenRouterProvider::providerID() const +PluginLLMCore::ProviderID OpenRouterProvider::providerID() const { - return LLMCore::ProviderID::OpenRouter; + return PluginLLMCore::ProviderID::OpenRouter; } } // namespace QodeAssist::Providers diff --git a/providers/OpenRouterAIProvider.hpp b/providers/OpenRouterAIProvider.hpp index 012aca8..0dfb3d0 100644 --- a/providers/OpenRouterAIProvider.hpp +++ b/providers/OpenRouterAIProvider.hpp @@ -29,7 +29,7 @@ public: QString name() const override; QString url() const override; QString apiKey() const 
override; - LLMCore::ProviderID providerID() const override; + PluginLLMCore::ProviderID providerID() const override; }; } // namespace QodeAssist::Providers diff --git a/providers/Providers.hpp b/providers/Providers.hpp index 7876888..f77fe88 100644 --- a/providers/Providers.hpp +++ b/providers/Providers.hpp @@ -19,7 +19,7 @@ #pragma once -#include "llmcore/ProvidersManager.hpp" +#include "pluginllmcore/ProvidersManager.hpp" #include "providers/ClaudeProvider.hpp" #include "providers/CodestralProvider.hpp" #include "providers/GoogleAIProvider.hpp" @@ -36,7 +36,7 @@ namespace QodeAssist::Providers { inline void registerProviders() { - auto &providerManager = LLMCore::ProvidersManager::instance(); + auto &providerManager = PluginLLMCore::ProvidersManager::instance(); providerManager.registerProvider(); providerManager.registerProvider(); providerManager.registerProvider(); diff --git a/qodeassist.cpp b/qodeassist.cpp index a205aab..75d3f07 100644 --- a/qodeassist.cpp +++ b/qodeassist.cpp @@ -50,8 +50,8 @@ #include "chat/ChatOutputPane.h" #include "chat/NavigationPanel.hpp" #include "context/DocumentReaderQtCreator.hpp" -#include "llmcore/PromptProviderFim.hpp" -#include "llmcore/ProvidersManager.hpp" +#include "pluginllmcore/PromptProviderFim.hpp" +#include "pluginllmcore/ProvidersManager.hpp" #include "logger/RequestPerformanceLogger.hpp" #include "providers/Providers.hpp" #include "settings/ChatAssistantSettings.hpp" @@ -84,7 +84,7 @@ class QodeAssistPlugin final : public ExtensionSystem::IPlugin public: QodeAssistPlugin() : m_updater(new PluginUpdater(this)) - , m_promptProvider(LLMCore::PromptTemplateManager::instance()) + , m_promptProvider(PluginLLMCore::PromptTemplateManager::instance()) {} ~QodeAssistPlugin() final @@ -263,7 +263,7 @@ public: m_qodeAssistClient = new QodeAssistClient(new LLMClientInterface( Settings::generalSettings(), Settings::codeCompletionSettings(), - LLMCore::ProvidersManager::instance(), + PluginLLMCore::ProvidersManager::instance(), 
&m_promptProvider, m_documentReader, m_performanceLogger)); @@ -305,7 +305,7 @@ private: } QPointer m_qodeAssistClient; - LLMCore::PromptProviderFim m_promptProvider; + PluginLLMCore::PromptProviderFim m_promptProvider; Context::DocumentReaderQtCreator m_documentReader; RequestPerformanceLogger m_performanceLogger; QPointer m_chatOutputPane; diff --git a/settings/GeneralSettings.hpp b/settings/GeneralSettings.hpp index 14b036b..d7bdc08 100644 --- a/settings/GeneralSettings.hpp +++ b/settings/GeneralSettings.hpp @@ -29,7 +29,7 @@ namespace Utils { class DetailsWidget; } -namespace QodeAssist::LLMCore { +namespace QodeAssist::PluginLLMCore { class Provider; } namespace QodeAssist::Settings { diff --git a/templates/Alpaca.hpp b/templates/Alpaca.hpp index fb2c54f..cfccc4d 100644 --- a/templates/Alpaca.hpp +++ b/templates/Alpaca.hpp @@ -19,21 +19,21 @@ #pragma once -#include "llmcore/PromptTemplate.hpp" +#include "pluginllmcore/PromptTemplate.hpp" #include namespace QodeAssist::Templates { -class Alpaca : public LLMCore::PromptTemplate +class Alpaca : public PluginLLMCore::PromptTemplate { public: QString name() const override { return "Alpaca"; } - LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; } + PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; } QStringList stopWords() const override { return QStringList() << "### Instruction:" << "### Response:"; } - void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override + void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override { QJsonArray messages; @@ -72,14 +72,14 @@ public: "}\n\n" "Combines all messages into a single formatted prompt."; } - bool isSupportProvider(LLMCore::ProviderID id) const override + bool isSupportProvider(PluginLLMCore::ProviderID id) const override { switch (id) { - case LLMCore::ProviderID::Ollama: - case LLMCore::ProviderID::LMStudio: - case 
LLMCore::ProviderID::OpenRouter: - case LLMCore::ProviderID::OpenAICompatible: - case LLMCore::ProviderID::LlamaCpp: + case PluginLLMCore::ProviderID::Ollama: + case PluginLLMCore::ProviderID::LMStudio: + case PluginLLMCore::ProviderID::OpenRouter: + case PluginLLMCore::ProviderID::OpenAICompatible: + case PluginLLMCore::ProviderID::LlamaCpp: return true; default: return false; diff --git a/templates/ChatML.hpp b/templates/ChatML.hpp index 7b1bf16..62be0e8 100644 --- a/templates/ChatML.hpp +++ b/templates/ChatML.hpp @@ -21,20 +21,20 @@ #include -#include "llmcore/PromptTemplate.hpp" +#include "pluginllmcore/PromptTemplate.hpp" namespace QodeAssist::Templates { -class ChatML : public LLMCore::PromptTemplate +class ChatML : public PluginLLMCore::PromptTemplate { public: QString name() const override { return "ChatML"; } - LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; } + PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; } QStringList stopWords() const override { return QStringList() << "<|im_start|>" << "<|im_end|>"; } - void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override + void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override { QJsonArray messages; @@ -73,14 +73,14 @@ public: "}\n\n" "Compatible with multiple providers supporting the ChatML token format."; } - bool isSupportProvider(LLMCore::ProviderID id) const override + bool isSupportProvider(PluginLLMCore::ProviderID id) const override { switch (id) { - case LLMCore::ProviderID::Ollama: - case LLMCore::ProviderID::LMStudio: - case LLMCore::ProviderID::OpenRouter: - case LLMCore::ProviderID::OpenAICompatible: - case LLMCore::ProviderID::LlamaCpp: + case PluginLLMCore::ProviderID::Ollama: + case PluginLLMCore::ProviderID::LMStudio: + case PluginLLMCore::ProviderID::OpenRouter: + case PluginLLMCore::ProviderID::OpenAICompatible: + case 
PluginLLMCore::ProviderID::LlamaCpp: return true; default: return false; diff --git a/templates/Claude.hpp b/templates/Claude.hpp index 53475ee..b54301e 100644 --- a/templates/Claude.hpp +++ b/templates/Claude.hpp @@ -21,17 +21,17 @@ #include -#include "llmcore/PromptTemplate.hpp" +#include "pluginllmcore/PromptTemplate.hpp" namespace QodeAssist::Templates { -class Claude : public LLMCore::PromptTemplate +class Claude : public PluginLLMCore::PromptTemplate { public: - LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; } + PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; } QString name() const override { return "Claude"; } QStringList stopWords() const override { return QStringList(); } - void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override + void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override { QJsonArray messages; @@ -111,10 +111,10 @@ public: "}\n\n" "Formats content according to Claude API specifications."; } - bool isSupportProvider(LLMCore::ProviderID id) const override + bool isSupportProvider(PluginLLMCore::ProviderID id) const override { switch (id) { - case QodeAssist::LLMCore::ProviderID::Claude: + case QodeAssist::PluginLLMCore::ProviderID::Claude: return true; default: return false; diff --git a/templates/CodeLlamaFim.hpp b/templates/CodeLlamaFim.hpp index 90e4299..9b8da89 100644 --- a/templates/CodeLlamaFim.hpp +++ b/templates/CodeLlamaFim.hpp @@ -19,20 +19,20 @@ #pragma once -#include "llmcore/PromptTemplate.hpp" +#include "pluginllmcore/PromptTemplate.hpp" namespace QodeAssist::Templates { -class CodeLlamaFim : public LLMCore::PromptTemplate +class CodeLlamaFim : public PluginLLMCore::PromptTemplate { public: - LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIM; } + PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIM; } 
QString name() const override { return "CodeLlama FIM"; } QStringList stopWords() const override { return QStringList() << "" << "
" << "";
     }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         request["prompt"] = QString("
 %1 %2 ")
                                 .arg(context.prefix.value_or(""), context.suffix.value_or(""));
@@ -47,10 +47,10 @@ public:
                "}\n\n"
                "Optimized for code completion with CodeLlama models.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::Ollama:
+        case QodeAssist::PluginLLMCore::ProviderID::Ollama:
             return true;
         default:
             return false;
diff --git a/templates/CodeLlamaQMLFim.hpp b/templates/CodeLlamaQMLFim.hpp
index e5a0f51..6af51bf 100644
--- a/templates/CodeLlamaQMLFim.hpp
+++ b/templates/CodeLlamaQMLFim.hpp
@@ -19,21 +19,21 @@
 
 #pragma once
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class CodeLlamaQMLFim : public LLMCore::PromptTemplate
+class CodeLlamaQMLFim : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIM; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIM; }
     QString name() const override { return "CodeLlama QML FIM"; }
     QStringList stopWords() const override
     {
         return QStringList() << "" << "
" << "
" << "
" << "< EOT >" << "\\end" << "" << "" << "##"; } - void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override + void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override { request["prompt"] = QString("%1
%2")
                                 .arg(context.suffix.value_or(""), context.prefix.value_or(""));
@@ -48,10 +48,10 @@ public:
                "}\n\n"
                "Specifically optimized for QML/JavaScript code completion.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::Ollama:
+        case QodeAssist::PluginLLMCore::ProviderID::Ollama:
             return true;
         default:
             return false;
diff --git a/templates/GoogleAI.hpp b/templates/GoogleAI.hpp
index 4a61ed0..bbf0e37 100644
--- a/templates/GoogleAI.hpp
+++ b/templates/GoogleAI.hpp
@@ -22,18 +22,18 @@
 #include 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class GoogleAI : public LLMCore::PromptTemplate
+class GoogleAI : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; }
     QString name() const override { return "Google AI"; }
     QStringList stopWords() const override { return QStringList(); }
 
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray contents;
 
@@ -128,9 +128,9 @@ public:
                "Supports proper role mapping (model/user roles), images, and thinking blocks.";
     }
 
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
-        return id == QodeAssist::LLMCore::ProviderID::GoogleAI;
+        return id == QodeAssist::PluginLLMCore::ProviderID::GoogleAI;
     }
 };
 
diff --git a/templates/Llama2.hpp b/templates/Llama2.hpp
index 76261d9..038d1a4 100644
--- a/templates/Llama2.hpp
+++ b/templates/Llama2.hpp
@@ -19,18 +19,18 @@
 
 #pragma once
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 #include 
 
 namespace QodeAssist::Templates {
 
-class Llama2 : public LLMCore::PromptTemplate
+class Llama2 : public PluginLLMCore::PromptTemplate
 {
 public:
     QString name() const override { return "Llama 2"; }
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; }
     QStringList stopWords() const override { return QStringList() << "[INST]"; }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray messages;
 
@@ -70,14 +70,14 @@ public:
                "}\n\n"
                "Compatible with Ollama, LM Studio, and other services for Llama 2.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case LLMCore::ProviderID::Ollama:
-        case LLMCore::ProviderID::LMStudio:
-        case LLMCore::ProviderID::OpenRouter:
-        case LLMCore::ProviderID::OpenAICompatible:
-        case LLMCore::ProviderID::LlamaCpp:
+        case PluginLLMCore::ProviderID::Ollama:
+        case PluginLLMCore::ProviderID::LMStudio:
+        case PluginLLMCore::ProviderID::OpenRouter:
+        case PluginLLMCore::ProviderID::OpenAICompatible:
+        case PluginLLMCore::ProviderID::LlamaCpp:
             return true;
         default:
             return false;
diff --git a/templates/Llama3.hpp b/templates/Llama3.hpp
index fc151d0..5dd6cf0 100644
--- a/templates/Llama3.hpp
+++ b/templates/Llama3.hpp
@@ -21,20 +21,20 @@
 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class Llama3 : public LLMCore::PromptTemplate
+class Llama3 : public PluginLLMCore::PromptTemplate
 {
 public:
     QString name() const override { return "Llama 3"; }
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; }
     QStringList stopWords() const override
     {
         return QStringList() << "<|start_header_id|>" << "<|end_header_id|>" << "<|eot_id|>";
     }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray messages;
 
@@ -77,14 +77,14 @@ public:
                "}\n\n"
                "Compatible with Ollama, LM Studio, and OpenAI-compatible services for Llama 3.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case LLMCore::ProviderID::Ollama:
-        case LLMCore::ProviderID::LMStudio:
-        case LLMCore::ProviderID::OpenRouter:
-        case LLMCore::ProviderID::OpenAICompatible:
-        case LLMCore::ProviderID::LlamaCpp:
+        case PluginLLMCore::ProviderID::Ollama:
+        case PluginLLMCore::ProviderID::LMStudio:
+        case PluginLLMCore::ProviderID::OpenRouter:
+        case PluginLLMCore::ProviderID::OpenAICompatible:
+        case PluginLLMCore::ProviderID::LlamaCpp:
             return true;
         default:
             return false;
diff --git a/templates/LlamaCppFim.hpp b/templates/LlamaCppFim.hpp
index bf64234..d9fa286 100644
--- a/templates/LlamaCppFim.hpp
+++ b/templates/LlamaCppFim.hpp
@@ -21,18 +21,18 @@
 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class LlamaCppFim : public LLMCore::PromptTemplate
+class LlamaCppFim : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIM; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIM; }
     QString name() const override { return "llama.cpp FIM"; }
     QStringList stopWords() const override { return {}; }
 
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         request["input_prefix"] = context.prefix.value_or("");
         request["input_suffix"] = context.suffix.value_or("");
@@ -60,9 +60,9 @@ public:
                "Recommended for models with FIM capability.";
     }
 
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
-        return id == QodeAssist::LLMCore::ProviderID::LlamaCpp;
+        return id == QodeAssist::PluginLLMCore::ProviderID::LlamaCpp;
     }
 };
 
diff --git a/templates/MistralAI.hpp b/templates/MistralAI.hpp
index b9315ce..fdf8f91 100644
--- a/templates/MistralAI.hpp
+++ b/templates/MistralAI.hpp
@@ -21,17 +21,17 @@
 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class MistralAIFim : public LLMCore::PromptTemplate
+class MistralAIFim : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIM; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIM; }
     QString name() const override { return "Mistral AI FIM"; }
     QStringList stopWords() const override { return QStringList(); }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         request["prompt"] = context.prefix.value_or("");
         request["suffix"] = context.suffix.value_or("");
@@ -45,10 +45,10 @@ public:
                "}\n\n"
                "Optimized for code completion with MistralAI models.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::MistralAI:
+        case QodeAssist::PluginLLMCore::ProviderID::MistralAI:
             return true;
         default:
             return false;
@@ -56,14 +56,14 @@ public:
     }
 };
 
-class MistralAIChat : public LLMCore::PromptTemplate
+class MistralAIChat : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; }
     QString name() const override { return "Mistral AI Chat"; }
     QStringList stopWords() const override { return QStringList(); }
 
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray messages;
 
@@ -116,10 +116,10 @@ public:
                "}\n\n"
                "Supports system messages, conversation history, and images.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::MistralAI:
+        case QodeAssist::PluginLLMCore::ProviderID::MistralAI:
             return true;
         default:
             return false;
diff --git a/templates/Ollama.hpp b/templates/Ollama.hpp
index 2890f2d..de0f799 100644
--- a/templates/Ollama.hpp
+++ b/templates/Ollama.hpp
@@ -21,17 +21,17 @@
 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class OllamaFim : public LLMCore::PromptTemplate
+class OllamaFim : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIM; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIM; }
     QString name() const override { return "Ollama FIM"; }
     QStringList stopWords() const override { return QStringList() << ""; }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         request["prompt"] = context.prefix.value_or("");
         request["suffix"] = context.suffix.value_or("");
@@ -47,10 +47,10 @@ public:
                "}\n\n"
                "Recommended for Ollama models with FIM capability.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::Ollama:
+        case QodeAssist::PluginLLMCore::ProviderID::Ollama:
             return true;
         default:
             return false;
@@ -58,14 +58,14 @@ public:
     }
 };
 
-class OllamaChat : public LLMCore::PromptTemplate
+class OllamaChat : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; }
     QString name() const override { return "Ollama Chat"; }
     QStringList stopWords() const override { return QStringList(); }
 
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray messages;
 
@@ -107,10 +107,10 @@ public:
                "Recommended for Ollama models with chat capability.\n"
                "Supports images for multimodal models (e.g., llava).";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::Ollama:
+        case QodeAssist::PluginLLMCore::ProviderID::Ollama:
             return true;
         default:
             return false;
diff --git a/templates/OpenAI.hpp b/templates/OpenAI.hpp
index 0b9f7aa..72a80c0 100644
--- a/templates/OpenAI.hpp
+++ b/templates/OpenAI.hpp
@@ -21,17 +21,17 @@
 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class OpenAI : public LLMCore::PromptTemplate
+class OpenAI : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; }
     QString name() const override { return "OpenAI"; }
     QStringList stopWords() const override { return QStringList(); }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray messages;
 
@@ -84,10 +84,10 @@ public:
                "}\n\n"
                "Standard Chat API format for OpenAI.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::OpenAI:
+        case QodeAssist::PluginLLMCore::ProviderID::OpenAI:
             return true;
         default:
             return false;
diff --git a/templates/OpenAICompatible.hpp b/templates/OpenAICompatible.hpp
index 6484aa1..9f9cc24 100644
--- a/templates/OpenAICompatible.hpp
+++ b/templates/OpenAICompatible.hpp
@@ -21,17 +21,17 @@
 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class OpenAICompatible : public LLMCore::PromptTemplate
+class OpenAICompatible : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::Chat; }
     QString name() const override { return "OpenAI Compatible"; }
     QStringList stopWords() const override { return QStringList(); }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray messages;
 
@@ -85,13 +85,13 @@ public:
                "Works with any service implementing the OpenAI Chat API specification.\n"
                "Supports images.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case LLMCore::ProviderID::OpenAICompatible:
-        case LLMCore::ProviderID::OpenRouter:
-        case LLMCore::ProviderID::LMStudio:
-        case LLMCore::ProviderID::LlamaCpp:
+        case PluginLLMCore::ProviderID::OpenAICompatible:
+        case PluginLLMCore::ProviderID::OpenRouter:
+        case PluginLLMCore::ProviderID::LMStudio:
+        case PluginLLMCore::ProviderID::LlamaCpp:
             return true;
         default:
             return false;
diff --git a/templates/OpenAIResponses.hpp b/templates/OpenAIResponses.hpp
index 662ebf4..fab26da 100644
--- a/templates/OpenAIResponses.hpp
+++ b/templates/OpenAIResponses.hpp
@@ -19,24 +19,24 @@
 
 #pragma once
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 #include "providers/OpenAIResponsesRequestBuilder.hpp"
 
 namespace QodeAssist::Templates {
 
-class OpenAIResponses : public LLMCore::PromptTemplate
+class OpenAIResponses : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const noexcept override 
+    PluginLLMCore::TemplateType type() const noexcept override 
     { 
-        return LLMCore::TemplateType::Chat; 
+        return PluginLLMCore::TemplateType::Chat; 
     }
     
     QString name() const override { return "OpenAI Responses"; }
     
     QStringList stopWords() const override { return {}; }
     
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         using namespace QodeAssist::OpenAIResponses;
         RequestBuilder builder;
@@ -108,9 +108,9 @@ public:
                "}\n\n"
                "Uses type-safe RequestBuilder for OpenAI Responses API.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const noexcept override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const noexcept override
     {
-        return id == QodeAssist::LLMCore::ProviderID::OpenAIResponses;
+        return id == QodeAssist::PluginLLMCore::ProviderID::OpenAIResponses;
     }
 
 private:
diff --git a/templates/Qwen25CoderFIM.hpp b/templates/Qwen25CoderFIM.hpp
index cad83da..bda401e 100644
--- a/templates/Qwen25CoderFIM.hpp
+++ b/templates/Qwen25CoderFIM.hpp
@@ -19,18 +19,18 @@
 
 #pragma once
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 #include 
 
 namespace QodeAssist::Templates {
 
-class Qwen25CoderFIM : public LLMCore::PromptTemplate
+class Qwen25CoderFIM : public PluginLLMCore::PromptTemplate
 {
 public:
     QString name() const override { return "Qwen2.5 Coder FIM"; }
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIM; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIM; }
     QStringList stopWords() const override { return QStringList() << "<|endoftext|>" << "<|EOT|>"; }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         request["prompt"] = QString("<|fim_prefix|>%1<|fim_suffix|>%2<|fim_middle|>")
                                 .arg(context.prefix.value_or(""), context.suffix.value_or(""));
@@ -46,10 +46,10 @@ public:
                "}\n\n"
                "Ideal for code completion with Qwen models.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::Ollama:
+        case QodeAssist::PluginLLMCore::ProviderID::Ollama:
             return true;
         default:
             return false;
diff --git a/templates/Qwen3CoderFIM.hpp b/templates/Qwen3CoderFIM.hpp
index 94af77e..b4ab172 100644
--- a/templates/Qwen3CoderFIM.hpp
+++ b/templates/Qwen3CoderFIM.hpp
@@ -21,17 +21,17 @@
 
 #include 
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class Qwen3CoderFIM : public LLMCore::PromptTemplate
+class Qwen3CoderFIM : public PluginLLMCore::PromptTemplate
 {
 public:
     QString name() const override { return "Qwen3 Coder FIM"; }
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIMOnChat; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIMOnChat; }
     QStringList stopWords() const override { return QStringList() << "<|im_end|>"; }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         QJsonArray messages;
 
@@ -62,14 +62,14 @@ public:
                "  ]\n"
                "}\n\n";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case LLMCore::ProviderID::Ollama:
-        case LLMCore::ProviderID::LMStudio:
-        case LLMCore::ProviderID::OpenRouter:
-        case LLMCore::ProviderID::OpenAICompatible:
-        case LLMCore::ProviderID::LlamaCpp:
+        case PluginLLMCore::ProviderID::Ollama:
+        case PluginLLMCore::ProviderID::LMStudio:
+        case PluginLLMCore::ProviderID::OpenRouter:
+        case PluginLLMCore::ProviderID::OpenAICompatible:
+        case PluginLLMCore::ProviderID::LlamaCpp:
             return true;
         default:
             return false;
diff --git a/templates/StarCoder2Fim.hpp b/templates/StarCoder2Fim.hpp
index b204478..71e20f6 100644
--- a/templates/StarCoder2Fim.hpp
+++ b/templates/StarCoder2Fim.hpp
@@ -19,21 +19,21 @@
 
 #pragma once
 
-#include "llmcore/PromptTemplate.hpp"
+#include "pluginllmcore/PromptTemplate.hpp"
 
 namespace QodeAssist::Templates {
 
-class StarCoder2Fim : public LLMCore::PromptTemplate
+class StarCoder2Fim : public PluginLLMCore::PromptTemplate
 {
 public:
-    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::FIM; }
+    PluginLLMCore::TemplateType type() const override { return PluginLLMCore::TemplateType::FIM; }
     QString name() const override { return "StarCoder2 FIM"; }
     QStringList stopWords() const override
     {
         return QStringList() << "<|endoftext|>" << "<fim_prefix>" << "<fim_suffix>" << "<fim_middle>"
                              << "<file_sep>";
     }
-    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+    void prepareRequest(QJsonObject &request, const PluginLLMCore::ContextData &context) const override
     {
         request["prompt"] = QString("<fim_prefix>%1<fim_suffix>%2<fim_middle>")
                                 .arg(context.prefix.value_or(""), context.suffix.value_or(""));
@@ -48,10 +48,10 @@ public:
                "}\n\n"
                "Includes stop words to prevent token duplication.";
     }
-    bool isSupportProvider(LLMCore::ProviderID id) const override
+    bool isSupportProvider(PluginLLMCore::ProviderID id) const override
     {
         switch (id) {
-        case QodeAssist::LLMCore::ProviderID::Ollama:
+        case QodeAssist::PluginLLMCore::ProviderID::Ollama:
             return true;
         default:
             return false;
diff --git a/templates/Templates.hpp b/templates/Templates.hpp
index f73cb41..940f653 100644
--- a/templates/Templates.hpp
+++ b/templates/Templates.hpp
@@ -19,7 +19,7 @@
 
 #pragma once
 
-#include "llmcore/PromptTemplateManager.hpp"
+#include "pluginllmcore/PromptTemplateManager.hpp"
 #include "templates/Alpaca.hpp"
 #include "templates/ChatML.hpp"
 #include "templates/Claude.hpp"
@@ -44,7 +44,7 @@ namespace QodeAssist::Templates {
 
 inline void registerTemplates()
 {
-    auto &templateManager = LLMCore::PromptTemplateManager::instance();
+    auto &templateManager = PluginLLMCore::PromptTemplateManager::instance();
     templateManager.registerTemplate();
     templateManager.registerTemplate();
     templateManager.registerTemplate();
diff --git a/tools/BuildProjectTool.cpp b/tools/BuildProjectTool.cpp
index bf6e43e..3148711 100644
--- a/tools/BuildProjectTool.cpp
+++ b/tools/BuildProjectTool.cpp
@@ -80,7 +80,7 @@ QString BuildProjectTool::description() const
            "Note: This operation may take some time depending on project size.";
 }
 
-QJsonObject BuildProjectTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject BuildProjectTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject definition;
     definition["type"] = "object";
@@ -97,23 +97,23 @@ QJsonObject BuildProjectTool::getDefinition(LLMCore::ToolSchemaFormat format) co
     definition["required"] = QJsonArray();
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
 
     return definition;
 }
 
-LLMCore::ToolPermissions BuildProjectTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions BuildProjectTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemRead 
-         | LLMCore::ToolPermission::FileSystemWrite;
+    return PluginLLMCore::ToolPermission::FileSystemRead 
+         | PluginLLMCore::ToolPermission::FileSystemWrite;
 }
 
 QFuture BuildProjectTool::executeAsync(const QJsonObject &input)
diff --git a/tools/BuildProjectTool.hpp b/tools/BuildProjectTool.hpp
index 2c0ade5..f27d797 100644
--- a/tools/BuildProjectTool.hpp
+++ b/tools/BuildProjectTool.hpp
@@ -19,7 +19,7 @@
 
 #pragma once
 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 #include 
 #include 
 #include 
@@ -42,7 +42,7 @@ struct BuildInfo
     QMetaObject::Connection buildFinishedConnection;
 };
 
-class BuildProjectTool : public LLMCore::BaseTool
+class BuildProjectTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 public:
@@ -52,8 +52,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
 
     QFuture executeAsync(const QJsonObject &input = QJsonObject()) override;
 
diff --git a/tools/CreateNewFileTool.cpp b/tools/CreateNewFileTool.cpp
index 6aed97e..abc42bd 100644
--- a/tools/CreateNewFileTool.cpp
+++ b/tools/CreateNewFileTool.cpp
@@ -54,7 +54,7 @@ QString CreateNewFileTool::description() const
            "to the project file";
 }
 
-QJsonObject CreateNewFileTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject CreateNewFileTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject properties;
 
@@ -71,22 +71,22 @@ QJsonObject CreateNewFileTool::getDefinition(LLMCore::ToolSchemaFormat format) c
     definition["required"] = required;
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
 
     return definition;
 }
 
-LLMCore::ToolPermissions CreateNewFileTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions CreateNewFileTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemWrite;
+    return PluginLLMCore::ToolPermission::FileSystemWrite;
 }
 
 QFuture CreateNewFileTool::executeAsync(const QJsonObject &input)
diff --git a/tools/CreateNewFileTool.hpp b/tools/CreateNewFileTool.hpp
index 1c16325..5d848db 100644
--- a/tools/CreateNewFileTool.hpp
+++ b/tools/CreateNewFileTool.hpp
@@ -19,11 +19,11 @@
 
 #pragma once
 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 
 namespace QodeAssist::Tools {
 
-class CreateNewFileTool : public LLMCore::BaseTool
+class CreateNewFileTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 public:
@@ -32,8 +32,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
 
     QFuture executeAsync(const QJsonObject &input = QJsonObject()) override;
 };
diff --git a/tools/EditFileTool.cpp b/tools/EditFileTool.cpp
index adfb60e..3f096c5 100644
--- a/tools/EditFileTool.cpp
+++ b/tools/EditFileTool.cpp
@@ -71,7 +71,7 @@ QString EditFileTool::description() const
            "disabled auto-apply. DO NOT retry the same edit - wait for user action.";
 }
 
-QJsonObject EditFileTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject EditFileTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject properties;
 
@@ -105,22 +105,22 @@ QJsonObject EditFileTool::getDefinition(LLMCore::ToolSchemaFormat format) const
     definition["required"] = required;
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
 
     return definition;
 }
 
-LLMCore::ToolPermissions EditFileTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions EditFileTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemWrite;
+    return PluginLLMCore::ToolPermission::FileSystemWrite;
 }
 
 QFuture EditFileTool::executeAsync(const QJsonObject &input)
diff --git a/tools/EditFileTool.hpp b/tools/EditFileTool.hpp
index 8f888a0..4389b64 100644
--- a/tools/EditFileTool.hpp
+++ b/tools/EditFileTool.hpp
@@ -19,11 +19,11 @@
 
 #pragma once
 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 
 namespace QodeAssist::Tools {
 
-class EditFileTool : public LLMCore::BaseTool
+class EditFileTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 public:
@@ -32,8 +32,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
 
     QFuture executeAsync(const QJsonObject &input = QJsonObject()) override;
 };
diff --git a/tools/ExecuteTerminalCommandTool.cpp b/tools/ExecuteTerminalCommandTool.cpp
index d114500..fd61b40 100644
--- a/tools/ExecuteTerminalCommandTool.cpp
+++ b/tools/ExecuteTerminalCommandTool.cpp
@@ -56,7 +56,7 @@ QString ExecuteTerminalCommandTool::description() const
     return getCommandDescription();
 }
 
-QJsonObject ExecuteTerminalCommandTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject ExecuteTerminalCommandTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject definition;
     definition["type"] = "object";
@@ -78,24 +78,24 @@ QJsonObject ExecuteTerminalCommandTool::getDefinition(LLMCore::ToolSchemaFormat
     definition["required"] = QJsonArray{"command"};
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
 
     return definition;
 }
 
-LLMCore::ToolPermissions ExecuteTerminalCommandTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions ExecuteTerminalCommandTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemRead 
-         | LLMCore::ToolPermission::FileSystemWrite 
-         | LLMCore::ToolPermission::NetworkAccess;
+    return PluginLLMCore::ToolPermission::FileSystemRead 
+         | PluginLLMCore::ToolPermission::FileSystemWrite 
+         | PluginLLMCore::ToolPermission::NetworkAccess;
 }
 
 QFuture ExecuteTerminalCommandTool::executeAsync(const QJsonObject &input)
diff --git a/tools/ExecuteTerminalCommandTool.hpp b/tools/ExecuteTerminalCommandTool.hpp
index 27b04be..5a1f041 100644
--- a/tools/ExecuteTerminalCommandTool.hpp
+++ b/tools/ExecuteTerminalCommandTool.hpp
@@ -19,12 +19,12 @@
 
 #pragma once
 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 #include 
 
 namespace QodeAssist::Tools {
 
-class ExecuteTerminalCommandTool : public LLMCore::BaseTool
+class ExecuteTerminalCommandTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 public:
@@ -33,8 +33,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
 
     QFuture executeAsync(const QJsonObject &input = QJsonObject()) override;
 
diff --git a/tools/FindAndReadFileTool.cpp b/tools/FindAndReadFileTool.cpp
index 6ef9594..17bd237 100644
--- a/tools/FindAndReadFileTool.cpp
+++ b/tools/FindAndReadFileTool.cpp
@@ -48,7 +48,7 @@ QString FindAndReadFileTool::description() const
            "Returns the best matching file and its content.";
 }
 
-QJsonObject FindAndReadFileTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject FindAndReadFileTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject properties;
 
@@ -69,21 +69,21 @@ QJsonObject FindAndReadFileTool::getDefinition(LLMCore::ToolSchemaFormat format)
     definition["required"] = QJsonArray{"query"};
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
     return definition;
 }
 
-LLMCore::ToolPermissions FindAndReadFileTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions FindAndReadFileTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemRead;
+    return PluginLLMCore::ToolPermission::FileSystemRead;
 }
 
 QFuture FindAndReadFileTool::executeAsync(const QJsonObject &input)
diff --git a/tools/FindAndReadFileTool.hpp b/tools/FindAndReadFileTool.hpp
index 7311529..cac99e6 100644
--- a/tools/FindAndReadFileTool.hpp
+++ b/tools/FindAndReadFileTool.hpp
@@ -22,14 +22,14 @@
 #include "FileSearchUtils.hpp"
 
 #include 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 #include 
 #include 
 #include 
 
 namespace QodeAssist::Tools {
 
-class FindAndReadFileTool : public LLMCore::BaseTool
+class FindAndReadFileTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 
@@ -39,8 +39,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
     QFuture executeAsync(const QJsonObject &input) override;
 
 private:
diff --git a/tools/GetIssuesListTool.cpp b/tools/GetIssuesListTool.cpp
index c274f2b..0188345 100644
--- a/tools/GetIssuesListTool.cpp
+++ b/tools/GetIssuesListTool.cpp
@@ -138,7 +138,7 @@ QString GetIssuesListTool::description() const
            "Optional severity filter: 'error', 'warning', or 'all' (default).";
 }
 
-QJsonObject GetIssuesListTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject GetIssuesListTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject definition;
     definition["type"] = "object";
@@ -153,22 +153,22 @@ QJsonObject GetIssuesListTool::getDefinition(LLMCore::ToolSchemaFormat format) c
     definition["required"] = QJsonArray();
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
 
     return definition;
 }
 
-LLMCore::ToolPermissions GetIssuesListTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions GetIssuesListTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemRead;
+    return PluginLLMCore::ToolPermission::FileSystemRead;
 }
 
 QFuture GetIssuesListTool::executeAsync(const QJsonObject &input)
diff --git a/tools/GetIssuesListTool.hpp b/tools/GetIssuesListTool.hpp
index ea2c1ef..496125c 100644
--- a/tools/GetIssuesListTool.hpp
+++ b/tools/GetIssuesListTool.hpp
@@ -19,7 +19,7 @@
 
 #pragma once
 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 #include 
 #include 
 #include 
@@ -46,7 +46,7 @@ private:
     mutable QMutex m_mutex;
 };
 
-class GetIssuesListTool : public LLMCore::BaseTool
+class GetIssuesListTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 public:
@@ -55,8 +55,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
 
     QFuture executeAsync(const QJsonObject &input = QJsonObject()) override;
 };
diff --git a/tools/ListProjectFilesTool.cpp b/tools/ListProjectFilesTool.cpp
index f39e45b..8ae8329 100644
--- a/tools/ListProjectFilesTool.cpp
+++ b/tools/ListProjectFilesTool.cpp
@@ -53,7 +53,7 @@ QString ListProjectFilesTool::description() const
            "Useful for understanding project structure. No parameters required.";
 }
 
-QJsonObject ListProjectFilesTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject ListProjectFilesTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject definition;
     definition["type"] = "object";
@@ -61,22 +61,22 @@ QJsonObject ListProjectFilesTool::getDefinition(LLMCore::ToolSchemaFormat format
     definition["required"] = QJsonArray();
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
 
     return definition;
 }
 
-LLMCore::ToolPermissions ListProjectFilesTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions ListProjectFilesTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemRead;
+    return PluginLLMCore::ToolPermission::FileSystemRead;
 }
 
 QFuture ListProjectFilesTool::executeAsync(const QJsonObject &input)
diff --git a/tools/ListProjectFilesTool.hpp b/tools/ListProjectFilesTool.hpp
index e61dc43..0314200 100644
--- a/tools/ListProjectFilesTool.hpp
+++ b/tools/ListProjectFilesTool.hpp
@@ -19,13 +19,13 @@
 
 #pragma once
 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 
 #include 
 
 namespace QodeAssist::Tools {
 
-class ListProjectFilesTool : public LLMCore::BaseTool
+class ListProjectFilesTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 public:
@@ -34,8 +34,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
 
     QFuture executeAsync(const QJsonObject &input = QJsonObject()) override;
 
diff --git a/tools/ProjectSearchTool.cpp b/tools/ProjectSearchTool.cpp
index a1bfc65..368e73f 100644
--- a/tools/ProjectSearchTool.cpp
+++ b/tools/ProjectSearchTool.cpp
@@ -60,7 +60,7 @@ QString ProjectSearchTool::description() const
            "Symbol mode: finds C++ definitions (classes, functions, etc).";
 }
 
-QJsonObject ProjectSearchTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject ProjectSearchTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject properties;
 
@@ -95,21 +95,21 @@ QJsonObject ProjectSearchTool::getDefinition(LLMCore::ToolSchemaFormat format) c
     definition["required"] = QJsonArray{"query", "search_type"};
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
     return definition;
 }
 
-LLMCore::ToolPermissions ProjectSearchTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions ProjectSearchTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::FileSystemRead;
+    return PluginLLMCore::ToolPermission::FileSystemRead;
 }
 
 QFuture ProjectSearchTool::executeAsync(const QJsonObject &input)
diff --git a/tools/ProjectSearchTool.hpp b/tools/ProjectSearchTool.hpp
index 40874ad..8eb121e 100644
--- a/tools/ProjectSearchTool.hpp
+++ b/tools/ProjectSearchTool.hpp
@@ -20,14 +20,14 @@
 #pragma once
 
 #include 
-#include <llmcore/BaseTool.hpp>
+#include <pluginllmcore/BaseTool.hpp>
 #include 
 #include 
 #include 
 
 namespace QodeAssist::Tools {
 
-class ProjectSearchTool : public LLMCore::BaseTool
+class ProjectSearchTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 
@@ -37,8 +37,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
     QFuture executeAsync(const QJsonObject &input) override;
 
 private:
diff --git a/tools/TodoTool.cpp b/tools/TodoTool.cpp
index e58bcea..70d5c7f 100644
--- a/tools/TodoTool.cpp
+++ b/tools/TodoTool.cpp
@@ -53,7 +53,7 @@ QString TodoTool::description() const
            "The list persists throughout the conversation.";
 }
 
-QJsonObject TodoTool::getDefinition(LLMCore::ToolSchemaFormat format) const
+QJsonObject TodoTool::getDefinition(PluginLLMCore::ToolSchemaFormat format) const
 {
     QJsonObject definition;
     definition["type"] = "object";
@@ -98,22 +98,22 @@ QJsonObject TodoTool::getDefinition(LLMCore::ToolSchemaFormat format) const
     definition["required"] = required;
 
     switch (format) {
-    case LLMCore::ToolSchemaFormat::OpenAI:
+    case PluginLLMCore::ToolSchemaFormat::OpenAI:
         return customizeForOpenAI(definition);
-    case LLMCore::ToolSchemaFormat::Claude:
+    case PluginLLMCore::ToolSchemaFormat::Claude:
         return customizeForClaude(definition);
-    case LLMCore::ToolSchemaFormat::Ollama:
+    case PluginLLMCore::ToolSchemaFormat::Ollama:
         return customizeForOllama(definition);
-    case LLMCore::ToolSchemaFormat::Google:
+    case PluginLLMCore::ToolSchemaFormat::Google:
         return customizeForGoogle(definition);
     }
 
     return definition;
 }
 
-LLMCore::ToolPermissions TodoTool::requiredPermissions() const
+PluginLLMCore::ToolPermissions TodoTool::requiredPermissions() const
 {
-    return LLMCore::ToolPermission::None;
+    return PluginLLMCore::ToolPermission::None;
 }
 
 QFuture TodoTool::executeAsync(const QJsonObject &input)
diff --git a/tools/TodoTool.hpp b/tools/TodoTool.hpp
index 2454f82..7b60aed 100644
--- a/tools/TodoTool.hpp
+++ b/tools/TodoTool.hpp
@@ -19,7 +19,7 @@
 
 #pragma once
 
-#include 
+#include 
 
 #include 
 #include 
@@ -34,7 +34,7 @@ struct TodoItem
     bool completed;
 };
 
-class TodoTool : public LLMCore::BaseTool
+class TodoTool : public PluginLLMCore::BaseTool
 {
     Q_OBJECT
 
@@ -44,8 +44,8 @@ public:
     QString name() const override;
     QString stringName() const override;
     QString description() const override;
-    QJsonObject getDefinition(LLMCore::ToolSchemaFormat format) const override;
-    LLMCore::ToolPermissions requiredPermissions() const override;
+    QJsonObject getDefinition(PluginLLMCore::ToolSchemaFormat format) const override;
+    PluginLLMCore::ToolPermissions requiredPermissions() const override;
 
     QFuture executeAsync(const QJsonObject &input = QJsonObject()) override;
 
diff --git a/tools/ToolHandler.cpp b/tools/ToolHandler.cpp
index 93b0c50..ebc5bca 100644
--- a/tools/ToolHandler.cpp
+++ b/tools/ToolHandler.cpp
@@ -35,7 +35,7 @@ ToolHandler::ToolHandler(QObject *parent)
 QFuture ToolHandler::executeToolAsync(
     const QString &requestId,
     const QString &toolId,
-    LLMCore::BaseTool *tool,
+    PluginLLMCore::BaseTool *tool,
     const QJsonObject &input)
 {
     if (!tool) {
diff --git a/tools/ToolHandler.hpp b/tools/ToolHandler.hpp
index c8b2367..ba31f81 100644
--- a/tools/ToolHandler.hpp
+++ b/tools/ToolHandler.hpp
@@ -25,7 +25,7 @@
 #include 
 #include 
 
-#include 
+#include 
 
 namespace QodeAssist::Tools {
 
@@ -39,7 +39,7 @@ public:
     QFuture executeToolAsync(
         const QString &requestId,
         const QString &toolId,
-        LLMCore::BaseTool *tool,
+        PluginLLMCore::BaseTool *tool,
         const QJsonObject &input);
 
     void cleanupRequest(const QString &requestId);
diff --git a/tools/ToolsFactory.cpp b/tools/ToolsFactory.cpp
index 5fe2e19..dabd424 100644
--- a/tools/ToolsFactory.cpp
+++ b/tools/ToolsFactory.cpp
@@ -58,7 +58,7 @@ void ToolsFactory::registerTools()
     LOG_MESSAGE(QString("Registered %1 tools").arg(m_tools.size()));
 }
 
-void ToolsFactory::registerTool(LLMCore::BaseTool *tool)
+void ToolsFactory::registerTool(PluginLLMCore::BaseTool *tool)
 {
     if (!tool) {
         LOG_MESSAGE("Warning: Attempted to register null tool");
@@ -73,18 +73,18 @@ void ToolsFactory::registerTool(LLMCore::BaseTool *tool)
     m_tools.insert(toolName, tool);
 }
 
-QList ToolsFactory::getAvailableTools() const
+QList ToolsFactory::getAvailableTools() const
 {
     return m_tools.values();
 }
 
-LLMCore::BaseTool *ToolsFactory::getToolByName(const QString &name) const
+PluginLLMCore::BaseTool *ToolsFactory::getToolByName(const QString &name) const
 {
     return m_tools.value(name, nullptr);
 }
 
 QJsonArray ToolsFactory::getToolsDefinitions(
-    LLMCore::ToolSchemaFormat format, LLMCore::RunToolsFilter filter) const
+    PluginLLMCore::ToolSchemaFormat format, PluginLLMCore::RunToolsFilter filter) const
 {
     QJsonArray toolsArray;
     const auto &settings = Settings::toolsSettings();
@@ -113,30 +113,30 @@ QJsonArray ToolsFactory::getToolsDefinitions(
 
         const auto requiredPerms = it.value()->requiredPermissions();
 
-        if (filter != LLMCore::RunToolsFilter::ALL) {
+        if (filter != PluginLLMCore::RunToolsFilter::ALL) {
             bool matchesFilter = false;
 
             switch (filter) {
-            case LLMCore::RunToolsFilter::OnlyRead:
-                if (requiredPerms == LLMCore::ToolPermission::None
-                    || requiredPerms.testFlag(LLMCore::ToolPermission::FileSystemRead)) {
+            case PluginLLMCore::RunToolsFilter::OnlyRead:
+                if (requiredPerms == PluginLLMCore::ToolPermission::None
+                    || requiredPerms.testFlag(PluginLLMCore::ToolPermission::FileSystemRead)) {
                     matchesFilter = true;
                 }
                 break;
 
-            case LLMCore::RunToolsFilter::OnlyWrite:
-                if (requiredPerms.testFlag(LLMCore::ToolPermission::FileSystemWrite)) {
+            case PluginLLMCore::RunToolsFilter::OnlyWrite:
+                if (requiredPerms.testFlag(PluginLLMCore::ToolPermission::FileSystemWrite)) {
                     matchesFilter = true;
                 }
                 break;
 
-            case LLMCore::RunToolsFilter::OnlyNetworking:
-                if (requiredPerms.testFlag(LLMCore::ToolPermission::NetworkAccess)) {
+            case PluginLLMCore::RunToolsFilter::OnlyNetworking:
+                if (requiredPerms.testFlag(PluginLLMCore::ToolPermission::NetworkAccess)) {
                     matchesFilter = true;
                 }
                 break;
 
-            case LLMCore::RunToolsFilter::ALL:
+            case PluginLLMCore::RunToolsFilter::ALL:
                 matchesFilter = true;
                 break;
             }
@@ -150,19 +150,19 @@ QJsonArray ToolsFactory::getToolsDefinitions(
 
         bool hasPermission = true;
 
-        if (requiredPerms.testFlag(LLMCore::ToolPermission::FileSystemRead)) {
+        if (requiredPerms.testFlag(PluginLLMCore::ToolPermission::FileSystemRead)) {
             if (!settings.allowFileSystemRead()) {
                 hasPermission = false;
             }
         }
 
-        if (requiredPerms.testFlag(LLMCore::ToolPermission::FileSystemWrite)) {
+        if (requiredPerms.testFlag(PluginLLMCore::ToolPermission::FileSystemWrite)) {
             if (!settings.allowFileSystemWrite()) {
                 hasPermission = false;
             }
         }
 
-        if (requiredPerms.testFlag(LLMCore::ToolPermission::NetworkAccess)) {
+        if (requiredPerms.testFlag(PluginLLMCore::ToolPermission::NetworkAccess)) {
             if (!settings.allowNetworkAccess()) {
                 hasPermission = false;
             }
diff --git a/tools/ToolsFactory.hpp b/tools/ToolsFactory.hpp
index abb0e1a..e14f7d9 100644
--- a/tools/ToolsFactory.hpp
+++ b/tools/ToolsFactory.hpp
@@ -21,7 +21,7 @@
 
 #include 
 
-#include 
+#include 
 
 namespace QodeAssist::Tools {
 
@@ -32,17 +32,17 @@ public:
     ToolsFactory(QObject *parent = nullptr);
     ~ToolsFactory() override = default;
 
-    QList getAvailableTools() const;
-    LLMCore::BaseTool *getToolByName(const QString &name) const;
+    QList getAvailableTools() const;
+    PluginLLMCore::BaseTool *getToolByName(const QString &name) const;
     QJsonArray getToolsDefinitions(
-        LLMCore::ToolSchemaFormat format,
-        LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL) const;
+        PluginLLMCore::ToolSchemaFormat format,
+        PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL) const;
     QString getStringName(const QString &name) const;
 
 private:
     void registerTools();
-    void registerTool(LLMCore::BaseTool *tool);
+    void registerTool(PluginLLMCore::BaseTool *tool);
 
-    QHash m_tools;
+    QHash m_tools;
 };
 } // namespace QodeAssist::Tools
diff --git a/tools/ToolsManager.cpp b/tools/ToolsManager.cpp
index 797b6a0..1edd39f 100644
--- a/tools/ToolsManager.cpp
+++ b/tools/ToolsManager.cpp
@@ -138,7 +138,7 @@ void ToolsManager::executeNextTool(const QString &requestId)
 }
 
 QJsonArray ToolsManager::getToolsDefinitions(
-    LLMCore::ToolSchemaFormat format, LLMCore::RunToolsFilter filter) const
+    PluginLLMCore::ToolSchemaFormat format, PluginLLMCore::RunToolsFilter filter) const
 {
     if (!m_toolsFactory) {
         return QJsonArray();
diff --git a/tools/ToolsManager.hpp b/tools/ToolsManager.hpp
index 381c48d..a7d4f96 100644
--- a/tools/ToolsManager.hpp
+++ b/tools/ToolsManager.hpp
@@ -26,8 +26,8 @@
 
 #include "ToolHandler.hpp"
 #include "ToolsFactory.hpp"
-#include 
-#include 
+#include 
+#include 
 
 namespace QodeAssist::Tools {
 
@@ -47,7 +47,7 @@ struct ToolQueue
     bool isExecuting = false;
 };
 
-class ToolsManager : public QObject, public LLMCore::IToolsManager
+class ToolsManager : public QObject, public PluginLLMCore::IToolsManager
 {
     Q_OBJECT
 
@@ -61,8 +61,8 @@ public:
         const QJsonObject &input) override;
 
     QJsonArray getToolsDefinitions(
-        LLMCore::ToolSchemaFormat format,
-        LLMCore::RunToolsFilter filter = LLMCore::RunToolsFilter::ALL) const override;
+        PluginLLMCore::ToolSchemaFormat format,
+        PluginLLMCore::RunToolsFilter filter = PluginLLMCore::RunToolsFilter::ALL) const override;
     
     void cleanupRequest(const QString &requestId) override;
     void setCurrentSessionId(const QString &sessionId) override;