diff --git a/CMakeLists.txt b/CMakeLists.txt
index 16f4fec..2a623f1 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -56,7 +56,6 @@ add_qtc_plugin(QodeAssist
     providers/OllamaProvider.hpp providers/OllamaProvider.cpp
     providers/LMStudioProvider.hpp providers/LMStudioProvider.cpp
     providers/OpenAICompatProvider.hpp providers/OpenAICompatProvider.cpp
-    providers/OllamaMessage.hpp providers/OllamaMessage.cpp
     QodeAssist.qrc
     LSPCompletion.hpp
     LLMSuggestion.hpp LLMSuggestion.cpp
diff --git a/llmcore/CMakeLists.txt b/llmcore/CMakeLists.txt
index 5f7915e..b4048b6 100644
--- a/llmcore/CMakeLists.txt
+++ b/llmcore/CMakeLists.txt
@@ -7,6 +7,8 @@ add_library(LLMCore STATIC
     PromptTemplateManager.hpp PromptTemplateManager.cpp
    RequestConfig.hpp
     RequestHandler.hpp RequestHandler.cpp
+    OllamaMessage.hpp OllamaMessage.cpp
+    OpenAIMessage.hpp OpenAIMessage.cpp
 )
 
 target_link_libraries(LLMCore
diff --git a/providers/OllamaMessage.cpp b/llmcore/OllamaMessage.cpp
similarity index 75%
rename from providers/OllamaMessage.cpp
rename to llmcore/OllamaMessage.cpp
index 2e16f9f..1da2912 100644
--- a/providers/OllamaMessage.cpp
+++ b/llmcore/OllamaMessage.cpp
@@ -18,12 +18,38 @@
  */
 
 #include "OllamaMessage.hpp"
+#include <QJsonDocument>
+#include <QJsonParseError>
 
-namespace QodeAssist::Providers {
+namespace QodeAssist::LLMCore {
 
-OllamaMessage OllamaMessage::fromJson(const QJsonObject &obj, Type type)
+QJsonObject OllamaMessage::parseJsonFromData(const QByteArray &data)
+{
+    QByteArrayList lines = data.split('\n');
+    for (const QByteArray &line : lines) {
+        if (line.trimmed().isEmpty()) {
+            continue;
+        }
+
+        QJsonParseError error;
+        QJsonDocument doc = QJsonDocument::fromJson(line, &error);
+        if (!doc.isNull() && error.error == QJsonParseError::NoError) {
+            return doc.object();
+        }
+    }
+    return QJsonObject();
+}
+
+OllamaMessage OllamaMessage::fromJson(const QByteArray &data, Type type)
 {
     OllamaMessage msg;
+    QJsonObject obj = parseJsonFromData(data);
+
+    if (obj.isEmpty()) {
+        msg.error = "Invalid JSON response";
+        return msg;
+    }
+
     msg.model = obj["model"].toString();
     msg.createdAt = QDateTime::fromString(obj["created_at"].toString(), Qt::ISODate);
     msg.done = obj["done"].toBool();
@@ -73,4 +99,4 @@ bool OllamaMessage::hasError() const
     return !error.isEmpty();
 }
 
-} // namespace QodeAssist::Providers
+} // namespace QodeAssist::LLMCore
diff --git a/providers/OllamaMessage.hpp b/llmcore/OllamaMessage.hpp
similarity index 88%
rename from providers/OllamaMessage.hpp
rename to llmcore/OllamaMessage.hpp
index 8701a4a..b1aa308 100644
--- a/providers/OllamaMessage.hpp
+++ b/llmcore/OllamaMessage.hpp
@@ -20,11 +20,10 @@
 #pragma once
 
 #include <QDateTime>
-#include <QJsonDocument>
 #include <QJsonObject>
 #include <variant>
 
-namespace QodeAssist::Providers {
+namespace QodeAssist::LLMCore {
 
 class OllamaMessage
 {
@@ -58,14 +57,15 @@ public:
     std::variant<QString, ChatMessage> response;
     bool done{false};
     QString doneReason;
-    Metrics metrics;
     QString error;
+    Metrics metrics;
 
-    static OllamaMessage fromJson(const QJsonObject &obj, Type type);
-
+    static OllamaMessage fromJson(const QByteArray &data, Type type);
     QString getContent() const;
-
     bool hasError() const;
+
+private:
+    static QJsonObject parseJsonFromData(const QByteArray &data);
 };
 
-} // namespace QodeAssist::Providers
+} // namespace QodeAssist::LLMCore
diff --git a/llmcore/OpenAIMessage.cpp b/llmcore/OpenAIMessage.cpp
new file mode 100644
index 0000000..e100dfc
--- /dev/null
+++ b/llmcore/OpenAIMessage.cpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2024 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "OpenAIMessage.hpp"
+#include <QJsonArray>
+#include <QJsonDocument>
+#include <QJsonObject>
+
+namespace QodeAssist::LLMCore {
+
+OpenAIMessage OpenAIMessage::fromJson(const QByteArray &data)
+{
+    OpenAIMessage msg;
+
+    QByteArrayList lines = data.split('\n');
+    QByteArray jsonData;
+
+    for (const QByteArray &line : lines) {
+        if (line.trimmed().isEmpty()) {
+            continue;
+        }
+
+        if (line.trimmed() == "data: [DONE]") {
+            msg.done = true;
+            continue;
+        }
+
+        if (line.startsWith("data: ")) {
+            jsonData = line.mid(6);
+            break;
+        }
+    }
+
+    if (jsonData.isEmpty()) {
+        jsonData = data;
+    }
+
+    QJsonParseError error;
+    QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
+    if (doc.isNull()) {
+        msg.error = QString("Invalid JSON response: %1").arg(error.errorString());
+        return msg;
+    }
+
+    QJsonObject obj = doc.object();
+
+    if (obj.contains("error")) {
+        msg.error = obj["error"].toObject()["message"].toString();
+        return msg;
+    }
+
+    if (obj.contains("choices")) {
+        auto choices = obj["choices"].toArray();
+        if (!choices.isEmpty()) {
+            auto choiceObj = choices[0].toObject();
+
+            if (choiceObj.contains("message")) {
+                msg.choice.content = choiceObj["message"].toObject()["content"].toString();
+            } else if (choiceObj.contains("delta")) {
+                msg.choice.content = choiceObj["delta"].toObject()["content"].toString();
+            }
+
+            msg.choice.finishReason = choiceObj["finish_reason"].toString();
+            if (!msg.choice.finishReason.isEmpty()) {
+                msg.done = true;
+            }
+        }
+    }
+
+    if (obj.contains("usage")) {
+        QJsonObject usage = obj["usage"].toObject();
+        msg.usage.promptTokens = usage["prompt_tokens"].toInt();
+        msg.usage.completionTokens = usage["completion_tokens"].toInt();
+        msg.usage.totalTokens = usage["total_tokens"].toInt();
+    }
+
+    return msg;
+}
+
+QString OpenAIMessage::getContent() const
+{
+    return choice.content;
+}
+
+bool OpenAIMessage::hasError() const
+{
+    return !error.isEmpty();
+}
+
+bool OpenAIMessage::isDone() const
+{
+    return done
+           || (!choice.finishReason.isEmpty()
+               && (choice.finishReason == "stop" || choice.finishReason == "length"));
+}
+
+} // namespace QodeAssist::LLMCore
diff --git a/llmcore/OpenAIMessage.hpp b/llmcore/OpenAIMessage.hpp
new file mode 100644
index 0000000..43c25a7
--- /dev/null
+++ b/llmcore/OpenAIMessage.hpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2024 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QByteArray>
+#include <QJsonObject>
+#include <QString>
+
+namespace QodeAssist::LLMCore {
+
+class OpenAIMessage
+{
+public:
+    struct Choice
+    {
+        QString content;
+        QString finishReason;
+    };
+
+    struct Usage
+    {
+        int promptTokens{0};
+        int completionTokens{0};
+        int totalTokens{0};
+    };
+
+    Choice choice;
+    QString error;
+    bool done{false};
+    Usage usage;
+
+    static OpenAIMessage fromJson(const QByteArray &data);
+    QString getContent() const;
+    bool hasError() const;
+    bool isDone() const;
+
+private:
+    static OpenAIMessage fromJsonObject(const QJsonObject &obj);
+};
+
+} // namespace QodeAssist::LLMCore
diff --git a/providers/LMStudioProvider.cpp b/providers/LMStudioProvider.cpp
index f04f6f8..eec30f4 100644
--- a/providers/LMStudioProvider.cpp
+++ b/providers/LMStudioProvider.cpp
@@ -25,6 +25,7 @@
 #include <QJsonDocument>
 #include <QNetworkReply>
 
+#include "llmcore/OpenAIMessage.hpp"
 #include "logger/Logger.hpp"
 #include "settings/ChatAssistantSettings.hpp"
 #include "settings/CodeCompletionSettings.hpp"
@@ -101,43 +102,19 @@ void LMStudioProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType
 
 bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
 {
-    bool isComplete = false;
-    while (reply->canReadLine()) {
-        QByteArray line = reply->readLine().trimmed();
-        if (line.isEmpty()) {
-            continue;
-        }
-        if (line == "data: [DONE]") {
-            isComplete = true;
-            break;
-        }
-        if (line.startsWith("data: ")) {
-            line = line.mid(6); // Remove "data: " prefix
-        }
-        QJsonDocument jsonResponse = QJsonDocument::fromJson(line);
-        if (jsonResponse.isNull()) {
-            qWarning() << "Invalid JSON response from LM Studio:" << line;
-            continue;
-        }
-        QJsonObject responseObj = jsonResponse.object();
-        if (responseObj.contains("choices")) {
-            QJsonArray choices = responseObj["choices"].toArray();
-            if (!choices.isEmpty()) {
-                QJsonObject choice = choices.first().toObject();
-                QJsonObject delta = choice["delta"].toObject();
-                if (delta.contains("content")) {
-                    QString completion = delta["content"].toString();
-
-                    accumulatedResponse += completion;
-                }
-                if (choice["finish_reason"].toString() == "stop") {
-                    isComplete = true;
-                    break;
-                }
-            }
-        }
+    QByteArray data = reply->readAll();
+    if (data.isEmpty()) {
+        return false;
     }
-    return isComplete;
+
+    auto message = LLMCore::OpenAIMessage::fromJson(data);
+    if (message.hasError()) {
+        LOG_MESSAGE("Error in OpenAI response: " + message.error);
+        return false;
+    }
+
+    accumulatedResponse += message.getContent();
+    return message.isDone();
 }
 
 QList<QString> LMStudioProvider::getInstalledModels(const QString &url)
diff --git a/providers/OllamaProvider.cpp b/providers/OllamaProvider.cpp
index 180a000..95fb692 100644
--- a/providers/OllamaProvider.cpp
+++ b/providers/OllamaProvider.cpp
@@ -25,7 +25,7 @@
 #include <QJsonDocument>
 #include <QNetworkReply>
 
-#include "OllamaMessage.hpp"
+#include "llmcore/OllamaMessage.hpp"
 #include "logger/Logger.hpp"
 #include "settings/ChatAssistantSettings.hpp"
 #include "settings/CodeCompletionSettings.hpp"
@@ -88,41 +88,23 @@ void OllamaProvider::prepareRequest(QJsonObject &request, LLMCore::RequestType t
 
 bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
 {
-    const QString endpoint = reply->url().path();
-    auto messageType = endpoint == completionEndpoint() ? OllamaMessage::Type::Generate
-                                                        : OllamaMessage::Type::Chat;
-
-    auto processMessage =
-        [&accumulatedResponse](const QJsonDocument &doc, OllamaMessage::Type messageType) {
-            if (doc.isNull()) {
-                LOG_MESSAGE("Invalid JSON response from Ollama");
-                return false;
-            }
-
-            auto message = OllamaMessage::fromJson(doc.object(), messageType);
-            if (message.hasError()) {
-                LOG_MESSAGE("Error in Ollama response: " + message.error);
-                return false;
-            }
-
-            accumulatedResponse += message.getContent();
-            return message.done;
-        };
-
-    if (reply->canReadLine()) {
-        while (reply->canReadLine()) {
-            QByteArray line = reply->readLine().trimmed();
-            if (line.isEmpty())
-                continue;
-
-            if (processMessage(QJsonDocument::fromJson(line), messageType)) {
-                return true;
-            }
-        }
+    QByteArray data = reply->readAll();
+    if (data.isEmpty()) {
         return false;
-    } else {
-        return processMessage(QJsonDocument::fromJson(reply->readAll()), messageType);
     }
+
+    const QString endpoint = reply->url().path();
+    auto messageType = endpoint == completionEndpoint() ? LLMCore::OllamaMessage::Type::Generate
+                                                        : LLMCore::OllamaMessage::Type::Chat;
+
+    auto message = LLMCore::OllamaMessage::fromJson(data, messageType);
+    if (message.hasError()) {
+        LOG_MESSAGE("Error in Ollama response: " + message.error);
+        return false;
+    }
+
+    accumulatedResponse += message.getContent();
+    return message.done;
 }
 
 QList<QString> OllamaProvider::getInstalledModels(const QString &url)
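For reviewers who want to poke at the relocated parsers outside the plugin, here is a minimal standalone sketch (not part of the patch) of how the two message types consume one streamed chunk each. It assumes the `LLMCore` static library from this change is linked; the `llama3` model name and both payloads are made-up samples of LM Studio's `data:`-framed SSE and Ollama's newline-delimited JSON, not captured traffic.

```cpp
#include <QByteArray>
#include <QDebug>

#include "llmcore/OllamaMessage.hpp"
#include "llmcore/OpenAIMessage.hpp"

using namespace QodeAssist;

int main()
{
    // One OpenAI-compatible SSE chunk, as LM Studio streams it.
    QByteArray openAiChunk =
        "data: {\"choices\":[{\"delta\":{\"content\":\"Hello\"},"
        "\"finish_reason\":null}]}\n";
    auto oa = LLMCore::OpenAIMessage::fromJson(openAiChunk);
    if (!oa.hasError())
        qDebug() << oa.getContent() << oa.isDone(); // "Hello" false (no finish_reason yet)

    // One newline-delimited Ollama /api/generate message (hypothetical payload).
    QByteArray ollamaChunk =
        "{\"model\":\"llama3\",\"created_at\":\"2024-01-01T00:00:00Z\","
        "\"response\":\"Hi\",\"done\":false}\n";
    auto ol = LLMCore::OllamaMessage::fromJson(ollamaChunk,
                                               LLMCore::OllamaMessage::Type::Generate);
    if (!ol.hasError())
        qDebug() << ol.getContent() << ol.done; // "Hi" false

    return 0;
}
```

Both providers' `handleResponse` overrides now reduce to exactly this parse/accumulate/done-check pattern, which is the point of moving the message types into `llmcore`.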