From bed42f9098be51a7e2c693b9ef62aa7681d97b3e Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Wed, 1 Oct 2025 00:58:54 +0200
Subject: [PATCH] feat: Add OpenAI tooling support (#232)
---
CMakeLists.txt | 1 +
providers/OpenAIMessage.cpp | 181 ++++++++++++++++++++++++++
providers/OpenAIMessage.hpp | 65 +++++++++
providers/OpenAIProvider.cpp | 246 +++++++++++++++++++++++++++--------
providers/OpenAIProvider.hpp | 24 +++-
5 files changed, 465 insertions(+), 52 deletions(-)
create mode 100644 providers/OpenAIMessage.cpp
create mode 100644 providers/OpenAIMessage.hpp
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 215e42e..ee6e399 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -119,6 +119,7 @@ add_qtc_plugin(QodeAssist
tools/ListProjectFilesTool.hpp tools/ListProjectFilesTool.cpp
tools/ToolsManager.hpp tools/ToolsManager.cpp
providers/ClaudeMessage.hpp providers/ClaudeMessage.cpp
+ providers/OpenAIMessage.hpp providers/OpenAIMessage.cpp
)
get_target_property(QtCreatorCorePath QtCreator::Core LOCATION)
diff --git a/providers/OpenAIMessage.cpp b/providers/OpenAIMessage.cpp
new file mode 100644
index 0000000..9417cd5
--- /dev/null
+++ b/providers/OpenAIMessage.cpp
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "OpenAIMessage.hpp"
+
+#include "logger/Logger.hpp"
+
+#include <QJsonArray>
+#include <QJsonDocument>
+
+namespace QodeAssist::Providers {
+
+OpenAIMessage::OpenAIMessage(QObject *parent)
+ : QObject(parent)
+{}
+
+void OpenAIMessage::handleContentDelta(const QString &content)
+{
+ auto textContent = getOrCreateTextContent();
+ textContent->appendText(content);
+}
+
+void OpenAIMessage::handleToolCallStart(int index, const QString &id, const QString &name)
+{
+ LOG_MESSAGE(QString("OpenAIMessage: handleToolCallStart index=%1, id=%2, name=%3")
+ .arg(index)
+ .arg(id, name));
+
+ while (m_currentBlocks.size() <= index) {
+ m_currentBlocks.append(nullptr);
+ }
+
+ auto toolContent = new LLMCore::ToolUseContent(id, name);
+ toolContent->setParent(this);
+ m_currentBlocks[index] = toolContent;
+ m_pendingToolArguments[index] = "";
+}
+
+void OpenAIMessage::handleToolCallDelta(int index, const QString &argumentsDelta)
+{
+ if (m_pendingToolArguments.contains(index)) {
+ m_pendingToolArguments[index] += argumentsDelta;
+ }
+}
+
+void OpenAIMessage::handleToolCallComplete(int index)
+{
+ if (m_pendingToolArguments.contains(index)) {
+ QString jsonArgs = m_pendingToolArguments[index];
+ QJsonObject argsObject;
+
+ if (!jsonArgs.isEmpty()) {
+ QJsonDocument doc = QJsonDocument::fromJson(jsonArgs.toUtf8());
+ if (doc.isObject()) {
+ argsObject = doc.object();
+ }
+ }
+
+ if (index < m_currentBlocks.size()) {
+ if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(m_currentBlocks[index])) {
+ toolContent->setInput(argsObject);
+ }
+ }
+
+ m_pendingToolArguments.remove(index);
+ }
+}
+
+void OpenAIMessage::handleFinishReason(const QString &finishReason)
+{
+ m_finishReason = finishReason;
+ updateStateFromFinishReason();
+}
+
+QJsonObject OpenAIMessage::toProviderFormat() const
+{
+ QJsonObject message;
+ message["role"] = "assistant";
+
+ QString textContent;
+ QJsonArray toolCalls;
+
+ for (auto block : m_currentBlocks) {
+ if (!block)
+ continue;
+
+ if (auto text = qobject_cast<LLMCore::TextContent *>(block)) {
+ textContent += text->text();
+ } else if (auto tool = qobject_cast<LLMCore::ToolUseContent *>(block)) {
+ toolCalls.append(tool->toJson(LLMCore::ProviderFormat::OpenAI));
+ }
+ }
+
+ if (!textContent.isEmpty()) {
+ message["content"] = textContent;
+ } else {
+ message["content"] = QJsonValue();
+ }
+
+ if (!toolCalls.isEmpty()) {
+ message["tool_calls"] = toolCalls;
+ }
+
+ return message;
+}
+
+QJsonArray OpenAIMessage::createToolResultMessages(const QHash<QString, QString> &toolResults) const
+{
+ QJsonArray messages;
+
+ for (auto toolContent : getCurrentToolUseContent()) {
+ if (toolResults.contains(toolContent->id())) {
+ auto toolResult = std::make_unique<LLMCore::ToolResultContent>(
+ toolContent->id(), toolResults[toolContent->id()]);
+ messages.append(toolResult->toJson(LLMCore::ProviderFormat::OpenAI));
+ }
+ }
+
+ return messages;
+}
+
+QList<LLMCore::ToolUseContent *> OpenAIMessage::getCurrentToolUseContent() const
+{
+ QList<LLMCore::ToolUseContent *> toolBlocks;
+ for (auto block : m_currentBlocks) {
+ if (auto toolContent = qobject_cast<LLMCore::ToolUseContent *>(block)) {
+ toolBlocks.append(toolContent);
+ }
+ }
+ return toolBlocks;
+}
+
+void OpenAIMessage::startNewContinuation()
+{
+ LOG_MESSAGE(QString("OpenAIAPIMessage: Starting new continuation"));
+
+ m_currentBlocks.clear();
+ m_pendingToolArguments.clear();
+ m_finishReason.clear();
+ m_state = LLMCore::MessageState::Building;
+}
+
+void OpenAIMessage::updateStateFromFinishReason()
+{
+ if (m_finishReason == "tool_calls" && !getCurrentToolUseContent().empty()) {
+ m_state = LLMCore::MessageState::RequiresToolExecution;
+ } else if (m_finishReason == "stop") {
+ m_state = LLMCore::MessageState::Final;
+ } else {
+ m_state = LLMCore::MessageState::Complete;
+ }
+}
+
+LLMCore::TextContent *OpenAIMessage::getOrCreateTextContent()
+{
+ for (auto block : m_currentBlocks) {
+ if (auto textContent = qobject_cast<LLMCore::TextContent *>(block)) {
+ return textContent;
+ }
+ }
+
+ return addCurrentContent<LLMCore::TextContent>();
+}
+
+} // namespace QodeAssist::Providers
diff --git a/providers/OpenAIMessage.hpp b/providers/OpenAIMessage.hpp
new file mode 100644
index 0000000..8a1fe3e
--- /dev/null
+++ b/providers/OpenAIMessage.hpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include
+
+namespace QodeAssist::Providers {
+
+class OpenAIMessage : public QObject
+{
+ Q_OBJECT
+public:
+ explicit OpenAIMessage(QObject *parent = nullptr);
+
+ void handleContentDelta(const QString &content);
+ void handleToolCallStart(int index, const QString &id, const QString &name);
+ void handleToolCallDelta(int index, const QString &argumentsDelta);
+ void handleToolCallComplete(int index);
+ void handleFinishReason(const QString &finishReason);
+
+ QJsonObject toProviderFormat() const;
+ QJsonArray createToolResultMessages(const QHash<QString, QString> &toolResults) const;
+
+ LLMCore::MessageState state() const { return m_state; }
+ QList<LLMCore::ToolUseContent *> getCurrentToolUseContent() const;
+
+ void startNewContinuation();
+
+private:
+ QString m_finishReason;
+ LLMCore::MessageState m_state = LLMCore::MessageState::Building;
+ QList m_currentBlocks;
+ QHash<int, QString> m_pendingToolArguments;
+
+ void updateStateFromFinishReason();
+ LLMCore::TextContent *getOrCreateTextContent();
+
+ template<typename T, typename... Args>
+ T *addCurrentContent(Args &&...args)
+ {
+ T *content = new T(std::forward<Args>(args)...);
+ content->setParent(this);
+ m_currentBlocks.append(content);
+ return content;
+ }
+};
+
+} // namespace QodeAssist::Providers
diff --git a/providers/OpenAIProvider.cpp b/providers/OpenAIProvider.cpp
index a8991d6..db63c3a 100644
--- a/providers/OpenAIProvider.cpp
+++ b/providers/OpenAIProvider.cpp
@@ -19,6 +19,8 @@
#include "OpenAIProvider.hpp"
+#include "llmcore/ValidationUtils.hpp"
+#include "logger/Logger.hpp"
#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/ProviderSettings.hpp"
@@ -29,12 +31,19 @@
#include
#include
-#include "llmcore/OpenAIMessage.hpp"
-#include "llmcore/ValidationUtils.hpp"
-#include "logger/Logger.hpp"
-
namespace QodeAssist::Providers {
+OpenAIProvider::OpenAIProvider(QObject *parent)
+ : LLMCore::Provider(parent)
+ , m_toolsManager(new Tools::ToolsManager(this))
+{
+ connect(
+ m_toolsManager,
+ &Tools::ToolsManager::toolExecutionComplete,
+ this,
+ &OpenAIProvider::onToolExecutionComplete);
+}
+
QString OpenAIProvider::name() const
{
return "OpenAI";
@@ -91,6 +100,15 @@ void OpenAIProvider::prepareRequest(
} else {
applyModelParams(Settings::chatAssistantSettings());
}
+
+ if (supportsTools() && type == LLMCore::RequestType::Chat
+ && Settings::chatAssistantSettings().useTools()) {
+ auto toolsDefinitions = m_toolsManager->getToolsDefinitions(Tools::ToolSchemaFormat::OpenAI);
+ if (!toolsDefinitions.isEmpty()) {
+ request["tools"] = toolsDefinitions;
+ LOG_MESSAGE(QString("Added %1 tools to OpenAI request").arg(toolsDefinitions.size()));
+ }
+ }
}
QList OpenAIProvider::getInstalledModels(const QString &url)
@@ -129,7 +147,7 @@ QList OpenAIProvider::getInstalledModels(const QString &url)
}
}
} else {
- LOG_MESSAGE(QString("Error fetching ChatGPT models: %1").arg(reply->errorString()));
+ LOG_MESSAGE(QString("Error fetching OpenAI models: %1").arg(reply->errorString()));
}
reply->deleteLater();
@@ -148,7 +166,8 @@ QList OpenAIProvider::validateRequest(const QJsonObject &request, LLMCo
{"frequency_penalty", {}},
{"presence_penalty", {}},
{"stop", QJsonArray{}},
- {"stream", {}}};
+ {"stream", {}},
+ {"tools", {}}};
return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
}
@@ -175,8 +194,12 @@ LLMCore::ProviderID OpenAIProvider::providerID() const
void OpenAIProvider::sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
- m_dataBuffers[requestId].clear();
+ if (!m_messages.contains(requestId)) {
+ m_dataBuffers[requestId].clear();
+ }
+
m_requestUrls[requestId] = url;
+ m_originalRequests[requestId] = payload;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
@@ -189,57 +212,34 @@ void OpenAIProvider::sendRequest(
emit httpClient()->sendRequest(request);
}
+bool OpenAIProvider::supportsTools() const
+{
+ return true;
+}
+
+void OpenAIProvider::cancelRequest(const LLMCore::RequestID &requestId)
+{
+ LOG_MESSAGE(QString("OpenAIProvider: Cancelling request %1").arg(requestId));
+ LLMCore::Provider::cancelRequest(requestId);
+ cleanupRequest(requestId);
+}
+
void OpenAIProvider::onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
{
LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
QStringList lines = buffers.rawStreamBuffer.processData(data);
- if (data.isEmpty()) {
- return;
- }
-
- bool isDone = false;
- QString tempResponse;
-
for (const QString &line : lines) {
- if (line.trimmed().isEmpty()) {
+ if (line.trimmed().isEmpty() || line == "data: [DONE]") {
continue;
}
- if (line == "data: [DONE]") {
- isDone = true;
- continue;
- }
-
- QJsonObject responseObj = parseEventLine(line);
- if (responseObj.isEmpty())
+ QJsonObject chunk = parseEventLine(line);
+ if (chunk.isEmpty())
continue;
- auto message = LLMCore::OpenAIMessage::fromJson(responseObj);
- if (message.hasError()) {
- LOG_MESSAGE("Error in OpenAI response: " + message.error);
- continue;
- }
-
- QString content = message.getContent();
- if (!content.isEmpty()) {
- tempResponse += content;
- }
-
- if (message.isDone()) {
- isDone = true;
- }
- }
-
- if (!tempResponse.isEmpty()) {
- buffers.responseContent += tempResponse;
- emit partialResponseReceived(requestId, tempResponse);
- }
-
- if (isDone) {
- emit fullResponseReceived(requestId, buffers.responseContent);
- m_dataBuffers.remove(requestId);
+ processStreamChunk(requestId, chunk);
}
}
@@ -249,17 +249,161 @@ void OpenAIProvider::onRequestFinished(
if (!success) {
LOG_MESSAGE(QString("OpenAIProvider request %1 failed: %2").arg(requestId, error));
emit requestFailed(requestId, error);
- } else {
- if (m_dataBuffers.contains(requestId)) {
- const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
- if (!buffers.responseContent.isEmpty()) {
- emit fullResponseReceived(requestId, buffers.responseContent);
+ cleanupRequest(requestId);
+ return;
+ }
+
+ if (m_messages.contains(requestId)) {
+ OpenAIMessage *message = m_messages[requestId];
+ if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
+ LOG_MESSAGE(QString("Waiting for tools to complete for %1").arg(requestId));
+ m_dataBuffers.remove(requestId);
+ return;
+ }
+ }
+
+ if (m_dataBuffers.contains(requestId)) {
+ const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
+ if (!buffers.responseContent.isEmpty()) {
+ LOG_MESSAGE(QString("Emitting full response for %1").arg(requestId));
+ emit fullResponseReceived(requestId, buffers.responseContent);
+ }
+ }
+
+ cleanupRequest(requestId);
+}
+
+void OpenAIProvider::onToolExecutionComplete(
+ const QString &requestId, const QHash<QString, QString> &toolResults)
+{
+ if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
+ LOG_MESSAGE(QString("ERROR: Missing data for continuation request %1").arg(requestId));
+ cleanupRequest(requestId);
+ return;
+ }
+
+ LOG_MESSAGE(QString("Tool execution complete for OpenAI request %1").arg(requestId));
+
+ OpenAIMessage *message = m_messages[requestId];
+ QJsonObject continuationRequest = m_originalRequests[requestId];
+ QJsonArray messages = continuationRequest["messages"].toArray();
+
+ messages.append(message->toProviderFormat());
+
+ QJsonArray toolResultMessages = message->createToolResultMessages(toolResults);
+ for (const auto &toolMsg : toolResultMessages) {
+ messages.append(toolMsg);
+ }
+
+ continuationRequest["messages"] = messages;
+
+ LOG_MESSAGE(QString("Sending continuation request for %1 with %2 tool results")
+ .arg(requestId)
+ .arg(toolResults.size()));
+
+ sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
+}
+
+void OpenAIProvider::processStreamChunk(const QString &requestId, const QJsonObject &chunk)
+{
+ QJsonArray choices = chunk["choices"].toArray();
+ if (choices.isEmpty()) {
+ return;
+ }
+
+ QJsonObject choice = choices[0].toObject();
+ QJsonObject delta = choice["delta"].toObject();
+ QString finishReason = choice["finish_reason"].toString();
+
+ OpenAIMessage *message = m_messages.value(requestId);
+ if (!message) {
+ message = new OpenAIMessage(this);
+ m_messages[requestId] = message;
+ LOG_MESSAGE(QString("Created NEW OpenAIAPIMessage for request %1").arg(requestId));
+ }
+
+ if (delta.contains("content") && !delta["content"].isNull()) {
+ QString content = delta["content"].toString();
+ message->handleContentDelta(content);
+
+ LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
+ buffers.responseContent += content;
+ emit partialResponseReceived(requestId, content);
+ }
+
+ if (delta.contains("tool_calls")) {
+ QJsonArray toolCalls = delta["tool_calls"].toArray();
+ for (const auto &toolCallValue : toolCalls) {
+ QJsonObject toolCall = toolCallValue.toObject();
+ int index = toolCall["index"].toInt();
+
+ if (toolCall.contains("id")) {
+ QString id = toolCall["id"].toString();
+ QJsonObject function = toolCall["function"].toObject();
+ QString name = function["name"].toString();
+ message->handleToolCallStart(index, id, name);
+ }
+
+ if (toolCall.contains("function")) {
+ QJsonObject function = toolCall["function"].toObject();
+ if (function.contains("arguments")) {
+ QString args = function["arguments"].toString();
+ message->handleToolCallDelta(index, args);
+ }
}
}
}
+ if (!finishReason.isEmpty() && finishReason != "null") {
+ for (int i = 0; i < 10; ++i) {
+ message->handleToolCallComplete(i);
+ }
+
+ message->handleFinishReason(finishReason);
+ handleMessageComplete(requestId);
+ }
+}
+
+void OpenAIProvider::handleMessageComplete(const QString &requestId)
+{
+ if (!m_messages.contains(requestId))
+ return;
+
+ OpenAIMessage *message = m_messages[requestId];
+
+ if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
+ LOG_MESSAGE(QString("OpenAI message requires tool execution for %1").arg(requestId));
+
+ auto toolUseContent = message->getCurrentToolUseContent();
+
+ if (toolUseContent.isEmpty()) {
+ LOG_MESSAGE(QString("No tools to execute for %1").arg(requestId));
+ return;
+ }
+
+ for (auto toolContent : toolUseContent) {
+ m_toolsManager->executeToolCall(
+ requestId, toolContent->id(), toolContent->name(), toolContent->input());
+ }
+
+ } else {
+ LOG_MESSAGE(QString("OpenAI message marked as complete for %1").arg(requestId));
+ }
+}
+
+void OpenAIProvider::cleanupRequest(const LLMCore::RequestID &requestId)
+{
+ LOG_MESSAGE(QString("Cleaning up OpenAI request %1").arg(requestId));
+
+ if (m_messages.contains(requestId)) {
+ OpenAIMessage *message = m_messages.take(requestId);
+ message->deleteLater();
+ }
+
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
+ m_originalRequests.remove(requestId);
+ m_toolsManager->cleanupRequest(requestId);
}
} // namespace QodeAssist::Providers
diff --git a/providers/OpenAIProvider.hpp b/providers/OpenAIProvider.hpp
index 263b819..d90e589 100644
--- a/providers/OpenAIProvider.hpp
+++ b/providers/OpenAIProvider.hpp
@@ -19,13 +19,18 @@
#pragma once
-#include "llmcore/Provider.hpp"
+#include "OpenAIMessage.hpp"
+#include "tools/ToolsManager.hpp"
+#include
namespace QodeAssist::Providers {
class OpenAIProvider : public LLMCore::Provider
{
+ Q_OBJECT
public:
+ explicit OpenAIProvider(QObject *parent = nullptr);
+
QString name() const override;
QString url() const override;
QString completionEndpoint() const override;
@@ -45,6 +50,9 @@ public:
void sendRequest(
const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
+ bool supportsTools() const override;
+ void cancelRequest(const LLMCore::RequestID &requestId) override;
+
public slots:
void onDataReceived(
const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
@@ -52,6 +60,20 @@ public slots:
const QodeAssist::LLMCore::RequestID &requestId,
bool success,
const QString &error) override;
+
+private slots:
+ void onToolExecutionComplete(
+ const QString &requestId, const QHash<QString, QString> &toolResults);
+
+private:
+ void processStreamChunk(const QString &requestId, const QJsonObject &chunk);
+ void handleMessageComplete(const QString &requestId);
+ void cleanupRequest(const LLMCore::RequestID &requestId);
+
+ QHash<QString, OpenAIMessage *> m_messages;
+ QHash<LLMCore::RequestID, QUrl> m_requestUrls;
+ QHash<LLMCore::RequestID, QJsonObject> m_originalRequests;
+ Tools::ToolsManager *m_toolsManager;
};
} // namespace QodeAssist::Providers