refactor: Adapt provider to clients API

Petr Mironychev
2026-03-30 08:08:49 +02:00
parent e55e96714b
commit 545b8ed000
21 changed files with 697 additions and 2958 deletions
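In short, the provider no longer drives QNetworkRequest/SSE parsing itself; it hands the payload and a set of callbacks to the shared ::LLMCore::OpenAIResponsesClient. Below is a minimal sketch of that wiring, condensed from the diff that follows: the RequestCallbacks fields and the sendMessage() call are taken from the changed code, while the header paths and the standalone helper function are illustrative assumptions.

// Condensed illustration of the new request path, not a drop-in file.
// Header locations are assumed; callback names come from the diff below.
#include <LLMCore/OpenAIResponsesClient.hpp>
#include <QDebug>
#include <QJsonObject>

void sendViaClient(::LLMCore::OpenAIResponsesClient *client,
                   const QString &providerRequestId,
                   const QJsonObject &payload)
{
    ::LLMCore::RequestCallbacks callbacks;

    // Streaming text arrives already extracted from the SSE events.
    callbacks.onChunk = [providerRequestId](const ::LLMCore::RequestID &, const QString &chunk) {
        qDebug() << providerRequestId << "chunk:" << chunk;
    };

    // The client aggregates the full answer, including tool-call continuations.
    callbacks.onCompleted = [providerRequestId](const ::LLMCore::RequestID &, const QString &fullText) {
        qDebug() << providerRequestId << "done, length:" << fullText.size();
    };

    callbacks.onFailed = [providerRequestId](const ::LLMCore::RequestID &, const QString &error) {
        qWarning() << providerRequestId << "failed:" << error;
    };

    // sendMessage() returns the client's own request id; the provider keeps a
    // providerId <-> clientId mapping so cancellation can be forwarded later.
    const auto clientId = client->sendMessage(payload, callbacks);
    Q_UNUSED(clientId)
}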


@@ -1,4 +1,4 @@
/*
* Copyright (C) 2024-2025 Petr Mironychev
*
* This file is part of QodeAssist.
@@ -19,7 +19,6 @@
#include "OpenAIResponsesProvider.hpp"
#include <LLMCore/ToolsManager.hpp>
#include "OpenAIResponses/ResponseObject.hpp"
#include "tools/ToolsRegistration.hpp"
#include "pluginllmcore/ValidationUtils.hpp"
@@ -41,12 +40,6 @@ OpenAIResponsesProvider::OpenAIResponsesProvider(QObject *parent)
, m_client(new ::LLMCore::OpenAIResponsesClient(url(), apiKey(), QString(), this))
{
Tools::registerQodeAssistTools(m_client->tools());
connect(
m_client->tools(),
&::LLMCore::ToolsManager::toolExecutionComplete,
this,
&OpenAIResponsesProvider::onToolExecutionComplete);
}
QString OpenAIResponsesProvider::name() const
@@ -101,7 +94,7 @@ void OpenAIResponsesProvider::prepareRequest(
if (effortStr.isEmpty()) {
effortStr = "medium";
}
QJsonObject reasoning;
reasoning["effort"] = effortStr;
request["reasoning"] = reasoning;
@@ -132,10 +125,6 @@ void OpenAIResponsesProvider::prepareRequest(
}
if (isToolsEnabled) {
const PluginLLMCore::RunToolsFilter filter = (type == PluginLLMCore::RequestType::QuickRefactoring)
? PluginLLMCore::RunToolsFilter::OnlyRead
: PluginLLMCore::RunToolsFilter::ALL;
const auto toolsDefinitions
= m_client->tools()->getToolsDefinitions();
if (!toolsDefinitions.isEmpty()) {
@@ -160,43 +149,22 @@ void OpenAIResponsesProvider::prepareRequest(
request["stream"] = true;
}
QFuture<QList<QString>> OpenAIResponsesProvider::getInstalledModels(const QString &url)
QFuture<QList<QString>> OpenAIResponsesProvider::getInstalledModels(const QString &baseUrl)
{
QNetworkRequest request(QString("%1/v1/models").arg(url));
request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
if (!apiKey().isEmpty()) {
request.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
}
return httpClient()->get(request).then([](const QByteArray &data) {
QList<QString> models;
const QJsonObject jsonObject = QJsonDocument::fromJson(data).object();
if (jsonObject.contains("data")) {
const QJsonArray modelArray = jsonObject["data"].toArray();
models.reserve(modelArray.size());
static const QStringList modelPrefixes = {"gpt-5", "o1", "o2", "o3", "o4"};
for (const QJsonValue &value : modelArray) {
const QJsonObject modelObject = value.toObject();
if (!modelObject.contains("id")) {
continue;
}
const QString modelId = modelObject["id"].toString();
for (const QString &prefix : modelPrefixes) {
if (modelId.contains(prefix)) {
models.append(modelId);
break;
}
m_client->setUrl(baseUrl);
m_client->setApiKey(apiKey());
return m_client->listModels().then([](const QList<QString> &models) {
QList<QString> filtered;
static const QStringList modelPrefixes = {"gpt-5", "o1", "o2", "o3", "o4"};
for (const QString &modelId : models) {
for (const QString &prefix : modelPrefixes) {
if (modelId.contains(prefix)) {
filtered.append(modelId);
break;
}
}
}
return models;
}).onFailed([](const std::exception &e) {
LOG_MESSAGE(QString("Error fetching OpenAI models: %1").arg(e.what()));
return QList<QString>{};
return filtered;
});
}
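A hypothetical caller then consumes the filtered list through the same QFuture continuation style; the base URL below is only an example, while getInstalledModels() and its QFuture<QList<QString>> return type are taken from the function above.

// Hypothetical usage of the refactored model-listing path.
provider->getInstalledModels(QStringLiteral("https://api.openai.com"))
    .then([](const QList<QString> &models) {
        for (const QString &id : models)
            qDebug() << "available model:" << id;
    });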
@@ -262,17 +230,66 @@ PluginLLMCore::ProviderID OpenAIResponsesProvider::providerID() const
void OpenAIResponsesProvider::sendRequest(
const PluginLLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
if (!m_messages.contains(requestId)) {
m_dataBuffers[requestId].clear();
}
QUrl baseUrl(url);
baseUrl.setPath("");
m_client->setUrl(baseUrl.toString());
m_client->setApiKey(apiKey());
m_requestUrls[requestId] = url;
m_originalRequests[requestId] = payload;
::LLMCore::RequestCallbacks callbacks;
QNetworkRequest networkRequest(url);
prepareNetworkRequest(networkRequest);
callbacks.onChunk = [this, requestId](const ::LLMCore::RequestID &, const QString &chunk) {
if (m_awaitingContinuation.remove(requestId))
emit continuationStarted(requestId);
emit partialResponseReceived(requestId, chunk);
};
httpClient()->postStreaming(requestId, networkRequest, payload);
callbacks.onCompleted
= [this, requestId](const ::LLMCore::RequestID &clientId, const QString &fullText) {
emit fullResponseReceived(requestId, fullText);
m_providerToClientIds.remove(requestId);
m_clientToProviderIds.remove(clientId);
m_awaitingContinuation.remove(requestId);
};
callbacks.onFailed
= [this, requestId](const ::LLMCore::RequestID &clientId, const QString &error) {
emit requestFailed(requestId, error);
m_providerToClientIds.remove(requestId);
m_clientToProviderIds.remove(clientId);
m_awaitingContinuation.remove(requestId);
};
callbacks.onThinkingBlock = [this, requestId](const ::LLMCore::RequestID &,
const QString &thinking,
const QString &signature) {
if (m_awaitingContinuation.remove(requestId))
emit continuationStarted(requestId);
if (thinking.isEmpty())
emit redactedThinkingBlockReceived(requestId, signature);
else
emit thinkingBlockReceived(requestId, thinking, signature);
};
callbacks.onToolStarted = [this, requestId](const ::LLMCore::RequestID &,
const QString &toolId,
const QString &toolName) {
emit toolExecutionStarted(requestId, toolId, toolName);
m_awaitingContinuation.insert(requestId);
};
callbacks.onToolResult = [this, requestId](const ::LLMCore::RequestID &,
const QString &toolId,
const QString &toolName,
const QString &result) {
emit toolExecutionCompleted(requestId, toolId, toolName, result);
};
auto clientId = m_client->sendMessage(payload, callbacks);
m_providerToClientIds[requestId] = clientId;
m_clientToProviderIds[clientId] = requestId;
LOG_MESSAGE(QString("OpenAIResponsesProvider: Sending request %1 (client: %2) to %3")
.arg(requestId, clientId, url.toString()));
}
bool OpenAIResponsesProvider::supportsTools() const
@@ -292,364 +309,14 @@ bool OpenAIResponsesProvider::supportThinking() const
void OpenAIResponsesProvider::cancelRequest(const PluginLLMCore::RequestID &requestId)
{
PluginLLMCore::Provider::cancelRequest(requestId);
cleanupRequest(requestId);
}
LOG_MESSAGE(QString("OpenAIResponsesProvider: Cancelling request %1").arg(requestId));
void OpenAIResponsesProvider::onDataReceived(
const QodeAssist::PluginLLMCore::RequestID &requestId, const QByteArray &data)
{
PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
const QStringList lines = buffers.rawStreamBuffer.processData(data);
QString currentEventType;
for (const QString &line : lines) {
const QString trimmedLine = line.trimmed();
if (trimmedLine.isEmpty()) {
continue;
}
if (line == "data: [DONE]") {
continue;
}
if (line.startsWith("event: ")) {
currentEventType = line.mid(7).trimmed();
continue;
}
QString dataLine = line;
if (line.startsWith("data: ")) {
dataLine = line.mid(6);
}
const QJsonDocument doc = QJsonDocument::fromJson(dataLine.toUtf8());
if (doc.isObject()) {
const QJsonObject obj = doc.object();
processStreamEvent(requestId, currentEventType, obj);
}
if (m_providerToClientIds.contains(requestId)) {
auto clientId = m_providerToClientIds.take(requestId);
m_clientToProviderIds.remove(clientId);
m_client->cancelRequest(clientId);
}
}
void OpenAIResponsesProvider::onRequestFinished(
const QodeAssist::PluginLLMCore::RequestID &requestId, std::optional<QString> error)
{
if (error) {
LOG_MESSAGE(QString("OpenAIResponses request %1 failed: %2").arg(requestId, *error));
emit requestFailed(requestId, *error);
cleanupRequest(requestId);
return;
}
if (m_messages.contains(requestId)) {
OpenAIResponsesMessage *message = m_messages[requestId];
if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) {
return;
}
}
if (m_dataBuffers.contains(requestId)) {
const PluginLLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
if (!buffers.responseContent.isEmpty()) {
emit fullResponseReceived(requestId, buffers.responseContent);
} else {
LOG_MESSAGE(QString("WARNING: OpenAIResponses - Response content is empty for %1, "
"emitting empty response")
.arg(requestId));
emit fullResponseReceived(requestId, "");
}
} else {
LOG_MESSAGE(
QString("WARNING: OpenAIResponses - No data buffer found for %1").arg(requestId));
}
cleanupRequest(requestId);
}
void OpenAIResponsesProvider::processStreamEvent(
const QString &requestId, const QString &eventType, const QJsonObject &data)
{
OpenAIResponsesMessage *message = m_messages.value(requestId);
if (!message) {
message = new OpenAIResponsesMessage(this);
m_messages[requestId] = message;
if (m_dataBuffers.contains(requestId)) {
emit continuationStarted(requestId);
}
} else if (
m_dataBuffers.contains(requestId)
&& message->state() == PluginLLMCore::MessageState::RequiresToolExecution) {
message->startNewContinuation();
emit continuationStarted(requestId);
}
if (eventType == "response.content_part.added") {
} else if (eventType == "response.output_text.delta") {
const QString delta = data["delta"].toString();
if (!delta.isEmpty()) {
m_dataBuffers[requestId].responseContent += delta;
emit partialResponseReceived(requestId, delta);
}
} else if (eventType == "response.output_text.done") {
const QString fullText = data["text"].toString();
if (!fullText.isEmpty()) {
m_dataBuffers[requestId].responseContent = fullText;
}
} else if (eventType == "response.content_part.done") {
} else if (eventType == "response.output_item.added") {
using namespace QodeAssist::OpenAIResponses;
const QJsonObject item = data["item"].toObject();
OutputItem outputItem = OutputItem::fromJson(item);
if (const auto *functionCall = outputItem.asFunctionCall()) {
if (!functionCall->callId.isEmpty() && !functionCall->name.isEmpty()) {
if (!m_itemIdToCallId.contains(requestId)) {
m_itemIdToCallId[requestId] = QHash<QString, QString>();
}
m_itemIdToCallId[requestId][functionCall->id] = functionCall->callId;
message->handleToolCallStart(functionCall->callId, functionCall->name);
}
} else if (const auto *reasoning = outputItem.asReasoning()) {
if (!reasoning->id.isEmpty()) {
message->handleReasoningStart(reasoning->id);
}
}
} else if (eventType == "response.reasoning_content.delta") {
const QString itemId = data["item_id"].toString();
const QString delta = data["delta"].toString();
if (!itemId.isEmpty() && !delta.isEmpty()) {
message->handleReasoningDelta(itemId, delta);
}
} else if (eventType == "response.reasoning_content.done") {
const QString itemId = data["item_id"].toString();
if (!itemId.isEmpty()) {
message->handleReasoningComplete(itemId);
emitPendingThinkingBlocks(requestId);
}
} else if (eventType == "response.function_call_arguments.delta") {
const QString itemId = data["item_id"].toString();
const QString delta = data["delta"].toString();
if (!itemId.isEmpty() && !delta.isEmpty()) {
const QString callId = m_itemIdToCallId.value(requestId).value(itemId);
if (!callId.isEmpty()) {
message->handleToolCallDelta(callId, delta);
} else {
LOG_MESSAGE(QString("ERROR: No call_id mapping found for item_id: %1").arg(itemId));
}
}
} else if (
eventType == "response.function_call_arguments.done"
|| eventType == "response.output_item.done") {
const QString itemId = data["item_id"].toString();
const QJsonObject item = data["item"].toObject();
if (!item.isEmpty() && item["type"].toString() == "reasoning") {
using namespace QodeAssist::OpenAIResponses;
const QString finalItemId = itemId.isEmpty() ? item["id"].toString() : itemId;
ReasoningOutput reasoningOutput = ReasoningOutput::fromJson(item);
QString reasoningText;
if (!reasoningOutput.summaryText.isEmpty()) {
reasoningText = reasoningOutput.summaryText;
} else if (!reasoningOutput.contentTexts.isEmpty()) {
reasoningText = reasoningOutput.contentTexts.join("\n");
}
if (reasoningText.isEmpty()) {
reasoningText = QString(
"[Reasoning process completed, but detailed thinking is not available in "
"streaming mode. The model has processed your request with extended reasoning.]");
}
if (!finalItemId.isEmpty()) {
message->handleReasoningDelta(finalItemId, reasoningText);
message->handleReasoningComplete(finalItemId);
emitPendingThinkingBlocks(requestId);
}
} else if (item.isEmpty() && !itemId.isEmpty()) {
const QString callId = m_itemIdToCallId.value(requestId).value(itemId);
if (!callId.isEmpty()) {
message->handleToolCallComplete(callId);
} else {
LOG_MESSAGE(
QString("ERROR: OpenAIResponses - No call_id mapping found for item_id: %1")
.arg(itemId));
}
} else if (!item.isEmpty() && item["type"].toString() == "function_call") {
const QString callId = item["call_id"].toString();
if (!callId.isEmpty()) {
message->handleToolCallComplete(callId);
} else {
LOG_MESSAGE(
QString("ERROR: OpenAIResponses - Function call done but call_id is empty"));
}
}
} else if (eventType == "response.created") {
} else if (eventType == "response.in_progress") {
} else if (eventType == "response.completed") {
using namespace QodeAssist::OpenAIResponses;
const QJsonObject responseObj = data["response"].toObject();
Response response = Response::fromJson(responseObj);
const QString statusStr = responseObj["status"].toString();
if (m_dataBuffers[requestId].responseContent.isEmpty()) {
const QString aggregatedText = response.getAggregatedText();
if (!aggregatedText.isEmpty()) {
m_dataBuffers[requestId].responseContent = aggregatedText;
}
}
message->handleStatus(statusStr);
handleMessageComplete(requestId);
} else if (eventType == "response.incomplete") {
using namespace QodeAssist::OpenAIResponses;
const QJsonObject responseObj = data["response"].toObject();
if (!responseObj.isEmpty()) {
Response response = Response::fromJson(responseObj);
const QString statusStr = responseObj["status"].toString();
if (m_dataBuffers[requestId].responseContent.isEmpty()) {
const QString aggregatedText = response.getAggregatedText();
if (!aggregatedText.isEmpty()) {
m_dataBuffers[requestId].responseContent = aggregatedText;
}
}
message->handleStatus(statusStr);
} else {
message->handleStatus("incomplete");
}
handleMessageComplete(requestId);
} else if (!eventType.isEmpty()) {
LOG_MESSAGE(QString("WARNING: OpenAIResponses - Unhandled event type '%1' for request %2\nData: %3")
.arg(eventType)
.arg(requestId)
.arg(QString::fromUtf8(QJsonDocument(data).toJson(QJsonDocument::Compact))));
}
}
void OpenAIResponsesProvider::emitPendingThinkingBlocks(const QString &requestId)
{
if (!m_messages.contains(requestId)) {
return;
}
OpenAIResponsesMessage *message = m_messages[requestId];
const auto thinkingBlocks = message->getCurrentThinkingContent();
if (thinkingBlocks.isEmpty()) {
return;
}
const int alreadyEmitted = m_emittedThinkingBlocksCount.value(requestId, 0);
const int totalBlocks = thinkingBlocks.size();
for (int i = alreadyEmitted; i < totalBlocks; ++i) {
const auto *thinkingContent = thinkingBlocks[i];
if (thinkingContent->thinking().trimmed().isEmpty()) {
continue;
}
emit thinkingBlockReceived(
requestId, thinkingContent->thinking(), thinkingContent->signature());
}
m_emittedThinkingBlocksCount[requestId] = totalBlocks;
}
void OpenAIResponsesProvider::handleMessageComplete(const QString &requestId)
{
if (!m_messages.contains(requestId)) {
return;
}
OpenAIResponsesMessage *message = m_messages[requestId];
emitPendingThinkingBlocks(requestId);
if (message->state() == PluginLLMCore::MessageState::RequiresToolExecution) {
const auto toolUseContent = message->getCurrentToolUseContent();
if (toolUseContent.isEmpty()) {
return;
}
for (const auto *toolContent : toolUseContent) {
const auto toolStringName = m_client->tools()->displayName(
toolContent->name());
emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
m_client->tools()->executeToolCall(
requestId, toolContent->id(), toolContent->name(), toolContent->input());
}
}
}
void OpenAIResponsesProvider::onToolExecutionComplete(
const QString &requestId, const QHash<QString, QString> &toolResults)
{
if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
LOG_MESSAGE(QString("ERROR: OpenAIResponses - Missing data for continuation request %1")
.arg(requestId));
cleanupRequest(requestId);
return;
}
OpenAIResponsesMessage *message = m_messages[requestId];
const auto toolContent = message->getCurrentToolUseContent();
for (auto it = toolResults.constBegin(); it != toolResults.constEnd(); ++it) {
for (const auto *tool : toolContent) {
if (tool->id() == it.key()) {
const auto toolStringName = m_client->tools()->displayName(
tool->name());
emit toolExecutionCompleted(
requestId, tool->id(), toolStringName, toolResults[tool->id()]);
break;
}
}
}
QJsonObject continuationRequest = m_originalRequests[requestId];
QJsonArray input = continuationRequest["input"].toArray();
const QList<QJsonObject> assistantItems = message->toItemsFormat();
for (const QJsonObject &item : assistantItems) {
input.append(item);
}
const QJsonArray toolResultItems = message->createToolResultItems(toolResults);
for (const QJsonValue &item : toolResultItems) {
input.append(item);
}
continuationRequest["input"] = input;
m_dataBuffers[requestId].responseContent.clear();
sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
}
void OpenAIResponsesProvider::cleanupRequest(const PluginLLMCore::RequestID &requestId)
{
if (m_messages.contains(requestId)) {
OpenAIResponsesMessage *message = m_messages.take(requestId);
message->deleteLater();
}
m_dataBuffers.remove(requestId);
m_requestUrls.remove(requestId);
m_originalRequests.remove(requestId);
m_itemIdToCallId.remove(requestId);
m_emittedThinkingBlocksCount.remove(requestId);
m_client->tools()->cleanupRequest(requestId);
m_awaitingContinuation.remove(requestId);
}
::LLMCore::ToolsManager *OpenAIResponsesProvider::toolsManager() const