From a4663328222c172286b4b37c241eb8e188ee4268 Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Mon, 1 Dec 2025 12:14:55 +0100
Subject: [PATCH] feat: Add OpenAI Responses API (#282)
* feat: Add OpenAI Responses API
* fix: Make temperature optional
* chore: Increase default value of max tokens
---
CMakeLists.txt | 12 +
LLMClientInterface.cpp | 3 +-
llmcore/ProviderID.hpp | 1 +
.../OpenAIResponses/CancelResponseRequest.hpp | 54 ++
.../OpenAIResponses/DeleteResponseRequest.hpp | 69 ++
.../OpenAIResponses/GetResponseRequest.hpp | 120 ++++
.../OpenAIResponses/InputTokensRequest.hpp | 219 ++++++
.../OpenAIResponses/ItemTypesReference.hpp | 143 ++++
.../OpenAIResponses/ListInputItemsRequest.hpp | 166 +++++
providers/OpenAIResponses/ModelRequest.hpp | 354 ++++++++++
providers/OpenAIResponses/ResponseObject.hpp | 562 +++++++++++++++
providers/OpenAIResponsesMessage.cpp | 246 +++++++
providers/OpenAIResponsesMessage.hpp | 67 ++
providers/OpenAIResponsesProvider.cpp | 666 ++++++++++++++++++
providers/OpenAIResponsesProvider.hpp | 87 +++
providers/OpenAIResponsesRequestBuilder.hpp | 255 +++++++
providers/Providers.hpp | 2 +
settings/ChatAssistantSettings.cpp | 29 +-
settings/ChatAssistantSettings.hpp | 3 +
settings/CodeCompletionSettings.cpp | 27 +-
settings/CodeCompletionSettings.hpp | 3 +
settings/QuickRefactorSettings.cpp | 25 +
settings/QuickRefactorSettings.hpp | 3 +
settings/SettingsConstants.hpp | 12 +
templates/OpenAIResponses.hpp | 135 ++++
templates/Templates.hpp | 2 +
26 files changed, 3261 insertions(+), 4 deletions(-)
create mode 100644 providers/OpenAIResponses/CancelResponseRequest.hpp
create mode 100644 providers/OpenAIResponses/DeleteResponseRequest.hpp
create mode 100644 providers/OpenAIResponses/GetResponseRequest.hpp
create mode 100644 providers/OpenAIResponses/InputTokensRequest.hpp
create mode 100644 providers/OpenAIResponses/ItemTypesReference.hpp
create mode 100644 providers/OpenAIResponses/ListInputItemsRequest.hpp
create mode 100644 providers/OpenAIResponses/ModelRequest.hpp
create mode 100644 providers/OpenAIResponses/ResponseObject.hpp
create mode 100644 providers/OpenAIResponsesMessage.cpp
create mode 100644 providers/OpenAIResponsesMessage.hpp
create mode 100644 providers/OpenAIResponsesProvider.cpp
create mode 100644 providers/OpenAIResponsesProvider.hpp
create mode 100644 providers/OpenAIResponsesRequestBuilder.hpp
create mode 100644 templates/OpenAIResponses.hpp
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 3cdce74..70f7100 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -89,6 +89,7 @@ add_qtc_plugin(QodeAssist
templates/GoogleAI.hpp
templates/LlamaCppFim.hpp
templates/Qwen3CoderFIM.hpp
+ templates/OpenAIResponses.hpp
providers/Providers.hpp
providers/OllamaProvider.hpp providers/OllamaProvider.cpp
providers/ClaudeProvider.hpp providers/ClaudeProvider.cpp
@@ -100,6 +101,17 @@ add_qtc_plugin(QodeAssist
providers/GoogleAIProvider.hpp providers/GoogleAIProvider.cpp
providers/LlamaCppProvider.hpp providers/LlamaCppProvider.cpp
providers/CodestralProvider.hpp providers/CodestralProvider.cpp
+ providers/OpenAIResponses/ModelRequest.hpp
+ providers/OpenAIResponses/ResponseObject.hpp
+ providers/OpenAIResponses/GetResponseRequest.hpp
+ providers/OpenAIResponses/DeleteResponseRequest.hpp
+ providers/OpenAIResponses/CancelResponseRequest.hpp
+ providers/OpenAIResponses/ListInputItemsRequest.hpp
+ providers/OpenAIResponses/InputTokensRequest.hpp
+ providers/OpenAIResponses/ItemTypesReference.hpp
+ providers/OpenAIResponsesRequestBuilder.hpp
+ providers/OpenAIResponsesProvider.hpp providers/OpenAIResponsesProvider.cpp
+ providers/OpenAIResponsesMessage.hpp providers/OpenAIResponsesMessage.cpp
QodeAssist.qrc
LSPCompletion.hpp
LLMSuggestion.hpp LLMSuggestion.cpp
diff --git a/LLMClientInterface.cpp b/LLMClientInterface.cpp
index 6eb23fa..869fb0a 100644
--- a/LLMClientInterface.cpp
+++ b/LLMClientInterface.cpp
@@ -125,8 +125,7 @@ void LLMClientInterface::sendData(const QByteArray &data)
QString requestId = request["id"].toString();
m_performanceLogger.startTimeMeasurement(requestId);
handleCompletion(request);
- } else if (method == "cancelRequest") {
- qDebug() << "Cancelling request";
+ } else if (method == "$/cancelRequest") {
handleCancelRequest();
} else if (method == "exit") {
// TODO make exit handler
diff --git a/llmcore/ProviderID.hpp b/llmcore/ProviderID.hpp
index 1a74367..19e7b5b 100644
--- a/llmcore/ProviderID.hpp
+++ b/llmcore/ProviderID.hpp
@@ -26,6 +26,7 @@ enum class ProviderID {
Claude,
OpenAI,
OpenAICompatible,
+ OpenAIResponses,
MistralAI,
OpenRouter,
GoogleAI,
diff --git a/providers/OpenAIResponses/CancelResponseRequest.hpp b/providers/OpenAIResponses/CancelResponseRequest.hpp
new file mode 100644
index 0000000..e4f9324
--- /dev/null
+++ b/providers/OpenAIResponses/CancelResponseRequest.hpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QString>
+
+namespace QodeAssist::OpenAIResponses {
+
+struct CancelResponseRequest // POST {base}/v1/responses/{id}/cancel
+{
+    QString responseId; // ID of the in-flight response to cancel
+
+    QString buildUrl(const QString &baseUrl) const
+    {
+        return QString("%1/v1/responses/%2/cancel").arg(baseUrl, responseId);
+    }
+
+    bool isValid() const { return !responseId.isEmpty(); }
+};
+
+class CancelResponseRequestBuilder // fluent builder for CancelResponseRequest
+{
+public:
+    CancelResponseRequestBuilder &setResponseId(const QString &id)
+    {
+        m_request.responseId = id;
+        return *this;
+    }
+
+    CancelResponseRequest build() const { return m_request; }
+
+private:
+    CancelResponseRequest m_request;
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponses/DeleteResponseRequest.hpp b/providers/OpenAIResponses/DeleteResponseRequest.hpp
new file mode 100644
index 0000000..36294a1
--- /dev/null
+++ b/providers/OpenAIResponses/DeleteResponseRequest.hpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QJsonObject>
+#include <QString>
+
+namespace QodeAssist::OpenAIResponses {
+
+struct DeleteResponseRequest // DELETE {base}/v1/responses/{id}
+{
+    QString responseId; // ID of the stored response to delete
+
+    QString buildUrl(const QString &baseUrl) const
+    {
+        return QString("%1/v1/responses/%2").arg(baseUrl, responseId);
+    }
+
+    bool isValid() const { return !responseId.isEmpty(); }
+};
+
+class DeleteResponseRequestBuilder // fluent builder for DeleteResponseRequest
+{
+public:
+    DeleteResponseRequestBuilder &setResponseId(const QString &id)
+    {
+        m_request.responseId = id;
+        return *this;
+    }
+
+    DeleteResponseRequest build() const { return m_request; }
+
+private:
+    DeleteResponseRequest m_request;
+};
+
+struct DeleteResponseResult // parsed body of the DELETE reply
+{
+    bool success = false;
+    QString message;
+
+    static DeleteResponseResult fromJson(const QJsonObject &obj)
+    {
+        DeleteResponseResult result;
+        result.success = obj["success"].toBool();
+        result.message = obj["message"].toString();
+        return result;
+    }
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponses/GetResponseRequest.hpp b/providers/OpenAIResponses/GetResponseRequest.hpp
new file mode 100644
index 0000000..2e30497
--- /dev/null
+++ b/providers/OpenAIResponses/GetResponseRequest.hpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QJsonObject>
+#include <QString>
+#include <QStringList>
+#include <optional>
+
+namespace QodeAssist::OpenAIResponses {
+
+struct GetResponseRequest // GET {base}/v1/responses/{id}
+{
+    QString responseId;
+    std::optional<QStringList> include;     // extra output fields to include
+    std::optional<bool> includeObfuscation;
+    std::optional<int> startingAfter;       // resume streaming after this sequence number
+    std::optional<bool> stream;
+
+    QString buildUrl(const QString &baseUrl) const
+    {
+        QString url = QString("%1/v1/responses/%2").arg(baseUrl, responseId);
+        QStringList queryParams;
+
+        if (include && !include->isEmpty()) {
+            for (const auto &item : *include) {
+                queryParams.append(QString("include=%1").arg(item));
+            }
+        }
+
+        if (includeObfuscation) {
+            queryParams.append(
+                QString("include_obfuscation=%1").arg(*includeObfuscation ? "true" : "false"));
+        }
+
+        if (startingAfter) {
+            queryParams.append(QString("starting_after=%1").arg(*startingAfter));
+        }
+
+        if (stream) {
+            queryParams.append(QString("stream=%1").arg(*stream ? "true" : "false"));
+        }
+
+        if (!queryParams.isEmpty()) {
+            url += "?" + queryParams.join("&");
+        }
+
+        return url;
+    }
+
+    bool isValid() const { return !responseId.isEmpty(); }
+};
+
+class GetResponseRequestBuilder // fluent builder; every setter returns *this for chaining
+{
+public:
+    GetResponseRequestBuilder &setResponseId(const QString &id)
+    {
+        m_request.responseId = id;
+        return *this;
+    }
+
+    GetResponseRequestBuilder &setInclude(const QStringList &include) // replaces any previous list
+    {
+        m_request.include = include;
+        return *this;
+    }
+
+    GetResponseRequestBuilder &addInclude(const QString &item) // appends one include entry
+    {
+        if (!m_request.include) {
+            m_request.include = QStringList();
+        }
+        m_request.include->append(item);
+        return *this;
+    }
+
+    GetResponseRequestBuilder &setIncludeObfuscation(bool enabled)
+    {
+        m_request.includeObfuscation = enabled;
+        return *this;
+    }
+
+    GetResponseRequestBuilder &setStartingAfter(int sequence) // stream resume point
+    {
+        m_request.startingAfter = sequence;
+        return *this;
+    }
+
+    GetResponseRequestBuilder &setStream(bool enabled)
+    {
+        m_request.stream = enabled;
+        return *this;
+    }
+
+    GetResponseRequest build() const { return m_request; }
+
+private:
+    GetResponseRequest m_request;
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponses/InputTokensRequest.hpp b/providers/OpenAIResponses/InputTokensRequest.hpp
new file mode 100644
index 0000000..b881d48
--- /dev/null
+++ b/providers/OpenAIResponses/InputTokensRequest.hpp
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "ModelRequest.hpp"
+
+#include <QJsonArray>
+#include <QJsonObject>
+
+namespace QodeAssist::OpenAIResponses {
+
+struct InputTokensRequest // POST {base}/v1/responses/input_tokens: count input tokens without creating a response
+{
+    std::optional<QString> conversation;      // conversation ID supplying prior context
+    std::optional<QJsonArray> input;          // input items (messages, tool results, ...)
+    std::optional<QString> instructions;
+    std::optional<QString> model;
+    std::optional<bool> parallelToolCalls;
+    std::optional<QString> previousResponseId;
+    std::optional<QJsonObject> reasoning;
+    std::optional<QJsonObject> text;
+    std::optional<QJsonValue> toolChoice;     // string ("auto"/"none") or object form
+    std::optional<QJsonArray> tools;
+    std::optional<QString> truncation;
+
+    QString buildUrl(const QString &baseUrl) const
+    {
+        return QString("%1/v1/responses/input_tokens").arg(baseUrl);
+    }
+
+    QJsonObject toJson() const // only fields that were explicitly set are serialized
+    {
+        QJsonObject obj;
+
+        if (conversation)
+            obj["conversation"] = *conversation;
+        if (input)
+            obj["input"] = *input;
+        if (instructions)
+            obj["instructions"] = *instructions;
+        if (model)
+            obj["model"] = *model;
+        if (parallelToolCalls)
+            obj["parallel_tool_calls"] = *parallelToolCalls;
+        if (previousResponseId)
+            obj["previous_response_id"] = *previousResponseId;
+        if (reasoning)
+            obj["reasoning"] = *reasoning;
+        if (text)
+            obj["text"] = *text;
+        if (toolChoice)
+            obj["tool_choice"] = *toolChoice;
+        if (tools)
+            obj["tools"] = *tools;
+        if (truncation)
+            obj["truncation"] = *truncation;
+
+        return obj;
+    }
+
+    bool isValid() const { return input.has_value() || previousResponseId.has_value(); }
+};
+
+class InputTokensRequestBuilder // fluent builder; every setter returns *this for chaining
+{
+public:
+    InputTokensRequestBuilder &setConversation(const QString &conversationId)
+    {
+        m_request.conversation = conversationId;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setInput(const QJsonArray &input) // replaces the whole input array
+    {
+        m_request.input = input;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &addInputMessage(const Message &message) // appends one serialized message
+    {
+        if (!m_request.input) {
+            m_request.input = QJsonArray();
+        }
+        m_request.input->append(message.toJson());
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setInstructions(const QString &instructions)
+    {
+        m_request.instructions = instructions;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setModel(const QString &model)
+    {
+        m_request.model = model;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setParallelToolCalls(bool enabled)
+    {
+        m_request.parallelToolCalls = enabled;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setPreviousResponseId(const QString &responseId)
+    {
+        m_request.previousResponseId = responseId;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setReasoning(const QJsonObject &reasoning) // raw reasoning config object
+    {
+        m_request.reasoning = reasoning;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setReasoningEffort(ReasoningEffort effort) // convenience: builds {"effort": ...}
+    {
+        QString effortStr;
+        switch (effort) {
+        case ReasoningEffort::None:
+            effortStr = "none";
+            break;
+        case ReasoningEffort::Minimal:
+            effortStr = "minimal";
+            break;
+        case ReasoningEffort::Low:
+            effortStr = "low";
+            break;
+        case ReasoningEffort::Medium:
+            effortStr = "medium";
+            break;
+        case ReasoningEffort::High:
+            effortStr = "high";
+            break;
+        }
+        m_request.reasoning = QJsonObject{{"effort", effortStr}}; // overwrites any setReasoning() value
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setText(const QJsonObject &text)
+    {
+        m_request.text = text;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setTextFormat(const TextFormatOptions &format) // convenience over setText()
+    {
+        m_request.text = format.toJson();
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setToolChoice(const QJsonValue &toolChoice)
+    {
+        m_request.toolChoice = toolChoice;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setTools(const QJsonArray &tools) // replaces the whole tools array
+    {
+        m_request.tools = tools;
+        return *this;
+    }
+
+    InputTokensRequestBuilder &addTool(const Tool &tool) // appends one serialized tool definition
+    {
+        if (!m_request.tools) {
+            m_request.tools = QJsonArray();
+        }
+        m_request.tools->append(tool.toJson());
+        return *this;
+    }
+
+    InputTokensRequestBuilder &setTruncation(const QString &truncation)
+    {
+        m_request.truncation = truncation;
+        return *this;
+    }
+
+    InputTokensRequest build() const { return m_request; }
+
+private:
+    InputTokensRequest m_request;
+};
+
+struct InputTokensResponse // reply from /v1/responses/input_tokens
+{
+    QString object;      // object type tag from the API
+    int inputTokens = 0; // number of tokens the given input would consume
+
+    static InputTokensResponse fromJson(const QJsonObject &obj)
+    {
+        InputTokensResponse result;
+        result.object = obj["object"].toString();
+        result.inputTokens = obj["input_tokens"].toInt();
+        return result;
+    }
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponses/ItemTypesReference.hpp b/providers/OpenAIResponses/ItemTypesReference.hpp
new file mode 100644
index 0000000..5b6231d
--- /dev/null
+++ b/providers/OpenAIResponses/ItemTypesReference.hpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+namespace QodeAssist::OpenAIResponses {
+
+/*
+ * REFERENCE: Item Types in List Input Items Response
+ * ===================================================
+ *
+ * The `data` array in ListInputItemsResponse can contain various item types.
+ * This file serves as a reference for all possible item types.
+ *
+ * EXISTING TYPES (already implemented):
+ * -------------------------------------
+ * - MessageOutput (in ResponseObject.hpp)
+ * - FunctionCall (in ResponseObject.hpp)
+ * - ReasoningOutput (in ResponseObject.hpp)
+ * - FileSearchCall (in ResponseObject.hpp)
+ * - CodeInterpreterCall (in ResponseObject.hpp)
+ * - Message (in ModelRequest.hpp) - for input messages
+ *
+ * ADDITIONAL TYPES (to be implemented if needed):
+ * -----------------------------------------------
+ *
+ * 1. Computer Tool Call (computer_call)
+ * - Computer use tool for UI automation
+ * - Properties: action, call_id, id, pending_safety_checks, status, type
+ * - Actions: click, double_click, drag, keypress, move, screenshot, scroll, type, wait
+ *
+ * 2. Computer Tool Call Output (computer_call_output)
+ * - Output from computer tool
+ * - Properties: call_id, id, output, type, acknowledged_safety_checks, status
+ *
+ * 3. Web Search Tool Call (web_search_call)
+ * - Web search results
+ * - Properties: action, id, status, type
+ * - Actions: search, open_page, find
+ *
+ * 4. Image Generation Call (image_generation_call)
+ * - AI image generation request
+ * - Properties: id, result (base64), status, type
+ *
+ * 5. Local Shell Call (local_shell_call)
+ * - Execute shell commands locally
+ * - Properties: action (exec), call_id, id, status, type
+ * - Action properties: command, env, timeout_ms, user, working_directory
+ *
+ * 6. Local Shell Call Output (local_shell_call_output)
+ * - Output from local shell execution
+ * - Properties: id, output (JSON string), type, status
+ *
+ * 7. Shell Tool Call (shell_call)
+ * - Managed shell environment execution
+ * - Properties: action, call_id, id, status, type, created_by
+ *
+ * 8. Shell Call Output (shell_call_output)
+ * - Output from shell tool
+ * - Properties: call_id, id, max_output_length, output (array), type, created_by
+ * - Output chunks: outcome (exit/timeout), stderr, stdout
+ *
+ * 9. Apply Patch Tool Call (apply_patch_call)
+ * - File diff operations
+ * - Properties: call_id, id, operation, status, type, created_by
+ * - Operations: create_file, delete_file, update_file
+ *
+ * 10. Apply Patch Tool Call Output (apply_patch_call_output)
+ * - Output from patch operations
+ * - Properties: call_id, id, status, type, created_by, output
+ *
+ * 11. MCP List Tools (mcp_list_tools)
+ * - List of tools from MCP server
+ * - Properties: id, server_label, tools (array), type, error
+ *
+ * 12. MCP Approval Request (mcp_approval_request)
+ * - Request for human approval
+ * - Properties: arguments, id, name, server_label, type
+ *
+ * 13. MCP Approval Response (mcp_approval_response)
+ * - Response to approval request
+ * - Properties: approval_request_id, approve (bool), id, type, reason
+ *
+ * 14. MCP Tool Call (mcp_call)
+ * - Tool invocation on MCP server
+ * - Properties: arguments, id, name, server_label, type
+ * - Optional: approval_request_id, error, output, status
+ *
+ * 15. Custom Tool Call (custom_tool_call)
+ * - User-defined tool call
+ * - Properties: call_id, input, name, type, id
+ *
+ * 16. Custom Tool Call Output (custom_tool_call_output)
+ * - Output from custom tool
+ * - Properties: call_id, output (string or array), type, id
+ *
+ * 17. Item Reference (item_reference)
+ * - Internal reference to another item
+ * - Properties: id, type
+ *
+ * USAGE:
+ * ------
+ * When parsing ListInputItemsResponse.data array:
+ * 1. Check item["type"] field
+ * 2. Use appropriate parser based on type
+ * 3. For existing types, use ResponseObject.hpp or ModelRequest.hpp
+ * 4. For additional types, implement parsers as needed
+ *
+ * EXAMPLE:
+ * --------
+ * for (const auto &itemValue : response.data) {
+ * const QJsonObject itemObj = itemValue.toObject();
+ * const QString type = itemObj["type"].toString();
+ *
+ * if (type == "message") {
+ * // Use MessageOutput or Message
+ * } else if (type == "function_call") {
+ * // Use FunctionCall
+ * } else if (type == "computer_call") {
+ * // Implement ComputerCall parser
+ * }
+ * // ... handle other types
+ * }
+ */
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponses/ListInputItemsRequest.hpp b/providers/OpenAIResponses/ListInputItemsRequest.hpp
new file mode 100644
index 0000000..c9f9b31
--- /dev/null
+++ b/providers/OpenAIResponses/ListInputItemsRequest.hpp
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QJsonArray>
+#include <QJsonObject>
+#include <QString>
+#include <QStringList>
+#include <optional>
+
+namespace QodeAssist::OpenAIResponses {
+
+enum class SortOrder { Ascending, Descending };
+
+struct ListInputItemsRequest // GET {base}/v1/responses/{id}/input_items
+{
+    QString responseId;
+    std::optional<QString> after;       // cursor: list items after this item ID
+    std::optional<QStringList> include; // extra fields to include in items
+    std::optional<int> limit;           // page size, 1..100
+    std::optional<SortOrder> order;
+
+    QString buildUrl(const QString &baseUrl) const
+    {
+        QString url = QString("%1/v1/responses/%2/input_items").arg(baseUrl, responseId);
+        QStringList queryParams;
+
+        if (after) {
+            queryParams.append(QString("after=%1").arg(*after));
+        }
+
+        if (include && !include->isEmpty()) {
+            for (const auto &item : *include) {
+                queryParams.append(QString("include=%1").arg(item));
+            }
+        }
+
+        if (limit) {
+            queryParams.append(QString("limit=%1").arg(*limit));
+        }
+
+        if (order) {
+            QString orderStr = (*order == SortOrder::Ascending) ? "asc" : "desc";
+            queryParams.append(QString("order=%1").arg(orderStr));
+        }
+
+        if (!queryParams.isEmpty()) {
+            url += "?" + queryParams.join("&");
+        }
+
+        return url;
+    }
+
+    bool isValid() const
+    {
+        if (responseId.isEmpty()) {
+            return false;
+        }
+
+        if (limit && (*limit < 1 || *limit > 100)) { // API-enforced page-size bounds
+            return false;
+        }
+
+        return true;
+    }
+};
+
+class ListInputItemsRequestBuilder // fluent builder; every setter returns *this for chaining
+{
+public:
+    ListInputItemsRequestBuilder &setResponseId(const QString &id)
+    {
+        m_request.responseId = id;
+        return *this;
+    }
+
+    ListInputItemsRequestBuilder &setAfter(const QString &itemId) // pagination cursor
+    {
+        m_request.after = itemId;
+        return *this;
+    }
+
+    ListInputItemsRequestBuilder &setInclude(const QStringList &include) // replaces any previous list
+    {
+        m_request.include = include;
+        return *this;
+    }
+
+    ListInputItemsRequestBuilder &addInclude(const QString &item) // appends one include entry
+    {
+        if (!m_request.include) {
+            m_request.include = QStringList();
+        }
+        m_request.include->append(item);
+        return *this;
+    }
+
+    ListInputItemsRequestBuilder &setLimit(int limit) // valid range 1..100
+    {
+        m_request.limit = limit;
+        return *this;
+    }
+
+    ListInputItemsRequestBuilder &setOrder(SortOrder order)
+    {
+        m_request.order = order;
+        return *this;
+    }
+
+    ListInputItemsRequestBuilder &setAscendingOrder() // shorthand for setOrder(SortOrder::Ascending)
+    {
+        m_request.order = SortOrder::Ascending;
+        return *this;
+    }
+
+    ListInputItemsRequestBuilder &setDescendingOrder() // shorthand for setOrder(SortOrder::Descending)
+    {
+        m_request.order = SortOrder::Descending;
+        return *this;
+    }
+
+    ListInputItemsRequest build() const { return m_request; }
+
+private:
+    ListInputItemsRequest m_request;
+};
+
+struct ListInputItemsResponse // one page of input items
+{
+    QJsonArray data;      // heterogeneous items; dispatch on each item's "type" field
+    QString firstId;      // ID of the first item in this page
+    QString lastId;       // ID of the last item (use as "after" cursor)
+    bool hasMore = false; // true if further pages exist
+    QString object;
+
+    static ListInputItemsResponse fromJson(const QJsonObject &obj)
+    {
+        ListInputItemsResponse result;
+        result.data = obj["data"].toArray();
+        result.firstId = obj["first_id"].toString();
+        result.lastId = obj["last_id"].toString();
+        result.hasMore = obj["has_more"].toBool();
+        result.object = obj["object"].toString();
+        return result;
+    }
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponses/ModelRequest.hpp b/providers/OpenAIResponses/ModelRequest.hpp
new file mode 100644
index 0000000..4e1c7cf
--- /dev/null
+++ b/providers/OpenAIResponses/ModelRequest.hpp
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <QJsonArray>
+#include <QJsonObject>
+#include <QJsonValue>
+#include <QStringList>
+#include <optional>
+#include <variant>
+
+namespace QodeAssist::OpenAIResponses {
+
+enum class Role { User, Assistant, System, Developer };
+
+enum class MessageStatus { InProgress, Completed, Incomplete };
+
+enum class ReasoningEffort { None, Minimal, Low, Medium, High };
+
+enum class TextFormat { Text, JsonSchema, JsonObject };
+
+struct InputText // "input_text" content part
+{
+    QString text;
+
+    QJsonObject toJson() const
+    {
+        return QJsonObject{{"type", "input_text"}, {"text", text}};
+    }
+
+    bool isValid() const noexcept { return !text.isEmpty(); }
+};
+
+struct InputImage // "input_image" content part
+{
+    std::optional<QString> fileId;   // previously uploaded file
+    std::optional<QString> imageUrl; // direct URL or data: URI
+    QString detail = "auto";         // "low", "high" or "auto"
+
+    QJsonObject toJson() const
+    {
+        QJsonObject obj{{"type", "input_image"}, {"detail", detail}};
+        if (fileId)
+            obj["file_id"] = *fileId;
+        if (imageUrl)
+            obj["image_url"] = *imageUrl;
+        return obj;
+    }
+
+    bool isValid() const noexcept { return fileId.has_value() || imageUrl.has_value(); }
+};
+
+struct InputFile // "input_file" content part
+{
+    std::optional<QString> fileId;   // previously uploaded file
+    std::optional<QString> fileUrl;
+    std::optional<QString> fileData; // inline (base64) file content — TODO confirm encoding against API docs
+    std::optional<QString> filename;
+
+    QJsonObject toJson() const
+    {
+        QJsonObject obj{{"type", "input_file"}};
+        if (fileId)
+            obj["file_id"] = *fileId;
+        if (fileUrl)
+            obj["file_url"] = *fileUrl;
+        if (fileData)
+            obj["file_data"] = *fileData;
+        if (filename)
+            obj["filename"] = *filename;
+        return obj;
+    }
+
+    bool isValid() const noexcept
+    {
+        return fileId.has_value() || fileUrl.has_value() || fileData.has_value();
+    }
+};
+
+class MessageContent // one content part: plain string or typed input part
+{
+public:
+    MessageContent(QString text) : m_variant(std::move(text)) {}
+    MessageContent(InputText text) : m_variant(std::move(text)) {}
+    MessageContent(InputImage image) : m_variant(std::move(image)) {}
+    MessageContent(InputFile file) : m_variant(std::move(file)) {}
+
+    QJsonValue toJson() const // plain strings serialize as-is, typed parts as objects
+    {
+        return std::visit([](const auto &content) -> QJsonValue {
+            using T = std::decay_t<decltype(content)>;
+            if constexpr (std::is_same_v<T, QString>) {
+                return content;
+            } else {
+                return content.toJson();
+            }
+        }, m_variant);
+    }
+
+    bool isValid() const noexcept
+    {
+        return std::visit([](const auto &content) -> bool {
+            using T = std::decay_t<decltype(content)>;
+            if constexpr (std::is_same_v<T, QString>) {
+                return !content.isEmpty();
+            } else {
+                return content.isValid();
+            }
+        }, m_variant);
+    }
+
+private:
+    std::variant<QString, InputText, InputImage, InputFile> m_variant;
+};
+
+struct Message // one input message: role plus one or more content parts
+{
+    Role role;
+    QList<MessageContent> content;
+    std::optional<MessageStatus> status;
+
+    QJsonObject toJson() const
+    {
+        QJsonObject obj;
+        obj["role"] = roleToString(role);
+
+        if (content.size() == 1) { // single part collapses to a bare value
+            obj["content"] = content[0].toJson();
+        } else {
+            QJsonArray arr;
+            for (const auto &c : content) {
+                arr.append(c.toJson());
+            }
+            obj["content"] = arr;
+        }
+
+        if (status) {
+            obj["status"] = statusToString(*status);
+        }
+
+        return obj;
+    }
+
+    bool isValid() const noexcept // requires at least one part, all parts valid
+    {
+        if (content.isEmpty()) {
+            return false;
+        }
+
+        for (const auto &c : content) {
+            if (!c.isValid()) {
+                return false;
+            }
+        }
+
+        return true;
+    }
+
+    static QString roleToString(Role r) noexcept
+    {
+        switch (r) {
+        case Role::User:
+            return "user";
+        case Role::Assistant:
+            return "assistant";
+        case Role::System:
+            return "system";
+        case Role::Developer:
+            return "developer";
+        }
+        return "user"; // unreachable fallback
+    }
+
+    static QString statusToString(MessageStatus s) noexcept
+    {
+        switch (s) {
+        case MessageStatus::InProgress:
+            return "in_progress";
+        case MessageStatus::Completed:
+            return "completed";
+        case MessageStatus::Incomplete:
+            return "incomplete";
+        }
+        return "in_progress"; // unreachable fallback
+    }
+};
+
+struct FunctionTool // "function" tool definition (custom function calling)
+{
+    QString name;
+    QJsonObject parameters; // JSON Schema for the function arguments
+    std::optional<QString> description;
+    bool strict = true;     // enforce exact schema adherence
+
+    QJsonObject toJson() const
+    {
+        QJsonObject obj{{"type", "function"},
+                        {"name", name},
+                        {"parameters", parameters},
+                        {"strict", strict}};
+        if (description)
+            obj["description"] = *description;
+        return obj;
+    }
+
+    bool isValid() const noexcept
+    {
+        return !name.isEmpty() && !parameters.isEmpty();
+    }
+};
+
+struct FileSearchTool // "file_search" tool definition
+{
+    QStringList vectorStoreIds;
+    std::optional<int> maxNumResults;
+    std::optional<double> scoreThreshold; // minimum relevance score — presumably 0..1, verify against API docs
+
+    QJsonObject toJson() const
+    {
+        QJsonObject obj{{"type", "file_search"}};
+        QJsonArray ids;
+        for (const auto &id : vectorStoreIds) {
+            ids.append(id);
+        }
+        obj["vector_store_ids"] = ids;
+
+        if (maxNumResults)
+            obj["max_num_results"] = *maxNumResults;
+        if (scoreThreshold)
+            obj["score_threshold"] = *scoreThreshold;
+        return obj;
+    }
+
+    bool isValid() const noexcept
+    {
+        return !vectorStoreIds.isEmpty();
+    }
+};
+
+struct WebSearchTool // "web_search" tool definition
+{
+    QString searchContextSize = "medium"; // "low", "medium" or "high" — TODO confirm accepted values
+
+    QJsonObject toJson() const
+    {
+        return QJsonObject{{"type", "web_search"}, {"search_context_size", searchContextSize}};
+    }
+
+    bool isValid() const noexcept
+    {
+        return !searchContextSize.isEmpty();
+    }
+};
+
+struct CodeInterpreterTool // "code_interpreter" tool definition
+{
+    QString container; // container the code runs in — presumably a container ID; verify against API docs
+
+    QJsonObject toJson() const
+    {
+        return QJsonObject{{"type", "code_interpreter"}, {"container", container}};
+    }
+
+    bool isValid() const noexcept
+    {
+        return !container.isEmpty();
+    }
+};
+
+class Tool // type-safe union over all supported tool definitions
+{
+public:
+    Tool(FunctionTool tool) : m_variant(std::move(tool)) {}
+    Tool(FileSearchTool tool) : m_variant(std::move(tool)) {}
+    Tool(WebSearchTool tool) : m_variant(std::move(tool)) {}
+    Tool(CodeInterpreterTool tool) : m_variant(std::move(tool)) {}
+
+    QJsonObject toJson() const
+    {
+        return std::visit([](const auto &t) { return t.toJson(); }, m_variant);
+    }
+
+    bool isValid() const noexcept
+    {
+        return std::visit([](const auto &t) { return t.isValid(); }, m_variant);
+    }
+
+private:
+    std::variant<FunctionTool, FileSearchTool, WebSearchTool, CodeInterpreterTool> m_variant;
+};
+
+struct TextFormatOptions // response "text" format configuration
+{
+    TextFormat type = TextFormat::Text;
+    std::optional<QString> name;       // required when type == JsonSchema
+    std::optional<QJsonObject> schema; // required when type == JsonSchema
+    std::optional<QString> description;
+    std::optional<bool> strict;
+
+    QJsonObject toJson() const
+    {
+        QJsonObject obj;
+
+        switch (type) {
+        case TextFormat::Text:
+            obj["type"] = "text";
+            break;
+        case TextFormat::JsonSchema:
+            obj["type"] = "json_schema";
+            if (name)
+                obj["name"] = *name;
+            if (schema)
+                obj["schema"] = *schema;
+            if (description)
+                obj["description"] = *description;
+            if (strict)
+                obj["strict"] = *strict;
+            break;
+        case TextFormat::JsonObject:
+            obj["type"] = "json_object";
+            break;
+        }
+
+        return obj;
+    }
+
+    bool isValid() const noexcept
+    {
+        if (type == TextFormat::JsonSchema) {
+            return name.has_value() && schema.has_value();
+        }
+        return true;
+    }
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponses/ResponseObject.hpp b/providers/OpenAIResponses/ResponseObject.hpp
new file mode 100644
index 0000000..51c5b46
--- /dev/null
+++ b/providers/OpenAIResponses/ResponseObject.hpp
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see .
+ */
+
+#pragma once
+
// NOTE(review): include targets were stripped in the patch text; restored from
// the types used in this header.
#include <QJsonArray>
#include <QJsonObject>
#include <QString>
#include <QStringList>

#include <optional>
#include <variant>
+
+namespace QodeAssist::OpenAIResponses {
+
// Lifecycle of a whole response object, mapped from the API "status" field.
enum class ResponseStatus { Completed, Failed, InProgress, Cancelled, Queued, Incomplete };

// Lifecycle of a single output item within a response.
enum class ItemStatus { InProgress, Completed, Incomplete };
+
+struct FileCitation
+{
+ QString fileId;
+ QString filename;
+ int index = 0;
+
+ static FileCitation fromJson(const QJsonObject &obj)
+ {
+ return {obj["file_id"].toString(), obj["filename"].toString(), obj["index"].toInt()};
+ }
+
+ bool isValid() const noexcept { return !fileId.isEmpty(); }
+};
+
+struct UrlCitation
+{
+ QString url;
+ QString title;
+ int startIndex = 0;
+ int endIndex = 0;
+
+ static UrlCitation fromJson(const QJsonObject &obj)
+ {
+ return {
+ obj["url"].toString(),
+ obj["title"].toString(),
+ obj["start_index"].toInt(),
+ obj["end_index"].toInt()};
+ }
+
+ bool isValid() const noexcept { return !url.isEmpty(); }
+};
+
+struct OutputText
+{
+ QString text;
+ QList fileCitations;
+ QList urlCitations;
+
+ static OutputText fromJson(const QJsonObject &obj)
+ {
+ OutputText result;
+ result.text = obj["text"].toString();
+
+ if (obj.contains("annotations")) {
+ const QJsonArray annotations = obj["annotations"].toArray();
+ result.fileCitations.reserve(annotations.size());
+ result.urlCitations.reserve(annotations.size());
+
+ for (const auto &annValue : annotations) {
+ const QJsonObject ann = annValue.toObject();
+ const QString type = ann["type"].toString();
+ if (type == "file_citation") {
+ result.fileCitations.append(FileCitation::fromJson(ann));
+ } else if (type == "url_citation") {
+ result.urlCitations.append(UrlCitation::fromJson(ann));
+ }
+ }
+ }
+
+ return result;
+ }
+
+ bool isValid() const noexcept { return !text.isEmpty(); }
+};
+
+struct Refusal
+{
+ QString refusal;
+
+ static Refusal fromJson(const QJsonObject &obj)
+ {
+ return {obj["refusal"].toString()};
+ }
+
+ bool isValid() const noexcept { return !refusal.isEmpty(); }
+};
+
+struct MessageOutput
+{
+ QString id;
+ QString role;
+ ItemStatus status = ItemStatus::InProgress;
+ QList outputTexts;
+ QList refusals;
+
+ static MessageOutput fromJson(const QJsonObject &obj)
+ {
+ MessageOutput result;
+ result.id = obj["id"].toString();
+ result.role = obj["role"].toString();
+
+ const QString statusStr = obj["status"].toString();
+ if (statusStr == "in_progress")
+ result.status = ItemStatus::InProgress;
+ else if (statusStr == "completed")
+ result.status = ItemStatus::Completed;
+ else
+ result.status = ItemStatus::Incomplete;
+
+ if (obj.contains("content")) {
+ const QJsonArray content = obj["content"].toArray();
+ result.outputTexts.reserve(content.size());
+ result.refusals.reserve(content.size());
+
+ for (const auto &item : content) {
+ const QJsonObject itemObj = item.toObject();
+ const QString type = itemObj["type"].toString();
+
+ if (type == "output_text") {
+ result.outputTexts.append(OutputText::fromJson(itemObj));
+ } else if (type == "refusal") {
+ result.refusals.append(Refusal::fromJson(itemObj));
+ }
+ }
+ }
+
+ return result;
+ }
+
+ bool isValid() const noexcept { return !id.isEmpty(); }
+ bool hasContent() const noexcept { return !outputTexts.isEmpty() || !refusals.isEmpty(); }
+};
+
+struct FunctionCall
+{
+ QString id;
+ QString callId;
+ QString name;
+ QString arguments;
+ ItemStatus status = ItemStatus::InProgress;
+
+ static FunctionCall fromJson(const QJsonObject &obj)
+ {
+ FunctionCall result;
+ result.id = obj["id"].toString();
+ result.callId = obj["call_id"].toString();
+ result.name = obj["name"].toString();
+ result.arguments = obj["arguments"].toString();
+
+ const QString statusStr = obj["status"].toString();
+ if (statusStr == "in_progress")
+ result.status = ItemStatus::InProgress;
+ else if (statusStr == "completed")
+ result.status = ItemStatus::Completed;
+ else
+ result.status = ItemStatus::Incomplete;
+
+ return result;
+ }
+
+ bool isValid() const noexcept { return !id.isEmpty() && !callId.isEmpty() && !name.isEmpty(); }
+};
+
+struct ReasoningOutput
+{
+ QString id;
+ ItemStatus status = ItemStatus::InProgress;
+ QString summaryText;
+ QString encryptedContent;
+ QList contentTexts;
+
+ static ReasoningOutput fromJson(const QJsonObject &obj)
+ {
+ ReasoningOutput result;
+ result.id = obj["id"].toString();
+
+ const QString statusStr = obj["status"].toString();
+ if (statusStr == "in_progress")
+ result.status = ItemStatus::InProgress;
+ else if (statusStr == "completed")
+ result.status = ItemStatus::Completed;
+ else
+ result.status = ItemStatus::Incomplete;
+
+ if (obj.contains("summary")) {
+ const QJsonArray summary = obj["summary"].toArray();
+ for (const auto &item : summary) {
+ const QJsonObject itemObj = item.toObject();
+ if (itemObj["type"].toString() == "summary_text") {
+ result.summaryText = itemObj["text"].toString();
+ break;
+ }
+ }
+ }
+
+ if (obj.contains("content")) {
+ const QJsonArray content = obj["content"].toArray();
+ result.contentTexts.reserve(content.size());
+
+ for (const auto &item : content) {
+ const QJsonObject itemObj = item.toObject();
+ if (itemObj["type"].toString() == "reasoning_text") {
+ result.contentTexts.append(itemObj["text"].toString());
+ }
+ }
+ }
+
+ if (obj.contains("encrypted_content")) {
+ result.encryptedContent = obj["encrypted_content"].toString();
+ }
+
+ return result;
+ }
+
+ bool isValid() const noexcept { return !id.isEmpty(); }
+ bool hasContent() const noexcept
+ {
+ return !summaryText.isEmpty() || !contentTexts.isEmpty() || !encryptedContent.isEmpty();
+ }
+};
+
+struct FileSearchResult
+{
+ QString fileId;
+ QString filename;
+ QString text;
+ double score = 0.0;
+
+ static FileSearchResult fromJson(const QJsonObject &obj)
+ {
+ return {
+ obj["file_id"].toString(),
+ obj["filename"].toString(),
+ obj["text"].toString(),
+ obj["score"].toDouble()};
+ }
+
+ bool isValid() const noexcept { return !fileId.isEmpty(); }
+};
+
+struct FileSearchCall
+{
+ QString id;
+ QString status;
+ QStringList queries;
+ QList results;
+
+ static FileSearchCall fromJson(const QJsonObject &obj)
+ {
+ FileSearchCall result;
+ result.id = obj["id"].toString();
+ result.status = obj["status"].toString();
+
+ if (obj.contains("queries")) {
+ const QJsonArray queries = obj["queries"].toArray();
+ result.queries.reserve(queries.size());
+
+ for (const auto &q : queries) {
+ result.queries.append(q.toString());
+ }
+ }
+
+ if (obj.contains("results")) {
+ const QJsonArray results = obj["results"].toArray();
+ result.results.reserve(results.size());
+
+ for (const auto &r : results) {
+ result.results.append(FileSearchResult::fromJson(r.toObject()));
+ }
+ }
+
+ return result;
+ }
+
+ bool isValid() const noexcept { return !id.isEmpty(); }
+};
+
+struct CodeInterpreterOutput
+{
+ QString type;
+ QString logs;
+ QString imageUrl;
+
+ static CodeInterpreterOutput fromJson(const QJsonObject &obj)
+ {
+ CodeInterpreterOutput result;
+ result.type = obj["type"].toString();
+ if (result.type == "logs") {
+ result.logs = obj["logs"].toString();
+ } else if (result.type == "image") {
+ result.imageUrl = obj["url"].toString();
+ }
+ return result;
+ }
+
+ bool isValid() const noexcept
+ {
+ return !type.isEmpty() && (!logs.isEmpty() || !imageUrl.isEmpty());
+ }
+};
+
+struct CodeInterpreterCall
+{
+ QString id;
+ QString containerId;
+ std::optional code;
+ QString status;
+ QList outputs;
+
+ static CodeInterpreterCall fromJson(const QJsonObject &obj)
+ {
+ CodeInterpreterCall result;
+ result.id = obj["id"].toString();
+ result.containerId = obj["container_id"].toString();
+ result.status = obj["status"].toString();
+
+ if (obj.contains("code") && !obj["code"].isNull()) {
+ result.code = obj["code"].toString();
+ }
+
+ if (obj.contains("outputs")) {
+ const QJsonArray outputs = obj["outputs"].toArray();
+ result.outputs.reserve(outputs.size());
+
+ for (const auto &o : outputs) {
+ result.outputs.append(CodeInterpreterOutput::fromJson(o.toObject()));
+ }
+ }
+
+ return result;
+ }
+
+ bool isValid() const noexcept { return !id.isEmpty() && !containerId.isEmpty(); }
+};
+
+class OutputItem
+{
+public:
+ enum class Type { Message, FunctionCall, Reasoning, FileSearch, CodeInterpreter, Unknown };
+
+ explicit OutputItem(const MessageOutput &msg)
+ : m_type(Type::Message)
+ , m_data(msg)
+ {}
+ explicit OutputItem(const FunctionCall &call)
+ : m_type(Type::FunctionCall)
+ , m_data(call)
+ {}
+ explicit OutputItem(const ReasoningOutput &reasoning)
+ : m_type(Type::Reasoning)
+ , m_data(reasoning)
+ {}
+ explicit OutputItem(const FileSearchCall &search)
+ : m_type(Type::FileSearch)
+ , m_data(search)
+ {}
+ explicit OutputItem(const CodeInterpreterCall &interpreter)
+ : m_type(Type::CodeInterpreter)
+ , m_data(interpreter)
+ {}
+
+ Type type() const { return m_type; }
+
+ const MessageOutput *asMessage() const
+ {
+ return std::holds_alternative(m_data) ? &std::get(m_data)
+ : nullptr;
+ }
+
+ const FunctionCall *asFunctionCall() const
+ {
+ return std::holds_alternative(m_data) ? &std::get(m_data)
+ : nullptr;
+ }
+
+ const ReasoningOutput *asReasoning() const
+ {
+ return std::holds_alternative(m_data) ? &std::get(m_data)
+ : nullptr;
+ }
+
+ const FileSearchCall *asFileSearch() const
+ {
+ return std::holds_alternative(m_data) ? &std::get(m_data)
+ : nullptr;
+ }
+
+ const CodeInterpreterCall *asCodeInterpreter() const
+ {
+ return std::holds_alternative(m_data)
+ ? &std::get(m_data)
+ : nullptr;
+ }
+
+ static OutputItem fromJson(const QJsonObject &obj)
+ {
+ const QString type = obj["type"].toString();
+
+ if (type == "message") {
+ return OutputItem(MessageOutput::fromJson(obj));
+ } else if (type == "function_call") {
+ return OutputItem(FunctionCall::fromJson(obj));
+ } else if (type == "reasoning") {
+ return OutputItem(ReasoningOutput::fromJson(obj));
+ } else if (type == "file_search_call") {
+ return OutputItem(FileSearchCall::fromJson(obj));
+ } else if (type == "code_interpreter_call") {
+ return OutputItem(CodeInterpreterCall::fromJson(obj));
+ }
+
+ return OutputItem(MessageOutput{});
+ }
+
+private:
+ Type m_type;
+ std::variant
+ m_data;
+};
+
+struct Usage
+{
+ int inputTokens = 0;
+ int outputTokens = 0;
+ int totalTokens = 0;
+
+ static Usage fromJson(const QJsonObject &obj)
+ {
+ return {
+ obj["input_tokens"].toInt(),
+ obj["output_tokens"].toInt(),
+ obj["total_tokens"].toInt()
+ };
+ }
+
+ bool isValid() const noexcept { return totalTokens > 0; }
+};
+
+struct ResponseError
+{
+ QString code;
+ QString message;
+
+ static ResponseError fromJson(const QJsonObject &obj)
+ {
+ return {obj["code"].toString(), obj["message"].toString()};
+ }
+
+ bool isValid() const noexcept { return !code.isEmpty() && !message.isEmpty(); }
+};
+
+struct Response
+{
+ QString id;
+ qint64 createdAt = 0;
+ QString model;
+ ResponseStatus status = ResponseStatus::InProgress;
+ QList output;
+ QString outputText;
+ std::optional usage;
+ std::optional error;
+ std::optional conversationId;
+
+ static Response fromJson(const QJsonObject &obj)
+ {
+ Response result;
+ result.id = obj["id"].toString();
+ result.createdAt = obj["created_at"].toInteger();
+ result.model = obj["model"].toString();
+
+ const QString statusStr = obj["status"].toString();
+ if (statusStr == "completed")
+ result.status = ResponseStatus::Completed;
+ else if (statusStr == "failed")
+ result.status = ResponseStatus::Failed;
+ else if (statusStr == "in_progress")
+ result.status = ResponseStatus::InProgress;
+ else if (statusStr == "cancelled")
+ result.status = ResponseStatus::Cancelled;
+ else if (statusStr == "queued")
+ result.status = ResponseStatus::Queued;
+ else
+ result.status = ResponseStatus::Incomplete;
+
+ if (obj.contains("output")) {
+ const QJsonArray output = obj["output"].toArray();
+ result.output.reserve(output.size());
+
+ for (const auto &item : output) {
+ result.output.append(OutputItem::fromJson(item.toObject()));
+ }
+ }
+
+ if (obj.contains("output_text")) {
+ result.outputText = obj["output_text"].toString();
+ }
+
+ if (obj.contains("usage")) {
+ result.usage = Usage::fromJson(obj["usage"].toObject());
+ }
+
+ if (obj.contains("error")) {
+ result.error = ResponseError::fromJson(obj["error"].toObject());
+ }
+
+ if (obj.contains("conversation")) {
+ const QJsonObject conv = obj["conversation"].toObject();
+ result.conversationId = conv["id"].toString();
+ }
+
+ return result;
+ }
+
+ QString getAggregatedText() const
+ {
+ if (!outputText.isEmpty()) {
+ return outputText;
+ }
+
+ QString aggregated;
+ for (const auto &item : output) {
+ if (const auto *msg = item.asMessage()) {
+ for (const auto &text : msg->outputTexts) {
+ aggregated += text.text;
+ }
+ }
+ }
+ return aggregated;
+ }
+
+ bool isValid() const noexcept { return !id.isEmpty(); }
+ bool hasError() const noexcept { return error.has_value(); }
+ bool isCompleted() const noexcept { return status == ResponseStatus::Completed; }
+ bool isFailed() const noexcept { return status == ResponseStatus::Failed; }
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/OpenAIResponsesMessage.cpp b/providers/OpenAIResponsesMessage.cpp
new file mode 100644
index 0000000..8806262
--- /dev/null
+++ b/providers/OpenAIResponsesMessage.cpp
@@ -0,0 +1,246 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see .
+ */
+
+#include "OpenAIResponsesMessage.hpp"
+#include "OpenAIResponses/ResponseObject.hpp"
+
+#include "logger/Logger.hpp"
+
+#include
+
+namespace QodeAssist::Providers {
+
// Accumulator for one streamed Responses API request; parented content blocks
// are owned through the Qt parent-child mechanism.
OpenAIResponsesMessage::OpenAIResponsesMessage(QObject *parent)
    : QObject(parent)
{}
+
+void OpenAIResponsesMessage::handleItemDelta(const QJsonObject &item)
+{
+ using namespace QodeAssist::OpenAIResponses;
+
+ const QString itemType = item["type"].toString();
+
+ if (itemType == "message" || (itemType.isEmpty() && item.contains("content"))) {
+ OutputItem outputItem = OutputItem::fromJson(item);
+
+ if (const auto *msg = outputItem.asMessage()) {
+ for (const auto &outputText : msg->outputTexts) {
+ if (!outputText.text.isEmpty()) {
+ auto textItem = getOrCreateTextItem();
+ textItem->appendText(outputText.text);
+ }
+ }
+ }
+ }
+}
+
+void OpenAIResponsesMessage::handleToolCallStart(const QString &callId, const QString &name)
+{
+ auto toolContent = new LLMCore::ToolUseContent(callId, name);
+ toolContent->setParent(this);
+ m_items.append(toolContent);
+ m_toolCalls[callId] = toolContent;
+ m_pendingToolArguments[callId] = "";
+}
+
+void OpenAIResponsesMessage::handleToolCallDelta(const QString &callId, const QString &argumentsDelta)
+{
+ if (m_pendingToolArguments.contains(callId)) {
+ m_pendingToolArguments[callId] += argumentsDelta;
+ }
+}
+
+void OpenAIResponsesMessage::handleToolCallComplete(const QString &callId)
+{
+ if (m_pendingToolArguments.contains(callId) && m_toolCalls.contains(callId)) {
+ QString jsonArgs = m_pendingToolArguments[callId];
+ QJsonObject argsObject;
+
+ if (!jsonArgs.isEmpty()) {
+ QJsonDocument doc = QJsonDocument::fromJson(jsonArgs.toUtf8());
+ if (doc.isObject()) {
+ argsObject = doc.object();
+ }
+ }
+
+ m_toolCalls[callId]->setInput(argsObject);
+ m_pendingToolArguments.remove(callId);
+ }
+}
+
+void OpenAIResponsesMessage::handleReasoningStart(const QString &itemId)
+{
+ auto thinkingContent = new LLMCore::ThinkingContent();
+ thinkingContent->setParent(this);
+ m_items.append(thinkingContent);
+ m_thinkingBlocks[itemId] = thinkingContent;
+}
+
+void OpenAIResponsesMessage::handleReasoningDelta(const QString &itemId, const QString &text)
+{
+ if (m_thinkingBlocks.contains(itemId)) {
+ m_thinkingBlocks[itemId]->appendThinking(text);
+ }
+}
+
// Reasoning items need no finalization; deltas were applied as they arrived.
void OpenAIResponsesMessage::handleReasoningComplete(const QString &itemId)
{
    Q_UNUSED(itemId);
}

// Records the latest raw status string and re-derives the message state.
void OpenAIResponsesMessage::handleStatus(const QString &status)
{
    m_status = status;
    updateStateFromStatus();
}
+
+QList OpenAIResponsesMessage::toItemsFormat() const
+{
+ QList items;
+
+ QString textContent;
+ QList toolCalls;
+
+ for (const auto *block : m_items) {
+ if (const auto *text = qobject_cast(block)) {
+ textContent += text->text();
+ } else if (auto *tool = qobject_cast(
+ const_cast(block))) {
+ toolCalls.append(tool);
+ }
+ }
+
+ if (!textContent.isEmpty()) {
+ QJsonObject message;
+ message["role"] = "assistant";
+ message["content"] = textContent;
+ items.append(message);
+ }
+
+ for (const auto *tool : toolCalls) {
+ QJsonObject functionCallItem;
+ functionCallItem["type"] = "function_call";
+ functionCallItem["call_id"] = tool->id();
+ functionCallItem["name"] = tool->name();
+ functionCallItem["arguments"] = QString::fromUtf8(
+ QJsonDocument(tool->input()).toJson(QJsonDocument::Compact));
+ items.append(functionCallItem);
+ }
+
+ return items;
+}
+
+QList OpenAIResponsesMessage::getCurrentToolUseContent() const
+{
+ QList toolBlocks;
+ for (auto *block : m_items) {
+ if (auto *toolContent = qobject_cast(block)) {
+ toolBlocks.append(toolContent);
+ }
+ }
+ return toolBlocks;
+}
+
+QList OpenAIResponsesMessage::getCurrentThinkingContent() const
+{
+ QList thinkingBlocks;
+ for (auto *block : m_items) {
+ if (auto *thinkingContent = qobject_cast(block)) {
+ thinkingBlocks.append(thinkingContent);
+ }
+ }
+ return thinkingBlocks;
+}
+
+QJsonArray OpenAIResponsesMessage::createToolResultItems(const QHash &toolResults) const
+{
+ QJsonArray items;
+
+ for (const auto *toolContent : getCurrentToolUseContent()) {
+ if (toolResults.contains(toolContent->id())) {
+ QJsonObject toolResultItem;
+ toolResultItem["type"] = "function_call_output";
+ toolResultItem["call_id"] = toolContent->id();
+ toolResultItem["output"] = toolResults[toolContent->id()];
+ items.append(toolResultItem);
+ }
+ }
+
+ return items;
+}
+
+QString OpenAIResponsesMessage::accumulatedText() const
+{
+ QString text;
+ for (const auto *block : m_items) {
+ if (const auto *textContent = qobject_cast(block)) {
+ text += textContent->text();
+ }
+ }
+ return text;
+}
+
+void OpenAIResponsesMessage::updateStateFromStatus()
+{
+ using namespace QodeAssist::OpenAIResponses;
+
+ if (m_status == "completed") {
+ if (!getCurrentToolUseContent().isEmpty()) {
+ m_state = LLMCore::MessageState::RequiresToolExecution;
+ } else {
+ m_state = LLMCore::MessageState::Complete;
+ }
+ } else if (m_status == "in_progress") {
+ m_state = LLMCore::MessageState::Building;
+ } else if (m_status == "failed" || m_status == "cancelled" || m_status == "incomplete") {
+ m_state = LLMCore::MessageState::Final;
+ } else {
+ m_state = LLMCore::MessageState::Building;
+ }
+}
+
+LLMCore::TextContent *OpenAIResponsesMessage::getOrCreateTextItem()
+{
+ for (auto *block : m_items) {
+ if (auto *textContent = qobject_cast(block)) {
+ return textContent;
+ }
+ }
+
+ auto *textContent = new LLMCore::TextContent();
+ textContent->setParent(this);
+ m_items.append(textContent);
+ return textContent;
+}
+
// Resets all per-turn state before a tool-result continuation request.
void OpenAIResponsesMessage::startNewContinuation()
{
    // Clear the lookup tables first; they hold non-owning pointers into m_items.
    m_toolCalls.clear();
    m_thinkingBlocks.clear();

    // m_items owns the blocks (also parented to this); delete them explicitly
    // so the next turn starts empty.
    qDeleteAll(m_items);
    m_items.clear();

    m_pendingToolArguments.clear();
    m_status.clear();
    m_state = LLMCore::MessageState::Building;
}
+
+} // namespace QodeAssist::Providers
+
diff --git a/providers/OpenAIResponsesMessage.hpp b/providers/OpenAIResponsesMessage.hpp
new file mode 100644
index 0000000..0b0a497
--- /dev/null
+++ b/providers/OpenAIResponsesMessage.hpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see .
+ */
+
+#pragma once
+
+#include
+
+namespace QodeAssist::Providers {
+
// Accumulates streamed OpenAI Responses API events (text deltas, tool calls,
// reasoning summaries, status changes) into structured content blocks and can
// re-serialize them as Responses API input items for follow-up requests.
// NOTE(review): several declarations below lost their template arguments in
// the patch text (bare QList/QHash) — restore them from the .cpp usage.
class OpenAIResponsesMessage : public QObject
{
    Q_OBJECT
public:
    explicit OpenAIResponsesMessage(QObject *parent = nullptr);

    // Streaming event handlers, invoked as SSE events arrive.
    void handleItemDelta(const QJsonObject &item);
    void handleToolCallStart(const QString &callId, const QString &name);
    void handleToolCallDelta(const QString &callId, const QString &argumentsDelta);
    void handleToolCallComplete(const QString &callId);
    void handleReasoningStart(const QString &itemId);
    void handleReasoningDelta(const QString &itemId, const QString &text);
    void handleReasoningComplete(const QString &itemId);
    void handleStatus(const QString &status);

    // Serialization back into Responses API "input" items.
    QList toItemsFormat() const;
    QJsonArray createToolResultItems(const QHash &toolResults) const;

    LLMCore::MessageState state() const noexcept { return m_state; }
    QString accumulatedText() const;
    QList getCurrentToolUseContent() const;
    QList getCurrentThinkingContent() const;

    bool hasToolCalls() const noexcept { return !m_toolCalls.isEmpty(); }
    bool hasThinkingContent() const noexcept { return !m_thinkingBlocks.isEmpty(); }

    // Resets per-turn state before a tool-result continuation request.
    void startNewContinuation();

private:
    QString m_status;                 // last raw status string from the stream
    LLMCore::MessageState m_state = LLMCore::MessageState::Building;
    QList m_items;                    // owned content blocks, in arrival order
    QHash m_pendingToolArguments;     // callId -> accumulated JSON argument text
    QHash m_toolCalls;                // callId -> tool-use block (non-owning)
    QHash m_thinkingBlocks;           // itemId -> thinking block (non-owning)

    void updateStateFromStatus();
    LLMCore::TextContent *getOrCreateTextItem();
};
+
+} // namespace QodeAssist::Providers
+
diff --git a/providers/OpenAIResponsesProvider.cpp b/providers/OpenAIResponsesProvider.cpp
new file mode 100644
index 0000000..250aa01
--- /dev/null
+++ b/providers/OpenAIResponsesProvider.cpp
@@ -0,0 +1,666 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see .
+ */
+
+#include "OpenAIResponsesProvider.hpp"
+#include "OpenAIResponses/ResponseObject.hpp"
+
+#include "llmcore/ValidationUtils.hpp"
+#include "logger/Logger.hpp"
+#include "settings/ChatAssistantSettings.hpp"
+#include "settings/CodeCompletionSettings.hpp"
+#include "settings/GeneralSettings.hpp"
+#include "settings/ProviderSettings.hpp"
+#include "settings/QuickRefactorSettings.hpp"
+
+#include
+#include
+#include
+#include
+#include
+
+namespace QodeAssist::Providers {
+
// Wires tool-execution completion back into the provider so tool results can
// continue an in-flight response.
OpenAIResponsesProvider::OpenAIResponsesProvider(QObject *parent)
    : LLMCore::Provider(parent)
    , m_toolsManager(new Tools::ToolsManager(this))
{
    connect(
        m_toolsManager,
        &Tools::ToolsManager::toolExecutionComplete,
        this,
        &OpenAIResponsesProvider::onToolExecutionComplete);
}
+
// Human-readable provider name shown to the user.
QString OpenAIResponsesProvider::name() const
{
    return "OpenAI Responses";
}

// Default API base URL.
QString OpenAIResponsesProvider::url() const
{
    return "https://api.openai.com";
}

// The Responses API uses the same endpoint for completion and chat.
QString OpenAIResponsesProvider::completionEndpoint() const
{
    return "/v1/responses";
}

QString OpenAIResponsesProvider::chatEndpoint() const
{
    return "/v1/responses";
}

// Model enumeration via /v1/models is supported (see getInstalledModels).
bool OpenAIResponsesProvider::supportsModelListing() const
{
    return true;
}
+
// Fills the outgoing /v1/responses payload: template-driven prompt fields,
// per-request-type model parameters, optional reasoning ("thinking") options,
// and tool definitions converted from OpenAI chat format to Responses format.
void OpenAIResponsesProvider::prepareRequest(
    QJsonObject &request,
    LLMCore::PromptTemplate *prompt,
    LLMCore::ContextData context,
    LLMCore::RequestType type,
    bool isToolsEnabled,
    bool isThinkingEnabled)
{
    // A mismatched template is logged but not rejected.
    if (!prompt->isSupportProvider(providerID())) {
        LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(name(), prompt->name()));
    }

    prompt->prepareRequest(request, context);

    auto applyModelParams = [&request](const auto &settings) {
        request["max_output_tokens"] = settings.maxTokens();

        if (settings.useTopP()) {
            request["top_p"] = settings.topP();
        }
    };

    // Reasoning mode: sets the effort level (defaulting to "medium"),
    // overrides max_output_tokens with the thinking budget, enables storage,
    // and asks for encrypted reasoning content in the stream.
    auto applyThinkingMode = [&request](const auto &settings) {
        QString effortStr = settings.openAIResponsesReasoningEffort.stringValue().toLower();
        if (effortStr.isEmpty()) {
            effortStr = "medium";
        }

        QJsonObject reasoning;
        reasoning["effort"] = effortStr;
        request["reasoning"] = reasoning;
        request["max_output_tokens"] = settings.thinkingMaxTokens();
        request["store"] = true;

        QJsonArray include;
        include.append("reasoning.encrypted_content");
        request["include"] = include;
    };

    if (type == LLMCore::RequestType::CodeCompletion) {
        applyModelParams(Settings::codeCompletionSettings());
    } else if (type == LLMCore::RequestType::QuickRefactoring) {
        const auto &qrSettings = Settings::quickRefactorSettings();
        applyModelParams(qrSettings);

        if (isThinkingEnabled) {
            applyThinkingMode(qrSettings);
        }
    } else {
        const auto &chatSettings = Settings::chatAssistantSettings();
        applyModelParams(chatSettings);

        if (isThinkingEnabled) {
            applyThinkingMode(chatSettings);
        }
    }

    if (isToolsEnabled) {
        // Quick refactoring is restricted to read-only tools.
        const LLMCore::RunToolsFilter filter = (type == LLMCore::RequestType::QuickRefactoring)
            ? LLMCore::RunToolsFilter::OnlyRead
            : LLMCore::RunToolsFilter::ALL;

        const auto toolsDefinitions
            = m_toolsManager->getToolsDefinitions(LLMCore::ToolSchemaFormat::OpenAI, filter);
        if (!toolsDefinitions.isEmpty()) {
            QJsonArray responsesTools;

            // Chat-format tools nest the spec under "function"; the Responses
            // API expects name/description/parameters at the top level.
            for (const QJsonValue &toolValue : toolsDefinitions) {
                const QJsonObject tool = toolValue.toObject();
                if (tool.contains("function")) {
                    const QJsonObject functionObj = tool["function"].toObject();
                    QJsonObject responsesTool;
                    responsesTool["type"] = "function";
                    responsesTool["name"] = functionObj["name"];
                    responsesTool["description"] = functionObj["description"];
                    responsesTool["parameters"] = functionObj["parameters"];
                    responsesTools.append(responsesTool);
                }
            }
            request["tools"] = responsesTools;
        }
    }

    request["stream"] = true;
}
+
+QList OpenAIResponsesProvider::getInstalledModels(const QString &url)
+{
+ QList models;
+ QNetworkAccessManager manager;
+ QNetworkRequest request(QString("%1/v1/models").arg(url));
+
+ request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
+ if (!apiKey().isEmpty()) {
+ request.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
+ }
+
+ QNetworkReply *reply = manager.get(request);
+ QEventLoop loop;
+ QObject::connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
+ loop.exec();
+
+ if (reply->error() == QNetworkReply::NoError) {
+ const QByteArray responseData = reply->readAll();
+ const QJsonDocument jsonResponse = QJsonDocument::fromJson(responseData);
+ const QJsonObject jsonObject = jsonResponse.object();
+
+ if (jsonObject.contains("data")) {
+ const QJsonArray modelArray = jsonObject["data"].toArray();
+ models.reserve(modelArray.size());
+
+ static const QStringList modelPrefixes = {"gpt-5", "o1", "o2", "o3", "o4"};
+
+ for (const QJsonValue &value : modelArray) {
+ const QJsonObject modelObject = value.toObject();
+ if (!modelObject.contains("id")) {
+ continue;
+ }
+
+ const QString modelId = modelObject["id"].toString();
+ for (const QString &prefix : modelPrefixes) {
+ if (modelId.contains(prefix)) {
+ models.append(modelId);
+ break;
+ }
+ }
+ }
+ }
+ } else {
+ LOG_MESSAGE(QString("Error fetching OpenAI models: %1").arg(reply->errorString()));
+ }
+
+ reply->deleteLater();
+ return models;
+}
+
+QList OpenAIResponsesProvider::validateRequest(
+ const QJsonObject &request, LLMCore::TemplateType type)
+{
+ Q_UNUSED(type);
+
+ QList errors;
+
+ if (!request.contains("input")) {
+ errors.append("Missing required field: input");
+ return errors;
+ }
+
+ const QJsonValue inputValue = request["input"];
+ if (!inputValue.isString() && !inputValue.isArray()) {
+ errors.append("Field 'input' must be either a string or an array");
+ }
+
+ if (request.contains("max_output_tokens") && !request["max_output_tokens"].isDouble()) {
+ errors.append("Field 'max_output_tokens' must be a number");
+ }
+
+ if (request.contains("top_p") && !request["top_p"].isDouble()) {
+ errors.append("Field 'top_p' must be a number");
+ }
+
+ if (request.contains("reasoning") && !request["reasoning"].isObject()) {
+ errors.append("Field 'reasoning' must be an object");
+ }
+
+ if (request.contains("stream") && !request["stream"].isBool()) {
+ errors.append("Field 'stream' must be a boolean");
+ }
+
+ if (request.contains("tools") && !request["tools"].isArray()) {
+ errors.append("Field 'tools' must be an array");
+ }
+
+ return errors;
+}
+
// Reuses the regular OpenAI API key from provider settings.
QString OpenAIResponsesProvider::apiKey() const
{
    return Settings::providerSettings().openAiApiKey();
}

// Sets the JSON content type and, when a key is configured, Bearer auth.
void OpenAIResponsesProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
{
    networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");

    if (!apiKey().isEmpty()) {
        networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
    }
}

LLMCore::ProviderID OpenAIResponsesProvider::providerID() const
{
    return LLMCore::ProviderID::OpenAIResponses;
}
+
// Dispatches a request through the shared HTTP client. The data buffer is
// reset only when no message accumulator exists for requestId yet — i.e. a
// fresh request, not a tool-execution continuation.
void OpenAIResponsesProvider::sendRequest(
    const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload)
{
    if (!m_messages.contains(requestId)) {
        m_dataBuffers[requestId].clear();
    }

    // Remember URL and payload — presumably so continuations can re-post to
    // the same endpoint; confirm against onToolExecutionComplete.
    m_requestUrls[requestId] = url;
    m_originalRequests[requestId] = payload;

    QNetworkRequest networkRequest(url);
    prepareNetworkRequest(networkRequest);

    LLMCore::HttpRequest
        request{.networkRequest = networkRequest, .requestId = requestId, .payload = payload};

    emit httpClient()->sendRequest(request);
}
+
// Capability flags: this provider supports tool calls, image input, and
// reasoning ("thinking") mode.
bool OpenAIResponsesProvider::supportsTools() const
{
    return true;
}

bool OpenAIResponsesProvider::supportImage() const
{
    return true;
}

bool OpenAIResponsesProvider::supportThinking() const
{
    return true;
}

// Cancels the in-flight HTTP request, then drops all per-request state.
void OpenAIResponsesProvider::cancelRequest(const LLMCore::RequestID &requestId)
{
    LLMCore::Provider::cancelRequest(requestId);
    cleanupRequest(requestId);
}
+
+// Parses an incoming chunk of the SSE stream. Lines arrive as
+// "event: <type>" / "data: <json>" pairs; the event type applies to every
+// data line until the next "event:" line.
+void OpenAIResponsesProvider::onDataReceived(
+    const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data)
+{
+    LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
+    const QStringList lines = buffers.rawStreamBuffer.processData(data);
+
+    QString currentEventType;
+
+    for (const QString &line : lines) {
+        // Trim once and compare against the trimmed form everywhere: SSE
+        // streams delimited with CRLF leave a trailing '\r' on each line,
+        // which would otherwise defeat the equality/prefix checks below.
+        const QString trimmedLine = line.trimmed();
+        if (trimmedLine.isEmpty()) {
+            continue;
+        }
+
+        if (trimmedLine == "data: [DONE]") {
+            continue;
+        }
+
+        if (trimmedLine.startsWith("event: ")) {
+            currentEventType = trimmedLine.mid(7).trimmed();
+            continue;
+        }
+
+        QString dataLine = trimmedLine;
+        if (trimmedLine.startsWith("data: ")) {
+            dataLine = trimmedLine.mid(6);
+        }
+
+        const QJsonDocument doc = QJsonDocument::fromJson(dataLine.toUtf8());
+        if (doc.isObject()) {
+            const QJsonObject obj = doc.object();
+            processStreamEvent(requestId, currentEventType, obj);
+        }
+    }
+}
+
+// Finalizes a request once the HTTP transfer ends. On failure the error is
+// propagated and all state dropped. On success, completion is deferred when
+// the message still awaits tool execution (a continuation request follows);
+// otherwise the accumulated response text is emitted and state cleaned up.
+void OpenAIResponsesProvider::onRequestFinished(
+    const QodeAssist::LLMCore::RequestID &requestId, bool success, const QString &error)
+{
+    if (!success) {
+        LOG_MESSAGE(QString("OpenAIResponses request %1 failed: %2").arg(requestId, error));
+        emit requestFailed(requestId, error);
+        cleanupRequest(requestId);
+        return;
+    }
+
+    // Tool execution in flight: keep all per-request state alive for the
+    // continuation request that onToolExecutionComplete() will send.
+    if (m_messages.contains(requestId)) {
+        OpenAIResponsesMessage *message = m_messages[requestId];
+        if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
+            return;
+        }
+    }
+
+    if (m_dataBuffers.contains(requestId)) {
+        const LLMCore::DataBuffers &buffers = m_dataBuffers[requestId];
+        if (!buffers.responseContent.isEmpty()) {
+            emit fullResponseReceived(requestId, buffers.responseContent);
+        } else {
+            // Emit an empty response anyway so listeners can terminate cleanly.
+            LOG_MESSAGE(QString("WARNING: OpenAIResponses - Response content is empty for %1, "
+                                "emitting empty response")
+                            .arg(requestId));
+            emit fullResponseReceived(requestId, "");
+        }
+    } else {
+        LOG_MESSAGE(
+            QString("WARNING: OpenAIResponses - No data buffer found for %1").arg(requestId));
+    }
+
+    cleanupRequest(requestId);
+}
+
+// Central dispatcher for Responses API server-sent events. Lazily creates
+// the per-request message accumulator and routes each event type to the
+// appropriate text/reasoning/tool-call handling.
+void OpenAIResponsesProvider::processStreamEvent(
+    const QString &requestId, const QString &eventType, const QJsonObject &data)
+{
+    OpenAIResponsesMessage *message = m_messages.value(requestId);
+    if (!message) {
+        message = new OpenAIResponsesMessage(this);
+        m_messages[requestId] = message;
+
+        // A pre-existing data buffer means this stream is a continuation
+        // after tool execution, not a brand-new turn.
+        if (m_dataBuffers.contains(requestId)) {
+            emit continuationStarted(requestId);
+        }
+    } else if (
+        m_dataBuffers.contains(requestId)
+        && message->state() == LLMCore::MessageState::RequiresToolExecution) {
+        message->startNewContinuation();
+        emit continuationStarted(requestId);
+    }
+
+    if (eventType == "response.content_part.added") {
+        // Structural marker only; nothing to accumulate.
+    } else if (eventType == "response.output_text.delta") {
+        // Incremental assistant text: append and forward to listeners.
+        const QString delta = data["delta"].toString();
+        if (!delta.isEmpty()) {
+            m_dataBuffers[requestId].responseContent += delta;
+            emit partialResponseReceived(requestId, delta);
+        }
+    } else if (eventType == "response.output_text.done") {
+        // Authoritative full text for the part; replaces accumulated deltas.
+        const QString fullText = data["text"].toString();
+        if (!fullText.isEmpty()) {
+            m_dataBuffers[requestId].responseContent = fullText;
+        }
+    } else if (eventType == "response.content_part.done") {
+        // Structural marker only; the text was handled above.
+    } else if (eventType == "response.output_item.added") {
+        using namespace QodeAssist::OpenAIResponses;
+        const QJsonObject item = data["item"].toObject();
+        OutputItem outputItem = OutputItem::fromJson(item);
+
+        if (const auto *functionCall = outputItem.asFunctionCall()) {
+            if (!functionCall->callId.isEmpty() && !functionCall->name.isEmpty()) {
+                if (!m_itemIdToCallId.contains(requestId)) {
+                    m_itemIdToCallId[requestId] = QHash();
+                }
+                // Argument deltas reference the item id, while tool execution
+                // is keyed by call_id — remember the mapping.
+                m_itemIdToCallId[requestId][functionCall->id] = functionCall->callId;
+                message->handleToolCallStart(functionCall->callId, functionCall->name);
+            }
+        } else if (const auto *reasoning = outputItem.asReasoning()) {
+            if (!reasoning->id.isEmpty()) {
+                message->handleReasoningStart(reasoning->id);
+            }
+        }
+    } else if (eventType == "response.reasoning_content.delta") {
+        // Incremental reasoning ("thinking") text for an item.
+        const QString itemId = data["item_id"].toString();
+        const QString delta = data["delta"].toString();
+        if (!itemId.isEmpty() && !delta.isEmpty()) {
+            message->handleReasoningDelta(itemId, delta);
+        }
+    } else if (eventType == "response.reasoning_content.done") {
+        const QString itemId = data["item_id"].toString();
+        if (!itemId.isEmpty()) {
+            message->handleReasoningComplete(itemId);
+            emitPendingThinkingBlocks(requestId);
+        }
+    } else if (eventType == "response.function_call_arguments.delta") {
+        // Incremental JSON arguments for a function call, keyed via the
+        // item-id -> call-id mapping recorded on output_item.added.
+        const QString itemId = data["item_id"].toString();
+        const QString delta = data["delta"].toString();
+        if (!itemId.isEmpty() && !delta.isEmpty()) {
+            const QString callId = m_itemIdToCallId.value(requestId).value(itemId);
+            if (!callId.isEmpty()) {
+                message->handleToolCallDelta(callId, delta);
+            } else {
+                LOG_MESSAGE(QString("ERROR: No call_id mapping found for item_id: %1").arg(itemId));
+            }
+        }
+    } else if (
+        eventType == "response.function_call_arguments.done"
+        || eventType == "response.output_item.done") {
+        const QString itemId = data["item_id"].toString();
+        const QJsonObject item = data["item"].toObject();
+
+        if (!item.isEmpty() && item["type"].toString() == "reasoning") {
+            using namespace QodeAssist::OpenAIResponses;
+
+            const QString finalItemId = itemId.isEmpty() ? item["id"].toString() : itemId;
+
+            // Prefer the summary text; fall back to joined content parts.
+            ReasoningOutput reasoningOutput = ReasoningOutput::fromJson(item);
+            QString reasoningText;
+
+            if (!reasoningOutput.summaryText.isEmpty()) {
+                reasoningText = reasoningOutput.summaryText;
+            } else if (!reasoningOutput.contentTexts.isEmpty()) {
+                reasoningText = reasoningOutput.contentTexts.join("\n");
+            }
+
+            // Some models stream no reasoning detail; show a placeholder so
+            // the UI still reflects that reasoning occurred.
+            if (reasoningText.isEmpty()) {
+                reasoningText = QString(
+                    "[Reasoning process completed, but detailed thinking is not available in "
+                    "streaming mode. The model has processed your request with extended reasoning.]");
+            }
+
+            if (!finalItemId.isEmpty()) {
+                message->handleReasoningDelta(finalItemId, reasoningText);
+                message->handleReasoningComplete(finalItemId);
+                emitPendingThinkingBlocks(requestId);
+            }
+        } else if (item.isEmpty() && !itemId.isEmpty()) {
+            // arguments.done carries only item_id; resolve it to the call_id.
+            const QString callId = m_itemIdToCallId.value(requestId).value(itemId);
+            if (!callId.isEmpty()) {
+                message->handleToolCallComplete(callId);
+            } else {
+                LOG_MESSAGE(
+                    QString("ERROR: OpenAIResponses - No call_id mapping found for item_id: %1")
+                        .arg(itemId));
+            }
+        } else if (!item.isEmpty() && item["type"].toString() == "function_call") {
+            // output_item.done carries the full item, including call_id.
+            const QString callId = item["call_id"].toString();
+            if (!callId.isEmpty()) {
+                message->handleToolCallComplete(callId);
+            } else {
+                LOG_MESSAGE(
+                    QString("ERROR: OpenAIResponses - Function call done but call_id is empty"));
+            }
+        }
+    } else if (eventType == "response.created") {
+        // Lifecycle marker; no state to update.
+    } else if (eventType == "response.in_progress") {
+        // Lifecycle marker; no state to update.
+    } else if (eventType == "response.completed") {
+        using namespace QodeAssist::OpenAIResponses;
+        const QJsonObject responseObj = data["response"].toObject();
+        Response response = Response::fromJson(responseObj);
+
+        const QString statusStr = responseObj["status"].toString();
+
+        // Fallback: if no text was streamed, take it from the final object.
+        if (m_dataBuffers[requestId].responseContent.isEmpty()) {
+            const QString aggregatedText = response.getAggregatedText();
+            if (!aggregatedText.isEmpty()) {
+                m_dataBuffers[requestId].responseContent = aggregatedText;
+            }
+        }
+
+        message->handleStatus(statusStr);
+        handleMessageComplete(requestId);
+    } else if (eventType == "response.incomplete") {
+        using namespace QodeAssist::OpenAIResponses;
+        const QJsonObject responseObj = data["response"].toObject();
+
+        if (!responseObj.isEmpty()) {
+            Response response = Response::fromJson(responseObj);
+            const QString statusStr = responseObj["status"].toString();
+
+            if (m_dataBuffers[requestId].responseContent.isEmpty()) {
+                const QString aggregatedText = response.getAggregatedText();
+                if (!aggregatedText.isEmpty()) {
+                    m_dataBuffers[requestId].responseContent = aggregatedText;
+                }
+            }
+
+            message->handleStatus(statusStr);
+        } else {
+            message->handleStatus("incomplete");
+        }
+
+        handleMessageComplete(requestId);
+    } else if (!eventType.isEmpty()) {
+        LOG_MESSAGE(QString("WARNING: OpenAIResponses - Unhandled event type '%1' for request %2\nData: %3")
+                        .arg(eventType)
+                        .arg(requestId)
+                        .arg(QString::fromUtf8(QJsonDocument(data).toJson(QJsonDocument::Compact))));
+    }
+}
+
+// Forwards any reasoning ("thinking") blocks accumulated on the message
+// that have not yet been emitted. A per-request high-water mark ensures
+// each block is delivered to listeners at most once.
+void OpenAIResponsesProvider::emitPendingThinkingBlocks(const QString &requestId)
+{
+    OpenAIResponsesMessage *message = m_messages.value(requestId);
+    if (!message) {
+        return;
+    }
+
+    const auto blocks = message->getCurrentThinkingContent();
+    if (blocks.isEmpty()) {
+        return;
+    }
+
+    const int total = blocks.size();
+    for (int index = m_emittedThinkingBlocksCount.value(requestId, 0); index < total; ++index) {
+        const auto *block = blocks[index];
+
+        // Whitespace-only blocks are skipped, but still counted as emitted.
+        if (block->thinking().trimmed().isEmpty()) {
+            continue;
+        }
+
+        emit thinkingBlockReceived(requestId, block->thinking(), block->signature());
+    }
+
+    m_emittedThinkingBlocksCount[requestId] = total;
+}
+
+// Invoked when a terminal stream event (completed/incomplete) arrives.
+// Flushes pending thinking blocks, then — if the model requested tool
+// calls — starts their execution; onToolExecutionComplete() later sends
+// the continuation request with the results.
+void OpenAIResponsesProvider::handleMessageComplete(const QString &requestId)
+{
+    if (!m_messages.contains(requestId)) {
+        return;
+    }
+
+    OpenAIResponsesMessage *message = m_messages[requestId];
+
+    emitPendingThinkingBlocks(requestId);
+
+    if (message->state() == LLMCore::MessageState::RequiresToolExecution) {
+        const auto toolUseContent = message->getCurrentToolUseContent();
+
+        if (toolUseContent.isEmpty()) {
+            return;
+        }
+
+        for (const auto *toolContent : toolUseContent) {
+            const auto toolStringName = m_toolsManager->toolsFactory()->getStringName(
+                toolContent->name());
+            emit toolExecutionStarted(requestId, toolContent->id(), toolStringName);
+            m_toolsManager->executeToolCall(
+                requestId, toolContent->id(), toolContent->name(), toolContent->input());
+        }
+    }
+}
+
+// Called by the tools manager when every tool call of a request finished.
+// Emits per-tool completion notifications, appends the assistant's items
+// (function calls / reasoning) and the tool results to the original
+// "input" array, then re-sends the extended request as a continuation.
+void OpenAIResponsesProvider::onToolExecutionComplete(
+    const QString &requestId, const QHash &toolResults)
+{
+    if (!m_messages.contains(requestId) || !m_requestUrls.contains(requestId)) {
+        LOG_MESSAGE(QString("ERROR: OpenAIResponses - Missing data for continuation request %1")
+                        .arg(requestId));
+        cleanupRequest(requestId);
+        return;
+    }
+
+    OpenAIResponsesMessage *message = m_messages[requestId];
+    const auto toolContent = message->getCurrentToolUseContent();
+
+    // Match each result to its tool-use block to recover the display name.
+    for (auto it = toolResults.constBegin(); it != toolResults.constEnd(); ++it) {
+        for (const auto *tool : toolContent) {
+            if (tool->id() == it.key()) {
+                const auto toolStringName = m_toolsManager->toolsFactory()->getStringName(
+                    tool->name());
+                // it.value() already holds this tool's result; avoids a
+                // second hash lookup via toolResults[tool->id()].
+                emit toolExecutionCompleted(requestId, tool->id(), toolStringName, it.value());
+                break;
+            }
+        }
+    }
+
+    QJsonObject continuationRequest = m_originalRequests[requestId];
+    QJsonArray input = continuationRequest["input"].toArray();
+
+    const QList assistantItems = message->toItemsFormat();
+    for (const QJsonObject &item : assistantItems) {
+        input.append(item);
+    }
+
+    const QJsonArray toolResultItems = message->createToolResultItems(toolResults);
+    for (const QJsonValue &item : toolResultItems) {
+        input.append(item);
+    }
+
+    continuationRequest["input"] = input;
+
+    // The continuation stream appends fresh text; drop stale content first.
+    m_dataBuffers[requestId].responseContent.clear();
+
+    sendRequest(requestId, m_requestUrls[requestId], continuationRequest);
+}
+
+// Releases every piece of per-request state. The message object is
+// detached from the map first and destroyed via deleteLater() because
+// queued signal deliveries may still reference it.
+void OpenAIResponsesProvider::cleanupRequest(const LLMCore::RequestID &requestId)
+{
+    if (m_messages.contains(requestId)) {
+        OpenAIResponsesMessage *message = m_messages.take(requestId);
+        message->deleteLater();
+    }
+
+    m_dataBuffers.remove(requestId);
+    m_requestUrls.remove(requestId);
+    m_originalRequests.remove(requestId);
+    m_itemIdToCallId.remove(requestId);
+    m_emittedThinkingBlocksCount.remove(requestId);
+    m_toolsManager->cleanupRequest(requestId);
+}
+
+} // namespace QodeAssist::Providers
diff --git a/providers/OpenAIResponsesProvider.hpp b/providers/OpenAIResponsesProvider.hpp
new file mode 100644
index 0000000..39d1317
--- /dev/null
+++ b/providers/OpenAIResponsesProvider.hpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see .
+ */
+
+#pragma once
+
+#include "OpenAIResponsesMessage.hpp"
+#include "tools/ToolsManager.hpp"
+#include
+
+namespace QodeAssist::Providers {
+
+// Provider implementation for OpenAI's Responses API (/v1/responses),
+// covering SSE stream parsing, reasoning ("thinking") blocks, and
+// tool-call continuation requests.
+class OpenAIResponsesProvider : public LLMCore::Provider
+{
+    Q_OBJECT
+public:
+    explicit OpenAIResponsesProvider(QObject *parent = nullptr);
+
+    QString name() const override;
+    QString url() const override;
+    QString completionEndpoint() const override;
+    QString chatEndpoint() const override;
+    bool supportsModelListing() const override;
+    void prepareRequest(
+        QJsonObject &request,
+        LLMCore::PromptTemplate *prompt,
+        LLMCore::ContextData context,
+        LLMCore::RequestType type,
+        bool isToolsEnabled,
+        bool isThinkingEnabled) override;
+    QList getInstalledModels(const QString &url) override;
+    QList validateRequest(const QJsonObject &request, LLMCore::TemplateType type) override;
+    QString apiKey() const override;
+    void prepareNetworkRequest(QNetworkRequest &networkRequest) const override;
+    LLMCore::ProviderID providerID() const override;
+
+    void sendRequest(
+        const LLMCore::RequestID &requestId, const QUrl &url, const QJsonObject &payload) override;
+
+    bool supportsTools() const override;
+    bool supportImage() const override;
+    bool supportThinking() const override;
+    void cancelRequest(const LLMCore::RequestID &requestId) override;
+
+public slots:
+    void onDataReceived(
+        const QodeAssist::LLMCore::RequestID &requestId, const QByteArray &data) override;
+    void onRequestFinished(
+        const QodeAssist::LLMCore::RequestID &requestId,
+        bool success,
+        const QString &error) override;
+
+private slots:
+    // Receives tool results and sends the continuation request.
+    void onToolExecutionComplete(
+        const QString &requestId, const QHash &toolResults);
+
+private:
+    void processStreamEvent(const QString &requestId, const QString &eventType, const QJsonObject &data);
+    void emitPendingThinkingBlocks(const QString &requestId);
+    void handleMessageComplete(const QString &requestId);
+    void cleanupRequest(const LLMCore::RequestID &requestId);
+
+    // Per-request state, keyed by request id.
+    QHash m_messages;
+    QHash m_requestUrls;
+    QHash m_originalRequests;
+    // Maps stream output item ids to function call_ids, per request.
+    QHash> m_itemIdToCallId;
+    // High-water mark of thinking blocks already emitted, per request.
+    QHash m_emittedThinkingBlocksCount;
+    Tools::ToolsManager *m_toolsManager;
+};
+
+} // namespace QodeAssist::Providers
+
diff --git a/providers/OpenAIResponsesRequestBuilder.hpp b/providers/OpenAIResponsesRequestBuilder.hpp
new file mode 100644
index 0000000..c70014a
--- /dev/null
+++ b/providers/OpenAIResponsesRequestBuilder.hpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see .
+ */
+
+#pragma once
+
+#include "OpenAIResponses/ModelRequest.hpp"
+
+#include
+#include
+#include
+#include
+
+namespace QodeAssist::OpenAIResponses {
+
+// Fluent builder for OpenAI Responses API request payloads. Optional
+// fields are tracked with std::optional so that toJson() only serializes
+// what was explicitly set.
+class RequestBuilder
+{
+public:
+    RequestBuilder() = default;
+
+    // Convenience: wraps plain text into a single-content message.
+    RequestBuilder &addMessage(Role role, QString content)
+    {
+        Message msg;
+        msg.role = role;
+        msg.content.append(MessageContent(std::move(content)));
+        m_messages.append(std::move(msg));
+        return *this;
+    }
+
+    RequestBuilder &addMessage(Message msg)
+    {
+        m_messages.append(std::move(msg));
+        return *this;
+    }
+
+    RequestBuilder &setModel(QString model)
+    {
+        m_model = std::move(model);
+        return *this;
+    }
+
+    RequestBuilder &setInstructions(QString instructions)
+    {
+        m_instructions = std::move(instructions);
+        return *this;
+    }
+
+    RequestBuilder &addTool(Tool tool)
+    {
+        m_tools.append(std::move(tool));
+        return *this;
+    }
+
+    RequestBuilder &setTemperature(double temp) noexcept
+    {
+        m_temperature = temp;
+        return *this;
+    }
+
+    RequestBuilder &setTopP(double topP) noexcept
+    {
+        m_topP = topP;
+        return *this;
+    }
+
+    RequestBuilder &setMaxOutputTokens(int tokens) noexcept
+    {
+        m_maxOutputTokens = tokens;
+        return *this;
+    }
+
+    RequestBuilder &setStream(bool stream) noexcept
+    {
+        m_stream = stream;
+        return *this;
+    }
+
+    RequestBuilder &setStore(bool store) noexcept
+    {
+        m_store = store;
+        return *this;
+    }
+
+    RequestBuilder &setTextFormat(TextFormatOptions format)
+    {
+        m_textFormat = std::move(format);
+        return *this;
+    }
+
+    RequestBuilder &setReasoningEffort(ReasoningEffort effort) noexcept
+    {
+        m_reasoningEffort = effort;
+        return *this;
+    }
+
+    RequestBuilder &setMetadata(QMap metadata)
+    {
+        m_metadata = std::move(metadata);
+        return *this;
+    }
+
+    // When true, asks the API to include encrypted reasoning content in
+    // the response (serialized via the "include" array).
+    RequestBuilder &setIncludeReasoningContent(bool include) noexcept
+    {
+        m_includeReasoningContent = include;
+        return *this;
+    }
+
+    // Resets the builder to its default-constructed state for reuse.
+    RequestBuilder &clear() noexcept
+    {
+        m_model.clear();
+        m_messages.clear();
+        m_instructions.reset();
+        m_tools.clear();
+        m_temperature.reset();
+        m_topP.reset();
+        m_maxOutputTokens.reset();
+        m_stream = false;
+        m_store.reset();
+        m_textFormat.reset();
+        m_reasoningEffort.reset();
+        m_includeReasoningContent = false;
+        m_metadata.clear();
+        return *this;
+    }
+
+    // Serializes the configured request into the JSON payload shape
+    // expected by the Responses API. Unset optional fields are omitted.
+    QJsonObject toJson() const
+    {
+        QJsonObject obj;
+
+        if (!m_model.isEmpty()) {
+            obj["model"] = m_model;
+        }
+
+        if (!m_messages.isEmpty()) {
+            // Shortcut: a lone user message with a single content part may
+            // be sent as a bare "input" value instead of an items array.
+            if (m_messages.size() == 1 && m_messages[0].role == Role::User
+                && m_messages[0].content.size() == 1) {
+                obj["input"] = m_messages[0].content[0].toJson();
+            } else {
+                QJsonArray input;
+                for (const auto &msg : m_messages) {
+                    input.append(msg.toJson());
+                }
+                obj["input"] = input;
+            }
+        }
+
+        if (m_instructions) {
+            obj["instructions"] = *m_instructions;
+        }
+
+        if (!m_tools.isEmpty()) {
+            QJsonArray tools;
+            for (const auto &tool : m_tools) {
+                tools.append(tool.toJson());
+            }
+            obj["tools"] = tools;
+        }
+
+        if (m_temperature) {
+            obj["temperature"] = *m_temperature;
+        }
+
+        if (m_topP) {
+            obj["top_p"] = *m_topP;
+        }
+
+        if (m_maxOutputTokens) {
+            obj["max_output_tokens"] = *m_maxOutputTokens;
+        }
+
+        obj["stream"] = m_stream;
+
+        if (m_store) {
+            obj["store"] = *m_store;
+        }
+
+        if (m_textFormat) {
+            QJsonObject textObj;
+            textObj["format"] = m_textFormat->toJson();
+            obj["text"] = textObj;
+        }
+
+        if (m_reasoningEffort) {
+            QJsonObject reasoning;
+            reasoning["effort"] = effortToString(*m_reasoningEffort);
+            obj["reasoning"] = reasoning;
+        }
+
+        if (m_includeReasoningContent) {
+            QJsonArray include;
+            include.append("reasoning.encrypted_content");
+            obj["include"] = include;
+        }
+
+        if (!m_metadata.isEmpty()) {
+            QJsonObject metadata;
+            for (auto it = m_metadata.constBegin(); it != m_metadata.constEnd(); ++it) {
+                metadata[it.key()] = QJsonValue::fromVariant(it.value());
+            }
+            obj["metadata"] = metadata;
+        }
+
+        return obj;
+    }
+
+private:
+    QString m_model;
+    QList m_messages;
+    std::optional m_instructions;
+    QList m_tools;
+    std::optional m_temperature;
+    std::optional m_topP;
+    std::optional m_maxOutputTokens;
+    bool m_stream = false;
+    std::optional m_store;
+    std::optional m_textFormat;
+    std::optional m_reasoningEffort;
+    bool m_includeReasoningContent = false;
+    QMap m_metadata;
+
+    // Maps ReasoningEffort to its wire string; the trailing return is an
+    // unreachable safety net that defaults to "medium".
+    static QString effortToString(ReasoningEffort e)
+    {
+        switch (e) {
+        case ReasoningEffort::None:
+            return "none";
+        case ReasoningEffort::Minimal:
+            return "minimal";
+        case ReasoningEffort::Low:
+            return "low";
+        case ReasoningEffort::Medium:
+            return "medium";
+        case ReasoningEffort::High:
+            return "high";
+        }
+        return "medium";
+    }
+};
+
+} // namespace QodeAssist::OpenAIResponses
+
diff --git a/providers/Providers.hpp b/providers/Providers.hpp
index 989fb52..7876888 100644
--- a/providers/Providers.hpp
+++ b/providers/Providers.hpp
@@ -29,6 +29,7 @@
#include "providers/OllamaProvider.hpp"
#include "providers/OpenAICompatProvider.hpp"
#include "providers/OpenAIProvider.hpp"
+#include "providers/OpenAIResponsesProvider.hpp"
#include "providers/OpenRouterAIProvider.hpp"
namespace QodeAssist::Providers {
@@ -39,6 +40,7 @@ inline void registerProviders()
providerManager.registerProvider();
providerManager.registerProvider();
providerManager.registerProvider();
+ providerManager.registerProvider();
providerManager.registerProvider();
providerManager.registerProvider();
providerManager.registerProvider();
diff --git a/settings/ChatAssistantSettings.cpp b/settings/ChatAssistantSettings.cpp
index 844b8ee..654c32d 100644
--- a/settings/ChatAssistantSettings.cpp
+++ b/settings/ChatAssistantSettings.cpp
@@ -173,6 +173,25 @@ ChatAssistantSettings::ChatAssistantSettings()
thinkingMaxTokens.setRange(-1, 200000);
thinkingMaxTokens.setDefaultValue(16000);
+ // OpenAI Responses API Settings
+ openAIResponsesReasoningEffort.setSettingsKey(Constants::CA_OPENAI_RESPONSES_REASONING_EFFORT);
+ openAIResponsesReasoningEffort.setLabelText(Tr::tr("Reasoning effort:"));
+ openAIResponsesReasoningEffort.setDisplayStyle(Utils::SelectionAspect::DisplayStyle::ComboBox);
+ openAIResponsesReasoningEffort.addOption("None");
+ openAIResponsesReasoningEffort.addOption("Minimal");
+ openAIResponsesReasoningEffort.addOption("Low");
+ openAIResponsesReasoningEffort.addOption("Medium");
+ openAIResponsesReasoningEffort.addOption("High");
+ openAIResponsesReasoningEffort.setDefaultValue("Medium");
+ openAIResponsesReasoningEffort.setToolTip(
+ Tr::tr("Constrains effort on reasoning for OpenAI gpt-5 and o-series models:\n\n"
+ "None: No reasoning (gpt-5.1 only)\n"
+ "Minimal: Minimal reasoning effort (o-series only)\n"
+ "Low: Low reasoning effort\n"
+ "Medium: Balanced reasoning (default for most models)\n"
+ "High: Maximum reasoning effort (gpt-5-pro only supports this)\n\n"
+ "Note: Reducing effort = faster responses + fewer tokens"));
+
autosave.setDefaultValue(true);
autosave.setLabelText(Tr::tr("Enable autosave when message received"));
@@ -270,6 +289,9 @@ ChatAssistantSettings::ChatAssistantSettings()
thinkingGrid.addRow({thinkingBudgetTokens});
thinkingGrid.addRow({thinkingMaxTokens});
+ auto openAIResponsesGrid = Grid{};
+ openAIResponsesGrid.addRow({openAIResponsesReasoningEffort});
+
auto chatViewSettingsGrid = Grid{};
chatViewSettingsGrid.addRow({textFontFamily, textFontSize});
chatViewSettingsGrid.addRow({codeFontFamily, codeFontSize});
@@ -293,9 +315,13 @@ ChatAssistantSettings::ChatAssistantSettings()
Column{enableChatTools}},
Space{8},
Group{
- title(Tr::tr("Extended Thinking (if provider/model supports)")),
+ title(Tr::tr("Extended Thinking (Claude)")),
Column{enableThinkingMode, Row{thinkingGrid, Stretch{1}}}},
Space{8},
+ Group{
+ title(Tr::tr("OpenAI Responses API")),
+ Column{Row{openAIResponsesGrid, Stretch{1}}}},
+ Space{8},
Group{
title(Tr::tr("General Parameters")),
Row{genGrid, Stretch{1}},
@@ -352,6 +378,7 @@ void ChatAssistantSettings::resetSettingsToDefaults()
resetAspect(enableThinkingMode);
resetAspect(thinkingBudgetTokens);
resetAspect(thinkingMaxTokens);
+ resetAspect(openAIResponsesReasoningEffort);
resetAspect(linkOpenFiles);
resetAspect(enableChatTools);
resetAspect(textFontFamily);
diff --git a/settings/ChatAssistantSettings.hpp b/settings/ChatAssistantSettings.hpp
index a3c890a..c062556 100644
--- a/settings/ChatAssistantSettings.hpp
+++ b/settings/ChatAssistantSettings.hpp
@@ -70,6 +70,9 @@ public:
Utils::IntegerAspect thinkingBudgetTokens{this};
Utils::IntegerAspect thinkingMaxTokens{this};
+ // OpenAI Responses API Settings
+ Utils::SelectionAspect openAIResponsesReasoningEffort{this};
+
// Visuals settings
Utils::SelectionAspect textFontFamily{this};
Utils::IntegerAspect textFontSize{this};
diff --git a/settings/CodeCompletionSettings.cpp b/settings/CodeCompletionSettings.cpp
index ee14dcc..902e27e 100644
--- a/settings/CodeCompletionSettings.cpp
+++ b/settings/CodeCompletionSettings.cpp
@@ -151,7 +151,7 @@ CodeCompletionSettings::CodeCompletionSettings()
maxTokens.setSettingsKey(Constants::CC_MAX_TOKENS);
maxTokens.setLabelText(Tr::tr("Max Tokens:"));
maxTokens.setRange(-1, 900000);
- maxTokens.setDefaultValue(100);
+ maxTokens.setDefaultValue(500);
// Advanced Parameters
useTopP.setSettingsKey(Constants::CC_USE_TOP_P);
@@ -313,6 +313,25 @@ CodeCompletionSettings::CodeCompletionSettings()
contextWindow.setRange(-1, 10000);
contextWindow.setDefaultValue(2048);
+ // OpenAI Responses API Settings
+ openAIResponsesReasoningEffort.setSettingsKey(Constants::CC_OPENAI_RESPONSES_REASONING_EFFORT);
+ openAIResponsesReasoningEffort.setLabelText(Tr::tr("Reasoning effort:"));
+ openAIResponsesReasoningEffort.setDisplayStyle(Utils::SelectionAspect::DisplayStyle::ComboBox);
+ openAIResponsesReasoningEffort.addOption("None");
+ openAIResponsesReasoningEffort.addOption("Minimal");
+ openAIResponsesReasoningEffort.addOption("Low");
+ openAIResponsesReasoningEffort.addOption("Medium");
+ openAIResponsesReasoningEffort.addOption("High");
+ openAIResponsesReasoningEffort.setDefaultValue("Medium");
+ openAIResponsesReasoningEffort.setToolTip(
+ Tr::tr("Constrains effort on reasoning for OpenAI gpt-5 and o-series models:\n\n"
+ "None: No reasoning (gpt-5.1 only)\n"
+ "Minimal: Minimal reasoning effort (o-series only)\n"
+ "Low: Low reasoning effort\n"
+ "Medium: Balanced reasoning (default for most models)\n"
+ "High: Maximum reasoning effort (gpt-5-pro only supports this)\n\n"
+ "Note: Reducing effort = faster responses + fewer tokens"));
+
resetToDefaults.m_buttonText = Tr::tr("Reset Page to Defaults");
readSettings();
@@ -338,6 +357,9 @@ CodeCompletionSettings::CodeCompletionSettings()
ollamaGrid.addRow({ollamaLivetime});
ollamaGrid.addRow({contextWindow});
+ auto openAIResponsesGrid = Grid{};
+ openAIResponsesGrid.addRow({openAIResponsesReasoningEffort});
+
auto contextGrid = Grid{};
contextGrid.addRow({Row{readFullFile}});
contextGrid.addRow({Row{readFileParts, readStringsBeforeCursor, readStringsAfterCursor}});
@@ -398,6 +420,8 @@ CodeCompletionSettings::CodeCompletionSettings()
Group{title(Tr::tr("Quick Refactor Settings")),
Column{useOpenFilesInQuickRefactor, quickRefactorSystemPrompt}},
Space{8},
+ Group{title(Tr::tr("OpenAI Responses API")), Column{Row{openAIResponsesGrid, Stretch{1}}}},
+ Space{8},
Group{title(Tr::tr("Ollama Settings")), Column{Row{ollamaGrid, Stretch{1}}}},
Stretch{1}};
});
@@ -458,6 +482,7 @@ void CodeCompletionSettings::resetSettingsToDefaults()
resetAspect(maxChangesCacheSize);
resetAspect(ollamaLivetime);
resetAspect(contextWindow);
+ resetAspect(openAIResponsesReasoningEffort);
resetAspect(useUserMessageTemplateForCC);
resetAspect(userMessageTemplateForCC);
resetAspect(systemPromptForNonFimModels);
diff --git a/settings/CodeCompletionSettings.hpp b/settings/CodeCompletionSettings.hpp
index 597d1f1..a1b786b 100644
--- a/settings/CodeCompletionSettings.hpp
+++ b/settings/CodeCompletionSettings.hpp
@@ -90,6 +90,9 @@ public:
Utils::StringAspect ollamaLivetime{this};
Utils::IntegerAspect contextWindow{this};
+ // OpenAI Responses API Settings
+ Utils::SelectionAspect openAIResponsesReasoningEffort{this};
+
QString processMessageToFIM(const QString &prefix, const QString &suffix) const;
private:
diff --git a/settings/QuickRefactorSettings.cpp b/settings/QuickRefactorSettings.cpp
index 71f3174..bd59258 100644
--- a/settings/QuickRefactorSettings.cpp
+++ b/settings/QuickRefactorSettings.cpp
@@ -133,6 +133,25 @@ QuickRefactorSettings::QuickRefactorSettings()
thinkingMaxTokens.setRange(1000, 200000);
thinkingMaxTokens.setDefaultValue(16000);
+ // OpenAI Responses API Settings
+ openAIResponsesReasoningEffort.setSettingsKey(Constants::QR_OPENAI_RESPONSES_REASONING_EFFORT);
+ openAIResponsesReasoningEffort.setLabelText(Tr::tr("Reasoning effort:"));
+ openAIResponsesReasoningEffort.setDisplayStyle(Utils::SelectionAspect::DisplayStyle::ComboBox);
+ openAIResponsesReasoningEffort.addOption("None");
+ openAIResponsesReasoningEffort.addOption("Minimal");
+ openAIResponsesReasoningEffort.addOption("Low");
+ openAIResponsesReasoningEffort.addOption("Medium");
+ openAIResponsesReasoningEffort.addOption("High");
+ openAIResponsesReasoningEffort.setDefaultValue("Medium");
+ openAIResponsesReasoningEffort.setToolTip(
+ Tr::tr("Constrains effort on reasoning for OpenAI gpt-5 and o-series models:\n\n"
+ "None: No reasoning (gpt-5.1 only)\n"
+ "Minimal: Minimal reasoning effort (o-series only)\n"
+ "Low: Low reasoning effort\n"
+ "Medium: Balanced reasoning (default for most models)\n"
+ "High: Maximum reasoning effort (gpt-5-pro only supports this)\n\n"
+ "Note: Reducing effort = faster responses + fewer tokens"));
+
// Context Settings
readFullFile.setSettingsKey(Constants::QR_READ_FULL_FILE);
readFullFile.setLabelText(Tr::tr("Read Full File"));
@@ -238,6 +257,9 @@ QuickRefactorSettings::QuickRefactorSettings()
toolsGrid.addRow({thinkingBudgetTokens});
toolsGrid.addRow({thinkingMaxTokens});
+ auto openAIResponsesGrid = Grid{};
+ openAIResponsesGrid.addRow({openAIResponsesReasoningEffort});
+
auto contextGrid = Grid{};
contextGrid.addRow({Row{readFullFile}});
contextGrid.addRow({Row{readFileParts, readStringsBeforeCursor, readStringsAfterCursor}});
@@ -260,6 +282,8 @@ QuickRefactorSettings::QuickRefactorSettings()
Space{8},
Group{title(Tr::tr("Tools Settings")), Column{Row{toolsGrid, Stretch{1}}}},
Space{8},
+ Group{title(Tr::tr("OpenAI Responses API")), Column{Row{openAIResponsesGrid, Stretch{1}}}},
+ Space{8},
Group{title(Tr::tr("Context Settings")), Column{Row{contextGrid, Stretch{1}}}},
Space{8},
Group{title(Tr::tr("Display Settings")), Column{Row{displayGrid, Stretch{1}}}},
@@ -346,6 +370,7 @@ void QuickRefactorSettings::resetSettingsToDefaults()
resetAspect(useThinking);
resetAspect(thinkingBudgetTokens);
resetAspect(thinkingMaxTokens);
+ resetAspect(openAIResponsesReasoningEffort);
resetAspect(readFullFile);
resetAspect(readFileParts);
resetAspect(readStringsBeforeCursor);
diff --git a/settings/QuickRefactorSettings.hpp b/settings/QuickRefactorSettings.hpp
index 54405f8..7774f2d 100644
--- a/settings/QuickRefactorSettings.hpp
+++ b/settings/QuickRefactorSettings.hpp
@@ -61,6 +61,9 @@ public:
Utils::IntegerAspect thinkingBudgetTokens{this};
Utils::IntegerAspect thinkingMaxTokens{this};
+ // OpenAI Responses API Settings
+ Utils::SelectionAspect openAIResponsesReasoningEffort{this};
+
// Context Settings
Utils::BoolAspect readFullFile{this};
Utils::BoolAspect readFileParts{this};
diff --git a/settings/SettingsConstants.hpp b/settings/SettingsConstants.hpp
index 04c9ce5..9b69c65 100644
--- a/settings/SettingsConstants.hpp
+++ b/settings/SettingsConstants.hpp
@@ -182,6 +182,10 @@ const char CC_USE_FREQUENCY_PENALTY[] = "QodeAssist.fimUseFrequencyPenalty";
const char CC_FREQUENCY_PENALTY[] = "QodeAssist.fimFrequencyPenalty";
const char CC_OLLAMA_LIVETIME[] = "QodeAssist.fimOllamaLivetime";
const char CC_OLLAMA_CONTEXT_WINDOW[] = "QodeAssist.ccOllamaContextWindow";
+
+// OpenAI Responses API Settings
+const char CC_OPENAI_RESPONSES_REASONING_EFFORT[] = "QodeAssist.ccOpenAIResponsesReasoningEffort";
+
const char CA_TEMPERATURE[] = "QodeAssist.chatTemperature";
const char CA_MAX_TOKENS[] = "QodeAssist.chatMaxTokens";
const char CA_USE_TOP_P[] = "QodeAssist.chatUseTopP";
@@ -197,6 +201,10 @@ const char CA_OLLAMA_CONTEXT_WINDOW[] = "QodeAssist.caOllamaContextWindow";
const char CA_ENABLE_THINKING_MODE[] = "QodeAssist.caEnableThinkingMode";
const char CA_THINKING_BUDGET_TOKENS[] = "QodeAssist.caThinkingBudgetTokens";
const char CA_THINKING_MAX_TOKENS[] = "QodeAssist.caThinkingMaxTokens";
+
+// OpenAI Responses API Settings
+const char CA_OPENAI_RESPONSES_REASONING_EFFORT[] = "QodeAssist.caOpenAIResponsesReasoningEffort";
+
const char CA_TEXT_FONT_FAMILY[] = "QodeAssist.caTextFontFamily";
const char CA_TEXT_FONT_SIZE[] = "QodeAssist.caTextFontSize";
const char CA_CODE_FONT_FAMILY[] = "QodeAssist.caCodeFontFamily";
@@ -221,6 +229,10 @@ const char QR_USE_TOOLS[] = "QodeAssist.qrUseTools";
const char QR_USE_THINKING[] = "QodeAssist.qrUseThinking";
const char QR_THINKING_BUDGET_TOKENS[] = "QodeAssist.qrThinkingBudgetTokens";
const char QR_THINKING_MAX_TOKENS[] = "QodeAssist.qrThinkingMaxTokens";
+
+// OpenAI Responses API Settings
+const char QR_OPENAI_RESPONSES_REASONING_EFFORT[] = "QodeAssist.qrOpenAIResponsesReasoningEffort";
+
const char QR_READ_FULL_FILE[] = "QodeAssist.qrReadFullFile";
const char QR_READ_STRINGS_BEFORE_CURSOR[] = "QodeAssist.qrReadStringsBeforeCursor";
const char QR_READ_STRINGS_AFTER_CURSOR[] = "QodeAssist.qrReadStringsAfterCursor";
diff --git a/templates/OpenAIResponses.hpp b/templates/OpenAIResponses.hpp
new file mode 100644
index 0000000..662ebf4
--- /dev/null
+++ b/templates/OpenAIResponses.hpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2024-2025 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "llmcore/PromptTemplate.hpp"
+#include "providers/OpenAIResponsesRequestBuilder.hpp"
+
+namespace QodeAssist::Templates {
+
+class OpenAIResponses : public LLMCore::PromptTemplate
+{
+public:
+ LLMCore::TemplateType type() const noexcept override
+ {
+ return LLMCore::TemplateType::Chat;
+ }
+
+ QString name() const override { return "OpenAI Responses"; }
+
+ QStringList stopWords() const override { return {}; }
+
+ void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
+ {
+ using namespace QodeAssist::OpenAIResponses;
+ RequestBuilder builder;
+
+ if (context.systemPrompt) {
+ builder.setInstructions(context.systemPrompt.value());
+ }
+
+ if (!context.history || context.history->isEmpty()) {
+ return;
+ }
+
+ const auto &history = context.history.value();
+
+ for (const auto &msg : history) {
+ if (msg.role == "system") {
+ continue;
+ }
+
+ Message message;
+ message.role = roleFromString(msg.role);
+
+ if (msg.images && !msg.images->isEmpty()) {
+ const auto &images = msg.images.value();
+ message.content.reserve(1 + images.size());
+
+ if (!msg.content.isEmpty()) {
+ message.content.append(MessageContent(InputText{msg.content}));
+ }
+
+ for (const auto &image : images) {
+ InputImage imgInput;
+ imgInput.detail = "auto";
+
+ if (image.isUrl) {
+ imgInput.imageUrl = image.data;
+ } else {
+ imgInput.imageUrl
+ = QString("data:%1;base64,%2").arg(image.mediaType, image.data);
+ }
+
+ message.content.append(MessageContent(std::move(imgInput)));
+ }
+ } else {
+ message.content.append(MessageContent(msg.content));
+ }
+
+ builder.addMessage(std::move(message));
+ }
+
+ const QJsonObject builtRequest = builder.toJson();
+ for (auto it = builtRequest.constBegin(); it != builtRequest.constEnd(); ++it) {
+ request[it.key()] = it.value();
+ }
+ }
+ QString description() const override
+ {
+ return "Template for OpenAI Responses API:\n\n"
+ "Simple request:\n"
+ "{\n"
+ " \"input\": \"\"\n"
+ "}\n\n"
+ "Multi-turn conversation:\n"
+ "{\n"
+ " \"instructions\": \"\",\n"
+ " \"input\": [\n"
+ " {\"role\": \"user\", \"content\": \"\"}\n"
+ " ]\n"
+ "}\n\n"
+ "Uses type-safe RequestBuilder for OpenAI Responses API.";
+ }
+ bool isSupportProvider(LLMCore::ProviderID id) const noexcept override
+ {
+ return id == QodeAssist::LLMCore::ProviderID::OpenAIResponses;
+ }
+
+private:
+ static QodeAssist::OpenAIResponses::Role roleFromString(const QString &roleStr) noexcept
+ {
+ using namespace QodeAssist::OpenAIResponses;
+
+ if (roleStr == "user")
+ return Role::User;
+ if (roleStr == "assistant")
+ return Role::Assistant;
+ if (roleStr == "system")
+ return Role::System;
+ if (roleStr == "developer")
+ return Role::Developer;
+
+ return Role::User;
+ }
+};
+
+} // namespace QodeAssist::Templates
+
diff --git a/templates/Templates.hpp b/templates/Templates.hpp
index 628b01f..f73cb41 100644
--- a/templates/Templates.hpp
+++ b/templates/Templates.hpp
@@ -29,6 +29,7 @@
#include "templates/Ollama.hpp"
#include "templates/OpenAI.hpp"
#include "templates/OpenAICompatible.hpp"
+#include "templates/OpenAIResponses.hpp"
// #include "templates/CustomFimTemplate.hpp"
// #include "templates/DeepSeekCoderFim.hpp"
#include "templates/GoogleAI.hpp"
@@ -49,6 +50,7 @@ inline void registerTemplates()
templateManager.registerTemplate();
templateManager.registerTemplate();
templateManager.registerTemplate();
+ templateManager.registerTemplate<OpenAIResponses>();
templateManager.registerTemplate();
templateManager.registerTemplate();
templateManager.registerTemplate();