/*
 * Copyright (C) 2024-2025 Petr Mironychev
 *
 * This file is part of QodeAssist.
 *
 * QodeAssist is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * QodeAssist is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
 */

#include "OpenAIProvider.hpp"

#include "settings/ChatAssistantSettings.hpp"
#include "settings/CodeCompletionSettings.hpp"
#include "settings/ProviderSettings.hpp"

// NOTE(review): the original <...> include names were lost during text
// extraction; these six Qt headers cover every Qt type used below
// (QEventLoop, QJsonArray, QJsonDocument, QJsonObject,
//  QNetworkAccessManager, QNetworkReply) — confirm against the repository.
#include <QEventLoop>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QNetworkAccessManager>
#include <QNetworkReply>

#include "llmcore/OpenAIMessage.hpp"
#include "llmcore/ValidationUtils.hpp"
#include "logger/Logger.hpp"

namespace QodeAssist::Providers {

/// Human-readable provider name shown in the UI / logs.
QString OpenAIProvider::name() const
{
    return "OpenAI";
}

/// Default base URL of the OpenAI API.
QString OpenAIProvider::url() const
{
    return "https://api.openai.com";
}

/// OpenAI exposes completions through the chat endpoint as well.
QString OpenAIProvider::completionEndpoint() const
{
    return "/v1/chat/completions";
}

QString OpenAIProvider::chatEndpoint() const
{
    return "/v1/chat/completions";
}

/// The provider can enumerate models via GET /v1/models.
bool OpenAIProvider::supportsModelListing() const
{
    return true;
}

/// Fills @p request with the prompt payload and the model parameters taken
/// from either the code-completion or the chat-assistant settings, depending
/// on @p type. Logs (but does not abort) when the template does not declare
/// support for this provider.
void OpenAIProvider::prepareRequest(
    QJsonObject &request,
    LLMCore::PromptTemplate *prompt,
    LLMCore::ContextData context,
    LLMCore::RequestType type)
{
    if (!prompt->isSupportProvider(providerID())) {
        // FIX: arguments were swapped — %1 is the template name, %2 the
        // provider name; the original logged
        // "Template OpenAI doesn't support <template> provider".
        LOG_MESSAGE(QString("Template %1 doesn't support %2 provider").arg(prompt->name(), name()));
    }

    prompt->prepareRequest(request, context);

    // Copies the sampling parameters from a settings object into the JSON
    // request; optional knobs are emitted only when the user enabled them.
    auto applyModelParams = [&request](const auto &settings) {
        request["max_tokens"] = settings.maxTokens();
        request["temperature"] = settings.temperature();

        if (settings.useTopP())
            request["top_p"] = settings.topP();
        if (settings.useTopK())
            request["top_k"] = settings.topK();
        if (settings.useFrequencyPenalty())
            request["frequency_penalty"] = settings.frequencyPenalty();
        if (settings.usePresencePenalty())
            request["presence_penalty"] = settings.presencePenalty();
    };

    if (type == LLMCore::RequestType::CodeCompletion) {
        applyModelParams(Settings::codeCompletionSettings());
    } else {
        applyModelParams(Settings::chatAssistantSettings());
    }
}

/// Consumes one network chunk of an OpenAI server-sent-events stream.
/// Each non-empty line is either the "data: [DONE]" terminator or a
/// (possibly "data: "-prefixed) JSON delta whose content is appended to
/// @p accumulatedResponse. Returns true once the stream signalled completion.
bool OpenAIProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
    QByteArray data = reply->readAll();
    if (data.isEmpty()) {
        return false;
    }

    bool isDone = false;
    QByteArrayList lines = data.split('\n');

    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }

        if (line == "data: [DONE]") {
            isDone = true;
            continue;
        }

        QByteArray jsonData = line;
        if (line.startsWith("data: ")) {
            jsonData = line.mid(6); // strip the SSE "data: " prefix
        }

        QJsonParseError error;
        QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
        if (doc.isNull()) {
            // Partial/garbled line — skip; the next chunk may complete it.
            continue;
        }

        auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
        if (message.hasError()) {
            LOG_MESSAGE("Error in OpenAI response: " + message.error);
            continue;
        }

        QString content = message.getContent();
        if (!content.isEmpty()) {
            accumulatedResponse += content;
        }

        if (message.isDone()) {
            isDone = true;
        }
    }

    return isDone;
}

/// Synchronously queries GET {url}/v1/models and returns the ids of the
/// chat-capable models, filtering out image/audio/legacy families.
/// NOTE: blocks the calling thread via a local QEventLoop until the reply
/// finishes — intended for settings-dialog use, not hot paths.
QList<QString> OpenAIProvider::getInstalledModels(const QString &url)
{
    QList<QString> models;
    QNetworkAccessManager manager;

    QNetworkRequest request(QString("%1/v1/models").arg(url));
    request.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");
    if (!apiKey().isEmpty()) {
        request.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
    }

    QNetworkReply *reply = manager.get(request);
    QEventLoop loop;
    QObject::connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
    loop.exec();

    if (reply->error() == QNetworkReply::NoError) {
        QByteArray responseData = reply->readAll();
        QJsonDocument jsonResponse = QJsonDocument::fromJson(responseData);
        QJsonObject jsonObject = jsonResponse.object();
        if (jsonObject.contains("data")) {
            QJsonArray modelArray = jsonObject["data"].toArray();
            for (const QJsonValue &value : modelArray) {
                QJsonObject modelObject = value.toObject();
                if (modelObject.contains("id")) {
                    QString modelId = modelObject["id"].toString();
                    // Keep only text-generation models: drop image (dall-e),
                    // audio (whisper/tts), legacy (davinci/babbage) and omni ids.
                    if (!modelId.contains("dall-e") && !modelId.contains("whisper")
                        && !modelId.contains("tts") && !modelId.contains("davinci")
                        && !modelId.contains("babbage") && !modelId.contains("omni")) {
                        models.append(modelId);
                    }
                }
            }
        }
    } else {
        LOG_MESSAGE(QString("Error fetching ChatGPT models: %1").arg(reply->errorString()));
    }

    reply->deleteLater();
    return models;
}

/// Validates @p request against the set of fields OpenAI accepts;
/// returns the list of unknown/misplaced field names (empty when valid).
QList<QString> OpenAIProvider::validateRequest(const QJsonObject &request, LLMCore::TemplateType type)
{
    const auto templateReq = QJsonObject{
        {"model", {}},
        {"messages", QJsonArray{{QJsonObject{{"role", {}}, {"content", {}}}}}},
        {"temperature", {}},
        {"max_tokens", {}},
        {"top_p", {}},
        {"top_k", {}},
        {"frequency_penalty", {}},
        {"presence_penalty", {}},
        {"stop", QJsonArray{}},
        {"stream", {}}};

    return LLMCore::ValidationUtils::validateRequestFields(request, templateReq);
}

/// API key as configured in the provider settings page.
QString OpenAIProvider::apiKey() const
{
    return Settings::providerSettings().openAiApiKey();
}

/// Sets the JSON content type and, when a key is configured, the
/// "Authorization: Bearer <key>" header on the outgoing request.
void OpenAIProvider::prepareNetworkRequest(QNetworkRequest &networkRequest) const
{
    networkRequest.setHeader(QNetworkRequest::ContentTypeHeader, "application/json");

    if (!apiKey().isEmpty()) {
        networkRequest.setRawHeader("Authorization", QString("Bearer %1").arg(apiKey()).toUtf8());
    }
}

LLMCore::ProviderID OpenAIProvider::providerID() const
{
    return LLMCore::ProviderID::OpenAI;
}

} // namespace QodeAssist::Providers