Refactor LLM providers to use internal HTTP client (#227)

* refactor: Move HTTP client into provider

* refactor: Rework Ollama provider to work with internal HTTP client

* refactor: Rework LM Studio provider to work with internal HTTP client

* refactor: Rework Mistral AI provider to work with internal HTTP client

* fix: Replace separate url and header arguments with QNetworkRequest

* refactor: Rework Google provider to use internal HTTP client

* refactor: Switch OpenAI-compatible providers to the internal HTTP client

* fix: Remove m_requestHandler from tests

* refactor: Remove old handleData method

* fix: Remove LLMClientInterfaceTest
Petr Mironychev
2025-09-03 10:56:05 +02:00
committed by GitHub
parent 5969d530bd
commit 76309be0a6
34 changed files with 1144 additions and 909 deletions

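Note: the diff below covers only the provider side of the change. The LLMCore::HttpClient and LLMCore::HttpRequest types it depends on live in other files of this commit; the following is a minimal sketch of their probable shape, inferred from how the provider uses them. The HttpRequest fields and the sendRequest signal appear in the diff; the dataReceived and requestFinished signal names are assumptions.

// Sketch only: inferred from usage in the diff below, not the actual LLMCore sources.
#include <QJsonObject>
#include <QNetworkRequest>
#include <QObject>

namespace LLMCore {

struct HttpRequest
{
    QNetworkRequest networkRequest; // URL and headers, prepared by the provider
    QString requestId;              // correlates streamed chunks with their request
    QJsonObject payload;            // JSON body sent to the LLM endpoint
};

class HttpClient : public QObject
{
    Q_OBJECT
signals:
    // Providers emit this to hand a request to the shared client (as in the diff).
    void sendRequest(const LLMCore::HttpRequest &request);
    // Assumed names: whatever the client actually emits is connected to the
    // provider's onDataReceived/onRequestFinished slots shown in the diff.
    void dataReceived(const QString &requestId, const QByteArray &data);
    void requestFinished(const QString &requestId, bool success, const QString &error);
};

} // namespace LLMCore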

@@ -92,57 +92,6 @@ void OpenAICompatProvider::prepareRequest(
    }
}

bool OpenAICompatProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
    QByteArray data = reply->readAll();
    if (data.isEmpty()) {
        return false;
    }
    bool isDone = false;
    QByteArrayList lines = data.split('\n');
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }
        if (line == "data: [DONE]") {
            isDone = true;
            continue;
        }
        QByteArray jsonData = line;
        if (line.startsWith("data: ")) {
            jsonData = line.mid(6);
        }
        QJsonParseError error;
        QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
        if (doc.isNull()) {
            continue;
        }
        auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
        if (message.hasError()) {
            LOG_MESSAGE("Error in OpenAI response: " + message.error);
            continue;
        }
        QString content = message.getContent();
        if (!content.isEmpty()) {
            accumulatedResponse += content;
        }
        if (message.isDone()) {
            isDone = true;
        }
    }
    return isDone;
}

QList<QString> OpenAICompatProvider::getInstalledModels(const QString &url)
{
    return QStringList();
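
Both the handleResponse method removed above and the onDataReceived slot added in the next hunk parse the same OpenAI-style streaming format: each chunk arrives as a data:-prefixed JSON line, and line.mid(6) strips that six-character prefix before JSON parsing. An illustrative stream (values made up) looks like:

data: {"choices":[{"delta":{"content":"Hel"}}]}
data: {"choices":[{"delta":{"content":"lo"},"finish_reason":"stop"}]}
data: [DONE]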
@@ -185,4 +134,93 @@ LLMCore::ProviderID OpenAICompatProvider::providerID() const
    return LLMCore::ProviderID::OpenAICompatible;
}

void OpenAICompatProvider::sendRequest(
    const QString &requestId, const QUrl &url, const QJsonObject &payload)
{
    QNetworkRequest networkRequest(url);
    prepareNetworkRequest(networkRequest);
    LLMCore::HttpRequest
        request{.networkRequest = networkRequest, .requestId = requestId, .payload = payload};
    LOG_MESSAGE(
        QString("OpenAICompatProvider: Sending request %1 to %2").arg(requestId, url.toString()));
    emit httpClient()->sendRequest(request);
}

void OpenAICompatProvider::onDataReceived(const QString &requestId, const QByteArray &data)
{
    QString &accumulatedResponse = m_accumulatedResponses[requestId];
    if (data.isEmpty()) {
        return;
    }
    bool isDone = false;
    QByteArrayList lines = data.split('\n');
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }
        if (line == "data: [DONE]") {
            isDone = true;
            continue;
        }
        QByteArray jsonData = line;
        if (line.startsWith("data: ")) {
            jsonData = line.mid(6);
        }
        QJsonParseError error;
        QJsonDocument doc = QJsonDocument::fromJson(jsonData, &error);
        if (doc.isNull()) {
            continue;
        }
        auto message = LLMCore::OpenAIMessage::fromJson(doc.object());
        if (message.hasError()) {
            LOG_MESSAGE("Error in OpenAI response: " + message.error);
            continue;
        }
        QString content = message.getContent();
        if (!content.isEmpty()) {
            accumulatedResponse += content;
            emit partialResponseReceived(requestId, content);
        }
        if (message.isDone()) {
            isDone = true;
        }
    }
    if (isDone) {
        emit fullResponseReceived(requestId, accumulatedResponse);
        m_accumulatedResponses.remove(requestId);
    }
}

void OpenAICompatProvider::onRequestFinished(
    const QString &requestId, bool success, const QString &error)
{
    if (!success) {
        LOG_MESSAGE(QString("OpenAICompatProvider request %1 failed: %2").arg(requestId, error));
        emit requestFailed(requestId, error);
    } else {
        if (m_accumulatedResponses.contains(requestId)) {
            const QString fullResponse = m_accumulatedResponses[requestId];
            if (!fullResponse.isEmpty()) {
                emit fullResponseReceived(requestId, fullResponse);
            }
        }
    }
    m_accumulatedResponses.remove(requestId);
}
} // namespace QodeAssist::Providers
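
The signal/slot wiring between the shared client and these slots is done elsewhere in the commit. A plausible connection, assuming the HttpClient signal names sketched above, would be:

// Sketch only: the actual wiring lives in other files of this commit. The
// dataReceived/requestFinished signal names are assumptions; the slots are from the diff.
connect(httpClient(), &LLMCore::HttpClient::dataReceived,
        this, &OpenAICompatProvider::onDataReceived);
connect(httpClient(), &LLMCore::HttpClient::requestFinished,
        this, &OpenAICompatProvider::onRequestFinished);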