Refactor LLM providers to use internal HTTP client (#227)

* refactor: Move http client into provider

* refactor: Rework Ollama provider to work with internal http client

* refactor: Rework LM Studio provider to work with internal http client

* refactor: Rework Mistral AI to work with internal http client

* fix: Replace URL and headers with QNetworkRequest

* refactor: Rework Google provider to use internal http client

* refactor: Switch OpenAI-compatible providers to the internal http client

* fix: Remove m_requestHandler from tests

* refactor: Remove old handleData method

* fix: Remove LLMClientInterfaceTest
Petr Mironychev
2025-09-03 10:56:05 +02:00
committed by GitHub
parent 5969d530bd
commit 76309be0a6
34 changed files with 1144 additions and 909 deletions
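The pattern repeated across all providers in this commit: instead of each provider reading a QNetworkReply directly, it builds an LLMCore::HttpRequest, hands it to a shared HTTP client, and receives streamed chunks back through per-request callbacks. A minimal sketch of the interface this implies, reconstructed from the calls visible in the Ollama diff below; only HttpRequest's fields and the sendRequest signal actually appear in the diff, so the dataReceived/requestFinished signal names here are assumptions, not the repository's declarations:

// Sketch only: inferred from usage in this commit, not copied from the
// repository. Signal names other than sendRequest are assumptions.
#include <QJsonObject>
#include <QNetworkRequest>
#include <QObject>

namespace QodeAssist::LLMCore {

struct HttpRequest
{
    QNetworkRequest networkRequest; // URL and headers, prepared by the provider
    QString requestId;              // correlates streamed chunks with a request
    QJsonObject payload;            // JSON body for the provider endpoint
};

class HttpClient : public QObject
{
    Q_OBJECT
signals:
    void sendRequest(const HttpRequest &request); // emitted by providers (see sendRequest below)
    // Assumed callbacks feeding the providers' onDataReceived / onRequestFinished slots:
    void dataReceived(const QString &requestId, const QByteArray &data);
    void requestFinished(const QString &requestId, bool success, const QString &error);
};

} // namespace QodeAssist::LLMCore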


@@ -97,44 +97,6 @@ void OllamaProvider::prepareRequest(
    }
}

bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
{
    QByteArray data = reply->readAll();
    if (data.isEmpty()) {
        return false;
    }

    QByteArrayList lines = data.split('\n');
    bool isDone = false;
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }

        const QString endpoint = reply->url().path();
        auto messageType = endpoint == completionEndpoint()
                               ? LLMCore::OllamaMessage::Type::Generate
                               : LLMCore::OllamaMessage::Type::Chat;

        auto message = LLMCore::OllamaMessage::fromJson(line, messageType);
        if (message.hasError()) {
            LOG_MESSAGE("Error in Ollama response: " + message.error);
            continue;
        }

        QString content = message.getContent();
        if (!content.isEmpty()) {
            accumulatedResponse += content;
        }

        if (message.done) {
            isDone = true;
        }
    }

    return isDone;
}

QList<QString> OllamaProvider::getInstalledModels(const QString &url)
{
    QList<QString> models;
@@ -223,4 +185,89 @@ LLMCore::ProviderID OllamaProvider::providerID() const
    return LLMCore::ProviderID::Ollama;
}

void OllamaProvider::sendRequest(
    const QString &requestId, const QUrl &url, const QJsonObject &payload)
{
    QNetworkRequest networkRequest(url);
    prepareNetworkRequest(networkRequest);

    LLMCore::HttpRequest
        request{.networkRequest = networkRequest, .requestId = requestId, .payload = payload};

    LOG_MESSAGE(QString("OllamaProvider: Sending request %1 to %2").arg(requestId, url.toString()));
    emit httpClient()->sendRequest(request);
}

void OllamaProvider::onDataReceived(const QString &requestId, const QByteArray &data)
{
    QString &accumulatedResponse = m_accumulatedResponses[requestId];

    if (data.isEmpty()) {
        return;
    }

    QByteArrayList lines = data.split('\n');
    bool isDone = false;

    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty()) {
            continue;
        }

        QJsonParseError error;
        QJsonDocument doc = QJsonDocument::fromJson(line, &error);
        if (doc.isNull()) {
            continue;
        }

        QJsonObject obj = doc.object();

        if (obj.contains("error") && !obj["error"].toString().isEmpty()) {
            LOG_MESSAGE("Error in Ollama response: " + obj["error"].toString());
            continue;
        }

        QString content;
        if (obj.contains("response")) {
            content = obj["response"].toString();
        } else if (obj.contains("message")) {
            QJsonObject messageObj = obj["message"].toObject();
            content = messageObj["content"].toString();
        }

        if (!content.isEmpty()) {
            accumulatedResponse += content;
            emit partialResponseReceived(requestId, content);
        }

        if (obj["done"].toBool()) {
            isDone = true;
        }
    }

    if (isDone) {
        emit fullResponseReceived(requestId, accumulatedResponse);
        m_accumulatedResponses.remove(requestId);
    }
}

void OllamaProvider::onRequestFinished(const QString &requestId, bool success, const QString &error)
{
    if (!success) {
        LOG_MESSAGE(QString("OllamaProvider request %1 failed: %2").arg(requestId, error));
        emit requestFailed(requestId, error);
    } else {
        if (m_accumulatedResponses.contains(requestId)) {
            const QString fullResponse = m_accumulatedResponses[requestId];
            if (!fullResponse.isEmpty()) {
                emit fullResponseReceived(requestId, fullResponse);
            }
        }
    }

    m_accumulatedResponses.remove(requestId);
}
} // namespace QodeAssist::Providers
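
For reference, onDataReceived parses Ollama's newline-delimited JSON streaming format: generate-style lines carry the text in "response", chat-style lines carry it in "message"."content", and the final line sets "done": true. A self-contained parse loop mirroring that logic, with made-up sample chunks (the payloads are illustrative, not captured from a real server):

#include <QByteArray>
#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Two NDJSON chunks in the shapes handled above: a generate-style line
    // ("response") and a chat-style line ("message.content", "done": true).
    const QByteArray data =
        "{\"response\":\"Hel\",\"done\":false}\n"
        "{\"message\":{\"content\":\"lo\"},\"done\":true}\n";

    QString accumulated;
    for (const QByteArray &line : data.split('\n')) {
        if (line.trimmed().isEmpty())
            continue;
        const QJsonObject obj = QJsonDocument::fromJson(line).object();
        if (obj.contains("response"))
            accumulated += obj["response"].toString();
        else if (obj.contains("message"))
            accumulated += obj["message"].toObject()["content"].toString();
        if (obj["done"].toBool())
            qDebug() << "full response:" << accumulated; // prints "Hello"
    }
    return 0;
}

Non-streaming responses never set "done", which is why onRequestFinished above flushes any leftover accumulated text once the request completes successfully, and removes the accumulator entry in every case.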