diff --git a/providers/OllamaProvider.cpp b/providers/OllamaProvider.cpp
index d08be2f..2c93013 100644
--- a/providers/OllamaProvider.cpp
+++ b/providers/OllamaProvider.cpp
@@ -95,18 +95,36 @@ bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedRe
         return false;
     }
 
-    const QString endpoint = reply->url().path();
-    auto messageType = endpoint == completionEndpoint() ? LLMCore::OllamaMessage::Type::Generate
-                                                        : LLMCore::OllamaMessage::Type::Chat;
+    QByteArrayList lines = data.split('\n');
+    bool isDone = false;
 
-    auto message = LLMCore::OllamaMessage::fromJson(data, messageType);
-    if (message.hasError()) {
-        LOG_MESSAGE("Error in Ollama response: " + message.error);
-        return false;
+    for (const QByteArray &line : lines) {
+        if (line.trimmed().isEmpty()) {
+            continue;
+        }
+
+        const QString endpoint = reply->url().path();
+        auto messageType = endpoint == completionEndpoint()
+                               ? LLMCore::OllamaMessage::Type::Generate
+                               : LLMCore::OllamaMessage::Type::Chat;
+
+        auto message = LLMCore::OllamaMessage::fromJson(line, messageType);
+        if (message.hasError()) {
+            LOG_MESSAGE("Error in Ollama response: " + message.error);
+            continue;
+        }
+
+        QString content = message.getContent();
+        if (!content.isEmpty()) {
+            accumulatedResponse += content;
+        }
+
+        if (message.done) {
+            isDone = true;
+        }
     }
 
-    accumulatedResponse += message.getContent();
-    return message.done;
+    return isDone;
 }
 
 QList<QString> OllamaProvider::getInstalledModels(const QString &url)