fix: Handle Ollama messages

This commit is contained in:
Petr Mironychev 2025-01-19 17:32:12 +01:00
parent c97c0f62e8
commit e975e143b1

View File

@ -95,18 +95,36 @@ bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedRe
return false; return false;
} }
const QString endpoint = reply->url().path(); QByteArrayList lines = data.split('\n');
auto messageType = endpoint == completionEndpoint() ? LLMCore::OllamaMessage::Type::Generate bool isDone = false;
: LLMCore::OllamaMessage::Type::Chat;
auto message = LLMCore::OllamaMessage::fromJson(data, messageType); for (const QByteArray &line : lines) {
if (message.hasError()) { if (line.trimmed().isEmpty()) {
LOG_MESSAGE("Error in Ollama response: " + message.error); continue;
return false; }
const QString endpoint = reply->url().path();
auto messageType = endpoint == completionEndpoint()
? LLMCore::OllamaMessage::Type::Generate
: LLMCore::OllamaMessage::Type::Chat;
auto message = LLMCore::OllamaMessage::fromJson(line, messageType);
if (message.hasError()) {
LOG_MESSAGE("Error in Ollama response: " + message.error);
continue;
}
QString content = message.getContent();
if (!content.isEmpty()) {
accumulatedResponse += content;
}
if (message.done) {
isDone = true;
}
} }
accumulatedResponse += message.getContent(); return isDone;
return message.done;
} }
QList<QString> OllamaProvider::getInstalledModels(const QString &url) QList<QString> OllamaProvider::getInstalledModels(const QString &url)