fix: Handling Ollama messages

Petr Mironychev 2025-01-19 17:32:12 +01:00
parent c97c0f62e8
commit e975e143b1


@@ -95,18 +95,36 @@ bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
         return false;
     }
 
-    const QString endpoint = reply->url().path();
-    auto messageType = endpoint == completionEndpoint() ? LLMCore::OllamaMessage::Type::Generate
-                                                        : LLMCore::OllamaMessage::Type::Chat;
-
-    auto message = LLMCore::OllamaMessage::fromJson(data, messageType);
-    if (message.hasError()) {
-        LOG_MESSAGE("Error in Ollama response: " + message.error);
-        return false;
-    }
-
-    accumulatedResponse += message.getContent();
-    return message.done;
+    QByteArrayList lines = data.split('\n');
+    bool isDone = false;
+
+    for (const QByteArray &line : lines) {
+        if (line.trimmed().isEmpty()) {
+            continue;
+        }
+
+        const QString endpoint = reply->url().path();
+        auto messageType = endpoint == completionEndpoint()
+                               ? LLMCore::OllamaMessage::Type::Generate
+                               : LLMCore::OllamaMessage::Type::Chat;
+
+        auto message = LLMCore::OllamaMessage::fromJson(line, messageType);
+        if (message.hasError()) {
+            LOG_MESSAGE("Error in Ollama response: " + message.error);
+            continue;
+        }
+
+        QString content = message.getContent();
+        if (!content.isEmpty()) {
+            accumulatedResponse += content;
+        }
+
+        if (message.done) {
+            isDone = true;
+        }
+    }
+
+    return isDone;
 }
 
 QList<QString> OllamaProvider::getInstalledModels(const QString &url)
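
For context: Ollama streams both /api/generate and /api/chat replies as newline-delimited JSON, one object per line, with a "done" flag on the final chunk. The change above therefore parses each line of the network buffer separately instead of passing the whole buffer to a single fromJson call. The sketch below shows the same per-line pattern using only plain Qt JSON classes; the helper name appendStreamedChunk and its error handling are illustrative assumptions rather than code from this repository, while the "response", "message"/"content" and "done" fields follow Ollama's documented streaming format.

#include <QByteArray>
#include <QByteArrayList>
#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>
#include <QString>

// Parse one streamed chunk of newline-delimited JSON and append any
// generated text to `accumulated`. Returns true once a line reports
// "done": true. Illustrative sketch only, not the project's
// LLMCore::OllamaMessage implementation.
bool appendStreamedChunk(const QByteArray &data, QString &accumulated)
{
    bool done = false;
    const QByteArrayList lines = data.split('\n');
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty())
            continue; // skip blank separator lines

        QJsonParseError parseError;
        const QJsonDocument doc = QJsonDocument::fromJson(line, &parseError);
        if (parseError.error != QJsonParseError::NoError) {
            qWarning() << "Skipping malformed line:" << parseError.errorString();
            continue; // do not abort the whole stream over one bad line
        }

        const QJsonObject obj = doc.object();
        if (obj.contains("response")) // /api/generate style chunk
            accumulated += obj.value("response").toString();
        else if (obj.contains("message")) // /api/chat style chunk
            accumulated += obj.value("message").toObject().value("content").toString();

        if (obj.value("done").toBool())
            done = true; // remember completion, keep draining remaining lines
    }
    return done;
}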