Mirror of https://github.com/Palm1r/QodeAssist.git
fix: Handling Ollama messages
parent c97c0f62e8
commit e975e143b1
@@ -95,18 +95,36 @@ bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
         return false;
     }
 
-    const QString endpoint = reply->url().path();
-    auto messageType = endpoint == completionEndpoint() ? LLMCore::OllamaMessage::Type::Generate
-                                                        : LLMCore::OllamaMessage::Type::Chat;
+    QByteArrayList lines = data.split('\n');
+    bool isDone = false;
 
-    auto message = LLMCore::OllamaMessage::fromJson(data, messageType);
-    if (message.hasError()) {
-        LOG_MESSAGE("Error in Ollama response: " + message.error);
-        return false;
+    for (const QByteArray &line : lines) {
+        if (line.trimmed().isEmpty()) {
+            continue;
+        }
+
+        const QString endpoint = reply->url().path();
+        auto messageType = endpoint == completionEndpoint()
+                               ? LLMCore::OllamaMessage::Type::Generate
+                               : LLMCore::OllamaMessage::Type::Chat;
+
+        auto message = LLMCore::OllamaMessage::fromJson(line, messageType);
+        if (message.hasError()) {
+            LOG_MESSAGE("Error in Ollama response: " + message.error);
+            continue;
+        }
+
+        QString content = message.getContent();
+        if (!content.isEmpty()) {
+            accumulatedResponse += content;
+        }
+
+        if (message.done) {
+            isDone = true;
+        }
     }
 
-    accumulatedResponse += message.getContent();
-    return message.done;
+    return isDone;
 }
 
 QList<QString> OllamaProvider::getInstalledModels(const QString &url)
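Context for the change above: Ollama streams its /api/generate and /api/chat responses as newline-delimited JSON, so a single network chunk can carry several JSON objects plus trailing blank lines. Parsing the whole buffer as one message therefore fails or silently drops everything after the first object; the fix splits the chunk on '\n', handles each line on its own, skips empty or malformed lines, and only reports completion once a line carries "done": true. The sketch below illustrates the same idea with plain QJsonDocument parsing. It is a minimal standalone example, not QodeAssist's OllamaMessage API; parseOllamaChunk is a hypothetical helper, and the field names ("response", "message.content", "done") follow Ollama's documented stream format.

// Minimal standalone sketch (assumed helper, not the project's OllamaMessage API):
// parse one NDJSON chunk from Ollama the way the fixed handleResponse() does,
// accumulating streamed content and reporting whether the final message arrived.
#include <QByteArray>
#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>
#include <QJsonParseError>
#include <QString>

bool parseOllamaChunk(const QByteArray &data, bool isChat, QString &accumulatedResponse)
{
    bool isDone = false;
    const QList<QByteArray> lines = data.split('\n');
    for (const QByteArray &line : lines) {
        if (line.trimmed().isEmpty())
            continue; // streamed chunks usually end with a trailing newline

        QJsonParseError parseError;
        const QJsonDocument doc = QJsonDocument::fromJson(line, &parseError);
        if (parseError.error != QJsonParseError::NoError || !doc.isObject()) {
            qWarning() << "Skipping malformed Ollama line:" << parseError.errorString();
            continue; // keep processing the remaining lines instead of aborting
        }

        const QJsonObject obj = doc.object();
        // /api/chat wraps text in message.content; /api/generate uses "response".
        const QString content = isChat
            ? obj.value("message").toObject().value("content").toString()
            : obj.value("response").toString();
        if (!content.isEmpty())
            accumulatedResponse += content;

        if (obj.value("done").toBool())
            isDone = true; // the last object of the stream carries done:true
    }
    return isDone;
}

Returning isDone only after the whole chunk has been processed mirrors the diff: previously the function returned after the first parsed object, so any content arriving later in the same chunk was lost, and a single malformed line aborted the whole response instead of being skipped.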