mirror of https://github.com/Palm1r/QodeAssist.git (synced 2025-06-04 01:28:58 -04:00)
fix: Handling Ollama messages
parent c97c0f62e8
commit e975e143b1
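Ollama streams its replies as newline-delimited JSON, so a single network read can carry several JSON messages at once. The old handler passed the whole chunk to OllamaMessage::fromJson as if it were one message; this change splits each chunk on '\n', parses every non-empty line as its own message, accumulates the content of each, and only reports completion once a message arrives with done set.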
@@ -95,18 +95,36 @@ bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedRe
         return false;
     }
 
-    const QString endpoint = reply->url().path();
-    auto messageType = endpoint == completionEndpoint() ? LLMCore::OllamaMessage::Type::Generate
-                                                        : LLMCore::OllamaMessage::Type::Chat;
-
-    auto message = LLMCore::OllamaMessage::fromJson(data, messageType);
-    if (message.hasError()) {
-        LOG_MESSAGE("Error in Ollama response: " + message.error);
-        return false;
-    }
-
-    accumulatedResponse += message.getContent();
-    return message.done;
+    QByteArrayList lines = data.split('\n');
+    bool isDone = false;
+
+    for (const QByteArray &line : lines) {
+        if (line.trimmed().isEmpty()) {
+            continue;
+        }
+
+        const QString endpoint = reply->url().path();
+        auto messageType = endpoint == completionEndpoint()
+                               ? LLMCore::OllamaMessage::Type::Generate
+                               : LLMCore::OllamaMessage::Type::Chat;
+
+        auto message = LLMCore::OllamaMessage::fromJson(line, messageType);
+        if (message.hasError()) {
+            LOG_MESSAGE("Error in Ollama response: " + message.error);
+            continue;
+        }
+
+        QString content = message.getContent();
+        if (!content.isEmpty()) {
+            accumulatedResponse += content;
+        }
+
+        if (message.done) {
+            isDone = true;
+        }
+    }
+
+    return isDone;
 }
 
 QList<QString> OllamaProvider::getInstalledModels(const QString &url)
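Below is a minimal, self-contained sketch of the same per-line technique using plain Qt JSON types instead of QodeAssist's LLMCore::OllamaMessage. The "response" and "done" field names follow Ollama's streaming format, but the sample chunk and its values are invented for illustration; this is not the plugin's actual code.

// Minimal sketch, assuming a raw NDJSON chunk as read from QNetworkReply.
#include <QByteArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QString>
#include <QDebug>

int main()
{
    // One network read can deliver several JSON objects, one per line.
    const QByteArray chunk =
        "{\"response\":\"Hel\",\"done\":false}\n"
        "{\"response\":\"lo\",\"done\":true}\n";

    QString accumulated;
    bool done = false;

    // Split on '\n' and parse each non-empty line as its own message,
    // mirroring the structure of the patched handler.
    for (const QByteArray &line : chunk.split('\n')) {
        if (line.trimmed().isEmpty())
            continue; // skip the empty piece after the trailing '\n'
        const QJsonObject obj = QJsonDocument::fromJson(line).object();
        accumulated += obj.value("response").toString();
        done = done || obj.value("done").toBool();
    }

    qDebug() << accumulated << done; // "Hello" true
}

Qt's QJsonDocument::fromJson rejects trailing content after the first JSON value, so a whole-chunk parse of a multi-object read either fails or yields at most the first message; splitting first is what keeps the later messages in the stream from being dropped.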