Add stream text to chat

This commit is contained in:
Petr Mironychev 2024-10-16 22:51:34 +02:00
parent 3db2691114
commit 80eda8c167
5 changed files with 43 additions and 37 deletions

View File

@@ -68,6 +68,28 @@ QHash<int, QByteArray> ChatModel::roleNames() const
return roles;
}
void ChatModel::addMessage(const QString &content, ChatRole role, const QString &id)
{
int tokenCount = estimateTokenCount(content);
if (!m_messages.isEmpty() && !id.isEmpty() && m_messages.last().id == id) {
Message &lastMessage = m_messages.last();
int oldTokenCount = lastMessage.tokenCount;
lastMessage.content = content;
lastMessage.tokenCount = tokenCount;
m_totalTokens += (tokenCount - oldTokenCount);
emit dataChanged(index(m_messages.size() - 1), index(m_messages.size() - 1));
} else {
beginInsertRows(QModelIndex(), m_messages.size(), m_messages.size());
m_messages.append({role, content, tokenCount, id});
m_totalTokens += tokenCount;
endInsertRows();
}
trim();
emit totalTokensChanged();
}
QVector<ChatModel::Message> ChatModel::getChatHistory() const
{
return m_messages;
@@ -92,17 +114,6 @@ int ChatModel::estimateTokenCount(const QString &text) const
return text.length() / 4;
}
void ChatModel::addMessage(const QString &content, ChatRole role)
{
int tokenCount = estimateTokenCount(content);
beginInsertRows(QModelIndex(), m_messages.size(), m_messages.size());
m_messages.append({role, content, tokenCount});
m_totalTokens += tokenCount;
endInsertRows();
trim();
emit totalTokensChanged();
}
void ChatModel::clear()
{
beginResetModel();

View File

@@ -46,6 +46,7 @@ public:
ChatRole role;
QString content;
int tokenCount;
QString id;
};
explicit ChatModel(QObject *parent = nullptr);
@@ -54,7 +55,7 @@ public:
QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override;
QHash<int, QByteArray> roleNames() const override;
Q_INVOKABLE void addMessage(const QString &content, ChatRole role);
Q_INVOKABLE void addMessage(const QString &content, ChatRole role, const QString &id);
Q_INVOKABLE void clear();
Q_INVOKABLE QList<MessagePart> processMessageContent(const QString &content) const;

View File

@@ -38,8 +38,8 @@ ClientInterface::ClientInterface(ChatModel *chatModel, QObject *parent)
connect(m_requestHandler,
&LLMCore::RequestHandler::completionReceived,
this,
[this](const QString &completion, const QJsonObject &, bool isComplete) {
handleLLMResponse(completion, isComplete);
[this](const QString &completion, const QJsonObject &request, bool isComplete) {
handleLLMResponse(completion, request, isComplete);
});
connect(m_requestHandler,
@@ -90,7 +90,7 @@ void ClientInterface::sendMessage(const QString &message)
request["id"] = QUuid::createUuid().toString();
m_accumulatedResponse.clear();
m_chatModel->addMessage(message, ChatModel::ChatRole::User);
m_chatModel->addMessage(message, ChatModel::ChatRole::User, "");
m_requestHandler->sendLLMRequest(config, request);
}
@@ -101,16 +101,15 @@ void ClientInterface::clearMessages()
LOG_MESSAGE("Chat history cleared");
}
void ClientInterface::handleLLMResponse(const QString &response, bool isComplete)
void ClientInterface::handleLLMResponse(const QString &response,
const QJsonObject &request,
bool isComplete)
{
m_accumulatedResponse += response;
QString messageId = request["id"].toString();
m_chatModel->addMessage(response.trimmed(), ChatModel::ChatRole::Assistant, messageId);
if (isComplete) {
LOG_MESSAGE("Message completed. Final response: " + m_accumulatedResponse);
emit messageReceived(m_accumulatedResponse.trimmed());
m_chatModel->addMessage(m_accumulatedResponse.trimmed(), ChatModel::ChatRole::Assistant);
m_accumulatedResponse.clear();
LOG_MESSAGE("Message completed. Final response for message " + messageId + ": " + response);
}
}

View File

@@ -40,11 +40,10 @@ public:
void clearMessages();
signals:
void messageReceived(const QString &message);
void errorOccurred(const QString &error);
private:
void handleLLMResponse(const QString &response, bool isComplete);
void handleLLMResponse(const QString &response, const QJsonObject &request, bool isComplete);
LLMCore::RequestHandler *m_requestHandler;
QString m_accumulatedResponse;

View File

@@ -80,22 +80,18 @@ void RequestHandler::handleLLMResponse(QNetworkReply *reply,
&& processSingleLineCompletion(reply, request, accumulatedResponse, config)) {
return;
}
if (isComplete) {
auto cleanedCompletion = removeStopWords(accumulatedResponse,
config.promptTemplate->stopWords());
emit completionReceived(cleanedCompletion, request, true);
}
} else if (config.requestType == RequestType::Chat) {
emit completionReceived(accumulatedResponse, request, isComplete);
}
if (isComplete || reply->isFinished()) {
if (isComplete) {
if (config.requestType == RequestType::Fim) {
auto cleanedCompletion = removeStopWords(accumulatedResponse,
config.promptTemplate->stopWords());
emit completionReceived(cleanedCompletion, request, true);
} else {
emit completionReceived(accumulatedResponse, request, true);
}
} else {
emit completionReceived(accumulatedResponse, request, false);
}
if (isComplete)
m_accumulatedResponses.remove(reply);
}
}
bool RequestHandler::cancelRequest(const QString &id)