Mirror of https://github.com/Palm1r/QodeAssist.git, synced 2025-11-13 21:42:52 -05:00
fix: Remove replace message after complete receiving
@@ -251,20 +251,13 @@ void ClientInterface::cancelRequest()
     LOG_MESSAGE("All requests cancelled and state cleared");
 }
 
-void ClientInterface::handleLLMResponse(
-    const QString &response, const QJsonObject &request, bool isComplete)
+void ClientInterface::handleLLMResponse(const QString &response, const QJsonObject &request)
 {
     const auto message = response.trimmed();
 
     if (!message.isEmpty()) {
         QString messageId = request["id"].toString();
         m_chatModel->addMessage(message, ChatModel::ChatRole::Assistant, messageId);
-
-        if (isComplete) {
-            LOG_MESSAGE(
-                "Message completed. Final response for message " + messageId + ": " + response);
-            emit messageReceivedCompletely();
-        }
     }
 }
 
@@ -323,7 +316,7 @@ void ClientInterface::handlePartialResponse(const QString &requestId, const QStr
     m_accumulatedResponses[requestId] += partialText;
 
     const RequestContext &ctx = it.value();
-    handleLLMResponse(m_accumulatedResponses[requestId], ctx.originalRequest, false);
+    handleLLMResponse(m_accumulatedResponses[requestId], ctx.originalRequest);
 }
 
 void ClientInterface::handleFullResponse(const QString &requestId, const QString &fullText)
@@ -345,10 +338,11 @@ void ClientInterface::handleFullResponse(const QString &requestId, const QString
                 .arg(requestId, applyError));
     }
 
-    handleLLMResponse(finalText, ctx.originalRequest, true);
-
     m_activeRequests.erase(it);
     m_accumulatedResponses.remove(requestId);
+
+    LOG_MESSAGE("Message completed. Final response for message " + ctx.originalRequest["id"].toString() + ": " + finalText);
+    emit messageReceivedCompletely();
 }
 
 void ClientInterface::handleRequestFailed(const QString &requestId, const QString &error)
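
The three hunks above are the core of the fix: streaming chunks still update the chat entry through handleLLMResponse(), but the final callback no longer re-sends the finished text through that path (which replaced the already-accumulated message); it only logs and emits messageReceivedCompletely(). Below is a minimal, self-contained sketch of that flow. The names ChatModelStub and ClientInterfaceSketch, and the use of std:: types instead of Qt classes, are stand-ins for illustration, not the project's actual ChatModel/ClientInterface API.

    // Sketch of the post-commit message flow, read from the diff above.
    #include <iostream>
    #include <map>
    #include <string>

    struct ChatModelStub {
        // addMessage() with an id that already exists updates that entry in place.
        std::map<std::string, std::string> messages;
        void addMessage(const std::string &text, const std::string &id) { messages[id] = text; }
    };

    class ClientInterfaceSketch {
    public:
        // After the commit there is no isComplete flag; this only updates the chat model.
        void handleLLMResponse(const std::string &response, const std::string &requestId) {
            if (!response.empty())
                m_chatModel.addMessage(response, requestId);
        }

        // Streaming chunks keep growing the accumulated text and re-render the same message.
        void handlePartialResponse(const std::string &requestId, const std::string &partialText) {
            m_accumulatedResponses[requestId] += partialText;
            handleLLMResponse(m_accumulatedResponses[requestId], requestId);
        }

        // The completion callback no longer calls handleLLMResponse() again (which used to
        // replace the already-accumulated message); it only cleans up and signals completion.
        void handleFullResponse(const std::string &requestId, const std::string &finalText) {
            m_accumulatedResponses.erase(requestId);
            std::cout << "Message completed. Final response for message " << requestId
                      << ": " << finalText << "\n";
            // emit messageReceivedCompletely();  // Qt signal in the real code
        }

    private:
        ChatModelStub m_chatModel;
        std::map<std::string, std::string> m_accumulatedResponses;
    };

    int main() {
        ClientInterfaceSketch client;
        client.handlePartialResponse("42", "Hello");
        client.handlePartialResponse("42", ", world");
        client.handleFullResponse("42", "Hello, world");
    }

With this shape, handleFullResponse() cannot overwrite the message that handlePartialResponse() already built up; it only finalizes bookkeeping and notifies listeners.
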
@@ -61,7 +61,7 @@ private slots:
     void handleCleanAccumulatedData(const QString &requestId);
 
 private:
-    void handleLLMResponse(const QString &response, const QJsonObject &request, bool isComplete);
+    void handleLLMResponse(const QString &response, const QJsonObject &request);
     QString getCurrentFileContext() const;
     QString getSystemPromptWithLinkedFiles(
         const QString &basePrompt, const QList<QString> &linkedFiles) const;
@@ -215,7 +215,6 @@ ChatRootView {
                 ThinkingStatusItem {
                     width: parent.width
                     thinkingContent: {
-                        // Extract thinking content and signature
                        let content = model.content
                        let signatureStart = content.indexOf("\n[Signature:")
                        if (signatureStart >= 0) {
@@ -223,17 +222,6 @@ ChatRootView {
                        }
                        return content
                     }
-                    signature: {
-                        let content = model.content
-                        let signatureStart = content.indexOf("\n[Signature: ")
-                        if (signatureStart >= 0) {
-                            let signatureEnd = content.indexOf("...]", signatureStart)
-                            if (signatureEnd >= 0) {
-                                return content.substring(signatureStart + 13, signatureEnd)
-                            }
-                        }
-                        return ""
-                    }
                     isRedacted: model.isRedacted !== undefined ? model.isRedacted : false
                 }
             }
@@ -24,7 +24,7 @@ Rectangle {
     id: root
 
     property string thinkingContent: ""
-    property string signature: ""
+    // property string signature: ""
     property bool isRedacted: false
     property bool expanded: false
 