refactor: Improve code completion message for Instruct models
Some checks failed
Build plugin / ${{ matrix.config.name }} (map[artifact:Linux-x64 cc:gcc cxx:g++ name:Ubuntu Latest GCC os:ubuntu-latest platform:linux_x64]) (push) Has been cancelled
Build plugin / ${{ matrix.config.name }} (map[artifact:Windows-x64 cc:cl cxx:cl environment_script:C:/Program Files/Microsoft Visual Studio/2022/Enterprise/VC/Auxiliary/Build/vcvars64.bat name:Windows Latest MSVC os:windows-latest platform:windows_x64]) (push) Has been cancelled
Build plugin / ${{ matrix.config.name }} (map[artifact:macOS-universal cc:clang cxx:clang++ name:macOS Latest Clang os:macos-latest platform:mac_x64]) (push) Has been cancelled
Build plugin / update_json (push) Has been cancelled
Build plugin / release (push) Has been cancelled

This commit is contained in:
Petr Mironychev 2025-02-12 02:05:37 +01:00
parent 7d23d0323f
commit 60936f6d84
3 changed files with 19 additions and 4 deletions

View File

@ -215,8 +215,12 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
systemPrompt.append(updatedContext.fileContext);
QString userMessage;
if (completeSettings.useUserMessageTemplateForCC() && promptTemplate->type() == LLMCore::TemplateType::Chat) {
userMessage = completeSettings.userMessageTemplateForCC().arg(updatedContext.prefix, updatedContext.suffix);
if (completeSettings.useUserMessageTemplateForCC()
&& promptTemplate->type() == LLMCore::TemplateType::Chat) {
userMessage = processMessageToFIM(
completeSettings.userMessageTemplateForCC(),
updatedContext.prefix,
updatedContext.suffix);
} else {
userMessage = updatedContext.prefix;
}
@ -242,6 +246,15 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
m_requestHandler.sendLLMRequest(config, request);
}
QString LLMClientInterface::processMessageToFIM(
    const QString &templateText, const QString &prefix, const QString &suffix)
{
    // Expand the FIM (fill-in-the-middle) placeholders in the user-message
    // template: "${prefix}" is replaced by the code before the cursor and
    // "${suffix}" by the code after it. QString::replace mutates in place
    // and returns *this, so the two substitutions chain on the local copy.
    QString message{templateText};
    message.replace(QStringLiteral("${prefix}"), prefix)
        .replace(QStringLiteral("${suffix}"), suffix);
    return message;
}
LLMCore::ContextData LLMClientInterface::prepareContext(const QJsonObject &request,
const QStringView &accumulatedCompletion)
{

View File

@ -71,6 +71,8 @@ private:
void startTimeMeasurement(const QString &requestId);
void endTimeMeasurement(const QString &requestId);
void logPerformance(const QString &requestId, const QString &operation, qint64 elapsedMs);
QString processMessageToFIM(
const QString &templateText, const QString &prefix, const QString &suffix);
};
} // namespace QodeAssist

View File

@ -90,7 +90,7 @@ CodeCompletionSettings::CodeCompletionSettings()
maxTokens.setSettingsKey(Constants::CC_MAX_TOKENS);
maxTokens.setLabelText(Tr::tr("Max Tokens:"));
maxTokens.setRange(-1, 900000);
maxTokens.setDefaultValue(50);
maxTokens.setDefaultValue(100);
// Advanced Parameters
useTopP.setSettingsKey(Constants::CC_USE_TOP_P);
@ -180,7 +180,7 @@ CodeCompletionSettings::CodeCompletionSettings()
userMessageTemplateForCC.setDisplayStyle(Utils::StringAspect::TextEditDisplay);
userMessageTemplateForCC.setDefaultValue(
"Here is the code context with insertion points: "
"<code_context>\nBefore:%1\n<cursor>\nAfter:%2\n</code_context>\n\n");
"<code_context>\nBefore:${prefix}\n<cursor>\nAfter:${suffix}\n</code_context>\n\n");
useProjectChangesCache.setSettingsKey(Constants::CC_USE_PROJECT_CHANGES_CACHE);
useProjectChangesCache.setDefaultValue(true);