feat: Add template description in general settings

Petr Mironychev 2025-02-25 21:58:39 +01:00
parent 8dba9b4baa
commit 9d58565de3
18 changed files with 251 additions and 27 deletions

View File

@@ -35,6 +35,28 @@ ConfigurationManager &ConfigurationManager::instance()
void ConfigurationManager::init()
{
setupConnections();
updateAllTemplateDescriptions();
}
void ConfigurationManager::updateTemplateDescription(const Utils::StringAspect &templateAspect)
{
LLMCore::PromptTemplate *templ = m_templateManger.getFimTemplateByName(templateAspect.value());
if (!templ) {
return;
}
if (&templateAspect == &m_generalSettings.ccTemplate) {
m_generalSettings.updateCCTemplateDescription(templ->description());
} else if (&templateAspect == &m_generalSettings.caTemplate) {
m_generalSettings.updateCATemplateDescription(templ->description());
}
}
void ConfigurationManager::updateAllTemplateDescriptions()
{
updateTemplateDescription(m_generalSettings.ccTemplate);
updateTemplateDescription(m_generalSettings.caTemplate);
}
ConfigurationManager::ConfigurationManager(QObject *parent)
@@ -64,6 +86,14 @@ void ConfigurationManager::setupConnections()
connect(&m_generalSettings.ccPreset1SelectModel, &Button::clicked, this, &Config::selectModel);
connect(
&m_generalSettings.ccPreset1SelectTemplate, &Button::clicked, this, &Config::selectTemplate);
connect(&m_generalSettings.ccTemplate, &Utils::StringAspect::changed, this, [this]() {
updateTemplateDescription(m_generalSettings.ccTemplate);
});
connect(&m_generalSettings.caTemplate, &Utils::StringAspect::changed, this, [this]() {
updateTemplateDescription(m_generalSettings.caTemplate);
});
}
void ConfigurationManager::selectProvider()
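The connections above follow a simple pattern: when a template aspect changes, look the template up by name and push its description() into the settings page, skipping the update when the text is unchanged. A standalone sketch of the same pattern with plain Qt types (the QLineEdit, QLabel, and QHash below are illustrative stand-ins for the plugin's Utils::StringAspect, description label, and template manager, not its real API):

// sketch_template_description.cpp -- minimal stand-in for the wiring above.
// QLineEdit plays the role of the template aspect, QLabel the role of
// m_ccTemplateDescription, and a QHash the role of the template manager.
#include <QApplication>
#include <QHash>
#include <QLabel>
#include <QLineEdit>
#include <QVBoxLayout>
#include <QWidget>

int main(int argc, char *argv[])
{
    QApplication app(argc, argv);

    const QHash<QString, QString> descriptions{
        {"ChatML", "Template for models supporting ChatML format"},
        {"Alpaca", "Template for models using Alpaca instruction format"}};

    QWidget page;
    auto *templateName = new QLineEdit(&page); // stand-in for the template aspect
    auto *description = new QLabel(&page);     // stand-in for the description label

    auto *layout = new QVBoxLayout(&page);
    layout->addWidget(templateName);
    layout->addWidget(description);

    // Same idea as setupConnections(): on change, resolve the description by
    // name and update the label only when the text actually differs.
    QObject::connect(templateName, &QLineEdit::textChanged, description,
                     [&descriptions, description](const QString &name) {
                         const QString text = descriptions.value(name);
                         if (text != description->text())
                             description->setText(text);
                     });

    page.show();
    return app.exec();
}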

View File

@@ -36,6 +36,9 @@ public:
void init();
void updateTemplateDescription(const Utils::StringAspect &templateAspect);
void updateAllTemplateDescriptions();
public slots:
void selectProvider();
void selectModel();

View File

@@ -21,6 +21,7 @@
#include <coreplugin/dialogs/ioptionspage.h>
#include <coreplugin/icore.h>
#include <utils/detailswidget.h>
#include <utils/layoutbuilder.h>
#include <utils/utilsicons.h>
#include <QInputDialog>
@@ -144,6 +145,9 @@ GeneralSettings::GeneralSettings()
caStatus.setDefaultValue("");
caTest.m_buttonText = TrConstants::TEST;
m_ccTemplateDescription = new QLabel();
m_caTemplateDescription = new QLabel();
readSettings();
Logger::instance().setLoggingEnabled(enableLogging());
@@ -155,6 +159,16 @@ GeneralSettings::GeneralSettings()
setLayouter([this]() {
using namespace Layouting;
auto ccTemplateInfoCCSection = new Utils::DetailsWidget();
ccTemplateInfoCCSection->setState(Utils::DetailsWidget::Collapsed);
ccTemplateInfoCCSection->setSummaryText("Template Format Details");
ccTemplateInfoCCSection->setWidget(m_ccTemplateDescription);
auto caTemplateInfoCASection = new Utils::DetailsWidget();
caTemplateInfoCASection->setState(Utils::DetailsWidget::Collapsed);
caTemplateInfoCASection->setSummaryText("Template Format Details");
caTemplateInfoCASection->setWidget(m_caTemplateDescription);
auto ccGrid = Grid{};
ccGrid.addRow({ccProvider, ccSelectProvider});
ccGrid.addRow({ccUrl, ccSetUrl});
@@ -175,8 +189,13 @@ GeneralSettings::GeneralSettings()
auto ccGroup = Group{
title(TrConstants::CODE_COMPLETION),
Column{ccGrid, Row{specifyPreset1, preset1Language, Stretch{1}}, ccPreset1Grid}};
auto caGroup = Group{title(TrConstants::CHAT_ASSISTANT), caGrid};
Column{
ccGrid,
ccTemplateInfoCCSection,
Row{specifyPreset1, preset1Language, Stretch{1}},
ccPreset1Grid}};
auto caGroup
= Group{title(TrConstants::CHAT_ASSISTANT), Column{caGrid, caTemplateInfoCASection}};
auto rootLayout = Column{
Row{enableQodeAssist, Stretch{1}, Row{checkUpdate, resetToDefaults}},
@@ -192,10 +211,8 @@ GeneralSettings::GeneralSettings()
});
}
void GeneralSettings::showSelectionDialog(const QStringList &data,
Utils::StringAspect &aspect,
const QString &title,
const QString &text)
void GeneralSettings::showSelectionDialog(
const QStringList &data, Utils::StringAspect &aspect, const QString &title, const QString &text)
{
if (data.isEmpty())
return;
@@ -354,6 +371,18 @@ void GeneralSettings::updatePreset1Visiblity(bool state)
ccPreset1SelectTemplate.updateVisibility(specifyPreset1.volatileValue());
}
void GeneralSettings::updateCCTemplateDescription(const QString &text)
{
if (text != m_ccTemplateDescription->text())
m_ccTemplateDescription->setText(text);
}
void GeneralSettings::updateCATemplateDescription(const QString &text)
{
if (text != m_caTemplateDescription->text())
m_caTemplateDescription->setText(text);
}
void GeneralSettings::setupConnections()
{
connect(&enableLogging, &Utils::BoolAspect::volatileValueChanged, this, [this]() {

View File

@@ -101,9 +101,15 @@ public:
void updatePreset1Visiblity(bool state);
void updateCCTemplateDescription(const QString &text);
void updateCATemplateDescription(const QString &text);
private:
void setupConnections();
void resetPageToDefaults();
QLabel *m_ccTemplateDescription = nullptr;
QLabel *m_caTemplateDescription = nullptr;
};
GeneralSettings &generalSettings();
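In the layout code above, each description label is wrapped in a collapsible Utils::DetailsWidget so the template format details stay out of the way until expanded. A minimal sketch of that section construction, assuming the Qt Creator Utils library is linked (only calls that appear in the diff are used):

// Collapsible "Template Format Details" section, as built in GeneralSettings.
#include <utils/detailswidget.h>
#include <QLabel>

Utils::DetailsWidget *makeTemplateDetailsSection(QLabel *descriptionLabel)
{
    auto *section = new Utils::DetailsWidget();
    section->setState(Utils::DetailsWidget::Collapsed); // closed until the user expands it
    section->setSummaryText("Template Format Details"); // header text shown when collapsed
    section->setWidget(descriptionLabel);               // label becomes the expandable body
    return section;
}

Keeping the QLabel as a member of GeneralSettings lets updateCCTemplateDescription() and updateCATemplateDescription() refresh the body text later without touching the layout.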

View File

@@ -59,9 +59,19 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: ### Instruction:\n### Response:\n";
return "Template for models using Alpaca instruction format:\n\n"
"{\n"
" \"messages\": [\n"
" {\n"
" \"role\": \"user\",\n"
" \"content\": \"<system prompt>\\n\\n"
"### Instruction:\\n<user message>\\n\\n"
"### Response:\\n<assistant response>\\n\\n\"\n"
" }\n"
" ]\n"
"}\n\n"
"Combines all messages into a single formatted prompt.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {

View File

@@ -58,7 +58,20 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: <|im_start|>%1\n%2\n<|im_end|>";
return "Template for models supporting ChatML format:\n\n"
"{\n"
" \"messages\": [\n"
" {\n"
" \"role\": \"system\",\n"
" \"content\": \"<|im_start|>system\\n<system prompt>\\n<|im_end|>\"\n"
" },\n"
" {\n"
" \"role\": \"user\",\n"
" \"content\": \"<|im_start|>user\\n<user message>\\n<|im_end|>\"\n"
" }\n"
" ]\n"
"}\n\n"
"Compatible with multiple providers supporting the ChatML token format.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{

View File

@@ -49,7 +49,18 @@ public:
request["messages"] = messages;
}
QString description() const override { return "Claude"; }
QString description() const override
{
return "Template for Anthropic's Claude models:\n\n"
"{\n"
" \"system\": \"<system prompt>\",\n"
" \"messages\": [\n"
" {\"role\": \"user\", \"content\": \"<user message>\"},\n"
" {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
" ]\n"
"}\n\n"
"Formats content according to Claude API specifications.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {

View File

@@ -40,7 +40,12 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: <PRE> %1 <SUF>%2 <MID>";
return "Specialized template for CodeLlama FIM:\n\n"
"{\n"
" \"prompt\": \"<PRE> <code prefix> <SUF><code suffix> <MID>\",\n"
" \"system\": \"<system prompt>\"\n"
"}\n\n"
"Optimized for code completion with CodeLlama models.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{

View File

@@ -41,7 +41,12 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: <SUF>%1<PRE>%2<MID>";
return "Specialized template for QML code completion with CodeLlama:\n\n"
"{\n"
" \"prompt\": \"<SUF><code suffix><PRE><code prefix><MID>\",\n"
" \"system\": \"<system prompt>\"\n"
"}\n\n"
"Specifically optimized for QML/JavaScript code completion.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{

View File

@@ -61,7 +61,24 @@ public:
request["contents"] = contents;
}
QString description() const override { return "Google AI (Gemini)"; }
QString description() const override
{
return "Template for Google AI models (Gemini):\n\n"
"{\n"
" \"system_instruction\": {\"parts\": {\"text\": \"<system prompt>\"}},\n"
" \"contents\": [\n"
" {\n"
" \"role\": \"user\",\n"
" \"parts\": [{\"text\": \"<user message>\"}]\n"
" },\n"
" {\n"
" \"role\": \"model\",\n"
" \"parts\": [{\"text\": \"<assistant response>\"}]\n"
" }\n"
" ]\n"
"}\n\n"
"Supports proper role mapping, including model/user roles.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
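The Gemini description above is the most nested of the formats: a system_instruction object plus a contents array whose entries carry parts arrays. A self-contained sketch of building that structure with Qt's JSON classes (the function name and values are illustrative, not the plugin's API):

// gemini_request_sketch.cpp -- the nested Gemini structure described above.
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QDebug>

QJsonObject prepareGeminiRequest(const QString &systemPrompt, const QString &userMessage)
{
    QJsonObject request;
    // system_instruction holds a parts object with the system prompt text.
    request["system_instruction"]
        = QJsonObject{{"parts", QJsonObject{{"text", systemPrompt}}}};

    // contents is an array of role + parts entries.
    QJsonArray contents;
    contents.append(QJsonObject{
        {"role", "user"},
        {"parts", QJsonArray{QJsonObject{{"text", userMessage}}}}});
    request["contents"] = contents;
    return request;
}

int main()
{
    qDebug().noquote() << QJsonDocument(prepareGeminiRequest("You are a coding assistant.",
                                                             "Explain QJsonObject."))
                              .toJson();
    return 0;
}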

View File

@@ -57,7 +57,18 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: [INST]%1[/INST]\n";
return "Template for Llama 2 models:\n\n"
"{\n"
" \"messages\": [\n"
" {\n"
" \"role\": \"user\",\n"
" \"content\": \"[INST]<<SYS>>\\n<system prompt>\\n<</SYS>>[/INST]\\n"
"<assistant response>\\n"
"[INST]<user message>[/INST]\\n\"\n"
" }\n"
" ]\n"
"}\n\n"
"Compatible with Ollama, LM Studio, and other services for Llama 2.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{

View File

@@ -60,8 +60,22 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: "
"<|start_header_id|>%1<|end_header_id|>%2<|eot_id|>";
return "Template for Llama 3 models:\n\n"
"{\n"
" \"messages\": [\n"
" {\n"
" \"role\": \"system\",\n"
" \"content\": \"<|start_header_id|>system<|end_header_id|><system "
"prompt><|eot_id|>\"\n"
" },\n"
" {\n"
" \"role\": \"user\",\n"
" \"content\": \"<|start_header_id|>user<|end_header_id|><user "
"message><|eot_id|>\"\n"
" }\n"
" ]\n"
"}\n\n"
"Compatible with Ollama, LM Studio, and OpenAI-compatible services for Llama 3.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{

View File

@@ -36,7 +36,15 @@ public:
request["prompt"] = context.prefix.value_or("");
request["suffix"] = context.suffix.value_or("");
}
QString description() const override { return "template will take from ollama modelfile"; }
QString description() const override
{
return "Template for MistralAI models with FIM support:\n\n"
"{\n"
" \"prompt\": \"<code prefix>\",\n"
" \"suffix\": \"<code suffix>\"\n"
"}\n\n"
"Optimized for code completion with MistralAI models.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {
@@ -72,7 +80,18 @@ public:
request["messages"] = messages;
}
QString description() const override { return "template will take from ollama modelfile"; }
QString description() const override
{
return "Template for MistralAI chat-capable models:\n\n"
"{\n"
" \"messages\": [\n"
" {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
" {\"role\": \"user\", \"content\": \"<user message>\"},\n"
" {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
" ]\n"
"}\n\n"
"Supports system messages and conversation history.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {

View File

@@ -37,7 +37,16 @@ public:
request["suffix"] = context.suffix.value_or("");
request["system"] = context.systemPrompt.value_or("");
}
QString description() const override { return "template will take from ollama modelfile"; }
QString description() const override
{
return "Default Ollama FIM (Fill-in-Middle) template with native format:\n\n"
"{\n"
" \"prompt\": \"<code prefix>\",\n"
" \"suffix\": \"<code suffix>\",\n"
" \"system\": \"<system prompt>\"\n"
"}\n\n"
"Recommended for Ollama models with FIM capability.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {
@@ -73,7 +82,18 @@ public:
request["messages"] = messages;
}
QString description() const override { return "template will take from ollama modelfile"; }
QString description() const override
{
return "Template for Ollama Chat with message array format:\n\n"
"{\n"
" \"messages\": [\n"
" {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
" {\"role\": \"user\", \"content\": \"<user message>\"},\n"
" {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
" ]\n"
"}\n\n"
"Recommended for Ollama models with chat capability.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {
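The FIM templates (CodeLlama, Qwen, StarCoder2, MistralAI FIM, Ollama FIM) all map the completion context onto a handful of top-level JSON fields, as the prompt/suffix/system assignments above show. A self-contained sketch of that mapping; ContextData here is a hypothetical stand-in for the plugin's LLMCore context type, while the JSON keys match the diff:

// fim_request_sketch.cpp -- assembling a FIM request body from editor context.
#include <QJsonDocument>
#include <QJsonObject>
#include <QString>
#include <QDebug>
#include <optional>

struct ContextData
{
    std::optional<QString> prefix;       // code before the cursor
    std::optional<QString> suffix;       // code after the cursor
    std::optional<QString> systemPrompt; // instructions for the model
};

QJsonObject prepareFimRequest(const ContextData &context)
{
    QJsonObject request;
    request["prompt"] = context.prefix.value_or("");
    request["suffix"] = context.suffix.value_or("");
    request["system"] = context.systemPrompt.value_or("");
    return request;
}

int main()
{
    const ContextData context{QStringLiteral("int add(int a, int b) {\n    return "),
                              QStringLiteral(";\n}\n"),
                              QStringLiteral("Complete the code.")};
    qDebug().noquote() << QJsonDocument(prepareFimRequest(context)).toJson();
    return 0;
}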

View File

@@ -48,7 +48,18 @@ public:
request["messages"] = messages;
}
QString description() const override { return "OpenAI"; }
QString description() const override
{
return "Template for OpenAI models (GPT series):\n\n"
"{\n"
" \"messages\": [\n"
" {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
" {\"role\": \"user\", \"content\": \"<user message>\"},\n"
" {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
" ]\n"
"}\n\n"
"Standard Chat API format for OpenAI.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {
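The chat-style templates assemble an array of role/content objects and hand it to the request with request["messages"] = messages. A short self-contained sketch of that assembly (the function name and message contents are illustrative; the plugin derives them from the conversation history):

// chat_request_sketch.cpp -- building the "messages" array the chat templates describe.
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QDebug>

QJsonObject prepareChatRequest(const QString &systemPrompt, const QString &userMessage)
{
    QJsonArray messages;
    messages.append(QJsonObject{{"role", "system"}, {"content", systemPrompt}});
    messages.append(QJsonObject{{"role", "user"}, {"content", userMessage}});

    QJsonObject request;
    request["messages"] = messages;
    return request;
}

int main()
{
    const QJsonObject request
        = prepareChatRequest("You are a coding assistant.", "Explain std::optional.");
    qDebug().noquote() << QJsonDocument(request).toJson();
    return 0;
}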

View File

@@ -48,7 +48,18 @@ public:
request["messages"] = messages;
}
QString description() const override { return "chat without tokens"; }
QString description() const override
{
return "Generic template for OpenAI API-compatible services:\n\n"
"{\n"
" \"messages\": [\n"
" {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
" {\"role\": \"user\", \"content\": \"<user message>\"},\n"
" {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
" ]\n"
"}\n\n"
"Works with any service implementing the OpenAI Chat API specification.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{
switch (id) {

View File

@@ -38,8 +38,13 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: "
"<|fim_prefix|>%1<|fim_suffix|>%2<|fim_middle|>";
return "Template for Qwen models with FIM support:\n\n"
"{\n"
" \"prompt\": \"<|fim_prefix|><code prefix><|fim_suffix|><code "
"suffix><|fim_middle|>\",\n"
" \"system\": \"<system prompt>\"\n"
"}\n\n"
"Ideal for code completion with Qwen models.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{

View File

@@ -41,8 +41,12 @@ public:
}
QString description() const override
{
return "The message will contain the following tokens: "
"<fim_prefix>%1<fim_suffix>%2<fim_middle>";
return "Template for StarCoder2 with FIM format:\n\n"
"{\n"
" \"prompt\": \"<fim_prefix><code prefix><fim_suffix><code suffix><fim_middle>\",\n"
" \"system\": \"<system prompt>\"\n"
"}\n\n"
"Includes stop words to prevent token duplication.";
}
bool isSupportProvider(LLMCore::ProviderID id) const override
{