From 9d58565de35a066d86cc76b02cffda9cdc919c6f Mon Sep 17 00:00:00 2001
From: Petr Mironychev <9195189+Palm1r@users.noreply.github.com>
Date: Tue, 25 Feb 2025 21:58:39 +0100
Subject: [PATCH] feat: Add template description in general settings

---
 ConfigurationManager.cpp       | 30 ++++++++++++++++++++++++
 ConfigurationManager.hpp       |  3 +++
 settings/GeneralSettings.cpp   | 43 ++++++++++++++++++++++++++++------
 settings/GeneralSettings.hpp   |  6 +++++
 templates/Alpaca.hpp           | 14 +++++++++--
 templates/ChatML.hpp           | 15 +++++++++++-
 templates/Claude.hpp           | 13 +++++++++-
 templates/CodeLlamaFim.hpp     |  7 +++++-
 templates/CodeLlamaQMLFim.hpp  |  7 +++++-
 templates/GoogleAI.hpp         | 19 ++++++++++++++-
 templates/Llama2.hpp           | 13 +++++++++-
 templates/Llama3.hpp           | 18 ++++++++++++--
 templates/MistralAI.hpp        | 23 ++++++++++++++++--
 templates/Ollama.hpp           | 24 +++++++++++++++++--
 templates/OpenAI.hpp           | 13 +++++++++-
 templates/OpenAICompatible.hpp | 13 +++++++++-
 templates/Qwen.hpp             |  9 +++++--
 templates/StarCoder2Fim.hpp    |  8 +++++-
 18 files changed, 251 insertions(+), 27 deletions(-)

diff --git a/ConfigurationManager.cpp b/ConfigurationManager.cpp
index ec19375..ecf6fb1 100644
--- a/ConfigurationManager.cpp
+++ b/ConfigurationManager.cpp
@@ -35,6 +35,28 @@ ConfigurationManager &ConfigurationManager::instance()
 void ConfigurationManager::init()
 {
     setupConnections();
+    updateAllTemplateDescriptions();
+}
+
+void ConfigurationManager::updateTemplateDescription(const Utils::StringAspect &templateAspect)
+{
+    LLMCore::PromptTemplate *templ = m_templateManger.getFimTemplateByName(templateAspect.value());
+
+    if (!templ) {
+        return;
+    }
+
+    if (&templateAspect == &m_generalSettings.ccTemplate) {
+        m_generalSettings.updateCCTemplateDescription(templ->description());
+    } else if (&templateAspect == &m_generalSettings.caTemplate) {
+        m_generalSettings.updateCATemplateDescription(templ->description());
+    }
+}
+
+void ConfigurationManager::updateAllTemplateDescriptions()
+{
+    updateTemplateDescription(m_generalSettings.ccTemplate);
+    updateTemplateDescription(m_generalSettings.caTemplate);
 }
 
 ConfigurationManager::ConfigurationManager(QObject *parent)
@@ -64,6 +86,14 @@ void ConfigurationManager::setupConnections()
     connect(&m_generalSettings.ccPreset1SelectModel, &Button::clicked, this, &Config::selectModel);
     connect(
         &m_generalSettings.ccPreset1SelectTemplate, &Button::clicked, this, &Config::selectTemplate);
+
+    connect(&m_generalSettings.ccTemplate, &Utils::StringAspect::changed, this, [this]() {
+        updateTemplateDescription(m_generalSettings.ccTemplate);
+    });
+
+    connect(&m_generalSettings.caTemplate, &Utils::StringAspect::changed, this, [this]() {
+        updateTemplateDescription(m_generalSettings.caTemplate);
+    });
 }
 
 void ConfigurationManager::selectProvider()
diff --git a/ConfigurationManager.hpp b/ConfigurationManager.hpp
index 3d2d414..5f668e5 100644
--- a/ConfigurationManager.hpp
+++ b/ConfigurationManager.hpp
@@ -36,6 +36,9 @@ public:
 
     void init();
 
+    void updateTemplateDescription(const Utils::StringAspect &templateAspect);
+    void updateAllTemplateDescriptions();
+
 public slots:
     void selectProvider();
     void selectModel();
diff --git a/settings/GeneralSettings.cpp b/settings/GeneralSettings.cpp
index 2661ae0..27766be 100644
--- a/settings/GeneralSettings.cpp
+++ b/settings/GeneralSettings.cpp
@@ -21,6 +21,7 @@
 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -144,6 +145,9 @@ GeneralSettings::GeneralSettings()
     caStatus.setDefaultValue("");
     caTest.m_buttonText = TrConstants::TEST;
 
+    m_ccTemplateDescription = new QLabel();
+    m_caTemplateDescription = new QLabel();
+
     readSettings();
 
     Logger::instance().setLoggingEnabled(enableLogging());
@@ -155,6 +159,16 @@ GeneralSettings::GeneralSettings()
     setLayouter([this]() {
         using namespace Layouting;
 
+        auto ccTemplateInfoCCSection = new Utils::DetailsWidget();
+        ccTemplateInfoCCSection->setState(Utils::DetailsWidget::Collapsed);
+        ccTemplateInfoCCSection->setSummaryText("Template Format Details");
+        ccTemplateInfoCCSection->setWidget(m_ccTemplateDescription);
+
+        auto caTemplateInfoCASection = new Utils::DetailsWidget();
+        caTemplateInfoCASection->setState(Utils::DetailsWidget::Collapsed);
+        caTemplateInfoCASection->setSummaryText("Template Format Details");
+        caTemplateInfoCASection->setWidget(m_caTemplateDescription);
+
         auto ccGrid = Grid{};
         ccGrid.addRow({ccProvider, ccSelectProvider});
         ccGrid.addRow({ccUrl, ccSetUrl});
@@ -175,8 +189,13 @@ GeneralSettings::GeneralSettings()
 
         auto ccGroup = Group{
             title(TrConstants::CODE_COMPLETION),
-            Column{ccGrid, Row{specifyPreset1, preset1Language, Stretch{1}}, ccPreset1Grid}};
-        auto caGroup = Group{title(TrConstants::CHAT_ASSISTANT), caGrid};
+            Column{
+                ccGrid,
+                ccTemplateInfoCCSection,
+                Row{specifyPreset1, preset1Language, Stretch{1}},
+                ccPreset1Grid}};
+        auto caGroup
+            = Group{title(TrConstants::CHAT_ASSISTANT), Column{caGrid, caTemplateInfoCASection}};
 
         auto rootLayout = Column{
             Row{enableQodeAssist, Stretch{1}, Row{checkUpdate, resetToDefaults}},
@@ -192,10 +211,8 @@ GeneralSettings::GeneralSettings()
     });
 }
 
-void GeneralSettings::showSelectionDialog(const QStringList &data,
-                                          Utils::StringAspect &aspect,
-                                          const QString &title,
-                                          const QString &text)
+void GeneralSettings::showSelectionDialog(
+    const QStringList &data, Utils::StringAspect &aspect, const QString &title, const QString &text)
 {
     if (data.isEmpty())
         return;
@@ -354,6 +371,18 @@ void GeneralSettings::updatePreset1Visiblity(bool state)
     ccPreset1SelectTemplate.updateVisibility(specifyPreset1.volatileValue());
 }
 
+void GeneralSettings::updateCCTemplateDescription(const QString &text)
+{
+    if (text != m_ccTemplateDescription->text())
+        m_ccTemplateDescription->setText(text);
+}
+
+void GeneralSettings::updateCATemplateDescription(const QString &text)
+{
+    if (text != m_caTemplateDescription->text())
+        m_caTemplateDescription->setText(text);
+}
+
 void GeneralSettings::setupConnections()
 {
     connect(&enableLogging, &Utils::BoolAspect::volatileValueChanged, this, [this]() {
@@ -366,7 +395,7 @@
 
     connect(&specifyPreset1, &Utils::BoolAspect::volatileValueChanged, this, [this]() {
         updatePreset1Visiblity(specifyPreset1.volatileValue());
-    });
+    });
 }
 
 void GeneralSettings::resetPageToDefaults()
diff --git a/settings/GeneralSettings.hpp b/settings/GeneralSettings.hpp
index 9a1a902..4702a27 100644
--- a/settings/GeneralSettings.hpp
+++ b/settings/GeneralSettings.hpp
@@ -101,9 +101,15 @@ public:
 
     void updatePreset1Visiblity(bool state);
 
+    void updateCCTemplateDescription(const QString &text);
+    void updateCATemplateDescription(const QString &text);
+
 private:
     void setupConnections();
     void resetPageToDefaults();
+
+    QLabel *m_ccTemplateDescription = nullptr;
+    QLabel *m_caTemplateDescription = nullptr;
 };
 
 GeneralSettings &generalSettings();
diff --git a/templates/Alpaca.hpp b/templates/Alpaca.hpp
index 7ea9749..89bb097 100644
--- a/templates/Alpaca.hpp
+++ b/templates/Alpaca.hpp
@@ -59,9 +59,19 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: ### Instruction:\n### Response:\n";
return "Template for models using Alpaca instruction format:\n\n" + "{\n" + " \"messages\": [\n" + " {\n" + " \"role\": \"user\",\n" + " \"content\": \"\\n\\n" + "### Instruction:\\n\\n\\n" + "### Response:\\n\\n\\n\"\n" + " }\n" + " ]\n" + "}\n\n" + "Combines all messages into a single formatted prompt."; } - bool isSupportProvider(LLMCore::ProviderID id) const override { switch (id) { diff --git a/templates/ChatML.hpp b/templates/ChatML.hpp index 7818606..c76399b 100644 --- a/templates/ChatML.hpp +++ b/templates/ChatML.hpp @@ -58,7 +58,20 @@ public: } QString description() const override { - return "The message will contain the following tokens: <|im_start|>%1\n%2\n<|im_end|>"; + return "Template for models supporting ChatML format:\n\n" + "{\n" + " \"messages\": [\n" + " {\n" + " \"role\": \"system\",\n" + " \"content\": \"<|im_start|>system\\n\\n<|im_end|>\"\n" + " },\n" + " {\n" + " \"role\": \"user\",\n" + " \"content\": \"<|im_start|>user\\n\\n<|im_end|>\"\n" + " }\n" + " ]\n" + "}\n\n" + "Compatible with multiple providers supporting the ChatML token format."; } bool isSupportProvider(LLMCore::ProviderID id) const override { diff --git a/templates/Claude.hpp b/templates/Claude.hpp index 9ef77ca..d8256fe 100644 --- a/templates/Claude.hpp +++ b/templates/Claude.hpp @@ -49,7 +49,18 @@ public: request["messages"] = messages; } - QString description() const override { return "Claude"; } + QString description() const override + { + return "Template for Anthropic's Claude models:\n\n" + "{\n" + " \"system\": \"\",\n" + " \"messages\": [\n" + " {\"role\": \"user\", \"content\": \"\"},\n" + " {\"role\": \"assistant\", \"content\": \"\"}\n" + " ]\n" + "}\n\n" + "Formats content according to Claude API specifications."; + } bool isSupportProvider(LLMCore::ProviderID id) const override { switch (id) { diff --git a/templates/CodeLlamaFim.hpp b/templates/CodeLlamaFim.hpp index d957ae4..42e68bb 100644 --- a/templates/CodeLlamaFim.hpp +++ b/templates/CodeLlamaFim.hpp @@ -40,7 +40,12 @@ public: } QString description() const override { - return "The message will contain the following tokens:
 %1 %2 ";
+        return "Specialized template for CodeLlama FIM:\n\n"
+               "{\n"
+               "  \"prompt\": \"
   \",\n"
+               "  \"system\": \"\"\n"
+               "}\n\n"
+               "Optimized for code completion with CodeLlama models.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
diff --git a/templates/CodeLlamaQMLFim.hpp b/templates/CodeLlamaQMLFim.hpp
index d746493..644fa94 100644
--- a/templates/CodeLlamaQMLFim.hpp
+++ b/templates/CodeLlamaQMLFim.hpp
@@ -41,7 +41,12 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: %1
%2";
+        return "Specialized template for QML code completion with CodeLlama:\n\n"
+               "{\n"
+               "  \"prompt\": \"
\",\n"
+               "  \"system\": \"\"\n"
+               "}\n\n"
+               "Specifically optimized for QML/JavaScript code completion.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
diff --git a/templates/GoogleAI.hpp b/templates/GoogleAI.hpp
index 28932c9..672f3d0 100644
--- a/templates/GoogleAI.hpp
+++ b/templates/GoogleAI.hpp
@@ -61,7 +61,24 @@ public:
         request["contents"] = contents;
     }
 
-    QString description() const override { return "Google AI (Gemini)"; }
+    QString description() const override
+    {
+        return "Template for Google AI models (Gemini):\n\n"
+               "{\n"
+               "  \"system_instruction\": {\"parts\": {\"text\": \"\"}},\n"
+               "  \"contents\": [\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"parts\": [{\"text\": \"\"}]\n"
+               "    },\n"
+               "    {\n"
+               "      \"role\": \"model\",\n"
+               "      \"parts\": [{\"text\": \"\"}]\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Supports proper role mapping, including model/user roles.";
+    }
 
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
diff --git a/templates/Llama2.hpp b/templates/Llama2.hpp
index 65c02dc..c80f86b 100644
--- a/templates/Llama2.hpp
+++ b/templates/Llama2.hpp
@@ -57,7 +57,18 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: [INST]%1[/INST]\n";
+        return "Template for Llama 2 models:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"content\": \"[INST]<>\\n\\n<>[/INST]\\n"
+               "\\n"
+               "[INST][/INST]\\n\"\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Compatible with Ollama, LM Studio, and other services for Llama 2.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
diff --git a/templates/Llama3.hpp b/templates/Llama3.hpp
index 37f4d3d..28814b9 100644
--- a/templates/Llama3.hpp
+++ b/templates/Llama3.hpp
@@ -60,8 +60,22 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: "
-               "<|start_header_id|>%1<|end_header_id|>%2<|eot_id|>";
+        return "Template for Llama 3 models:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\n"
+               "      \"role\": \"system\",\n"
+               "      \"content\": \"<|start_header_id|>system<|end_header_id|><|eot_id|>\"\n"
+               "    },\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"content\": \"<|start_header_id|>user<|end_header_id|><|eot_id|>\"\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Compatible with Ollama, LM Studio, and OpenAI-compatible services for Llama 3.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
diff --git a/templates/MistralAI.hpp b/templates/MistralAI.hpp
index 8db4631..7672894 100644
--- a/templates/MistralAI.hpp
+++ b/templates/MistralAI.hpp
@@ -36,7 +36,15 @@ public:
         request["prompt"] = context.prefix.value_or("");
         request["suffix"] = context.suffix.value_or("");
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Template for MistralAI models with FIM support:\n\n"
+               "{\n"
+               "  \"prompt\": \"\",\n"
+               "  \"suffix\": \"\"\n"
+               "}\n\n"
+               "Optimized for code completion with MistralAI models.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -72,7 +80,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Template for MistralAI chat-capable models:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"\"},\n"
+               "    {\"role\": \"user\", \"content\": \"\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Supports system messages and conversation history.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
diff --git a/templates/Ollama.hpp b/templates/Ollama.hpp
index d688eb3..80d8df4 100644
--- a/templates/Ollama.hpp
+++ b/templates/Ollama.hpp
@@ -37,7 +37,16 @@ public:
         request["suffix"] = context.suffix.value_or("");
         request["system"] = context.systemPrompt.value_or("");
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Default Ollama FIM (Fill-in-Middle) template with native format:\n\n"
+               "{\n"
+               "  \"prompt\": \"\",\n"
+               "  \"suffix\": \"\",\n"
+               "  \"system\": \"\"\n"
+               "}\n\n"
+               "Recommended for Ollama models with FIM capability.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -73,7 +82,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Template for Ollama Chat with message array format:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"\"},\n"
+               "    {\"role\": \"user\", \"content\": \"\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Recommended for Ollama models with chat capability.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
diff --git a/templates/OpenAI.hpp b/templates/OpenAI.hpp
index 5267062..5676347 100644
--- a/templates/OpenAI.hpp
+++ b/templates/OpenAI.hpp
@@ -48,7 +48,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "OpenAI"; }
+    QString description() const override
+    {
+        return "Template for OpenAI models (GPT series):\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"\"},\n"
+               "    {\"role\": \"user\", \"content\": \"\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Standard Chat API format for OpenAI.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
diff --git a/templates/OpenAICompatible.hpp b/templates/OpenAICompatible.hpp
index c549187..fcfdb82 100644
--- a/templates/OpenAICompatible.hpp
+++ b/templates/OpenAICompatible.hpp
@@ -48,7 +48,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "chat without tokens"; }
+    QString description() const override
+    {
+        return "Generic template for OpenAI API-compatible services:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"\"},\n"
+               "    {\"role\": \"user\", \"content\": \"\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Works with any service implementing the OpenAI Chat API specification.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
diff --git a/templates/Qwen.hpp b/templates/Qwen.hpp
index 4efd015..6d6ba98 100644
--- a/templates/Qwen.hpp
+++ b/templates/Qwen.hpp
@@ -38,8 +38,13 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: "
-               "<|fim_prefix|>%1<|fim_suffix|>%2<|fim_middle|>";
+        return "Template for Qwen models with FIM support:\n\n"
+               "{\n"
+               "  \"prompt\": \"<|fim_prefix|><|fim_suffix|><|fim_middle|>\",\n"
+               "  \"system\": \"\"\n"
+               "}\n\n"
+               "Ideal for code completion with Qwen models.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
diff --git a/templates/StarCoder2Fim.hpp b/templates/StarCoder2Fim.hpp
index f00db39..dcd9e6a 100644
--- a/templates/StarCoder2Fim.hpp
+++ b/templates/StarCoder2Fim.hpp
@@ -41,8 +41,12 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: "
-               "%1%2";
+        return "Template for StarCoder2 with FIM format:\n\n"
+               "{\n"
+               "  \"prompt\": \"\",\n"
+               "  \"system\": \"\"\n"
+               "}\n\n"
+               "Includes stop words to prevent token duplication.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {