Mirror of https://github.com/Palm1r/QodeAssist.git (synced 2025-07-18 21:14:34 -04:00)
feat: Add template description in general settings
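Every hunk below replaces the body of a description() override in one of QodeAssist's prompt template classes so the general settings page can show what each template actually sends. For orientation, here is a minimal sketch of the kind of class these hunks edit; the base-class name, enum values, and ContextData fields are assumptions inferred from the diff, not the plugin's verified API.

// Hypothetical sketch only: names marked "assumed" are inferred from the diff.
#include <QJsonArray>
#include <QJsonObject>
#include <QString>
#include <optional>

namespace LLMCore {

enum class ProviderID { Ollama, OpenAI, Claude, GoogleAI }; // assumed subset

struct ContextData {                                        // assumed shape
    std::optional<QString> systemPrompt;
    std::optional<QString> userMessage;
};

class PromptTemplate {                                      // assumed base class
public:
    virtual ~PromptTemplate() = default;
    virtual void prepareRequest(QJsonObject &request, const ContextData &context) const = 0;
    virtual QString description() const = 0;                // text shown in general settings
    virtual bool isSupportProvider(ProviderID id) const = 0;
};

} // namespace LLMCore

// A concrete template following the same pattern the hunks below touch.
class ExampleChatTemplate : public LLMCore::PromptTemplate {
public:
    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
    {
        QJsonArray messages;
        messages.append(QJsonObject{{"role", "system"},
                                    {"content", context.systemPrompt.value_or(QString())}});
        messages.append(QJsonObject{{"role", "user"},
                                    {"content", context.userMessage.value_or(QString())}});
        request["messages"] = messages;
    }
    QString description() const override
    {
        return "Example description string surfaced on the settings page.";
    }
    bool isSupportProvider(LLMCore::ProviderID id) const override
    {
        return id == LLMCore::ProviderID::OpenAI || id == LLMCore::ProviderID::Ollama;
    }
};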
@@ -59,9 +59,19 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: ### Instruction:\n### Response:\n";
+        return "Template for models using Alpaca instruction format:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"content\": \"<system prompt>\\n\\n"
+               "### Instruction:\\n<user message>\\n\\n"
+               "### Response:\\n<assistant response>\\n\\n\"\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Combines all messages into a single formatted prompt.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
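As a rough illustration of the Alpaca layout documented above, a helper could assemble the single prompt string like this (a sketch only, not code from this commit; the function name is made up):

#include <QString>

// Sketch: build one Alpaca-format prompt from the pieces named in the
// description above (hypothetical helper, not the plugin's code).
QString buildAlpacaPrompt(const QString &systemPrompt, const QString &userMessage)
{
    return systemPrompt + "\n\n"
           "### Instruction:\n" + userMessage + "\n\n"
           "### Response:\n";
}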
@@ -58,7 +58,20 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: <|im_start|>%1\n%2\n<|im_end|>";
+        return "Template for models supporting ChatML format:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\n"
+               "      \"role\": \"system\",\n"
+               "      \"content\": \"<|im_start|>system\\n<system prompt>\\n<|im_end|>\"\n"
+               "    },\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"content\": \"<|im_start|>user\\n<user message>\\n<|im_end|>\"\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Compatible with multiple providers supporting the ChatML token format.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
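The one-line description removed above encoded the per-message pattern <|im_start|>%1\n%2\n<|im_end|>. For reference, that wrapping could be expressed as follows (hypothetical helper, not part of this commit):

#include <QString>

// Sketch: wrap a single role/content pair in ChatML tokens.
QString chatMlBlock(const QString &role, const QString &content)
{
    return QStringLiteral("<|im_start|>%1\n%2\n<|im_end|>").arg(role, content);
}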
@@ -49,7 +49,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "Claude"; }
+    QString description() const override
+    {
+        return "Template for Anthropic's Claude models:\n\n"
+               "{\n"
+               "  \"system\": \"<system prompt>\",\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"user\", \"content\": \"<user message>\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Formats content according to Claude API specifications.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
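For comparison, a request body shaped like the Claude description above can be built with Qt's JSON classes roughly as follows (illustrative sketch; the helper name is made up and only the fields shown in the description are included):

#include <QJsonArray>
#include <QJsonObject>
#include <QString>

// Sketch: assemble a Claude-style body with a top-level system field
// and a messages array (placeholder content).
QJsonObject buildClaudeStyleBody(const QString &systemPrompt, const QString &userMessage)
{
    QJsonObject request;
    request["system"] = systemPrompt;
    request["messages"] = QJsonArray{
        QJsonObject{{"role", "user"}, {"content", userMessage}}
    };
    return request;
}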
@@ -40,7 +40,12 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: <PRE> %1 <SUF>%2 <MID>";
+        return "Specialized template for CodeLlama FIM:\n\n"
+               "{\n"
+               "  \"prompt\": \"<PRE> <code prefix> <SUF><code suffix> <MID>\",\n"
+               "  \"system\": \"<system prompt>\"\n"
+               "}\n\n"
+               "Optimized for code completion with CodeLlama models.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
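The removed description captured the FIM token order <PRE> %1 <SUF>%2 <MID>. Assembling such a prompt could look like this (hypothetical helper, not from this commit):

#include <QString>

// Sketch: CodeLlama fill-in-middle prompt from a code prefix and suffix.
QString codeLlamaFimPrompt(const QString &prefix, const QString &suffix)
{
    return QStringLiteral("<PRE> %1 <SUF>%2 <MID>").arg(prefix, suffix);
}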
@@ -41,7 +41,12 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: <SUF>%1<PRE>%2<MID>";
+        return "Specialized template for QML code completion with CodeLlama:\n\n"
+               "{\n"
+               "  \"prompt\": \"<SUF><code suffix><PRE><code prefix><MID>\",\n"
+               "  \"system\": \"<system prompt>\"\n"
+               "}\n\n"
+               "Specifically optimized for QML/JavaScript code completion.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
@@ -61,7 +61,24 @@ public:
         request["contents"] = contents;
     }
 
-    QString description() const override { return "Google AI (Gemini)"; }
+    QString description() const override
+    {
+        return "Template for Google AI models (Gemini):\n\n"
+               "{\n"
+               "  \"system_instruction\": {\"parts\": {\"text\": \"<system prompt>\"}},\n"
+               "  \"contents\": [\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"parts\": [{\"text\": \"<user message>\"}]\n"
+               "    },\n"
+               "    {\n"
+               "      \"role\": \"model\",\n"
+               "      \"parts\": [{\"text\": \"<assistant response>\"}]\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Supports proper role mapping, including model/user roles.";
+    }
 
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
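A Gemini-style body matching the description above could be assembled roughly like this (sketch only; the helper name is an assumption and only the documented fields are filled in):

#include <QJsonArray>
#include <QJsonObject>
#include <QString>

// Sketch: system_instruction plus a contents array with parts/text entries.
QJsonObject buildGeminiStyleBody(const QString &systemPrompt, const QString &userMessage)
{
    QJsonObject request;
    request["system_instruction"] = QJsonObject{
        {"parts", QJsonObject{{"text", systemPrompt}}}
    };
    request["contents"] = QJsonArray{
        QJsonObject{
            {"role", "user"},
            {"parts", QJsonArray{QJsonObject{{"text", userMessage}}}}
        }
    };
    return request;
}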
@@ -57,7 +57,18 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: [INST]%1[/INST]\n";
+        return "Template for Llama 2 models:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"content\": \"[INST]<<SYS>>\\n<system prompt>\\n<</SYS>>[/INST]\\n"
+               "<assistant response>\\n"
+               "[INST]<user message>[/INST]\\n\"\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Compatible with Ollama, LM Studio, and other services for Llama 2.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
@@ -60,8 +60,22 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: "
-               "<|start_header_id|>%1<|end_header_id|>%2<|eot_id|>";
+        return "Template for Llama 3 models:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\n"
+               "      \"role\": \"system\",\n"
+               "      \"content\": \"<|start_header_id|>system<|end_header_id|><system "
+               "prompt><|eot_id|>\"\n"
+               "    },\n"
+               "    {\n"
+               "      \"role\": \"user\",\n"
+               "      \"content\": \"<|start_header_id|>user<|end_header_id|><user "
+               "message><|eot_id|>\"\n"
+               "    }\n"
+               "  ]\n"
+               "}\n\n"
+               "Compatible with Ollama, LM Studio, and OpenAI-compatible services for Llama 3.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
@@ -36,7 +36,15 @@ public:
         request["prompt"] = context.prefix.value_or("");
         request["suffix"] = context.suffix.value_or("");
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Template for MistralAI models with FIM support:\n\n"
+               "{\n"
+               "  \"prompt\": \"<code prefix>\",\n"
+               "  \"suffix\": \"<code suffix>\"\n"
+               "}\n\n"
+               "Optimized for code completion with MistralAI models.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -72,7 +80,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Template for MistralAI chat-capable models:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
+               "    {\"role\": \"user\", \"content\": \"<user message>\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Supports system messages and conversation history.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -37,7 +37,16 @@ public:
         request["suffix"] = context.suffix.value_or("");
         request["system"] = context.systemPrompt.value_or("");
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Default Ollama FIM (Fill-in-Middle) template with native format:\n\n"
+               "{\n"
+               "  \"prompt\": \"<code prefix>\",\n"
+               "  \"suffix\": \"<code suffix>\",\n"
+               "  \"system\": \"<system prompt>\"\n"
+               "}\n\n"
+               "Recommended for Ollama models with FIM capability.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -73,7 +82,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "template will take from ollama modelfile"; }
+    QString description() const override
+    {
+        return "Template for Ollama Chat with message array format:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
+               "    {\"role\": \"user\", \"content\": \"<user message>\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Recommended for Ollama models with chat capability.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -48,7 +48,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "OpenAI"; }
+    QString description() const override
+    {
+        return "Template for OpenAI models (GPT series):\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
+               "    {\"role\": \"user\", \"content\": \"<user message>\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Standard Chat API format for OpenAI.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -48,7 +48,18 @@ public:
 
         request["messages"] = messages;
     }
-    QString description() const override { return "chat without tokens"; }
+    QString description() const override
+    {
+        return "Generic template for OpenAI API-compatible services:\n\n"
+               "{\n"
+               "  \"messages\": [\n"
+               "    {\"role\": \"system\", \"content\": \"<system prompt>\"},\n"
+               "    {\"role\": \"user\", \"content\": \"<user message>\"},\n"
+               "    {\"role\": \"assistant\", \"content\": \"<assistant response>\"}\n"
+               "  ]\n"
+               "}\n\n"
+               "Works with any service implementing the OpenAI Chat API specification.";
+    }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
         switch (id) {
@@ -38,8 +38,13 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: "
-               "<|fim_prefix|>%1<|fim_suffix|>%2<|fim_middle|>";
+        return "Template for Qwen models with FIM support:\n\n"
+               "{\n"
+               "  \"prompt\": \"<|fim_prefix|><code prefix><|fim_suffix|><code "
+               "suffix><|fim_middle|>\",\n"
+               "  \"system\": \"<system prompt>\"\n"
+               "}\n\n"
+               "Ideal for code completion with Qwen models.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
@@ -41,8 +41,12 @@ public:
     }
     QString description() const override
     {
-        return "The message will contain the following tokens: "
-               "<fim_prefix>%1<fim_suffix>%2<fim_middle>";
+        return "Template for StarCoder2 with FIM format:\n\n"
+               "{\n"
+               "  \"prompt\": \"<fim_prefix><code prefix><fim_suffix><code suffix><fim_middle>\",\n"
+               "  \"system\": \"<system prompt>\"\n"
+               "}\n\n"
+               "Includes stop words to prevent token duplication.";
     }
     bool isSupportProvider(LLMCore::ProviderID id) const override
     {
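Since the commit title says these descriptions are surfaced in general settings, one plausible way to present them is a read-only text view; the widget wiring below is purely hypothetical and not part of this commit:

#include <QPlainTextEdit>
#include <QString>

// Sketch: show the currently selected template's description() text
// on a settings page (hypothetical function, assumed widget choice).
void showTemplateDescription(QPlainTextEdit *view, const QString &description)
{
    view->setReadOnly(true);
    view->setPlainText(description);
}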