♻️ refactor: Rework current templates and add new ones

Add Alpaca, Llama3, Llama2, ChatML templates
This commit is contained in:
Petr Mironychev
2024-11-26 00:28:27 +01:00
parent 36d5242a1f
commit 1261f913bb
17 changed files with 263 additions and 119 deletions

View File

@ -24,33 +24,6 @@
namespace QodeAssist::Templates {
// Chat-style prompt template for Qwen models: wraps the code prefix in an
// Instruction/Response scaffold and appends it as a user turn to the
// request's "messages" array.
class QwenChat : public LLMCore::PromptTemplate
{
public:
    QString name() const override { return "Qwen Chat"; }

    LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }

    // %1 is substituted with the code prefix in prepareRequest().
    QString promptTemplate() const override { return "### Instruction:\n%1\n### Response:\n"; }

    // Tokens that terminate generation for this template.
    QStringList stopWords() const override
    {
        return {"### Instruction:", "### Response:", "\n\n### ", "<|EOT|>"};
    }

    // Builds the user message from the context prefix and appends it to the
    // existing chat history carried in request["messages"].
    void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
    {
        QJsonObject userTurn{{"role", "user"},
                             {"content", promptTemplate().arg(context.prefix)}};
        auto history = request["messages"].toArray();
        history.append(userTurn);
        request["messages"] = history;
    }
};
class QwenFim : public LLMCore::PromptTemplate
{
public:
@ -66,6 +39,11 @@ public:
QString formattedPrompt = promptTemplate().arg(context.prefix, context.suffix);
request["prompt"] = formattedPrompt;
}
QString description() const override
{
return "The message will contain the following tokens: "
"<|fim_prefix|>%1<|fim_suffix|>%2<|fim_middle|>";
}
};
} // namespace QodeAssist::Templates