Compare commits


4 Commits

Author SHA1 Message Date
6503887091 Upgrade to version 0.3.9 2024-11-23 21:59:35 +01:00
50087aa744 Change configure part in description
- Replace codellama to qwen models
- Add prefer auto template for ollama provider
2024-11-23 21:55:57 +01:00
4f2dc0c450 feat: Add Ollama auto template for chat 2024-11-23 21:15:34 +01:00
80fe388bdd feat: Add automatic template handling for Ollama models (#43)
* feat: Add automatic template handling for Ollama models

- Add OllamaAutoFim
- Use native Ollama API format when possible
- Remove need for manual template selection for most Ollama models
- Default to model-specific format from Ollama modelfile
- Fallback to manual template selection if needed

This change simplifies configuration by automatically using
the correct template format for each Ollama model.
2024-11-23 19:37:55 +01:00
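
The auto templates lean on Ollama's native API format: the FIM template passes the editor's prefix and suffix straight through as the request's `prompt` and `suffix` fields, and Ollama applies the format from the model's own modelfile. A minimal standalone sketch of the kind of payload this produces, assuming Qt's JSON classes and an illustrative model name:

```
// Illustrative only: builds the kind of request body that
// OllamaAutoFim::prepareRequest() fills in (see templates/Ollama.hpp below).
#include <QJsonDocument>
#include <QJsonObject>
#include <QTextStream>

int main()
{
    // Model name is an example; any Ollama model with FIM support works.
    QJsonObject request{{"model", "qwen2.5-coder:7b"}, {"stream", true}};

    // Native Ollama format: prefix and suffix go straight into the request,
    // so no model-specific FIM tokens have to be configured by hand.
    request["prompt"] = "int add(int a, int b) {\n    return ";
    request["suffix"] = ";\n}\n";

    QTextStream(stdout) << QJsonDocument(request).toJson();
    return 0;
}
```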
10 changed files with 175 additions and 44 deletions

View File

@@ -48,6 +48,8 @@ add_qtc_plugin(QodeAssist
templates/CodeLlamaChat.hpp
templates/Qwen.hpp
templates/StarCoderChat.hpp
templates/Ollama.hpp
templates/BasicChat.hpp
providers/OllamaProvider.hpp providers/OllamaProvider.cpp
providers/LMStudioProvider.hpp providers/LMStudioProvider.cpp
providers/OpenAICompatProvider.hpp providers/OpenAICompatProvider.cpp
@@ -62,5 +64,3 @@ add_qtc_plugin(QodeAssist
chat/NavigationPanel.hpp chat/NavigationPanel.cpp
ConfigurationManager.hpp ConfigurationManager.cpp
)
target_link_libraries(QodeAssist PRIVATE )

View File

@@ -73,10 +73,20 @@ void ClientInterface::sendMessage(const QString &message, bool includeCurrentFil
auto providerName = Settings::generalSettings().caProvider();
auto provider = LLMCore::ProvidersManager::instance().getProviderByName(providerName);
if (!provider) {
LOG_MESSAGE(QString("No provider found with name: %1").arg(providerName));
return;
}
auto templateName = Settings::generalSettings().caTemplate();
auto promptTemplate = LLMCore::PromptTemplateManager::instance().getChatTemplateByName(
templateName);
if (!promptTemplate) {
LOG_MESSAGE(QString("No template found with name: %1").arg(templateName));
return;
}
LLMCore::ContextData context;
context.prefix = message;
context.suffix = "";

View File

@@ -152,10 +152,20 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
auto providerName = Settings::generalSettings().ccProvider();
auto provider = LLMCore::ProvidersManager::instance().getProviderByName(providerName);
if (!provider) {
LOG_MESSAGE(QString("No provider found with name: %1").arg(providerName));
return;
}
auto templateName = Settings::generalSettings().ccTemplate();
auto promptTemplate = LLMCore::PromptTemplateManager::instance().getFimTemplateByName(
templateName);
if (!promptTemplate) {
LOG_MESSAGE(QString("No template found with name: %1").arg(templateName));
return;
}
LLMCore::LLMConfig config;
config.requestType = LLMCore::RequestType::Fim;
config.provider = provider;
@@ -163,10 +173,8 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
config.url = QUrl(
QString("%1%2").arg(Settings::generalSettings().ccUrl(), provider->completionEndpoint()));
config.providerRequest = {{"model", Settings::generalSettings().ccModel()},
{"stream", true},
{"stop",
QJsonArray::fromStringList(config.promptTemplate->stopWords())}};
config.providerRequest = {{"model", Settings::generalSettings().ccModel()}, {"stream", true}};
config.multiLineCompletion = completeSettings.multiLineCompletion();
QString systemPrompt;
@@ -174,8 +182,12 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
systemPrompt.append(completeSettings.systemPrompt());
if (!updatedContext.fileContext.isEmpty())
systemPrompt.append(updatedContext.fileContext);
if (!systemPrompt.isEmpty())
config.providerRequest["system"] = systemPrompt;
config.providerRequest["system"] = systemPrompt;
const auto stopWords = QJsonArray::fromStringList(config.promptTemplate->stopWords());
if (!stopWords.isEmpty())
config.providerRequest["stop"] = stopWords;
config.promptTemplate->prepareRequest(config.providerRequest, updatedContext);
config.provider->prepareRequest(config.providerRequest, LLMCore::RequestType::Fim);

View File

@@ -1,6 +1,6 @@
{
"Name" : "QodeAssist",
"Version" : "0.3.8",
"Version" : "0.3.9",
"CompatVersion" : "${IDE_VERSION_COMPAT}",
"Vendor" : "Petr Mironychev",
"Copyright" : "(C) ${IDE_COPYRIGHT_YEAR} Petr Mironychev, (C) ${IDE_COPYRIGHT_YEAR} The Qt Company Ltd",

View File

@@ -29,8 +29,8 @@ QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides
- Side and Bottom panels
- Support for multiple LLM providers:
- Ollama
- LM Studio
- OpenAI-compatible local providers
- LM Studio (experimental)
- OpenAI-compatible providers (experimental)
- Extensive library of model-specific templates
- Custom template support
- Easy configuration and model selection
@@ -56,13 +56,17 @@ QodeAssist is an AI-powered coding assistant plugin for Qt Creator. It provides
2. Install [Ollama](https://ollama.com). Make sure to review the system requirements before installation.
3. Install language models in Ollama via the terminal. For example, you can run:
For suggestions:
For standard computers (minimum 8GB RAM):
```
ollama run codellama:7b-code
ollama run qwen2.5-coder:7b
```
For chat:
For better performance (16GB+ RAM):
```
ollama run codellama:7b-instruct
ollama run qwen2.5-coder:14b
```
For high-end systems (32GB+ RAM):
```
ollama run qwen2.5-coder:32b
```
4. Download the QodeAssist plugin build that matches your Qt Creator version.
5. Launch Qt Creator and install the plugin:
@@ -73,38 +77,32 @@ ollama run codellama:7b-instruct
## Configure Plugin
<details>
<summary>Configure plugins: (click to expand)</summary>
<img src="https://github.com/user-attachments/assets/00ad980f-b470-48eb-9aaa-077783d38798" width="600" alt="Configure QodeAssist">
</details>
QodeAssist comes with default settings that should work immediately after installing a language model. The plugin is pre-configured to use Ollama with standard templates, so you may only need to verify the settings.
1. Open Qt Creator settings
1. Open Qt Creator settings (Edit > Preferences on Linux/Windows, Qt Creator > Preferences on macOS)
2. Navigate to the "Qode Assist" tab
3. Select "General" page
4. Choose your LLM provider (e.g., Ollama)
5. Select the installed model by the "Select Model" button
- For LM Studio you will see current loaded model
6. Choose the prompt template that corresponds to your model
7. Apply the settings
3. On the "General" page, verify:
- Ollama is selected as your LLM provider
- The URL is set to http://localhost:11434
- Your installed model appears in the model selection
- The prompt template is Ollama Auto FIM
4. Click Apply if you made any changes
You're all set! QodeAssist is now ready to use in Qt Creator.
[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/P5P412V96G)
## Supported LLM Providers
QodeAssist currently supports the following LLM (Large Language Model) providers:
- [Ollama](https://ollama.com)
- [LM Studio](https://lmstudio.ai)
- OpenAI compatible providers
- [LM Studio](https://lmstudio.ai) (experimental)
- OpenAI compatible providers (experimental)
## Recommended Models:
QodeAssist has been thoroughly tested and optimized for use with the following language models:
- Llama
- Qwen2.5-coder
- CodeLlama
- StarCoder2
- DeepSeek-Coder-V2
- Qwen-2.5
### Ollama:
### For autocomplete(FIM)
@@ -122,13 +120,6 @@ ollama run qwen2.5-coder:7b-instruct
ollama run deepseek-coder-v2
```
### LM Studio:
Use the same models listed above for Ollama.
Please note that while these models have been specifically tested and confirmed to work well with QodeAssist, other models compatible with the supported providers may also work. We encourage users to experiment with different models and report their experiences.
If you've successfully used a model that's not listed here, please let us know by opening an issue or submitting a pull request to update this list.
## QtCreator Version Compatibility
- QtCreator 14.0.2: QodeAssist 0.2.3 - 0.3.x
@@ -191,7 +182,6 @@ If you find QodeAssist helpful, there are several ways you can support the proje
3. **Spread the Word**: Star our GitHub repository and share QodeAssist with your fellow developers.
4. **Financial Support**: If you'd like to support the development financially, you can make a donation using one of the following:
- [![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/P5P412V96G)
- Bitcoin (BTC): `bc1qndq7f0mpnlya48vk7kugvyqj5w89xrg4wzg68t`
- Ethereum (ETH): `0xA5e8c37c94b24e25F9f1f292a01AF55F03099D8D`
- Litecoin (LTC): `ltc1qlrxnk30s2pcjchzx4qrxvdjt5gzuervy5mv0vy`

View File

@@ -49,11 +49,13 @@
#include "providers/OllamaProvider.hpp"
#include "providers/OpenAICompatProvider.hpp"
#include "templates/BasicChat.hpp"
#include "templates/CodeLlamaChat.hpp"
#include "templates/CodeLlamaFim.hpp"
#include "templates/CustomFimTemplate.hpp"
#include "templates/DeepSeekCoderChat.hpp"
#include "templates/DeepSeekCoderFim.hpp"
#include "templates/Ollama.hpp"
#include "templates/Qwen.hpp"
#include "templates/StarCoder2Fim.hpp"
#include "templates/StarCoderChat.hpp"
@@ -99,6 +101,9 @@ public:
templateManager.registerTemplate<Templates::StarCoderChat>();
templateManager.registerTemplate<Templates::QwenChat>();
templateManager.registerTemplate<Templates::QwenFim>();
templateManager.registerTemplate<Templates::OllamaAutoFim>();
templateManager.registerTemplate<Templates::OllamaAutoChat>();
templateManager.registerTemplate<Templates::BasicChat>();
Utils::Icon QCODEASSIST_ICON(
{{":/resources/images/qoderassist-icon.png", Utils::Theme::IconsBaseColor}});

View File

@@ -66,11 +66,11 @@ GeneralSettings::GeneralSettings()
ccProvider.setReadOnly(true);
ccSelectProvider.m_buttonText = TrConstants::SELECT;
initStringAspect(ccModel, Constants::CC_MODEL, TrConstants::MODEL, "codellama:7b-code");
initStringAspect(ccModel, Constants::CC_MODEL, TrConstants::MODEL, "qwen2.5-coder:7b");
ccModel.setHistoryCompleter(Constants::CC_MODEL_HISTORY);
ccSelectModel.m_buttonText = TrConstants::SELECT;
initStringAspect(ccTemplate, Constants::CC_TEMPLATE, TrConstants::TEMPLATE, "CodeLlama FIM");
initStringAspect(ccTemplate, Constants::CC_TEMPLATE, TrConstants::TEMPLATE, "Ollama Auto FIM");
ccTemplate.setReadOnly(true);
ccSelectTemplate.m_buttonText = TrConstants::SELECT;
@@ -87,11 +87,11 @@ GeneralSettings::GeneralSettings()
caProvider.setReadOnly(true);
caSelectProvider.m_buttonText = TrConstants::SELECT;
initStringAspect(caModel, Constants::CA_MODEL, TrConstants::MODEL, "codellama:7b-instruct");
initStringAspect(caModel, Constants::CA_MODEL, TrConstants::MODEL, "qwen2.5-coder:7b");
caModel.setHistoryCompleter(Constants::CA_MODEL_HISTORY);
caSelectModel.m_buttonText = TrConstants::SELECT;
initStringAspect(caTemplate, Constants::CA_TEMPLATE, TrConstants::TEMPLATE, "CodeLlama Chat");
initStringAspect(caTemplate, Constants::CA_TEMPLATE, TrConstants::TEMPLATE, "Ollama Auto Chat");
caTemplate.setReadOnly(true);
caSelectTemplate.m_buttonText = TrConstants::SELECT;

templates/BasicChat.hpp (new file, 49 lines)
View File

@@ -0,0 +1,49 @@
/*
* Copyright (C) 2024 Petr Mironychev
*
* This file is part of QodeAssist.
*
* QodeAssist is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* QodeAssist is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
*/
#pragma once
#include <QJsonArray>
#include "llmcore/PromptTemplate.hpp"
namespace QodeAssist::Templates {
class BasicChat : public LLMCore::PromptTemplate
{
public:
LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
QString name() const override { return "Basic Chat"; }
QString promptTemplate() const override { return {}; }
QStringList stopWords() const override { return QStringList(); }
void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QJsonArray messages = request["messages"].toArray();
QJsonObject newMessage;
newMessage["role"] = "user";
newMessage["content"] = context.prefix;
messages.append(newMessage);
request["messages"] = messages;
}
};
} // namespace QodeAssist::Templates
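
A hypothetical usage sketch for this template, not part of the commit: the caller supplies the base request and the user's message in `context.prefix`, and `prepareRequest()` appends it to the `messages` array. This compiles only inside the QodeAssist source tree:

```
#include <QJsonDocument>
#include <QJsonObject>
#include <QTextStream>

#include "templates/BasicChat.hpp"

int main()
{
    QodeAssist::Templates::BasicChat chat;
    QJsonObject request{{"model", "qwen2.5-coder:7b"}, {"stream", true}};

    LLMCore::ContextData context;
    context.prefix = "Explain RAII in C++.";
    context.suffix = "";

    // Appends {"role": "user", "content": ...} to the "messages" array.
    chat.prepareRequest(request, context);
    QTextStream(stdout) << QJsonDocument(request).toJson();
    return 0;
}
```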

View File

@@ -19,7 +19,8 @@
#pragma once
#include <QtCore/qjsonarray.h>
#include <QJsonArray>
#include "llmcore/PromptTemplate.hpp"
namespace QodeAssist::Templates {

templates/Ollama.hpp (new file, 64 lines)
View File

@@ -0,0 +1,64 @@
/*
* Copyright (C) 2024 Petr Mironychev
*
* This file is part of QodeAssist.
*
* QodeAssist is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* QodeAssist is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
*/
#pragma once
#include <QJsonArray>
#include "llmcore/PromptTemplate.hpp"
namespace QodeAssist::Templates {
class OllamaAutoFim : public LLMCore::PromptTemplate
{
public:
LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Fim; }
QString name() const override { return "Ollama Auto FIM"; }
QString promptTemplate() const override { return {}; }
QStringList stopWords() const override { return QStringList(); }
void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
request["prompt"] = context.prefix;
request["suffix"] = context.suffix;
}
};
class OllamaAutoChat : public LLMCore::PromptTemplate
{
public:
LLMCore::TemplateType type() const override { return LLMCore::TemplateType::Chat; }
QString name() const override { return "Ollama Auto Chat"; }
QString promptTemplate() const override { return {}; }
QStringList stopWords() const override { return QStringList(); }
void prepareRequest(QJsonObject &request, const LLMCore::ContextData &context) const override
{
QJsonArray messages = request["messages"].toArray();
QJsonObject newMessage;
newMessage["role"] = "user";
newMessage["content"] = context.prefix;
messages.append(newMessage);
request["messages"] = messages;
}
};
} // namespace QodeAssist::Templates
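
For orientation, a sketch of how these templates plug into the managers touched earlier in this diff. The registration and lookup calls mirror the qodeassist and LLMClientInterface hunks above; the manager header path is an assumption:

```
#include "llmcore/PromptTemplateManager.hpp" // assumed header path
#include "templates/Ollama.hpp"

void registerOllamaAutoTemplates()
{
    auto &manager = LLMCore::PromptTemplateManager::instance();
    manager.registerTemplate<QodeAssist::Templates::OllamaAutoFim>();
    manager.registerTemplate<QodeAssist::Templates::OllamaAutoChat>();
}

// Lookup then happens by the template's name() string, as in
// handleCompletion():
//   auto fim = LLMCore::PromptTemplateManager::instance()
//                  .getFimTemplateByName("Ollama Auto FIM");
//   if (fim)
//       fim->prepareRequest(request, context);
```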