Mirror of https://github.com/Palm1r/QodeAssist.git (synced 2026-02-12 10:10:44 -05:00)

Compare commits (3 commits)

| SHA1 |
|---|
| dc06ea2ed5 |
| fc5e1adc0d |
| 93e59fb2dc |
Plugin metadata (version bump from 0.4.1 to 0.4.2):

```diff
@@ -1,7 +1,7 @@
 {
     "Id" : "qodeassist",
     "Name" : "QodeAssist",
-    "Version" : "0.4.1",
+    "Version" : "0.4.2",
     "Vendor" : "Petr Mironychev",
     "VendorId" : "petrmironychev",
     "Copyright" : "(C) ${IDE_COPYRIGHT_YEAR} Petr Mironychev, (C) ${IDE_COPYRIGHT_YEAR} The Qt Company Ltd",
```
README.md (24 changed lines)
```diff
@@ -40,6 +40,11 @@
 <img src="https://github.com/user-attachments/assets/255a52f1-5cc0-4ca3-b05c-c4cf9cdbe25a" width="600" alt="QodeAssistPreview">
 </details>
 
+<details>
+<summary>Multiline Code completion: (click to expand)</summary>
+<img src="https://github.com/user-attachments/assets/c18dfbd2-8c54-4a7b-90d1-66e3bb51adb0" width="600" alt="QodeAssistPreview">
+</details>
+
 <details>
 <summary>Chat with LLM models in side panels: (click to expand)</summary>
 <img src="https://github.com/user-attachments/assets/ead5a5d9-b40a-4f17-af05-77fa2bcb3a61" width="600" alt="QodeAssistChat">
```
```diff
@@ -93,8 +98,9 @@ You're all set! QodeAssist is now ready to use in Qt Creator.
 ## Supported LLM Providers
 QodeAssist currently supports the following LLM (Large Language Model) providers:
 - [Ollama](https://ollama.com)
-- [LM Studio](https://lmstudio.ai) (experimental)
-- OpenAI compatible providers (experimental)
+- [LM Studio](https://lmstudio.ai)
+- [OpenRouter](https://openrouter.ai)
+- OpenAI compatible providers
 
 ## Recommended Models:
 QodeAssist has been thoroughly tested and optimized for use with the following language models:
```
````diff
@@ -104,6 +110,14 @@ QodeAssist has been thoroughly tested and optimized for use with the following l
 - StarCoder2
 - DeepSeek-Coder-V2
 
+### Model Types
+
+FIM models (codellama:7b-code, starcoder2:7b, etc.) - Optimized for code completion and suggestions
+
+Instruct models (codellama:7b-instruct, starcoder2:instruct, etc.) - Better for chat assistance, explanations, and code review
+
+For best results, use FIM models with code completion and Instruct models with chat features.
+
 ### Ollama:
 ### For autocomplete(FIM)
 ```
````
````diff
@@ -112,7 +126,7 @@ ollama run starcoder2:7b
 ollama run qwen2.5-coder:7b-base
 ollama run deepseek-coder-v2:16b-lite-base-q3_K_M
 ```
-### For chat
+### For chat and instruct
 ```
 ollama run codellama:7b-instruct
 ollama run starcoder2:instruct
````
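The Model Types note added above explains when to reach for a FIM model versus an instruct model. As a rough illustration of why the two families need different request shapes, here is a minimal Qt-based sketch (not part of the diff): it builds a fill-in-the-middle payload with a prompt/suffix pair and a chat payload with a role-tagged message list. The field names (`model`, `prompt`, `suffix`, `messages`) follow common Ollama/OpenAI-style conventions and are assumptions here, as are the model names.

```cpp
// Illustration only: the two request shapes implied by "Model Types" above.
// Field names ("prompt", "suffix", "messages") follow common Ollama/OpenAI
// conventions and are assumptions, not something the README specifies.
// Requires Qt Core.
#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // FIM (fill-in-the-middle): the model completes between a prefix and a suffix.
    const QJsonObject fimRequest{
        {"model", "qwen2.5-coder:7b-base"},
        {"prompt", "int add(int a, int b) {\n    return "},
        {"suffix", ";\n}\n"}};

    // Instruct/chat: the model answers a role-tagged conversation.
    const QJsonObject chatRequest{
        {"model", "codellama:7b-instruct"},
        {"messages",
         QJsonArray{
             QJsonObject{{"role", "system"}, {"content", "You are a coding assistant."}},
             QJsonObject{{"role", "user"}, {"content", "Explain what std::move does."}}}}};

    // Print both payloads so the structural difference is easy to compare.
    qDebug().noquote() << QJsonDocument(fimRequest).toJson()
                       << QJsonDocument(chatRequest).toJson();
    return 0;
}
```

The `messages` shape is the same one the MessageBuilder change further down writes into the request for chat-style templates.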
```diff
@@ -163,9 +177,7 @@ ollama run deepseek-coder-v2
 - on Mac: Option + Command + Q
 - on Windows: Ctrl + Alt + Q
 - To insert the full suggestion, you can use the TAB key
-- To insert line by line, you can use the "Move cursor word right" shortcut:
-  - On Mac: Option + Right Arrow
-  - On Windows: Alt + Right Arrow
+- To insert a word of the suggestion, you can use Alt + Right Arrow on Windows/Linux, or Option + Right Arrow on Mac
 
 ## Troubleshooting
 
```
MessageBuilder implementation (the member function definitions now live out of line; the matching header change is in the next hunk):

```diff
@@ -1,3 +1,92 @@
+/*
+ * Copyright (C) 2024 Petr Mironychev
+ *
+ * This file is part of QodeAssist.
+ *
+ * QodeAssist is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * QodeAssist is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
+ */
+
 #include "MessageBuilder.hpp"
 
-namespace QodeAssist::LLMCore {} // namespace QodeAssist::LLMCore
+QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addSystemMessage(
+    const QString &content)
+{
+    m_systemMessage = content;
+    return *this;
+}
+
+QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addUserMessage(
+    const QString &content)
+{
+    m_messages.append({MessageRole::User, content});
+    return *this;
+}
+
+QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addSuffix(
+    const QString &content)
+{
+    m_suffix = content;
+    return *this;
+}
+
+QodeAssist::LLMCore::MessageBuilder &QodeAssist::LLMCore::MessageBuilder::addtTokenizer(
+    PromptTemplate *promptTemplate)
+{
+    m_promptTemplate = promptTemplate;
+    return *this;
+}
+
+QString QodeAssist::LLMCore::MessageBuilder::roleToString(MessageRole role) const
+{
+    switch (role) {
+    case MessageRole::System:
+        return ROLE_SYSTEM;
+    case MessageRole::User:
+        return ROLE_USER;
+    case MessageRole::Assistant:
+        return ROLE_ASSISTANT;
+    default:
+        return ROLE_USER;
+    }
+}
+
+void QodeAssist::LLMCore::MessageBuilder::saveTo(QJsonObject &request, ProvidersApi api)
+{
+    if (!m_promptTemplate) {
+        return;
+    }
+
+    ContextData context{
+        m_messages.isEmpty() ? QString() : m_messages.last().content, m_suffix, m_systemMessage};
+
+    if (api == ProvidersApi::Ollama) {
+        if (m_promptTemplate->type() == TemplateType::Fim) {
+            m_promptTemplate->prepareRequest(request, context);
+        } else {
+            QJsonArray messages;
+
+            messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
+            messages.append(QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
+            request["messages"] = messages;
+            m_promptTemplate->prepareRequest(request, context);
+        }
+    } else if (api == ProvidersApi::OpenAI) {
+        QJsonArray messages;
+
+        messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
+        messages.append(QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
+        request["messages"] = messages;
+        m_promptTemplate->prepareRequest(request, context);
+    }
+}
```
MessageBuilder header (the inline definitions are reduced to declarations, matching the implementation hunk above):

```diff
@@ -47,76 +47,17 @@ struct Message
 class MessageBuilder
 {
 public:
-    MessageBuilder &addSystemMessage(const QString &content)
-    {
-        m_systemMessage = content;
-        return *this;
-    }
+    MessageBuilder &addSystemMessage(const QString &content);
 
-    MessageBuilder &addUserMessage(const QString &content)
-    {
-        m_messages.append({MessageRole::User, content});
-        return *this;
-    }
+    MessageBuilder &addUserMessage(const QString &content);
 
-    MessageBuilder &addSuffix(const QString &content)
-    {
-        m_suffix = content;
-        return *this;
-    }
+    MessageBuilder &addSuffix(const QString &content);
 
-    MessageBuilder &addtTokenizer(PromptTemplate *promptTemplate)
-    {
-        m_promptTemplate = promptTemplate;
-        return *this;
-    }
+    MessageBuilder &addtTokenizer(PromptTemplate *promptTemplate);
 
-    QString roleToString(MessageRole role) const
-    {
-        switch (role) {
-        case MessageRole::System:
-            return ROLE_SYSTEM;
-        case MessageRole::User:
-            return ROLE_USER;
-        case MessageRole::Assistant:
-            return ROLE_ASSISTANT;
-        default:
-            return ROLE_USER;
-        }
-    }
+    QString roleToString(MessageRole role) const;
 
-    void saveTo(QJsonObject &request, ProvidersApi api)
-    {
-        if (!m_promptTemplate) {
-            return;
-        }
-
-        if (api == ProvidersApi::Ollama) {
-            ContextData context{
-                m_messages.isEmpty() ? QString() : m_messages.last().content,
-                m_suffix,
-                m_systemMessage};
-
-            if (m_promptTemplate->type() == TemplateType::Fim) {
-                m_promptTemplate->prepareRequest(request, context);
-            } else {
-                QJsonArray messages;
-
-                messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
-                messages.append(
-                    QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
-                request["messages"] = messages;
-                m_promptTemplate->prepareRequest(request, {});
-            }
-        } else if (api == ProvidersApi::OpenAI) {
-            QJsonArray messages;
-
-            messages.append(QJsonObject{{"role", "system"}, {"content", m_systemMessage}});
-            messages.append(QJsonObject{{"role", "user"}, {"content", m_messages.last().content}});
-            request["messages"] = messages;
-            m_promptTemplate->prepareRequest(request, {});
-        }
-    }
+    void saveTo(QJsonObject &request, ProvidersApi api);
 
 private:
     QString m_systemMessage;
```
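Taken together, the two hunks above split MessageBuilder into a declaration-only header and an out-of-line implementation without changing its fluent interface. The sketch below shows how a caller might drive it; it is a hedged example that assumes the plugin's headers are on the include path, and the include paths, the concrete `PromptTemplate` instance, and the model name are placeholders rather than values taken from the diff.

```cpp
// Hedged usage sketch for the refactored MessageBuilder. Include paths, the
// concrete prompt template, and the model name are assumptions, not part of
// the diff above.
#include <QByteArray>
#include <QJsonDocument>
#include <QJsonObject>

#include "MessageBuilder.hpp"   // QodeAssist::LLMCore::MessageBuilder (path assumed)
#include "PromptTemplate.hpp"   // QodeAssist::LLMCore::PromptTemplate (path assumed)

using namespace QodeAssist::LLMCore;

QByteArray buildChatRequest(PromptTemplate *chatTemplate)
{
    QJsonObject request;
    request["model"] = "codellama:7b-instruct";   // illustrative model name

    MessageBuilder builder;
    builder.addSystemMessage("You are a helpful C++ assistant.")
        .addUserMessage("Explain what this function does.")
        .addtTokenizer(chatTemplate)              // attaches the prompt template (existing method name)
        .saveTo(request, ProvidersApi::OpenAI);   // writes the "messages" array, then applies the template

    return QJsonDocument(request).toJson();
}
```

On the OpenAI-compatible path, `saveTo()` appends the system message and the latest user message as a role-tagged `messages` array and then hands the request to the prompt template via `prepareRequest()`; on the Ollama path it does the same unless the template is a FIM template, in which case the template alone shapes the request.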