Compare commits

...

13 Commits

SHA1 Message Date
57bec94ee4 Version 0.2.3, Update to QtCreator 14.0.2 2024-10-03 16:44:22 +02:00
1760a2d5ff Upgrade QtCreator to 14.0.2
Add no print support

Test disabling qt6 print component

Clean unnecessary code

Add library
2024-10-03 15:30:16 +02:00
1649a246e1 Version 0.2.2
- Improve chat history
- Fix system prompt
- Fix ollama model livetime
- Fix providers and prompt default value
2024-10-02 22:32:59 +02:00
0ab4b51520 Upgrade plugin version 2024-10-02 22:28:52 +02:00
d235d0fcdf Fix providers and prompt default value 2024-10-02 22:25:53 +02:00
1cbde3d55b Fix ollama model livetime 2024-10-02 21:44:32 +02:00
9903ac8f7b Fix system prompt for FIM 2024-10-02 21:44:15 +02:00
fbe363689f Change message history 2024-10-01 00:26:28 +02:00
d7f0cc92e6 Version 0.2.1
- Fix getting models list from providers
- Fix clearing message history in chat
- Remove unnecessary logs
2024-09-24 09:31:40 +02:00
8311db5b08 Removed unessary logs 2024-09-24 09:28:53 +02:00
cd6dd94cd2 Upgrade version to 0.2.1 2024-09-24 09:20:26 +02:00
b559a17407 Fix getting models list 2024-09-24 08:59:26 +02:00
9745997952 Fix clear message history 2024-09-23 23:54:36 +02:00
22 changed files with 186 additions and 86 deletions

View File

@@ -4,8 +4,8 @@ on: [push]
env:
PLUGIN_NAME: QodeAssist
QT_VERSION: 6.7.2
QT_CREATOR_VERSION: 14.0.0
QT_VERSION: 6.7.3
QT_CREATOR_VERSION: 14.0.2
QT_CREATOR_SNAPSHOT: NO
MACOS_DEPLOYMENT_TARGET: "11.0"
CMAKE_VERSION: "3.29.6"
@@ -100,7 +100,7 @@ jobs:
COMMAND sudo apt update
)
execute_process(
COMMAND sudo apt install libgl1-mesa-dev
COMMAND sudo apt install libgl1-mesa-dev libcups2-dev
RESULT_VARIABLE result
)
if (NOT result EQUAL 0)

View File

@@ -10,7 +10,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CXX_EXTENSIONS OFF)
find_package(QtCreator REQUIRED COMPONENTS Core)
find_package(Qt6 COMPONENTS Widgets REQUIRED)
find_package(Qt6 COMPONENTS Core Gui Widgets Network REQUIRED)
add_qtc_plugin(QodeAssist
PLUGIN_DEPENDS

View File

@@ -250,9 +250,6 @@ QString DocumentContextReader::getInstructions() const
{
QString instructions;
if (Settings::contextSettings().useSpecificInstructions())
instructions += getSpecificInstructions();
if (Settings::contextSettings().useFilePathInContext())
instructions += getLanguageAndFileInfo();

View File

@@ -30,6 +30,7 @@
#include "PromptTemplateManager.hpp"
#include "QodeAssistUtils.hpp"
#include "core/LLMRequestConfig.hpp"
#include "settings/ContextSettings.hpp"
#include "settings/GeneralSettings.hpp"
namespace QodeAssist {
@@ -159,6 +160,9 @@ void LLMClientInterface::handleCompletion(const QJsonObject &request)
{"stop",
QJsonArray::fromStringList(config.promptTemplate->stopWords())}};
if (Settings::contextSettings().useSpecificInstructions())
config.providerRequest["system"] = Settings::contextSettings().specificInstractions();
config.promptTemplate->prepareRequest(config.providerRequest, updatedContext);
config.provider->prepareRequest(config.providerRequest);
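
With this change, the user's system prompt travels as a dedicated "system" field on the FIM request instead of being prepended to the context by DocumentContextReader (see the instructions removal above). A minimal sketch of the request assembly, with placeholder values standing in for the settings aspects and the active template's stop words:

```
#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Base fields, as in handleCompletion(); model name and stop words
    // are placeholders for what the settings and template provide.
    QJsonObject providerRequest{
        {"model", "codellama:7b-code"},
        {"stop", QJsonArray{"<PRE>", "<SUF>", "<MID>"}}};

    // When "Use System Prompt" is enabled, the prompt is attached before
    // prepareRequest() runs on the template and the provider.
    const bool useSystemPrompt = true;
    if (useSystemPrompt)
        providerRequest["system"] = "You are an expert C++ assistant."; // placeholder

    qDebug().noquote() << QJsonDocument(providerRequest).toJson();
    return 0;
}
```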

View File

@@ -1,6 +1,6 @@
{
"Name" : "QodeAssist",
"Version" : "0.2.0",
"Version" : "0.2.3",
"CompatVersion" : "${IDE_VERSION_COMPAT}",
"Vendor" : "Petr Mironychev",
"Copyright" : "(C) ${IDE_COPYRIGHT_YEAR} Petr Mironychev, (C) ${IDE_COPYRIGHT_YEAR} The Qt Company Ltd",

View File

@@ -53,10 +53,10 @@ const char AUTO_COMPLETION_CHAR_THRESHOLD[] = "QodeAssist.autoCompletionCharThre
const char AUTO_COMPLETION_TYPING_INTERVAL[] = "QodeAssist.autoCompletionTypingInterval";
const char MAX_FILE_THRESHOLD[] = "QodeAssist.maxFileThreshold";
const char OLLAMA_LIVETIME[] = "QodeAssist.ollamaLivetime";
const char SPECIFIC_INSTRUCTIONS[] = "QodeAssist.specificInstractions";
const char SYSTEM_PROMPT[] = "QodeAssist.systemPrompt";
const char MULTILINE_COMPLETION[] = "QodeAssist.multilineCompletion";
const char API_KEY[] = "QodeAssist.apiKey";
const char USE_SPECIFIC_INSTRUCTIONS[] = "QodeAssist.useSpecificInstructions";
const char USE_SYSTEM_PROMPT[] = "QodeAssist.useSystemPrompt";
const char USE_FILE_PATH_IN_CONTEXT[] = "QodeAssist.useFilePathInContext";
const char CUSTOM_JSON_TEMPLATE[] = "QodeAssist.customJsonTemplate";
const char USE_PROJECT_CHANGES_CACHE[] = "QodeAssist.useProjectChangesCache";

View File

@@ -65,9 +65,15 @@ If you've successfully used a model that's not listed here, please let us know b
1. Install QtCreator 14.0
2. Install [Ollama](https://ollama.com). Make sure to review the system requirements before installation.
3. Install a language model in Ollama. For example, you can run:
3. Install a language models in Ollama. For example, you can run:
For suggestions:
```
ollama run starcoder2:7b
ollama run codellama:7b-code
```
For chat:
```
ollama run codellama:7b-instruct
```
4. Download the QodeAssist plugin.
5. Launch Qt Creator and install the plugin:

View File

@@ -31,6 +31,51 @@
namespace QodeAssist::Chat {
int ChatHistory::estimateTokenCount(const QString &text) const
{
return text.length() / 4;
}
void ChatHistory::addMessage(ChatMessage::Role role, const QString &content)
{
int tokenCount = estimateTokenCount(content);
m_messages.append({role, content, tokenCount});
m_totalTokens += tokenCount;
trim();
}
void ChatHistory::clear()
{
m_messages.clear();
m_totalTokens = 0;
}
QVector<ChatMessage> ChatHistory::getMessages() const
{
return m_messages;
}
QString ChatHistory::getSystemPrompt() const
{
return m_systemPrompt;
}
void ChatHistory::setSystemPrompt(const QString &prompt)
{
m_systemPrompt = prompt;
}
void ChatHistory::trim()
{
while (m_messages.size() > MAX_HISTORY_SIZE || m_totalTokens > MAX_TOKENS) {
if (!m_messages.isEmpty()) {
m_totalTokens -= m_messages.first().tokenCount;
m_messages.removeFirst();
} else {
break;
}
}
}
ChatClientInterface::ChatClientInterface(QObject *parent)
: QObject(parent)
, m_requestHandler(new LLMRequestHandler(this))
@@ -51,15 +96,10 @@ ChatClientInterface::ChatClientInterface(QObject *parent)
}
});
// QJsonObject systemMessage;
// systemMessage["role"] = "system";
// systemMessage["content"] = "You are a helpful C++ and QML programming assistant.";
// m_chatHistory.append(systemMessage);
m_chatHistory.setSystemPrompt("You are a helpful C++ and QML programming assistant.");
}
ChatClientInterface::~ChatClientInterface()
{
}
ChatClientInterface::~ChatClientInterface() = default;
void ChatClientInterface::sendMessage(const QString &message)
{
@@ -79,14 +119,11 @@ void ChatClientInterface::sendMessage(const QString &message)
QJsonObject providerRequest;
providerRequest["model"] = Settings::generalSettings().chatModelName();
providerRequest["stream"] = true;
providerRequest["messages"] = m_chatHistory;
providerRequest["messages"] = prepareMessagesForRequest();
chatTemplate->prepareRequest(providerRequest, context);
chatProvider->prepareRequest(providerRequest);
m_chatHistory = providerRequest["messages"].toArray();
LLMConfig config;
config.requestType = RequestType::Chat;
config.provider = chatProvider;
@@ -99,55 +136,57 @@ void ChatClientInterface::sendMessage(const QString &message)
request["id"] = QUuid::createUuid().toString();
m_accumulatedResponse.clear();
m_pendingMessage = message;
m_chatHistory.addMessage(ChatMessage::Role::User, message);
m_requestHandler->sendLLMRequest(config, request);
}
void ChatClientInterface::clearMessages()
{
m_chatHistory.clear();
m_accumulatedResponse.clear();
logMessage("Chat history cleared");
}
QVector<ChatMessage> ChatClientInterface::getChatHistory() const
{
return m_chatHistory.getMessages();
}
void ChatClientInterface::handleLLMResponse(const QString &response, bool isComplete)
{
m_accumulatedResponse += response;
logMessage("Accumulated response: " + m_accumulatedResponse);
if (isComplete) {
logMessage("Message completed. Final response: " + m_accumulatedResponse);
emit messageReceived(m_accumulatedResponse.trimmed());
QJsonObject assistantMessage;
assistantMessage["role"] = "assistant";
assistantMessage["content"] = m_accumulatedResponse.trimmed();
m_chatHistory.append(assistantMessage);
m_pendingMessage.clear();
m_chatHistory.addMessage(ChatMessage::Role::Assistant, m_accumulatedResponse.trimmed());
m_accumulatedResponse.clear();
trimChatHistory();
}
}
void ChatClientInterface::trimChatHistory()
QJsonArray ChatClientInterface::prepareMessagesForRequest() const
{
int maxTokens = 4000;
int totalTokens = 0;
QJsonArray newHistory;
QJsonArray messages;
if (!m_chatHistory.isEmpty()
&& m_chatHistory.first().toObject()["role"].toString() == "system") {
newHistory.append(m_chatHistory.first());
}
messages.append(QJsonObject{{"role", "system"}, {"content", m_chatHistory.getSystemPrompt()}});
for (int i = m_chatHistory.size() - 1; i >= 0; --i) {
QJsonObject message = m_chatHistory[i].toObject();
int messageTokens = message["content"].toString().length() / 4;
if (totalTokens + messageTokens > maxTokens) {
for (const auto &message : m_chatHistory.getMessages()) {
QString role;
switch (message.role) {
case ChatMessage::Role::User:
role = "user";
break;
case ChatMessage::Role::Assistant:
role = "assistant";
break;
default:
continue;
}
newHistory.prepend(message);
totalTokens += messageTokens;
messages.append(QJsonObject{{"role", role}, {"content", message.content}});
}
m_chatHistory = newHistory;
return messages;
}
} // namespace QodeAssist::Chat
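
The wire format is now rebuilt from ChatHistory on every send: prepareMessagesForRequest() always emits the stored system prompt first, then replays the user/assistant turns in order, skipping any System-role history entries via the switch's default: continue. A sketch of the array it produces for a short exchange (message text illustrative):

```
#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    QJsonArray messages;
    // The stored system prompt always becomes the first entry.
    messages.append(QJsonObject{
        {"role", "system"},
        {"content", "You are a helpful C++ and QML programming assistant."}});
    // History turns follow in order; System-role entries are skipped.
    messages.append(QJsonObject{{"role", "user"},
                                {"content", "How do I read a file in Qt?"}});
    messages.append(QJsonObject{{"role", "assistant"},
                                {"content", "Use QFile together with QTextStream..."}});

    qDebug().noquote() << QJsonDocument(messages).toJson();
    return 0;
}
```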

View File

@@ -20,12 +20,41 @@
#pragma once
#include <QObject>
#include <QtCore/qjsonarray.h>
#include <QString>
#include <QVector>
#include "QodeAssistData.hpp"
#include "core/LLMRequestHandler.hpp"
namespace QodeAssist::Chat {
struct ChatMessage
{
enum class Role { System, User, Assistant };
Role role;
QString content;
int tokenCount;
};
class ChatHistory
{
public:
void addMessage(ChatMessage::Role role, const QString &content);
void clear();
QVector<ChatMessage> getMessages() const;
QString getSystemPrompt() const;
void setSystemPrompt(const QString &prompt);
void trim();
private:
QVector<ChatMessage> m_messages;
QString m_systemPrompt;
int m_totalTokens = 0;
static const int MAX_HISTORY_SIZE = 50;
static const int MAX_TOKENS = 4000;
int estimateTokenCount(const QString &text) const;
};
class ChatClientInterface : public QObject
{
Q_OBJECT
@@ -35,6 +64,8 @@ public:
~ChatClientInterface();
void sendMessage(const QString &message);
void clearMessages();
QVector<ChatMessage> getChatHistory() const;
signals:
void messageReceived(const QString &message);
@@ -42,12 +73,11 @@ signals:
private:
void handleLLMResponse(const QString &response, bool isComplete);
void trimChatHistory();
QJsonArray prepareMessagesForRequest() const;
LLMRequestHandler *m_requestHandler;
QString m_accumulatedResponse;
QString m_pendingMessage;
QJsonArray m_chatHistory;
ChatHistory m_chatHistory;
};
} // namespace QodeAssist::Chat
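
The header pins down the history policy: at most 50 messages and roughly 4000 estimated tokens, where a token is approximated as four characters, trimmed oldest-first on every addMessage(). A standard-library re-expression of that behaviour, for illustration only (the plugin's class uses QString/QVector with the same constants):

```
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Standard-library re-expression of ChatHistory's trimming policy.
struct Msg
{
    std::string content;
    int tokenCount;
};

int main()
{
    const std::size_t MAX_HISTORY_SIZE = 50;
    const int MAX_TOKENS = 4000;
    std::vector<Msg> messages;
    int totalTokens = 0;

    auto addMessage = [&](const std::string &content) {
        int tokens = static_cast<int>(content.size()) / 4; // estimateTokenCount()
        messages.push_back({content, tokens});
        totalTokens += tokens;
        // trim(): evict oldest entries until both budgets hold.
        while (messages.size() > MAX_HISTORY_SIZE || totalTokens > MAX_TOKENS) {
            totalTokens -= messages.front().tokenCount;
            messages.erase(messages.begin());
        }
    };

    for (int i = 0; i < 200; ++i)
        addMessage(std::string(400, 'x')); // ~100 tokens apiece

    // The token budget binds first: 40 messages * 100 tokens = 4000.
    std::cout << messages.size() << " messages, ~" << totalTokens << " tokens\n";
    return 0;
}
```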

View File

@@ -77,7 +77,6 @@ void ChatWidget::sendMessage()
void ChatWidget::receiveMessage(const QString &message)
{
logMessage("Received message: " + message);
updateLastAIMessage(message);
}
@@ -90,7 +89,6 @@ void ChatWidget::receivePartialMessage(const QString &partialMessage)
void ChatWidget::onMessageCompleted()
{
logMessage("Message completed. Final response: " + m_currentAIResponse);
updateLastAIMessage(m_currentAIResponse);
m_currentAIResponse.clear();
scrollToBottom();
@@ -107,14 +105,12 @@ void ChatWidget::addMessage(const QString &message, bool fromUser)
auto prefix = fromUser ? "You: " : "AI: ";
QString timestamp = m_showTimestamp ? QDateTime::currentDateTime().toString("[hh:mm:ss] ") : "";
QString fullMessage = timestamp + prefix + message;
logMessage("Adding message to display: " + fullMessage);
m_chatDisplay->append(fullMessage);
scrollToBottom();
}
void ChatWidget::updateLastAIMessage(const QString &message)
{
logMessage("Updating last AI message: " + message);
QTextCursor cursor = m_chatDisplay->textCursor();
cursor.movePosition(QTextCursor::End);
cursor.movePosition(QTextCursor::StartOfBlock, QTextCursor::KeepAnchor);
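
updateLastAIMessage() is the streaming half of the widget: it selects the last text block and overwrites it with the accumulated response as chunks arrive. A self-contained sketch of the same cursor pattern (widget contents illustrative):

```
#include <QApplication>
#include <QTextCursor>
#include <QTextEdit>

int main(int argc, char *argv[])
{
    QApplication app(argc, argv);
    QTextEdit display;
    display.append("You: hello");
    display.append("AI: partial ans"); // a chunk already on screen

    // Same cursor dance as updateLastAIMessage(): select the last block
    // and overwrite it with the newest accumulated response.
    QTextCursor cursor = display.textCursor();
    cursor.movePosition(QTextCursor::End);
    cursor.movePosition(QTextCursor::StartOfBlock, QTextCursor::KeepAnchor);
    cursor.insertText("AI: partial answer, now complete");

    display.show();
    return app.exec();
}
```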
@@ -134,6 +130,7 @@ void ChatWidget::clear()
{
m_chatDisplay->clear();
m_currentAIResponse.clear();
m_chatClient->clearMessages();
}
void ChatWidget::scrollToBottom()

View File

@@ -40,6 +40,7 @@ public:
void setShowTimestamp(bool show);
void receiveMessage(const QString &message);
private slots:
void sendMessage();
void receivePartialMessage(const QString &partialMessage);

View File

@@ -39,7 +39,7 @@ public:
virtual void prepareRequest(QJsonObject &request) = 0;
virtual bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) = 0;
virtual QList<QString> getInstalledModels(const Utils::Environment &env) = 0;
virtual QList<QString> getInstalledModels(const Utils::Environment &env, const QString &url) = 0;
};
} // namespace QodeAssist::Providers

View File

@@ -25,7 +25,6 @@
#include <QJsonObject>
#include <QNetworkReply>
#include "PromptTemplateManager.hpp"
#include "QodeAssistUtils.hpp"
#include "settings/PresetPromptsSettings.hpp"
@@ -56,9 +55,20 @@ QString LMStudioProvider::chatEndpoint() const
void LMStudioProvider::prepareRequest(QJsonObject &request)
{
auto &settings = Settings::presetPromptsSettings();
QJsonArray messages;
if (request.contains("system")) {
QJsonObject systemMessage{{"role", "system"},
{"content", request.take("system").toString()}};
messages.append(systemMessage);
}
if (request.contains("prompt")) {
QJsonArray messages{
{QJsonObject{{"role", "user"}, {"content", request.take("prompt").toString()}}}};
QJsonObject userMessage{{"role", "user"}, {"content", request.take("prompt").toString()}};
messages.append(userMessage);
}
if (!messages.isEmpty()) {
request["messages"] = std::move(messages);
}
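
Both OpenAI-style providers (this one and OpenAICompatProvider below) now fold an optional top-level "system" field and the "prompt" field into the chat messages array, instead of assuming a prompt is always present. A sketch of the transformation, with illustrative field values:

```
#include <QDebug>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Incoming request as built by the completion pipeline (values illustrative).
    QJsonObject request{{"system", "You are a code completion engine."},
                        {"prompt", "<PRE> int main( <SUF> <MID>"}};

    // Same take()-based rewrite as prepareRequest(): each consumed key is
    // removed from the request and re-emitted as a chat message.
    QJsonArray messages;
    if (request.contains("system"))
        messages.append(QJsonObject{{"role", "system"},
                                    {"content", request.take("system").toString()}});
    if (request.contains("prompt"))
        messages.append(QJsonObject{{"role", "user"},
                                    {"content", request.take("prompt").toString()}});
    if (!messages.isEmpty())
        request["messages"] = std::move(messages);

    qDebug().noquote() << QJsonDocument(request).toJson();
    return 0;
}
```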
@@ -115,11 +125,12 @@ bool LMStudioProvider::handleResponse(QNetworkReply *reply, QString &accumulated
return isComplete;
}
QList<QString> LMStudioProvider::getInstalledModels(const Utils::Environment &env)
QList<QString> LMStudioProvider::getInstalledModels(const Utils::Environment &env,
const QString &url)
{
QList<QString> models;
QNetworkAccessManager manager;
QNetworkRequest request(QUrl(url() + "/v1/models"));
QNetworkRequest request(QString("%1%2").arg(url, "/v1/models"));
QNetworkReply *reply = manager.get(request);
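
The model list is now fetched from the caller-supplied URL rather than the provider's own url(), which lets the FIM and chat configurations point at different hosts. The rest of the function is truncated here, but it follows the same blocking pattern visible in the Ollama hunk below: spin a local event loop until the reply finishes, then parse the payload. A sketch, assuming LM Studio's OpenAI-style response of the form {"data": [{"id": ...}]} and an illustrative URL:

```
#include <QCoreApplication>
#include <QDebug>
#include <QEventLoop>
#include <QJsonArray>
#include <QJsonDocument>
#include <QJsonObject>
#include <QNetworkAccessManager>
#include <QNetworkReply>
#include <QNetworkRequest>

int main(int argc, char *argv[])
{
    QCoreApplication app(argc, argv);

    QNetworkAccessManager manager;
    // URL illustrative; in the plugin it comes from the general settings.
    QNetworkRequest request(QUrl("http://localhost:1234/v1/models"));
    QNetworkReply *reply = manager.get(request);

    // Blocking pattern from the provider code: wait for the reply locally.
    QEventLoop loop;
    QObject::connect(reply, &QNetworkReply::finished, &loop, &QEventLoop::quit);
    loop.exec();

    QList<QString> models;
    if (reply->error() == QNetworkReply::NoError) {
        const auto data = QJsonDocument::fromJson(reply->readAll())
                              .object()["data"].toArray(); // assumed OpenAI-style payload
        for (const auto &model : data)
            models.append(model.toObject()["id"].toString());
    }
    reply->deleteLater();
    qDebug() << models;
    return 0;
}
```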

View File

@@ -34,7 +34,7 @@ public:
QString chatEndpoint() const override;
void prepareRequest(QJsonObject &request) override;
bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
QList<QString> getInstalledModels(const Utils::Environment &env) override;
QList<QString> getInstalledModels(const Utils::Environment &env, const QString &url) override;
};
} // namespace QodeAssist::Providers

View File

@@ -59,7 +59,6 @@ void OllamaProvider::prepareRequest(QJsonObject &request)
QJsonObject options;
options["num_predict"] = settings.maxTokens();
options["keep_alive"] = settings.ollamaLivetime();
options["temperature"] = settings.temperature();
if (settings.useTopP())
options["top_p"] = settings.topP();
@@ -70,6 +69,7 @@ void OllamaProvider::prepareRequest(QJsonObject &request)
if (settings.usePresencePenalty())
options["presence_penalty"] = settings.presencePenalty();
request["options"] = options;
request["keep_alive"] = settings.ollamaLivetime();
}
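
This is the "ollama model livetime" fix from the commit list: Ollama honours keep_alive only as a top-level field of the request body, not as an entry inside options, so the old placement never affected how long the model stayed loaded. The resulting body, sketched with illustrative values:

```
#include <QDebug>
#include <QJsonDocument>
#include <QJsonObject>

int main()
{
    // Values illustrative; the real ones come from the preset prompt settings.
    QJsonObject options{{"num_predict", 512}, {"temperature", 0.2}};

    QJsonObject request{{"model", "codellama:7b-code"},
                        {"options", options},
                        {"keep_alive", "5m"}}; // top level, where Ollama reads it

    qDebug().noquote() << QJsonDocument(request).toJson();
    return 0;
}
```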
bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedResponse)
@@ -123,11 +123,11 @@ bool OllamaProvider::handleResponse(QNetworkReply *reply, QString &accumulatedRe
return isComplete;
}
QList<QString> OllamaProvider::getInstalledModels(const Utils::Environment &env)
QList<QString> OllamaProvider::getInstalledModels(const Utils::Environment &env, const QString &url)
{
QList<QString> models;
QNetworkAccessManager manager;
QNetworkRequest request(QUrl(url() + "/api/tags"));
QNetworkRequest request(QString("%1%2").arg(url, "/api/tags"));
QNetworkReply *reply = manager.get(request);
QEventLoop loop;

View File

@@ -34,7 +34,7 @@ public:
QString chatEndpoint() const override;
void prepareRequest(QJsonObject &request) override;
bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
QList<QString> getInstalledModels(const Utils::Environment &env) override;
QList<QString> getInstalledModels(const Utils::Environment &env, const QString &url) override;
};
} // namespace QodeAssist::Providers

View File

@@ -24,7 +24,6 @@
#include <QJsonObject>
#include <QNetworkReply>
#include "PromptTemplateManager.hpp"
#include "settings/PresetPromptsSettings.hpp"
namespace QodeAssist::Providers {
@@ -54,9 +53,20 @@ QString OpenAICompatProvider::chatEndpoint() const
void OpenAICompatProvider::prepareRequest(QJsonObject &request)
{
auto &settings = Settings::presetPromptsSettings();
QJsonArray messages;
if (request.contains("system")) {
QJsonObject systemMessage{{"role", "system"},
{"content", request.take("system").toString()}};
messages.append(systemMessage);
}
if (request.contains("prompt")) {
QJsonArray messages{
{QJsonObject{{"role", "user"}, {"content", request.take("prompt").toString()}}}};
QJsonObject userMessage{{"role", "user"}, {"content", request.take("prompt").toString()}};
messages.append(userMessage);
}
if (!messages.isEmpty()) {
request["messages"] = std::move(messages);
}
@@ -118,7 +128,8 @@ bool OpenAICompatProvider::handleResponse(QNetworkReply *reply, QString &accumul
return isComplete;
}
QList<QString> OpenAICompatProvider::getInstalledModels(const Utils::Environment &env)
QList<QString> OpenAICompatProvider::getInstalledModels(const Utils::Environment &env,
const QString &url)
{
return QStringList();
}

View File

@@ -34,7 +34,7 @@ public:
QString chatEndpoint() const override;
void prepareRequest(QJsonObject &request) override;
bool handleResponse(QNetworkReply *reply, QString &accumulatedResponse) override;
QList<QString> getInstalledModels(const Utils::Environment &env) override;
QList<QString> getInstalledModels(const Utils::Environment &env, const QString &url) override;
};
} // namespace QodeAssist::Providers

View File

@@ -17,8 +17,6 @@
* along with QodeAssist. If not, see <https://www.gnu.org/licenses/>.
*/
#pragma once
#include "ContextSettings.hpp"
#include <QMessageBox>
@@ -60,11 +58,11 @@ ContextSettings::ContextSettings()
useFilePathInContext.setDefaultValue(false);
useFilePathInContext.setLabelText(Tr::tr("Use File Path in Context"));
useSpecificInstructions.setSettingsKey(Constants::USE_SPECIFIC_INSTRUCTIONS);
useSpecificInstructions.setSettingsKey(Constants::USE_SYSTEM_PROMPT);
useSpecificInstructions.setDefaultValue(true);
useSpecificInstructions.setLabelText(Tr::tr("Use Specific Instructions"));
useSpecificInstructions.setLabelText(Tr::tr("Use System Prompt"));
specificInstractions.setSettingsKey(Constants::SPECIFIC_INSTRUCTIONS);
specificInstractions.setSettingsKey(Constants::SYSTEM_PROMPT);
specificInstractions.setDisplayStyle(Utils::StringAspect::TextEditDisplay);
specificInstractions.setLabelText(
Tr::tr("Instructions: Please keep %1 for languge name, warning, it shouldn't too big"));

View File

@@ -131,6 +131,11 @@ GeneralSettings::GeneralSettings()
loadProviders();
loadPrompts();
llmProviders.setDefaultValue(llmProviders.indexForDisplay("Ollama"));
chatLlmProviders.setDefaultValue(chatLlmProviders.indexForDisplay("Ollama"));
fimPrompts.setDefaultValue(fimPrompts.indexForDisplay("CodeLLama FIM"));
chatPrompts.setDefaultValue(chatPrompts.indexForDisplay("CodeLLama Chat"));
readSettings();
auto fimProviderName = llmProviders.displayForIndex(llmProviders.value());
@@ -237,9 +242,10 @@ void GeneralSettings::showModelSelectionDialog(Utils::StringAspect *modelNameObj
Providers::LLMProvider *provider)
{
Utils::Environment env = Utils::Environment::systemEnvironment();
QString providerUrl = (modelNameObj == &modelName) ? url() : chatUrl();
if (provider) {
QStringList models = provider->getInstalledModels(env);
QStringList models = provider->getInstalledModels(env, providerUrl);
bool ok;
QString selectedModel = QInputDialog::getItem(Core::ICore::dialogParent(),
Tr::tr("Select LLM Model"),
@@ -272,12 +278,12 @@ void GeneralSettings::resetPageToDefaults()
resetAspect(startSuggestionTimer);
resetAspect(autoCompletionTypingInterval);
resetAspect(autoCompletionCharThreshold);
resetAspect(llmProviders);
resetAspect(chatLlmProviders);
resetAspect(fimPrompts);
resetAspect(chatPrompts);
}
int fimIndex = llmProviders.indexForDisplay("Ollama");
llmProviders.setVolatileValue(fimIndex);
int chatIndex = chatLlmProviders.indexForDisplay("Ollama");
chatLlmProviders.setVolatileValue(chatIndex);
modelName.setVolatileValue("");
chatModelName.setVolatileValue("");

View File

@@ -27,7 +27,7 @@ class CodeLlamaFimTemplate : public PromptTemplate
{
public:
TemplateType type() const override { return TemplateType::Fim; }
QString name() const override { return "CodeLlama FIM"; }
QString name() const override { return "CodeLLama FIM"; }
QString promptTemplate() const override { return "%1<PRE> %2 <SUF>%3 <MID>"; }
QStringList stopWords() const override
{

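Only the display name changes here (to "CodeLLama FIM", matching the new defaults registered in GeneralSettings above), but the template line shows how a FIM prompt is assembled. A sketch, assuming %1 carries the instruction/context block, %2 the code before the cursor, and %3 the code after it:

```
#include <QDebug>
#include <QString>

int main()
{
    const QString promptTemplate = "%1<PRE> %2 <SUF>%3 <MID>";
    // Assumed argument roles: instructions/context, code before the
    // cursor, code after the cursor.
    const QString prompt = promptTemplate.arg("// language: C++\n",
                                              "int add(int a, int b) {\n    return ",
                                              ";\n}");
    qDebug().noquote() << prompt;
    return 0;
}
```
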
View File

@@ -28,7 +28,7 @@ class CodeLlamaInstructTemplate : public PromptTemplate
{
public:
TemplateType type() const override { return TemplateType::Chat; }
QString name() const override { return "CodeLlama Chat"; }
QString name() const override { return "CodeLLama Chat"; }
QString promptTemplate() const override { return "[INST] %1 [/INST]"; }
QStringList stopWords() const override { return QStringList() << "[INST]" << "[/INST]"; }
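
The chat template gets the matching rename to "CodeLLama Chat". Its rendering is a single substitution, and the [INST]/[/INST] markers double as stop words so generation halts if the model tries to open a new instruction turn:

```
#include <QDebug>
#include <QString>
#include <QStringList>

int main()
{
    const QString promptTemplate = "[INST] %1 [/INST]";
    qDebug().noquote() << promptTemplate.arg("Explain RAII in C++.");

    // Stop words from the template: generation halts if the model
    // starts emitting a new instruction turn.
    const QStringList stopWords{"[INST]", "[/INST]"};
    qDebug() << stopWords;
    return 0;
}
```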